/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the allocation only has
	 * to be as physically contiguous as possible.
	 */
	if (!(flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
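	/*
	 * DMA_ATTR_NO_KERNEL_MAPPING asks dma_alloc_attrs() not to create a
	 * kernel virtual mapping for the buffer; the value it returns is then
	 * only a cookie to be handed back to dma_free_attrs()/dma_mmap_attrs()
	 * rather than a usable CPU address.
	 */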
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

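		/*
		 * Without an IOMMU the allocation above is physically
		 * contiguous, so the page array can be filled by stepping
		 * through the region one PAGE_SIZE at a time.
		 */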
		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

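		/*
		 * With an IOMMU the buffer does not need to be physically
		 * contiguous. Because DMA_ATTR_NO_KERNEL_MAPPING is set, the
		 * cookie returned by dma_alloc_attrs() on this (ARM IOMMU)
		 * path is the array of page pointers, so it can be stored
		 * directly in buf->pages.
		 */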
		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

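	/*
	 * Build a scatter-gather table describing the pages; it is kept in
	 * the buffer so other parts of the driver (e.g. dma-buf/PRIME
	 * export) can use it without re-deriving the page list.
	 */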
	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else {
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	}
	buf->dma_addr = (dma_addr_t)NULL;

	return ret;
}

static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;
}

struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
						unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;
	return buffer;
}

void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
}

int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{

	/*
	 * allocate a memory region and store the allocation information
	 * (pages, kvaddr and dma_addr) in the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{

	lowlevel_buffer_deallocate(dev, flags, buffer);
}
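
/*
 * Illustrative sketch of the intended call sequence, roughly the way
 * exynos_drm_gem.c drives these helpers when creating a GEM object.
 * The wrapper function below and its error handling are hypothetical;
 * only the exynos_drm_*_buf() calls come from this file. Teardown is
 * the mirror sequence: exynos_drm_free_buf() then exynos_drm_fini_buf().
 */
#if 0
static struct exynos_drm_gem_buf *example_create_buf(struct drm_device *dev,
						unsigned int size,
						unsigned int flags)
{
	struct exynos_drm_gem_buf *buf;

	/* allocate the bookkeeping structure and record the requested size */
	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return NULL;

	/* back it with memory according to the EXYNOS_BO_* flags */
	if (exynos_drm_alloc_buf(dev, buf, flags) < 0) {
		exynos_drm_fini_buf(dev, buf);
		return NULL;
	}

	return buf;
}
#endif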