/*
 * Source: drivers/gpu/drm/exynos/exynos_drm_dmabuf.c (karo-tx-linux.git)
 * Patch context: "drm/exynos: consider DMA_NONE flag to dmabuf import"
 */
1 /* exynos_drm_dmabuf.c
2  *
3  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11
12 #include <drm/drmP.h>
13 #include <drm/exynos_drm.h>
14 #include "exynos_drm_drv.h"
15 #include "exynos_drm_gem.h"
16
17 #include <linux/dma-buf.h>
18
/*
 * Per-attachment state for a dma-buf attachment to an exynos GEM buffer.
 *
 * @sgt:       scatter/gather table cloned from the exported buffer's table;
 *             owned by this attachment and released at detach time.
 * @dir:       DMA direction the table was mapped with; DMA_NONE means the
 *             table is not DMA-mapped (importer only wanted the page list).
 * @is_mapped: true once map_dma_buf has populated @sgt, so a repeat map
 *             request with the same direction can return the cached table.
 */
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};
24
25 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
26                                         struct device *dev,
27                                         struct dma_buf_attachment *attach)
28 {
29         struct exynos_drm_dmabuf_attachment *exynos_attach;
30
31         exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
32         if (!exynos_attach)
33                 return -ENOMEM;
34
35         exynos_attach->dir = DMA_NONE;
36         attach->priv = exynos_attach;
37
38         return 0;
39 }
40
41 static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
42                                         struct dma_buf_attachment *attach)
43 {
44         struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
45         struct sg_table *sgt;
46
47         if (!exynos_attach)
48                 return;
49
50         sgt = &exynos_attach->sgt;
51
52         if (exynos_attach->dir != DMA_NONE)
53                 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
54                                 exynos_attach->dir);
55
56         sg_free_table(sgt);
57         kfree(exynos_attach);
58         attach->priv = NULL;
59 }
60
61 static struct sg_table *
62                 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
63                                         enum dma_data_direction dir)
64 {
65         struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
66         struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
67         struct drm_device *dev = gem_obj->base.dev;
68         struct exynos_drm_gem_buf *buf;
69         struct scatterlist *rd, *wr;
70         struct sg_table *sgt = NULL;
71         unsigned int i;
72         int nents, ret;
73
74         DRM_DEBUG_PRIME("%s\n", __FILE__);
75
76         /* just return current sgt if already requested. */
77         if (exynos_attach->dir == dir && exynos_attach->is_mapped)
78                 return &exynos_attach->sgt;
79
80         buf = gem_obj->buffer;
81         if (!buf) {
82                 DRM_ERROR("buffer is null.\n");
83                 return ERR_PTR(-ENOMEM);
84         }
85
86         sgt = &exynos_attach->sgt;
87
88         ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
89         if (ret) {
90                 DRM_ERROR("failed to alloc sgt.\n");
91                 return ERR_PTR(-ENOMEM);
92         }
93
94         mutex_lock(&dev->struct_mutex);
95
96         rd = buf->sgt->sgl;
97         wr = sgt->sgl;
98         for (i = 0; i < sgt->orig_nents; ++i) {
99                 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
100                 rd = sg_next(rd);
101                 wr = sg_next(wr);
102         }
103
104         if (dir != DMA_NONE) {
105                 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
106                 if (!nents) {
107                         DRM_ERROR("failed to map sgl with iommu.\n");
108                         sg_free_table(sgt);
109                         sgt = ERR_PTR(-EIO);
110                         goto err_unlock;
111                 }
112         }
113
114         exynos_attach->is_mapped = true;
115         exynos_attach->dir = dir;
116         attach->priv = exynos_attach;
117
118         DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
119
120 err_unlock:
121         mutex_unlock(&dev->struct_mutex);
122         return sgt;
123 }
124
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/*
	 * Intentionally empty: the mapping is cached per attachment so
	 * repeat map calls are cheap.  The real unmap and table free
	 * happen in exynos_gem_detach_dma_buf().
	 */
}
131
132 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
133 {
134         struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
135
136         DRM_DEBUG_PRIME("%s\n", __FILE__);
137
138         /*
139          * exynos_dmabuf_release() call means that file object's
140          * f_count is 0 and it calls drm_gem_object_handle_unreference()
141          * to drop the references that these values had been increased
142          * at drm_prime_handle_to_fd()
143          */
144         if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
145                 exynos_gem_obj->base.export_dma_buf = NULL;
146
147                 /*
148                  * drop this gem object refcount to release allocated buffer
149                  * and resources.
150                  */
151                 drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
152         }
153 }
154
/* kmap_atomic callback: CPU access to individual pages is not implemented. */
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}
162
/* kunmap_atomic callback: no-op since kmap_atomic never maps anything. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}
169
/* kmap callback: CPU access to individual pages is not implemented. */
static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}
177
/* kunmap callback: no-op since kmap never maps anything. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}
183
/*
 * mmap callback: userspace mapping of exported buffers is not supported;
 * -ENOTTY signals the operation is unavailable.
 */
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}
189
/* dma-buf callbacks installed on every buffer exported by this driver. */
static struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= exynos_dmabuf_release,
};
202
203 struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
204                                 struct drm_gem_object *obj, int flags)
205 {
206         struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
207
208         return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
209                                 exynos_gem_obj->base.size, flags);
210 }
211
212 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
213                                 struct dma_buf *dma_buf)
214 {
215         struct dma_buf_attachment *attach;
216         struct sg_table *sgt;
217         struct scatterlist *sgl;
218         struct exynos_drm_gem_obj *exynos_gem_obj;
219         struct exynos_drm_gem_buf *buffer;
220         int ret;
221
222         DRM_DEBUG_PRIME("%s\n", __FILE__);
223
224         /* is this one of own objects? */
225         if (dma_buf->ops == &exynos_dmabuf_ops) {
226                 struct drm_gem_object *obj;
227
228                 exynos_gem_obj = dma_buf->priv;
229                 obj = &exynos_gem_obj->base;
230
231                 /* is it from our device? */
232                 if (obj->dev == drm_dev) {
233                         /*
234                          * Importing dmabuf exported from out own gem increases
235                          * refcount on gem itself instead of f_count of dmabuf.
236                          */
237                         drm_gem_object_reference(obj);
238                         dma_buf_put(dma_buf);
239                         return obj;
240                 }
241         }
242
243         attach = dma_buf_attach(dma_buf, drm_dev->dev);
244         if (IS_ERR(attach))
245                 return ERR_PTR(-EINVAL);
246
247
248         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
249         if (IS_ERR_OR_NULL(sgt)) {
250                 ret = PTR_ERR(sgt);
251                 goto err_buf_detach;
252         }
253
254         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
255         if (!buffer) {
256                 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
257                 ret = -ENOMEM;
258                 goto err_unmap_attach;
259         }
260
261         exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
262         if (!exynos_gem_obj) {
263                 ret = -ENOMEM;
264                 goto err_free_buffer;
265         }
266
267         sgl = sgt->sgl;
268
269         buffer->size = dma_buf->size;
270         buffer->dma_addr = sg_dma_address(sgl);
271
272         if (sgt->nents == 1) {
273                 /* always physically continuous memory if sgt->nents is 1. */
274                 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
275         } else {
276                 /*
277                  * this case could be CONTIG or NONCONTIG type but for now
278                  * sets NONCONTIG.
279                  * TODO. we have to find a way that exporter can notify
280                  * the type of its own buffer to importer.
281                  */
282                 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
283         }
284
285         exynos_gem_obj->buffer = buffer;
286         buffer->sgt = sgt;
287         exynos_gem_obj->base.import_attach = attach;
288
289         DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
290                                                                 buffer->size);
291
292         return &exynos_gem_obj->base;
293
294 err_free_buffer:
295         kfree(buffer);
296         buffer = NULL;
297 err_unmap_attach:
298         dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
299 err_buf_detach:
300         dma_buf_detach(dma_buf, attach);
301         return ERR_PTR(ret);
302 }
303
/* Module metadata. */
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");