/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

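/* Recover the GEM object that was stashed as the dma-buf's private data
 * when the buffer was exported by i915_gem_prime_export() below.
 */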
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

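/* Called via dma_buf_map_attachment() when an importer needs the backing
 * storage: pin the object's pages, copy the scatterlist so the importer
 * gets an independent mapping, and DMA-map the copy for its device.
 */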
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

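/* Undo i915_gem_map_dma_buf(): unmap the copied scatterlist from the
 * importer's device, free it, and drop our pin on the object's pages.
 */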
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

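/* Map the whole object into the kernel's address space. The write-back
 * mapping pins the object's pages until i915_gem_dmabuf_vunmap().
 */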
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

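/* Release the mapping and the pin taken by i915_gem_dmabuf_vmap(). */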
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

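/* Atomic per-page kmaps are not supported for our exports; these stubs
 * report failure by returning NULL from the map callback.
 */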
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
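
/* Map a single page of the object for CPU access. Only objects backed by
 * struct pages can be kmapped, and the pages stay pinned until kunmap.
 */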
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}

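/* Unmap a page mapped by i915_gem_dmabuf_kmap() and drop the page pin. */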
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}

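/* Let userspace mmap the exported buffer by forwarding to the object's
 * shmem backing file, then redirect vma->vm_file from the dma-buf file
 * to that backing file so the mapping holds the right reference.
 */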
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

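/* Prepare for CPU access by the importer: move the object to the CPU
 * domain, flushing GPU caches as needed, marking it dirty only when the
 * direction implies the CPU will write.
 */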
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

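/* Finish CPU access: move the object back to the GTT domain, flushing
 * any CPU writes out of the cache so they are visible to the GPU.
 */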
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

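/* Export a GEM object as a dma-buf. The object's reservation object is
 * shared with the dma-buf so fences remain visible to importers, and the
 * object backend is given a chance to refuse the export.
 */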
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

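/* Page backend for imported objects: the pages are borrowed from the
 * exporter through our dma-buf attachment rather than allocated locally.
 */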
static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	return dma_buf_map_attachment(obj->base.import_attach,
				      DMA_BIDIRECTIONAL);
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

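/* Import a dma-buf as a GEM object. If the buffer is one of our own
 * exports on this device we can skip the attachment and simply take a
 * reference on the underlying object; otherwise we attach to the dma-buf
 * and wrap it in a fresh GEM object whose pages come from the exporter.
 */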
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif