/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver's .release function.
 *
 * On import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When this attachment is destroyed,
 * i.e. when the imported object is destroyed, we remove the attachment
 * and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and hand back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */
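/*
 * Userspace view (a minimal sketch, not part of this file; error handling
 * is omitted and the variable names are hypothetical): the exporting process
 * turns a GEM handle into a file descriptor, passes that fd to another
 * process (e.g. over a unix domain socket), and the importing process turns
 * it back into a handle on its own drm fd:
 *
 *	struct drm_prime_handle req = { .handle = handle, .flags = DRM_CLOEXEC };
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
 *	... send req.fd to the importing process ...
 *
 *	struct drm_prime_handle imp = { .fd = received_fd };
 *	ioctl(importer_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 *	... imp.handle now names the imported buffer on importer_fd ...
 */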
struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);
}
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	mutex_lock(&obj->dev->struct_mutex);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR_OR_NULL(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
	return sgt;
}
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}
static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	if (obj->export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(obj);
	}
}
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}
static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}
/* kmap, kmap_atomic and mmap are not implemented by the generic helpers */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
		unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
		unsigned long page_num, void *addr)
{
}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
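/*
 * A minimal wiring sketch (hypothetical, not part of this file; the foo_*
 * callbacks are placeholders a driver would have to provide itself):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *		.gem_prime_pin		= foo_gem_prime_pin,
 *		.gem_prime_get_sg_table	= foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= foo_gem_prime_vmap,
 *		.gem_prime_vunmap	= foo_gem_prime_vunmap,
 *		...
 *	};
 */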
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
		int *prime_fd)
{
	struct drm_gem_object *obj;
	struct dma_buf *buf;
	int ret = 0;
	struct dma_buf *dmabuf;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&file_priv->prime.lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		goto out_have_obj;
	}

	if (obj->export_dma_buf) {
		dmabuf = obj->export_dma_buf;
		goto out_have_obj;
	}

	buf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(buf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref */
		ret = PTR_ERR(buf);
		goto out;
	}
	obj->export_dma_buf = buf;

	/* if we've exported this buffer then cheat and add it to the
	 * import list so we get the correct handle back */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       obj->export_dma_buf, handle);
	if (ret)
		goto out;

	*prime_fd = dma_buf_fd(buf, flags);
	mutex_unlock(&file_priv->prime.lock);
	return 0;

out_have_obj:
	get_dma_buf(dmabuf);
	*prime_fd = dma_buf_fd(dmabuf, flags);
out:
	drm_gem_object_unreference_unlocked(obj);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/* Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf. */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (!ret)
		goto out_put;

	/* never seen this one, need to import */
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_put;
	}

	ret = drm_gem_handle_create(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if the driver attached, we are relying on the free-object
	 * path to detach, which seems ok */
	drm_gem_object_handle_unreference_unlocked(obj);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC, which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}
/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages;
 * the driver is responsible for mapping the pages into the
 * importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
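/*
 * Example (hypothetical, not from this file): a driver whose buffers are
 * backed by a plain page array could implement its gem_prime_get_sg_table
 * callback on top of this helper roughly as
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *		int npages = obj->size >> PAGE_SHIFT;
 *
 *		return drm_prime_pages_to_sg(foo_obj->pages, npages);
 *	}
 *
 * where foo_gem_object and to_foo_gem_object() are driver-specific.
 */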
/* export an sg table into an array of pages and addresses;
 * this is currently required by the TTM driver in order to do correct
 * fault handling */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index = 0;
	dma_addr_t addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* a single sg entry may cover several pages */
		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
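/*
 * Example (hypothetical): a TTM-based driver importing a dma-buf could
 * populate its backing-page and DMA-address arrays from the imported sg
 * table roughly as
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, ttm->pages,
 *					       dma_addrs, ttm->num_pages);
 *
 * where ttm->pages and dma_addrs must each have room for at least
 * ttm->num_pages entries; addrs may be NULL when only the page pointers
 * are needed.
 */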
/* helper function to clean up a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference taken on import */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
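/*
 * Example (hypothetical): a driver's GEM free callback would typically call
 * this helper for imported objects, e.g.
 *
 *	if (obj->import_attach)
 *		drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 * where foo_obj->sgt is driver-specific storage for the sg table returned
 * by dma_buf_map_attachment() at import time.
 */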
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}
int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	mutex_lock(&prime_fpriv->lock);
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
	mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);