drm/prime: proper locking+refcounting for obj->dma_buf link
author    Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 14 Aug 2013 22:02:46 +0000 (00:02 +0200)
committer Dave Airlie <airlied@redhat.com>
Wed, 21 Aug 2013 02:58:17 +0000 (12:58 +1000)
The export dma-buf cache is semantically similar to a flink name, so it
makes sense to treat it the same way and remove the name (i.e. the
dma_buf pointer) and its reference when the last gem handle disappears.
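
In rough pseudo-code (a condensed paraphrase of the drm_gem.c hunks
below, not the literal patch) the lifetime rule becomes:

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);	/* drops the flink name */
		if (obj->dma_buf) {			/* drop the cached export */
			dma_buf_put(obj->dma_buf);
			obj->dma_buf = NULL;
		}
	}
	mutex_unlock(&obj->dev->object_name_lock);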

Again we need to be careful, but doubly so: not only could someone race
an export against a gem close ioctl (so we need to recheck
obj->handle_count when assigning the new name), but multiple exports
can also race against each other. Both races are prevented by holding
dev->object_name_lock across the entire section that touches
obj->dma_buf.
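
Condensed, the handle_to_fd export path then looks roughly like this
(the actual patch splits the export step out into
export_and_register_object(), see the drm_prime.c hunks below):

	mutex_lock(&dev->object_name_lock);
	if (obj->dma_buf) {
		dmabuf = obj->dma_buf;		/* reuse the cached export */
		get_dma_buf(dmabuf);
	} else if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);	/* raced with gem_close */
	} else {
		dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
		if (!IS_ERR(dmabuf)) {
			obj->dma_buf = dmabuf;	/* register the new export */
			get_dma_buf(obj->dma_buf);
		}
	}
	mutex_unlock(&dev->object_name_lock);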

With the new scheme we also need to reinstate the obj->dma_buf link at
import time (in case the only reference userspace held in between was
through the dma-buf fd and not through any native gem handle). For
simplicity we don't check whether it's a native object but set up that
link unconditionally - since the new scheme drops the obj->dma_buf
reference when the last handle disappears, we can safely do so.
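
Concretely, right after a successful ->gem_prime_import() and still
under dev->object_name_lock, the import path does roughly:

	if (obj->dma_buf) {
		/* native object we exported earlier, the link must match */
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);	/* the cache holds its own reference */
	}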

To make it clear that this is not just for exported buffers any more,
also rename it from export_dma_buf to dma_buf.

To make sure that no one can race an fd_to_handle or handle_to_fd
against gem_close, we use the same trick as in flink of extending the
dev->object_name_lock critical section. With this change we finally
have a guaranteed 1:1 relationship (at least for native objects)
between gem objects and dma-bufs, even accounting for races (which can
happen since the dma-buf itself holds a reference while it is in
flight).
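
Sketched (condensed from the drm_prime.c fd_to_handle hunk below), the
critical section extension looks like:

	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;	/* drops dev->object_name_lock */
	}
	/* (re)arm obj->dma_buf as described above */
	/* drm_gem_handle_create_tail() unlocks dev->object_name_lock */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);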

This prevents igt/prime_self_import/export-vs-gem_close-race from
Oopsing the kernel. There is still a leak, though, since the per-file
priv dma-buf/handle cache handling is racy. That will be fixed in a
later patch.

v2: Remove the bogus dma_buf_put from the export_and_register_object
failure path if we've raced with the handle count dropping to 0.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_prime.c
include/drm/drmP.h

diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 59f4592910937acc21306296735433891a83a202..2d2401e9c5aea7d814430466e920857cdcfe1484 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -486,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
        if (dev->driver->postclose)
                dev->driver->postclose(dev, file_priv);
 
+
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_prime_destroy_file_private(&file_priv->prime);
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d47aa774d64b3d1fe7e20870f3fc06052494da01..4b3c533be859c80f987206e62a6364930e8c9927 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -195,9 +195,14 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
                drm_prime_remove_buf_handle(&filp->prime,
                                obj->import_attach->dmabuf);
        }
-       if (obj->export_dma_buf) {
+
+       /*
+        * Note: obj->dma_buf can't disappear as long as we still hold a
+        * handle reference in obj->handle_count.
+        */
+       if (obj->dma_buf) {
                drm_prime_remove_buf_handle(&filp->prime,
-                               obj->export_dma_buf);
+                               obj->dma_buf);
        }
 }
 
@@ -231,6 +236,15 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
        }
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+       /* Unbreak the reference cycle if we have an exported dma_buf. */
+       if (obj->dma_buf) {
+               dma_buf_put(obj->dma_buf);
+               obj->dma_buf = NULL;
+       }
+}
+
 static void
 drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 {
@@ -244,8 +258,10 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
        */
 
        mutex_lock(&obj->dev->object_name_lock);
-       if (--obj->handle_count == 0)
+       if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
+               drm_gem_object_exported_dma_buf_free(obj);
+       }
        mutex_unlock(&obj->dev->object_name_lock);
 
        drm_gem_object_unreference_unlocked(obj);
@@ -712,6 +728,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+       WARN_ON(obj->dma_buf);
+
        if (obj->filp)
                fput(obj->filp);
 }
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 3d576018893ab8182894ebbe01679efb6daa90d0..5e543e9264d72dbbde8d3d89a7d1b9cffe7551b3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -193,11 +193,8 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 {
        struct drm_gem_object *obj = dma_buf->priv;
 
-       if (obj->export_dma_buf == dma_buf) {
-               /* drop the reference on the export fd holds */
-               obj->export_dma_buf = NULL;
-               drm_gem_object_unreference_unlocked(obj);
-       }
+       /* drop the reference on the export fd holds */
+       drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_release);
 
@@ -298,6 +295,37 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+                                                 struct drm_gem_object *obj,
+                                                 uint32_t flags)
+{
+       struct dma_buf *dmabuf;
+
+       /* prevent races with concurrent gem_close. */
+       if (obj->handle_count == 0) {
+               dmabuf = ERR_PTR(-ENOENT);
+               return dmabuf;
+       }
+
+       dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+       if (IS_ERR(dmabuf)) {
+               /* normally the created dma-buf takes ownership of the ref,
+                * but if that fails then drop the ref
+                */
+               return dmabuf;
+       }
+
+       /*
+        * Note that callers do not need to clean up the export cache
+        * since the check for obj->handle_count guarantees that someone
+        * will clean it up.
+        */
+       obj->dma_buf = dmabuf;
+       get_dma_buf(obj->dma_buf);
+
+       return dmabuf;
+}
+
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                int *prime_fd)
@@ -313,15 +341,20 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
+               get_dma_buf(dmabuf);
                goto out_have_obj;
        }
 
-       if (obj->export_dma_buf) {
-               dmabuf = obj->export_dma_buf;
+       mutex_lock(&dev->object_name_lock);
+       if (obj->dma_buf) {
+               get_dma_buf(obj->dma_buf);
+               dmabuf = obj->dma_buf;
+               mutex_unlock(&dev->object_name_lock);
                goto out_have_obj;
        }
 
-       dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+       dmabuf = export_and_register_object(dev, obj, flags);
+       mutex_unlock(&dev->object_name_lock);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
@@ -329,14 +362,13 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                ret = PTR_ERR(dmabuf);
                goto out;
        }
-       obj->export_dma_buf = dmabuf;
 
        mutex_lock(&file_priv->prime.lock);
        /* if we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
-                                      obj->export_dma_buf, handle);
+                                      dmabuf, handle);
        if (ret)
                goto fail_put_dmabuf;
 
@@ -349,7 +381,6 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
        return 0;
 
 out_have_obj:
-       get_dma_buf(dmabuf);
        ret = dma_buf_fd(dmabuf, flags);
        if (ret < 0) {
                dma_buf_put(dmabuf);
@@ -365,8 +396,6 @@ fail_rm_handle:
                                           dmabuf);
        mutex_unlock(&file_priv->prime.lock);
 fail_put_dmabuf:
-       /* clear NOT to be checked when releasing dma_buf */
-       obj->export_dma_buf = NULL;
        dma_buf_put(dmabuf);
 out:
        drm_gem_object_unreference_unlocked(obj);
@@ -448,13 +477,22 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                goto out_put;
 
        /* never seen this one, need to import */
+       mutex_lock(&dev->object_name_lock);
        obj = dev->driver->gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
-               goto out_put;
+               goto out_unlock;
+       }
+
+       if (obj->dma_buf) {
+               WARN_ON(obj->dma_buf != dma_buf);
+       } else {
+               obj->dma_buf = dma_buf;
+               get_dma_buf(dma_buf);
        }
 
-       ret = drm_gem_handle_create(file_priv, obj, handle);
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                goto out_put;
@@ -475,6 +513,8 @@ fail:
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+       mutex_unlock(&dev->object_name_lock);
 out_put:
        dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 063eac31b97bec7d07b283cae0120d766e6271e7..a95db49b3f9e0cb63584ce066162878332635128 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -667,8 +667,16 @@ struct drm_gem_object {
 
        void *driver_private;
 
-       /* dma buf exported from this GEM object */
-       struct dma_buf *export_dma_buf;
+       /**
+        * dma_buf - dma buf associated with this GEM object
+        *
+        * Pointer to the dma-buf associated with this gem object (either
+        * through importing or exporting). We break the resulting reference
+        * loop when the last gem handle for this object is released.
+        *
+        * Protected by dev->object_name_lock
+        */
+       struct dma_buf *dma_buf;
 
        /**
         * import_attach - dma buf attachment backing this object