]> git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/gpu/drm/drm_gem.c
drm/prime: Always add exported buffers to the handle cache
[karo-tx-linux.git] / drivers / gpu / drm / drm_gem.c
index a8ba7da83d45819ccf45dbe5a0170cdc79c9eecf..1ce88c3301a144ff6f66a06912ae8c34868a6b01 100644 (file)
@@ -93,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
        struct drm_gem_mm *mm;
 
-       spin_lock_init(&dev->object_name_lock);
+       mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
 
        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -154,7 +154,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
        obj->filp = NULL;
 
        kref_init(&obj->refcount);
-       atomic_set(&obj->handle_count, 0);
+       obj->handle_count = 0;
        obj->size = size;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -191,14 +191,77 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
-       if (obj->import_attach) {
-               drm_prime_remove_buf_handle(&filp->prime,
-                               obj->import_attach->dmabuf);
+       /*
+        * Note: obj->dma_buf can't disappear as long as we still hold a
+        * handle reference in obj->handle_count.
+        */
+       mutex_lock(&filp->prime.lock);
+       if (obj->dma_buf) {
+               drm_prime_remove_buf_handle_locked(&filp->prime,
+                                                  obj->dma_buf);
+       }
+       mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+       BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+
+       /* Remove any name for this object */
+       if (obj->name) {
+               idr_remove(&dev->object_name_idr, obj->name);
+               obj->name = 0;
+               /*
+                * The object name held a reference to this object, drop
+                * that now.
+                *
+                * This cannot be the last reference, since the handle holds one too.
+                */
+               kref_put(&obj->refcount, drm_gem_object_ref_bug);
+       }
+}
+
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+       /* Unbreak the reference cycle if we have an exported dma_buf. */
+       if (obj->dma_buf) {
+               dma_buf_put(obj->dma_buf);
+               obj->dma_buf = NULL;
        }
-       if (obj->export_dma_buf) {
-               drm_prime_remove_buf_handle(&filp->prime,
-                               obj->export_dma_buf);
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+       if (WARN_ON(obj->handle_count == 0))
+               return;
+
+       /*
+        * Must drop the handle count first as this may be the last
+        * ref, in which case the object would disappear before we
+        * checked for a name.
+        */
+
+       mutex_lock(&obj->dev->object_name_lock);
+       if (--obj->handle_count == 0) {
+               drm_gem_object_handle_free(obj);
+               drm_gem_object_exported_dma_buf_free(obj);
        }
+       mutex_unlock(&obj->dev->object_name_lock);
+
+       drm_gem_object_unreference_unlocked(obj);
 }
 
 /**
@@ -258,18 +321,22 @@ int drm_gem_dumb_destroy(struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_dumb_destroy);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_handle_create_tail - internal function to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-                      struct drm_gem_object *obj,
-                      u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+                          struct drm_gem_object *obj,
+                          u32 *handlep)
 {
        struct drm_device *dev = obj->dev;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
@@ -278,14 +345,17 @@ drm_gem_handle_create(struct drm_file *file_priv,
        spin_lock(&file_priv->table_lock);
 
        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+       drm_gem_object_reference(obj);
+       obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
-       if (ret < 0)
+       mutex_unlock(&dev->object_name_lock);
+       if (ret < 0) {
+               drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
+       }
        *handlep = ret;
 
-       drm_gem_object_handle_reference(obj);
 
        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
@@ -297,6 +367,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
        return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+       mutex_lock(&obj->dev->object_name_lock);
+
+       return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -358,6 +443,109 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+       struct inode *inode;
+       struct address_space *mapping;
+       struct page *p, **pages;
+       int i, npages;
+
+       /* This is the shared memory object that backs the GEM resource */
+       inode = file_inode(obj->filp);
+       mapping = inode->i_mapping;
+
+       /* We already BUG_ON() for non-page-aligned sizes in
+        * drm_gem_object_init(), so we should never hit this unless
+        * driver author is doing something really wrong:
+        */
+       WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (pages == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       gfpmask |= mapping_gfp_mask(mapping);
+
+       for (i = 0; i < npages; i++) {
+               p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+               if (IS_ERR(p))
+                       goto fail;
+               pages[i] = p;
+
+               /* There is a hypothetical issue w/ drivers that require
+                * buffer memory in the low 4GB.. if the pages are un-
+                * pinned, and swapped out, they can end up swapped back
+                * in above 4GB.  If pages are already in memory, then
+                * shmem_read_mapping_page_gfp will ignore the gfpmask,
+                * even if the already in-memory page disobeys the mask.
+                *
+                * It is only a theoretical issue today, because none of
+                * the devices with this limitation can be populated with
+                * enough memory to trigger the issue.  But this BUG_ON()
+                * is here as a reminder in case the problem with
+                * shmem_read_mapping_page_gfp() isn't solved by the time
+                * it does become a real issue.
+                *
+                * See this thread: http://lkml.org/lkml/2011/7/11/238
+                */
+               BUG_ON((gfpmask & __GFP_DMA32) &&
+                               (page_to_pfn(p) >= 0x00100000UL));
+       }
+
+       return pages;
+
+fail:
+       while (i--)
+               page_cache_release(pages[i]);
+
+       drm_free_large(pages);
+       return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+               bool dirty, bool accessed)
+{
+       int i, npages;
+
+       /* We already BUG_ON() for non-page-aligned sizes in
+        * drm_gem_object_init(), so we should never hit this unless
+        * driver author is doing something really wrong:
+        */
+       WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+       npages = obj->size >> PAGE_SHIFT;
+
+       for (i = 0; i < npages; i++) {
+               if (dirty)
+                       set_page_dirty(pages[i]);
+
+               if (accessed)
+                       mark_page_accessed(pages[i]);
+
+               /* Undo the reference we took when populating the table */
+               page_cache_release(pages[i]);
+       }
+
+       drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
@@ -421,8 +609,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOENT;
 
+       mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
-       spin_lock(&dev->object_name_lock);
+       /* prevent races with concurrent gem_close. */
+       if (obj->handle_count == 0) {
+               ret = -ENOENT;
+               goto err;
+       }
+
        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
@@ -438,8 +632,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        ret = 0;
 
 err:
-       spin_unlock(&dev->object_name_lock);
        idr_preload_end();
+       mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
 }
@@ -462,15 +656,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       spin_lock(&dev->object_name_lock);
+       mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
-       if (obj)
+       if (obj) {
                drm_gem_object_reference(obj);
-       spin_unlock(&dev->object_name_lock);
-       if (!obj)
+       } else {
+               mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
+       }
 
-       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;
@@ -529,6 +725,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+       WARN_ON(obj->dma_buf);
+
        if (obj->filp)
            fput(obj->filp);
 }
@@ -553,41 +751,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-       BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-       struct drm_device *dev = obj->dev;
-
-       /* Remove any name for this object */
-       spin_lock(&dev->object_name_lock);
-       if (obj->name) {
-               idr_remove(&dev->object_name_idr, obj->name);
-               obj->name = 0;
-               spin_unlock(&dev->object_name_lock);
-               /*
-                * The object name held a reference to this object, drop
-                * that now.
-               *
-               * This cannot be the last reference, since the handle holds one too.
-                */
-               kref_put(&obj->refcount, drm_gem_object_ref_bug);
-       } else
-               spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = vma->vm_private_data;