X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fdrm_gem.c;h=1ce88c3301a144ff6f66a06912ae8c34868a6b01;hb=d0b2c5334f41bdd18adaa3fbc1f7b5f1daab7eac;hp=603f256152efed25852072a49f32b11c7ddf16cd;hpb=858655116bfc722837e3aec0909b8e9d08f96996;p=karo-tx-linux.git

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..1ce88c3301a1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm;
 
-	spin_lock_init(&dev->object_name_lock);
+	mutex_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
 	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
 	}
 
 	dev->mm_private = mm;
-
-	if (drm_ht_create(&mm->offset_hash, 12)) {
-		kfree(mm);
-		return -ENOMEM;
-	}
-
-	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-		    DRM_FILE_PAGE_OFFSET_SIZE);
+	drm_vma_offset_manager_init(&mm->vma_manager,
+				    DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE);
 
 	return 0;
 }
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
 	struct drm_gem_mm *mm = dev->mm_private;
 
-	drm_mm_takedown(&mm->offset_manager);
-	drm_ht_remove(&mm->offset_hash);
+	drm_vma_offset_manager_destroy(&mm->vma_manager);
 	kfree(mm);
 	dev->mm_private = NULL;
 }
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
-	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+	struct file *filp;
 
-	obj->dev = dev;
-	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-	if (IS_ERR(obj->filp))
-		return PTR_ERR(obj->filp);
+	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
 
-	kref_init(&obj->refcount);
-	atomic_set(&obj->handle_count, 0);
-	obj->size = size;
+	drm_gem_private_object_init(dev, obj, size);
+	obj->filp = filp;
 
 	return 0;
 }
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
-int drm_gem_private_object_init(struct drm_device *dev,
-		struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+				 struct drm_gem_object *obj, size_t size)
 {
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
@@ -161,10 +154,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
 	obj->filp = NULL;
 
 	kref_init(&obj->refcount);
-	atomic_set(&obj->handle_count, 0);
+	obj->handle_count = 0;
 	obj->size = size;
-
-	return 0;
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
@@ -200,16 +191,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
-	if (obj->import_attach) {
-		drm_prime_remove_buf_handle(&filp->prime,
-				obj->import_attach->dmabuf);
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	mutex_lock(&filp->prime.lock);
+	if (obj->dma_buf) {
+		drm_prime_remove_buf_handle_locked(&filp->prime,
+						   obj->dma_buf);
 	}
-	if (obj->export_dma_buf) {
-		drm_prime_remove_buf_handle(&filp->prime,
-				obj->export_dma_buf);
+	mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
+		 */
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	}
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (WARN_ON(obj->handle_count == 0))
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+
+	mutex_lock(&obj->dev->object_name_lock);
+	if (--obj->handle_count == 0) {
+		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
+	}
+	mutex_unlock(&obj->dev->object_name_lock);
+
+	drm_gem_object_unreference_unlocked(obj);
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -253,18 +307,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
-		       struct drm_gem_object *obj,
-		       u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+			   struct drm_gem_object *obj,
+			   u32 *handlep)
 {
 	struct drm_device *dev = obj->dev;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
 	 * allocation under our spinlock.
 	 */
@@ -273,14 +345,17 @@ drm_gem_handle_create(struct drm_file *file_priv,
 	spin_lock(&file_priv->table_lock);
 
 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+	drm_gem_object_reference(obj);
+	obj->handle_count++;
 	spin_unlock(&file_priv->table_lock);
 	idr_preload_end();
-	if (ret < 0)
+	mutex_unlock(&dev->object_name_lock);
+	if (ret < 0) {
+		drm_gem_object_handle_unreference_unlocked(obj);
 		return ret;
+	}
 	*handlep = ret;
-	drm_gem_object_handle_reference(obj);
 
 	if (dev->driver->gem_open_object) {
 		ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +367,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
 	return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		      struct drm_gem_object *obj,
+		      u32 *handlep)
+{
+	mutex_lock(&obj->dev->object_name_lock);
+
+	return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
@@ -306,81 +396,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list = &obj->map_list;
 
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-	drm_mm_put_block(list->file_offset_node);
-	kfree(list->map);
-	list->map = NULL;
+	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
 /**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
  * @obj: obj in question
+ * @size: the virtual size
  *
  * GEM memory mapping works by handing back to userspace a fake mmap offset
  * it can use in a subsequent mmap(2) call. The DRM core code then looks
  * up the object based on the offset and sets up the various memory mapping
  * structures.
  *
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
  */
 int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret;
-
-	/* Set the object up for mmap'ing */
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (!list->map)
-		return -ENOMEM;
-
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->size;
-	map->handle = obj;
+	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+				  size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 
-	/* Get a DRM GEM mmap offset allocated... */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-						    obj->size / PAGE_SIZE, 0, false);
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
-	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-		ret = -ENOSPC;
-		goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB. If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue. But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
 	}
 
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  obj->size / PAGE_SIZE, 0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto out_free_list;
-	}
+	return pages;
 
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		DRM_ERROR("failed to add to map hash\n");
-		goto out_free_mm;
-	}
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
 
-	return 0;
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
 
-out_free_mm:
-	drm_mm_put_block(list->file_offset_node);
-out_free_list:
-	kfree(list->map);
-	list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
 
-	return ret;
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
 }
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
@@ -445,8 +609,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
+	mutex_lock(&dev->object_name_lock);
 	idr_preload(GFP_KERNEL);
-	spin_lock(&dev->object_name_lock);
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		ret = -ENOENT;
+		goto err;
+	}
+
 	if (!obj->name) {
 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
 		if (ret < 0)
@@ -462,8 +632,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	ret = 0;
 
 err:
-	spin_unlock(&dev->object_name_lock);
 	idr_preload_end();
+	mutex_unlock(&dev->object_name_lock);
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
@@ -486,15 +656,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	spin_lock(&dev->object_name_lock);
+	mutex_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
-	if (obj)
+	if (obj) {
 		drm_gem_object_reference(obj);
-	spin_unlock(&dev->object_name_lock);
-	if (!obj)
+	} else {
+		mutex_unlock(&dev->object_name_lock);
 		return -ENOENT;
+	}
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
@@ -553,6 +725,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+	WARN_ON(obj->dma_buf);
+
 	if (obj->filp)
 		fput(obj->filp);
 }
@@ -577,41 +751,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-
-	/* Remove any name for this object */
-	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		idr_remove(&dev->object_name_idr, obj->name);
-		obj->name = 0;
-		spin_unlock(&dev->object_name_lock);
-		/*
-		 * The object name held a reference to this object, drop
-		 * that now.
-		 *
-		 * This cannot be the last reference, since the handle holds one too.
-		 */
-		kref_put(&obj->refcount, drm_gem_object_ref_bug);
-	} else
-		spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -707,8 +846,8 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_local_map *map = NULL;
-	struct drm_hash_item *hash;
+	struct drm_gem_object *obj;
+	struct drm_vma_offset_node *node;
 	int ret = 0;
 
 	if (drm_device_is_unplugged(dev))
@@ -716,21 +855,16 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+					   vma_pages(vma));
+	if (!node) {
 		mutex_unlock(&dev->struct_mutex);
 		return drm_mmap(filp, vma);
 	}
 
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-	if (!map ||
-	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-		ret = -EPERM;
-		goto out_unlock;
-	}
-
-	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+	obj = container_of(node, struct drm_gem_object, vma_node);
+	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
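
---

Usage note (editor's addition, not part of the patch): the exported helpers above are meant to be used together by shmem-backed GEM drivers. Below is a minimal, hypothetical driver-side sketch; the foo_gem_pin_and_map name and the surrounding error handling are illustrative assumptions, while drm_gem_get_pages(), drm_gem_put_pages(), drm_gem_create_mmap_offset() and drm_vma_node_offset_addr() are the interfaces this series provides (kernel ~3.11/3.12 era signatures assumed).

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/*
 * Hypothetical sketch: pin the shmem pages backing @obj and publish a
 * fake mmap offset that userspace can pass to mmap(2). drm_gem_mmap()
 * resolves that offset back to the object through the device's
 * drm_vma_offset_manager, as seen in the hunk above.
 */
static int foo_gem_pin_and_map(struct drm_gem_object *obj, u64 *offset)
{
	struct page **pages;
	int ret;

	/* Pin the shmem backing pages (helper added by this patch). */
	pages = drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Reserve a fake offset in the per-device vma manager. */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		/* Drop the page references taken above. */
		drm_gem_put_pages(obj, pages, false, false);
		return ret;
	}

	/* A real driver would stash @pages for its later unpin path. */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}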