git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Feb 2010 21:01:39 +0000 (13:01 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Feb 2010 21:01:39 +0000 (13:01 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel:
  drm/i915: Fix leak of relocs along do_execbuffer error path
  drm/i915: slow acpi_lid_open() causes flickering - V2
  drm/i915: Disable SR when more than one pipe is enabled
  drm/i915: page flip support for Ironlake
  drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop
  drm/i915: Add support for SDVO composite TV
  drm/i915: don't trigger ironlake vblank interrupt at irq install
  drm/i915: handle non-flip pending case when unpinning the scanout buffer
  drm/i915: Fix the device info of Pineview
  drm/i915: enable vblank interrupt on ironlake
  drm/i915: Prevent use of uninitialized pointers along error path.
  drm/i915: disable hotplug detect before Ironlake CRT detect

1  2 
drivers/gpu/drm/i915/i915_gem.c

index dda787aafcc626ce064c3d59daf183c588fb5b69,be0fd1a633212367dd8f5926e1f42df7f127c740..b4c8c0230689038ba4c841ea4ce48b038a83c585
@@@ -277,7 -277,7 +277,7 @@@ i915_gem_shmem_pread_fast(struct drm_de
  
        mutex_lock(&dev->struct_mutex);
  
 -      ret = i915_gem_object_get_pages(obj);
 +      ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;
  
@@@ -321,24 -321,40 +321,24 @@@ fail_unlock
        return ret;
  }
  
 -static inline gfp_t
 -i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
 -{
 -      return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
 -}
 -
 -static inline void
 -i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
 -{
 -      mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
 -}
 -
  static int
  i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
  {
        int ret;
  
 -      ret = i915_gem_object_get_pages(obj);
 +      ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
  
        /* If we've insufficient memory to map in the pages, attempt
         * to make some space by throwing out some old buffers.
         */
        if (ret == -ENOMEM) {
                struct drm_device *dev = obj->dev;
 -              gfp_t gfp;
  
                ret = i915_gem_evict_something(dev, obj->size);
                if (ret)
                        return ret;
  
 -              gfp = i915_gem_object_get_page_gfp_mask(obj);
 -              i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
 -              ret = i915_gem_object_get_pages(obj);
 -              i915_gem_object_set_page_gfp_mask (obj, gfp);
 +              ret = i915_gem_object_get_pages(obj, 0);
        }
  
        return ret;
@@@ -774,7 -790,7 +774,7 @@@ i915_gem_shmem_pwrite_fast(struct drm_d
  
        mutex_lock(&dev->struct_mutex);
  
 -      ret = i915_gem_object_get_pages(obj);
 +      ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
                goto fail_unlock;
  
@@@ -2214,8 -2230,7 +2214,8 @@@ i915_gem_evict_something(struct drm_dev
  }
  
  int
 -i915_gem_object_get_pages(struct drm_gem_object *obj)
 +i915_gem_object_get_pages(struct drm_gem_object *obj,
 +                        gfp_t gfpmask)
  {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count, i;
        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
 -              page = read_mapping_page(mapping, i, NULL);
 +              page = read_cache_page_gfp(mapping, i,
 +                                         mapping_gfp_mask (mapping) |
 +                                         __GFP_COLD |
 +                                         gfpmask);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        i915_gem_object_put_pages(obj);
@@@ -2567,7 -2579,7 +2567,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_mm_node *free_space;
 -      bool retry_alloc = false;
 +      gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
        int ret;
  
        if (obj_priv->madv != I915_MADV_WILLNEED) {
        DRM_INFO("Binding object of size %zd at 0x%08x\n",
                 obj->size, obj_priv->gtt_offset);
  #endif
 -      if (retry_alloc) {
 -              i915_gem_object_set_page_gfp_mask (obj,
 -                                                 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
 -      }
 -      ret = i915_gem_object_get_pages(obj);
 -      if (retry_alloc) {
 -              i915_gem_object_set_page_gfp_mask (obj,
 -                                                 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
 -      }
 +      ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
                        ret = i915_gem_evict_something(dev, obj->size);
                        if (ret) {
                                /* now try to shrink everyone else */
 -                              if (! retry_alloc) {
 -                                  retry_alloc = true;
 -                                  goto search_free;
 +                              if (gfpmask) {
 +                                      gfpmask = 0;
 +                                      goto search_free;
                                }
  
                                return ret;
@@@ -3564,6 -3584,9 +3564,9 @@@ i915_gem_put_relocs_to_user(struct drm_
        uint32_t reloc_count = 0, i;
        int ret = 0;
  
+       if (relocs == NULL)
+           return 0;
        for (i = 0; i < buffer_count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
                int unwritten;
@@@ -3653,7 -3676,7 +3656,7 @@@ i915_gem_do_execbuffer(struct drm_devic
        struct drm_gem_object *batch_obj;
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
-       struct drm_i915_gem_relocation_entry *relocs;
+       struct drm_i915_gem_relocation_entry *relocs = NULL;
        int ret = 0, ret2, i, pinned = 0;
        uint64_t exec_offset;
        uint32_t seqno, flush_domains, reloc_index;
                if (object_list[i] == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                   exec_list[i].handle, i);
+                       /* prevent error path from reading uninitialized data */
+                       args->buffer_count = i + 1;
                        ret = -EBADF;
                        goto err;
                }
                if (obj_priv->in_execbuffer) {
                        DRM_ERROR("Object %p appears more than once in object list\n",
                                   object_list[i]);
+                       /* prevent error path from reading uninitialized data */
+                       args->buffer_count = i + 1;
                        ret = -EBADF;
                        goto err;
                }
@@@ -3926,6 -3953,7 +3933,7 @@@ err
  
        mutex_unlock(&dev->struct_mutex);
  
+ pre_mutex_err:
        /* Copy the updated relocations out regardless of current error
         * state.  Failure to update the relocs would mean that the next
         * time userland calls execbuf, it would do so with presumed offset
                        ret = ret2;
        }
  
- pre_mutex_err:
        drm_free_large(object_list);
        kfree(cliprects);
  
@@@ -4926,7 -4953,7 +4933,7 @@@ void i915_gem_detach_phys_object(struc
        if (!obj_priv->phys_obj)
                return;
  
 -      ret = i915_gem_object_get_pages(obj);
 +      ret = i915_gem_object_get_pages(obj, 0);
        if (ret)
                goto out;
  
@@@ -4984,7 -5011,7 +4991,7 @@@ i915_gem_attach_phys_object(struct drm_
        obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
        obj_priv->phys_obj->cur_obj = obj;
  
 -      ret = i915_gem_object_get_pages(obj);
 +      ret = i915_gem_object_get_pages(obj, 0);
        if (ret) {
                DRM_ERROR("failed to get page list\n");
                goto out;