Merge remote-tracking branch 'regmap/fix/debugfs' into tmp

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103c508bd9c2d9ebac2281e704d5ff2..8febea6daa0840b08f8aae13418b859204b7bc2d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        if (obj->base.map_list.map)
                return 0;
 
+       dev_priv->mm.shrinker_no_lock_stealing = true;
+
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
-               return ret;
+               goto out;
 
        i915_gem_shrink_all(dev_priv);
-       return drm_gem_create_mmap_offset(&obj->base);
+       ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+       dev_priv->mm.shrinker_no_lock_stealing = false;
+
+       return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
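These first two hunks give i915_gem_object_create_mmap_offset() a single exit path: the new shrinker_no_lock_stealing flag is raised before the first allocation attempt and cleared at "out:" on every return, so a shrinker call that recurses out of drm_gem_create_mmap_offset() under memory pressure backs off instead of stealing struct_mutex from this thread (see the i915_gem_inactive_shrink() hunks further down). The shape of the function after the patch, reconstructed from the hunks above with declarations and the elided comment trimmed:

	if (obj->base.map_list.map)
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? Purge, retry, then shrink it all. */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
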
@@ -1711,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+                 bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
        long count = 0;
@@ -1719,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
                                 gtt_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
@@ -1730,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_unbind(obj) == 0 &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
@@ -1742,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+       return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
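The refactor above folds the old purge loop into __i915_gem_shrink(), parameterised by purgeable_only: i915_gem_purge() keeps its previous behaviour by passing true, while passing false lets a caller reclaim any object whose pages can be dropped, purgeable or not. A minimal userspace sketch of the widened predicate (struct obj and the page counts here are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct obj { bool purgeable; long pages; };

/* Mirrors the "(purgeable || !purgeable_only)" test in the loops above. */
static long shrink(const struct obj *objs, int n, long target,
		   bool purgeable_only)
{
	long count = 0;
	int i;

	for (i = 0; i < n && count < target; i++)
		if (objs[i].purgeable || !purgeable_only)
			count += objs[i].pages;
	return count;
}

int main(void)
{
	const struct obj objs[] = { { true, 4 }, { false, 8 }, { true, 2 } };

	printf("purge:  %ld\n", shrink(objs, 3, 16, true));  /* 6: purgeable only */
	printf("shrink: %ld\n", shrink(objs, 3, 16, false)); /* 14: everything */
	return 0;
}
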
@@ -2890,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *free_space;
+       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
@@ -2936,66 +2949,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL) {
+               i915_gem_object_unpin_pages(obj);
+               return -ENOMEM;
+       }
+
  search_free:
        if (map_and_fenceable)
-               free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
+               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                         size, alignment, obj->cache_level,
+                                                         0, dev_priv->mm.gtt_mappable_end);
        else
-               free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                     size, alignment, obj->cache_level,
-                                                     false);
-
-       if (free_space != NULL) {
-               if (map_and_fenceable)
-                       free_space =
-                               drm_mm_get_block_range_generic(free_space,
-                                                              size, alignment, obj->cache_level,
-                                                              0, dev_priv->mm.gtt_mappable_end,
-                                                              false);
-               else
-                       free_space =
-                               drm_mm_get_block_generic(free_space,
-                                                        size, alignment, obj->cache_level,
-                                                        false);
-       }
-       if (free_space == NULL) {
+               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                size, alignment, obj->cache_level);
+       if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
-               if (ret) {
-                       i915_gem_object_unpin_pages(obj);
-                       return ret;
-               }
+               if (ret == 0)
+                       goto search_free;
 
-               goto search_free;
+               i915_gem_object_unpin_pages(obj);
+               kfree(node);
+               return ret;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                             free_space,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return -EINVAL;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(free_space);
+               drm_mm_put_block(node);
                return ret;
        }
 
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       obj->gtt_space = free_space;
-       obj->gtt_offset = free_space->start;
+       obj->gtt_space = node;
+       obj->gtt_offset = node->start;
 
        fenceable =
-               free_space->size == fence_size &&
-               (free_space->start & (fence_alignment - 1)) == 0;
+               node->size == fence_size &&
+               (node->start & (fence_alignment - 1)) == 0;
 
        mappable =
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
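This hunk replaces the two-step drm_mm_search_free_*()/drm_mm_get_block_*() dance with a node that is kzalloc'ed up front and handed to drm_mm_insert_node_generic() (or the _in_range variant for mappable bindings), which searches and inserts in one call. On failure the code evicts and retries via the search_free label; the node is kfree'd only on the error path, since on success it is published as obj->gtt_space and lives until unbind. A minimal userspace sketch of that allocate/insert/evict/retry shape (insert_node(), evict_something() and the toy sizes are stand-ins, not the drm_mm API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct node { long start, size; };

static long space_left = 8;	/* toy "GTT", sized in pages */

/* Stand-in for drm_mm_insert_node_generic(): search + insert in one call. */
static int insert_node(struct node *n, long size)
{
	if (size > space_left)
		return -ENOSPC;
	n->start = 0;		/* placement detail elided */
	n->size = size;
	space_left -= size;
	return 0;
}

/* Stand-in for i915_gem_evict_something(): pretend enough room was freed. */
static int evict_something(long size)
{
	space_left += size;
	return 0;
}

static struct node *bind(long size)
{
	struct node *node = calloc(1, sizeof(*node));

	if (node == NULL)
		return NULL;
search_free:
	if (insert_node(node, size)) {
		if (evict_something(size) == 0)
			goto search_free;
		free(node);	/* the node is freed only on failure... */
		return NULL;
	}
	return node;		/* ...on success the caller owns it */
}

int main(void)
{
	struct node *n = bind(16);

	if (n != NULL) {
		printf("bound %ld pages at offset %ld\n", n->size, n->start);
		free(n);	/* in the driver this happens at unbind */
	}
	return 0;
}
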
@@ -3528,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       obj->user_pin_count++;
-       obj->pin_filp = file;
-       if (obj->user_pin_count == 1) {
+       if (obj->user_pin_count == 0) {
                ret = i915_gem_object_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
 
+       obj->user_pin_count++;
+       obj->pin_filp = file;
+
        /* XXX - flush the CPU caches for pinned objects
         * as the X server doesn't manage domains yet
         */
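The reordering above fixes a bookkeeping leak: previously user_pin_count was incremented and pin_filp assigned before i915_gem_object_pin() was attempted, so a failed pin left both stale. A minimal sketch of the corrected ordering (object_pin() is a stand-in made to fail so the error path is visible):

#include <stdio.h>

static int user_pin_count;

/* Stand-in for i915_gem_object_pin(); always fails in this sketch. */
static int object_pin(void)
{
	return -1;
}

static int pin_ioctl(void)
{
	if (user_pin_count == 0) {
		int ret = object_pin();
		if (ret)
			return ret;	/* bookkeeping untouched on failure */
	}
	user_pin_count++;		/* reached only once the pin is held */
	return 0;
}

int main(void)
{
	pin_ioctl();
	printf("user_pin_count=%d\n", user_pin_count);	/* prints 0 */
	return 0;
}
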
@@ -4392,11 +4394,17 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return 0;
 
+               if (dev_priv->mm.shrinker_no_lock_stealing)
+                       return 0;
+
                unlock = false;
        }
 
        if (nr_to_scan) {
                nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+               if (nr_to_scan > 0)
+                       nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+                                                       false);
                if (nr_to_scan > 0)
                        i915_gem_shrink_all(dev_priv);
        }
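Taken together with the mmap-offset hunks above, the shrinker now backs off entirely while shrinker_no_lock_stealing is set, and its scan path escalates in three stages: purge purgeable objects first, then reclaim any other unpinned objects via __i915_gem_shrink(..., false), and only if a deficit remains fall back to i915_gem_shrink_all().
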
@@ -4405,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
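The final (truncated) hunk changes the shrinker's population count from bound_list to inactive_list, which is chained through obj->mm_list as in the __i915_gem_shrink() loops above. bound_list also contains active objects, so counting inactive_list presumably gives the VM a more conservative estimate of what the shrinker could actually free.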