Merge branch 'for-airlied' of git://people.freedesktop.org/~mlankhorst/linux into...
author Dave Airlie <airlied@redhat.com>
Fri, 8 Feb 2013 04:02:32 +0000 (14:02 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 8 Feb 2013 04:02:32 +0000 (14:02 +1000)
TTM reservation changes, preparing for the new reservation mutex system; a sketch of the resulting reserve/slowpath retry pattern follows the commit list.

* 'for-airlied' of git://people.freedesktop.org/~mlankhorst/linux:
  drm/ttm: unexport ttm_bo_wait_unreserved
  drm/nouveau: use ttm_bo_reserve_slowpath in validate_init, v2
  drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2
  drm/ttm: add ttm_bo_reserve_slowpath
  drm/ttm: cleanup ttm_eu_reserve_buffers handling
  drm/ttm: remove lru_lock around ttm_bo_reserve
  drm/nouveau: increase reservation sequence every retry
  drm/vmwgfx: always use ttm_bo_is_reserved

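The nouveau change below replaces the old "wait until the buffer is unreserved, then retry everything" loop with the new ttm_bo_reserve_slowpath() call: on -EAGAIN the driver drops the reservations it holds, takes a fresh sequence number, sleeps until the contended buffer can be reserved, and only then retries the list with that buffer already held. A minimal sketch of the pattern, assuming a hypothetical reserve_list() in place of nouveau's validate_init() and a hypothetical unreserve_all_held() in place of validate_fini():

/* Hypothetical stand-in for the driver's cleanup (validate_fini() in
 * nouveau): drops every reservation taken so far. */
static void unreserve_all_held(void);

static int reserve_list(struct ttm_buffer_object **bos, int n,
			atomic_t *seq_src)
{
	struct ttm_buffer_object *res_bo = NULL;
	uint32_t sequence = atomic_add_return(1, seq_src);
	int i, ret;

retry:
	for (i = 0; i < n; i++) {
		if (bos[i] == res_bo) {
			/* Already reserved via the slowpath; keep it. */
			res_bo = NULL;
			continue;
		}

		ret = ttm_bo_reserve(bos[i], true, false, true, sequence);
		if (unlikely(ret)) {
			unreserve_all_held();
			if (ret == -EAGAIN) {
				/* Lost the deadlock-avoidance race: take a
				 * fresh sequence number and sleep until the
				 * contended buffer can be reserved. */
				sequence = atomic_add_return(1, seq_src);
				ret = ttm_bo_reserve_slowpath(bos[i], true,
							      sequence);
				if (!ret) {
					res_bo = bos[i];
					goto retry;
				}
			}
			if (ret)
				return ret;
		}
	}
	return 0;
}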
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/ttm/ttm_bo.c

index 24e0aabda03c47109e869486f2b99fda77e30d33,7fa6882c2942fce07913f6ee7ca9c4061d65b175..d98bee012cab9ef84ae5835b383583af97c73ed1
@@@ -24,6 -24,8 +24,6 @@@
   *
   */
  
 -#include <linux/dma-buf.h>
 -
  #include <subdev/fb.h>
  
  #include "nouveau_drm.h"
@@@ -318,6 -320,7 +318,7 @@@ validate_init(struct nouveau_channel *c
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;
+       struct nouveau_bo *res_bo = NULL;
  
        sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
  retry:
                        return -ENOENT;
                }
                nvbo = gem->driver_private;
+               if (nvbo == res_bo) {
+                       res_bo = NULL;
+                       drm_gem_object_unreference_unlocked(gem);
+                       continue;
+               }
  
                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(drm, "multiple instances of buffer %d on "
                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
-                       if (unlikely(ret == -EAGAIN))
-                               ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-                       drm_gem_object_unreference_unlocked(gem);
+                       if (unlikely(ret == -EAGAIN)) {
+                               sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+                               ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+                                                             sequence);
+                               if (!ret)
+                                       res_bo = nvbo;
+                       }
                        if (unlikely(ret)) {
+                               drm_gem_object_unreference_unlocked(gem);
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(drm, "fail reserve\n");
                                return ret;
                        }
-                       goto retry;
                }
  
                b->user_priv = (uint64_t)(unsigned long)nvbo;
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
+               if (nvbo == res_bo)
+                       goto retry;
        }
  
        return 0;
index 52b20b12c83a3859da1b5fbbc1449fd3d4e907a4,4df47f72214a381a0d537ff170ccdf3a6fb4c5ce..9b07b7d44a58b318eb05727a0545a8325a1a5efd
@@@ -158,7 -158,8 +158,8 @@@ static void ttm_bo_release_list(struct 
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
  }
  
- int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+ static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+                                 bool interruptible)
  {
        if (interruptible) {
                return wait_event_interruptible(bo->event_queue,
                return 0;
        }
  }
- EXPORT_SYMBOL(ttm_bo_wait_unreserved);
  
  void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  {
@@@ -213,14 -213,13 +213,13 @@@ int ttm_bo_del_from_lru(struct ttm_buff
        return put_count;
  }
  
- int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
  {
-       struct ttm_bo_global *glob = bo->glob;
        int ret;
  
-       while (unlikely(atomic_read(&bo->reserved) != 0)) {
+       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
                /**
                 * Deadlock avoidance for multi-bo reserving.
                 */
                if (no_wait)
                        return -EBUSY;
  
-               spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
-               spin_lock(&glob->lru_lock);
  
                if (unlikely(ret))
                        return ret;
        }
  
-       atomic_set(&bo->reserved, 1);
        if (use_sequence) {
+               bool wake_up = false;
                /**
                 * Wake up waiters that may need to recheck for deadlock,
                 * if we decreased the sequence number.
                 */
                if (unlikely((bo->val_seq - sequence < (1 << 31))
                             || !bo->seq_valid))
-                       wake_up_all(&bo->event_queue);
+                       wake_up = true;
  
+               /*
+                * In the worst case, memory ordering may make these two
+                * writes visible in the wrong order. Since we call
+                * wake_up_all in that case, this should not pose a problem:
+                * at worst a waiter sees the old value of val_seq and
+                * spuriously hits -EAGAIN in ttm_bo_reserve. That can only
+                * happen if seq_valid becomes visible before val_seq does,
+                * and it merely costs slightly more cpu time.
+                */
                bo->val_seq = sequence;
                bo->seq_valid = true;
+               if (wake_up)
+                       wake_up_all(&bo->event_queue);
        } else {
                bo->seq_valid = false;
        }
@@@ -289,17 -298,64 +298,64 @@@ int ttm_bo_reserve(struct ttm_buffer_ob
        int put_count = 0;
        int ret;
  
-       spin_lock(&glob->lru_lock);
-       ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-                                   sequence);
-       if (likely(ret == 0))
+       ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+                                  sequence);
+       if (likely(ret == 0)) {
+               spin_lock(&glob->lru_lock);
                put_count = ttm_bo_del_from_lru(bo);
-       spin_unlock(&glob->lru_lock);
+               spin_unlock(&glob->lru_lock);
+               ttm_bo_list_ref_sub(bo, put_count, true);
+       }
  
-       ttm_bo_list_ref_sub(bo, put_count, true);
+       return ret;
+ }
+ 
+ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                 bool interruptible, uint32_t sequence)
+ {
+       bool wake_up = false;
+       int ret;
+       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+               WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+               ret = ttm_bo_wait_unreserved(bo, interruptible);
  
+               if (unlikely(ret))
+                       return ret;
+       }
+       if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+               wake_up = true;
+       /**
+        * Wake up waiters that may need to recheck for deadlock,
+        * if we decreased the sequence number.
+        */
+       bo->val_seq = sequence;
+       bo->seq_valid = true;
+       if (wake_up)
+               wake_up_all(&bo->event_queue);
+       return 0;
+ }
+ 
+ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                           bool interruptible, uint32_t sequence)
+ {
+       struct ttm_bo_global *glob = bo->glob;
+       int put_count, ret;
+       ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+       if (likely(!ret)) {
+               spin_lock(&glob->lru_lock);
+               put_count = ttm_bo_del_from_lru(bo);
+               spin_unlock(&glob->lru_lock);
+               ttm_bo_list_ref_sub(bo, put_count, true);
+       }
        return ret;
  }
+ EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
  
  void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
  {
@@@ -434,7 -490,6 +490,7 @@@ static int ttm_bo_handle_move_mem(struc
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
 +                      *mem = tmp_mem;
                }
  
                goto out_err;
@@@ -511,7 -566,7 +567,7 @@@ static void ttm_bo_cleanup_refs_or_queu
        int ret;
  
        spin_lock(&glob->lru_lock);
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+       ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
  
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
@@@ -604,7 -659,7 +660,7 @@@ static int ttm_bo_cleanup_refs_and_unlo
                        return ret;
  
                spin_lock(&glob->lru_lock);
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
  
                /*
                 * We raced, and lost, someone else holds the reservation now,
@@@ -668,7 -723,14 +724,14 @@@ static int ttm_bo_delayed_delete(struc
                        kref_get(&nentry->list_kref);
                }
  
-               ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+               ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+               if (remove_all && ret) {
+                       spin_unlock(&glob->lru_lock);
+                       ret = ttm_bo_reserve_nolru(entry, false, false,
+                                                  false, 0);
+                       spin_lock(&glob->lru_lock);
+               }
                if (!ret)
                        ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                             !remove_all);
@@@ -816,7 -878,7 +879,7 @@@ static int ttm_mem_evict_first(struct t
  
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
@@@ -1797,7 -1859,7 +1860,7 @@@ static int ttm_bo_swapout(struct ttm_me
  
        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &glob->swap_lru, swap) {
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                if (!ret)
                        break;
        }
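
The deadlock-avoidance test that appears twice above, (bo->val_seq - sequence < (1 << 31)), is an unsigned wraparound comparison: it holds exactly when sequence is the same age as or older than val_seq, even across a 32-bit wrap of the counter. A small standalone illustration (seq_not_newer() is a name invented here):

#include <stdbool.h>
#include <stdint.h>

/* True when 'seq' is the same age as or older than 'val_seq', treating
 * the 32-bit counters as a circular space. Mirrors the test
 * (bo->val_seq - sequence < (1 << 31)) in ttm_bo.c above. */
static bool seq_not_newer(uint32_t val_seq, uint32_t seq)
{
	return val_seq - seq < (1u << 31);
}

/* Examples:
 *   seq_not_newer(10, 5)         -> true  (5 is older)
 *   seq_not_newer(5, 10)         -> false (10 is newer)
 *   seq_not_newer(3, 0xfffffffe) -> true  (counter wrapped; 3 is newer)
 */

In the hunks above the test fires when the sequence number was decreased (the new sequence is not newer than the old val_seq), in which case sleeping reservers are woken via wake_up_all so they can recheck their deadlock-avoidance decision.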