drm/ttm: cleanup ttm_eu_reserve_buffers handling
author		Maarten Lankhorst <maarten.lankhorst@canonical.com>
		Tue, 15 Jan 2013 13:56:48 +0000 (14:56 +0100)
committer	Maarten Lankhorst <maarten.lankhorst@canonical.com>
		Tue, 15 Jan 2013 13:56:48 +0000 (14:56 +0100)
With the lru lock no longer required for protecting reservations, we
can simply retry with a blocking ttm_bo_reserve_nolru on -EBUSY and
handle all errors in a single path.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
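
To make the new control flow easier to follow before reading the hunks, here is a condensed sketch of the reservation loop as it looks after this patch, pieced together from the added lines and their surrounding context. It is an excerpt, not the complete function: entry, glob, val_seq and the retry label come from the unchanged parts of ttm_eu_reserve_buffers, and the unchanged -EAGAIN body is elided.

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* Non-blocking reserve attempt, still under the lru_lock. */
		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			/* The lru_lock no longer protects the reservation,
			 * so drop it and retry in blocking mode. */
			ttm_eu_del_from_lru_locked(list);
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			spin_lock(&glob->lru_lock);
			if (!ret)
				break;
			if (unlikely(ret != -EAGAIN))
				goto err;
			/* fallthrough */
		case -EAGAIN:
			/* Unchanged: back off, wait for the buffer to
			 * become unreserved, then restart at retry. */
			...
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	/* success path (unchanged) ... */
	return 0;

err:
	/* Single error path shared by -EBUSY, cpu_writers and default. */
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;
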
drivers/gpu/drm/ttm/ttm_execbuf_util.c

index bd37b5cb8553d80102d12d04a3ba5828848fdec3..c7d3236577984a0aab4c99b2e424a88a2504b386 100644
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
        }
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-                                        struct ttm_buffer_object *bo)
-{
-       struct ttm_bo_global *glob = bo->glob;
-       int ret;
-
-       ttm_eu_del_from_lru_locked(list);
-       spin_unlock(&glob->lru_lock);
-       ret = ttm_bo_wait_unreserved(bo, true);
-       spin_lock(&glob->lru_lock);
-       if (unlikely(ret != 0))
-               ttm_eu_backoff_reservation_locked(list);
-       return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
        struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
                ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
-                       ret = ttm_eu_wait_unreserved_locked(list, bo);
-                       if (unlikely(ret != 0)) {
-                               spin_unlock(&glob->lru_lock);
-                               ttm_eu_list_ref_sub(list);
-                               return ret;
-                       }
-                       goto retry_this_bo;
+                       ttm_eu_del_from_lru_locked(list);
+                       spin_unlock(&glob->lru_lock);
+                       ret = ttm_bo_reserve_nolru(bo, true, false,
+                                                  true, val_seq);
+                       spin_lock(&glob->lru_lock);
+                       if (!ret)
+                               break;
+
+                       if (unlikely(ret != -EAGAIN))
+                               goto err;
+
+                       /* fallthrough */
                case -EAGAIN:
                        ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
                                return ret;
                        goto retry;
                default:
-                       ttm_eu_backoff_reservation_locked(list);
-                       spin_unlock(&glob->lru_lock);
-                       ttm_eu_list_ref_sub(list);
-                       return ret;
+                       goto err;
                }
 
                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                       ttm_eu_backoff_reservation_locked(list);
-                       spin_unlock(&glob->lru_lock);
-                       ttm_eu_list_ref_sub(list);
-                       return -EBUSY;
+                       ret = -EBUSY;
+                       goto err;
                }
        }
 
@@ -194,6 +177,12 @@ retry_this_bo:
        ttm_eu_list_ref_sub(list);
 
        return 0;
+
+err:
+       ttm_eu_backoff_reservation_locked(list);
+       spin_unlock(&glob->lru_lock);
+       ttm_eu_list_ref_sub(list);
+       return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
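
For callers, the cleanup does not change the contract: ttm_eu_reserve_buffers still returns 0 with every buffer on the list reserved, or an error with every reservation already backed off. A hedged driver-side usage sketch follows, assuming the list-only signatures of this era (pre-ww_mutex); example_submit and validate_and_emit are placeholder names, not functions from this commit.

	static int example_submit(struct list_head *validate_list, void *sync_obj)
	{
		int ret;

		/* Reserve all ttm_validate_buffer entries atomically.  On
		 * error nothing is left reserved, so just propagate it. */
		ret = ttm_eu_reserve_buffers(validate_list);
		if (unlikely(ret != 0))
			return ret;

		/* Placeholder for driver-specific validation and command
		 * stream emission. */
		ret = validate_and_emit(validate_list);
		if (unlikely(ret != 0)) {
			/* Failure after a successful reserve: drop all
			 * reservations explicitly. */
			ttm_eu_backoff_reservation(validate_list);
			return ret;
		}

		/* Attach the fence and unreserve everything in one call. */
		ttm_eu_fence_buffer_objects(validate_list, sync_obj);
		return 0;
	}
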