git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
drm/amdgpu: improve sa_bo->fence by kernel fence
authorChunming Zhou <david1.zhou@amd.com>
Wed, 19 Aug 2015 08:41:19 +0000 (16:41 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 25 Aug 2015 14:38:41 +0000 (10:38 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

index 4addac5f676315fce5f3edfdf971d7f06ae39089..80f2ceaf6af6452a8a06045217f07f827ec1f621 100644 (file)
@@ -441,7 +441,7 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
-                                      struct amdgpu_fence **array,
+                                      struct fence **array,
                                       uint32_t count,
                                       bool wait_all,
                                       bool intr,
@@ -654,7 +654,7 @@ struct amdgpu_sa_bo {
        struct amdgpu_sa_manager        *manager;
        unsigned                        soffset;
        unsigned                        eoffset;
-       struct amdgpu_fence             *fence;
+       struct fence                    *fence;
 };
 
 /*
@@ -696,7 +696,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
                                struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
                           struct amdgpu_semaphore **semaphore,
-                          struct amdgpu_fence *fence);
+                          struct fence *fence);
 
 /*
  * Synchronization
@@ -717,7 +717,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
                      struct amdgpu_ring *ring);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-                     struct amdgpu_fence *fence);
+                     struct fence *fence);
 
 /*
  * GART structures, functions & helpers
index ae014fcf524e3d2ccb8c95b4e69dea802a41278d..9a87372c3c792f35e8ef5184b9183f251569bc1f 100644 (file)
@@ -836,30 +836,30 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
        return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
-static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
 {
        int idx;
-       struct amdgpu_fence *fence;
+       struct fence *fence;
 
        for (idx = 0; idx < count; ++idx) {
                fence = fences[idx];
                if (fence) {
-                       if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+                       if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                                return true;
                }
        }
        return false;
 }
 
-static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
+static bool amdgpu_test_signaled_all(struct fence **fences, uint32_t count)
 {
        int idx;
-       struct amdgpu_fence *fence;
+       struct fence *fence;
 
        for (idx = 0; idx < count; ++idx) {
                fence = fences[idx];
                if (fence) {
-                       if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+                       if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                                return false;
                }
        }
@@ -885,7 +885,7 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_device *adev = fence->ring->adev;
 
-       return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
+       return amdgpu_fence_wait_multiple(adev, &f, 1, false, intr, t);
 }
 
 /**
@@ -902,7 +902,7 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
  * If wait_all is false, it will return when any fence is signaled or timeout.
  */
 signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
-                                      struct amdgpu_fence **array,
+                                      struct fence **array,
                                       uint32_t count,
                                       bool wait_all,
                                       bool intr,
@@ -910,7 +910,7 @@ signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
 {
        long idx = 0;
        struct amdgpu_wait_cb *cb;
-       struct amdgpu_fence *fence;
+       struct fence *fence;
 
        BUG_ON(!array);
 
@@ -924,7 +924,7 @@ signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
                fence = array[idx];
                if (fence) {
                        cb[idx].task = current;
-                       if (fence_add_callback(&fence->base,
+                       if (fence_add_callback(fence,
                                        &cb[idx].base, amdgpu_fence_wait_cb)) {
                                /* The fence is already signaled */
                                if (wait_all)
@@ -967,7 +967,7 @@ fence_rm_cb:
        for (idx = 0; idx < count; ++idx) {
                fence = array[idx];
                if (fence)
-                       fence_remove_callback(&fence->base, &cb[idx].base);
+                       fence_remove_callback(fence, &cb[idx].base);
        }
 
 err_free_cb:
index 1c237f5e3365aacd468f3ab3b2ff09ea6be567f3..13c5978ac69b900928f3d4a4c7728c1519ad3cb4 100644 (file)
@@ -93,8 +93,8 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-       amdgpu_sync_free(adev, &ib->sync, ib->fence);
-       amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+       amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+       amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
        amdgpu_fence_unref(&ib->fence);
 }
 
index 238465a9ac5564ebeaa1e142a07871557e96ed39..6ea18dcec561624bf534dda206719691e7a4f04d 100644 (file)
@@ -193,7 +193,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                            unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                              struct amdgpu_sa_bo **sa_bo,
-                             struct amdgpu_fence *fence);
+                             struct fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                         struct seq_file *m);
index 4597899e9758d58d01e6dd6010d5dfac0e5d2d28..b7cbaa9d532e420cf2dadd57ebf7c1e3d0e39f8b 100644 (file)
@@ -139,6 +139,20 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
        return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+       struct amdgpu_fence *a_fence;
+       struct amd_sched_fence *s_fence;
+
+       s_fence = to_amd_sched_fence(f);
+       if (s_fence)
+               return s_fence->entity->scheduler->ring_id;
+       a_fence = to_amdgpu_fence(f);
+       if (a_fence)
+               return a_fence->ring->idx;
+       return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@ static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
-       amdgpu_fence_unref(&sa_bo->fence);
+       fence_put(sa_bo->fence);
        kfree(sa_bo);
 }
 
@@ -161,7 +175,7 @@ static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
-                   !fence_is_signaled(&sa_bo->fence->base)) {
+                   !fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
@@ -246,7 +260,7 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-                                  struct amdgpu_fence **fences,
+                                  struct fence **fences,
                                   unsigned *tries)
 {
        struct amdgpu_sa_bo *best_bo = NULL;
@@ -275,7 +289,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);
 
-               if (!fence_is_signaled(&sa_bo->fence->base)) {
+               if (!fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }
@@ -299,7 +313,8 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
        }
 
        if (best_bo) {
-               ++tries[best_bo->fence->ring->idx];
+               uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+               ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;
 
                /* we knew that this one is signaled,
@@ -315,7 +330,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
 {
-       struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+       struct fence *fences[AMDGPU_MAX_RINGS];
        unsigned tries[AMDGPU_MAX_RINGS];
        int i, r;
        signed long t;
@@ -373,7 +388,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-                      struct amdgpu_fence *fence)
+                      struct fence *fence)
 {
        struct amdgpu_sa_manager *sa_manager;
 
@@ -383,10 +398,11 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 
        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
-       if (fence && !fence_is_signaled(&fence->base)) {
-               (*sa_bo)->fence = amdgpu_fence_ref(fence);
-               list_add_tail(&(*sa_bo)->flist,
-                             &sa_manager->flist[fence->ring->idx]);
+       if (fence && !fence_is_signaled(fence)) {
+               uint32_t idx;
+               (*sa_bo)->fence = fence_get(fence);
+               idx = amdgpu_sa_get_ring_from_fence(fence);
+               list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
@@ -413,8 +429,16 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);
                if (i->fence) {
-                       seq_printf(m, " protected by 0x%016llx on ring %d",
-                                  i->fence->seq, i->fence->ring->idx);
+                       struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+                       struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+                       if (a_fence)
+                               seq_printf(m, " protected by 0x%016llx on ring %d",
+                                          a_fence->seq, a_fence->ring->idx);
+                       if (s_fence)
+                               seq_printf(m, " protected by 0x%016llx on ring %d",
+                                          s_fence->v_seq,
+                                          s_fence->entity->scheduler->ring_id);
+
                }
                seq_printf(m, "\n");
        }
index d6d41a42ab6548bd9734aebc5d6f6f941d9a32c5..ff3ca52ec6fe5543ea85ed4a31e2c2dc2538a83f 100644 (file)
@@ -87,7 +87,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
 
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
                           struct amdgpu_semaphore **semaphore,
-                          struct amdgpu_fence *fence)
+                          struct fence *fence)
 {
        if (semaphore == NULL || *semaphore == NULL) {
                return;
index 7cb711fc1ee29490df558cc60cdc5a278d6ef135..ee68eebfded1043b28a9b774701b437f1720fb4f 100644 (file)
@@ -234,7 +234,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
                      struct amdgpu_sync *sync,
-                     struct amdgpu_fence *fence)
+                     struct fence *fence)
 {
        unsigned i;
 
index dd3415d2e45dcbb2f3cba5fa1ca6688ef779cfd5..d7c02e1a309e230392165ebd14a4304cb902f241 100644 (file)
@@ -1042,7 +1042,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
        }
 
        amdgpu_ring_unlock_commit(ring);
-       amdgpu_sync_free(adev, &sync, *fence);
+       amdgpu_sync_free(adev, &sync, &(*fence)->base);
 
        return 0;
 }