]> git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
[karo-tx-linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_sync.c
index 21accbdd0a1afc0bc494dcafd4db39d44fdbe0a5..068aeaff7183b8227f0a27873af71213ac968e30 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+/*
+ * Per-fence-context bookkeeping entry: the sync object keeps at most one
+ * fence per context in a hash table keyed by fence->context, always
+ * replacing it with the later fence of that context (see amdgpu_sync_fence).
+ */
+struct amdgpu_sync_entry {
+       struct hlist_node       node;   /* link in the sync object's fences hash */
+       struct fence            *fence; /* held reference to the latest fence seen */
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,36 +54,104 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                sync->sync_to[i] = NULL;
 
+       hash_init(sync->fences);
        sync->last_vm_update = NULL;
 }
 
+/*
+ * amdgpu_sync_same_dev - check if fence @f was emitted by device @adev
+ *
+ * Handles both raw amdgpu hardware fences and GPU scheduler fences.
+ * Foreign fences (neither kind) always return false.
+ */
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (a_fence)
+               return a_fence->ring->adev == adev;
+       if (s_fence)
+               /* scheduler's priv pointer is assumed to be the amdgpu_device */
+               return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+       return false;
+}
+
+/*
+ * amdgpu_sync_test_owner - check if fence @f belongs to @owner
+ *
+ * Works for scheduler fences and raw amdgpu fences; fences of any other
+ * type have no owner and always return false.
+ */
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+       if (s_fence)
+               return s_fence->owner == owner;
+       if (a_fence)
+               return a_fence->owner == owner;
+       return false;
+}
+
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
+ * Returns 0 on success or -ENOMEM when a bookkeeping entry cannot be
+ * allocated.
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-                      struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+                     struct fence *f)
 {
+       struct amdgpu_sync_entry *e;
+       struct amdgpu_fence *fence;
        struct amdgpu_fence *other;
+       struct fence *tmp, *later;
 
-       if (!fence)
-               return;
+       if (!f)
+               return 0;
+
+       /* Track the newest VM page-table update fence from this device. */
+       if (amdgpu_sync_same_dev(adev, f) &&
+           amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+               if (sync->last_vm_update) {
+                       tmp = sync->last_vm_update;
+                       /* both fences must be from the same context for the
+                        * seqno comparison below to make sense
+                        */
+                       BUG_ON(f->context != tmp->context);
+                       /* unsigned subtraction picks the later seqno even
+                        * across wraparound
+                        */
+                       later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+                       sync->last_vm_update = fence_get(later);
+                       fence_put(tmp);
+               } else
+                       sync->last_vm_update = fence_get(f);
+       }
+
+       fence = to_amdgpu_fence(f);
+       if (!fence || fence->ring->adev != adev) {
+               /* Foreign fence (other device or non-amdgpu): keep only the
+                * later fence per context in the hash table.
+                */
+               hash_for_each_possible(sync->fences, e, node, f->context) {
+                       struct fence *new;
+                       /* skip hash collisions from other contexts */
+                       if (unlikely(e->fence->context != f->context))
+                               continue;
+                       new = fence_get(fence_later(e->fence, f));
+                       if (new) {
+                               fence_put(e->fence);
+                               e->fence = new;
+                       }
+                       return 0;
+               }
+
+               /* first fence of this context: add a new entry */
+               e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+               if (!e)
+                       return -ENOMEM;
+
+               hash_add(sync->fences, &e->node, f->context);
+               e->fence = fence_get(f);
+               return 0;
+       }
 
+       /* Same-device hardware fence: remember the later fence per ring. */
        other = sync->sync_to[fence->ring->idx];
        sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
                amdgpu_fence_later(fence, other));
        amdgpu_fence_unref(&other);
 
-       if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-               other = sync->last_vm_update;
-               sync->last_vm_update = amdgpu_fence_ref(
-                       amdgpu_fence_later(fence, other));
-               amdgpu_fence_unref(&other);
-       }
+       return 0;
+}
+
+/*
+ * amdgpu_sync_get_owner - retrieve the owner of fence @f
+ *
+ * Returns the owner recorded in a scheduler or raw amdgpu fence, or
+ * AMDGPU_FENCE_OWNER_UNDEFINED for any other fence type.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+       struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+       struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+       if (s_fence)
+               return s_fence->owner;
+       else if (a_fence)
+               return a_fence->owner;
+       return AMDGPU_FENCE_OWNER_UNDEFINED;
+}
 
 /**
@@ -97,7 +170,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 {
        struct reservation_object_list *flist;
        struct fence *f;
-       struct amdgpu_fence *fence;
+       void *fence_owner;
        unsigned i;
        int r = 0;
 
@@ -106,11 +179,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 
        /* always sync to the exclusive fence */
        f = reservation_object_get_excl(resv);
-       fence = f ? to_amdgpu_fence(f) : NULL;
-       if (fence && fence->ring->adev == adev)
-               amdgpu_sync_fence(sync, fence);
-       else if (f)
-               r = fence_wait(f, true);
+       r = amdgpu_sync_fence(adev, sync, f);
 
        flist = reservation_object_get_list(resv);
        if (!flist || r)
@@ -119,20 +188,72 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
                                              reservation_object_held(resv));
-               fence = f ? to_amdgpu_fence(f) : NULL;
-               if (fence && fence->ring->adev == adev) {
-                       if (fence->owner != owner ||
-                           fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-                               amdgpu_sync_fence(sync, fence);
-               } else if (f) {
-                       r = fence_wait(f, true);
-                       if (r)
-                               break;
+               if (amdgpu_sync_same_dev(adev, f)) {
+                       /* VM updates are only interesting
+                        * for other VM updates and moves.
+                        */
+                       fence_owner = amdgpu_sync_get_owner(f);
+                       if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                           (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+                           ((owner == AMDGPU_FENCE_OWNER_VM) !=
+                            (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+                               continue;
+
+                       /* Ignore fence from the same owner as
+                        * long as it isn't undefined.
+                        */
+                       if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+                           fence_owner == owner)
+                               continue;
                }
+
+               r = amdgpu_sync_fence(adev, sync, f);
+               if (r)
+                       break;
        }
        return r;
 }
 
+/*
+ * amdgpu_sync_get_fence - remove and return an unsignaled fence
+ *
+ * Pops entries from the hash table, dropping references to fences that
+ * have already signaled, and returns the first unsignaled fence found
+ * (the reference is transferred to the caller). Returns NULL once no
+ * unsignaled fence remains.
+ */
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       struct fence *f;
+       int i;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+               f = e->fence;
+
+               /* entry is consumed regardless of the fence's state */
+               hash_del(&e->node);
+               kfree(e);
+
+               if (!fence_is_signaled(f))
+                       return f;
+
+               fence_put(f);
+       }
+       return NULL;
+}
+
+/*
+ * amdgpu_sync_wait - wait for all remembered fences to signal
+ *
+ * Waits on each fence in the hash table (non-interruptibly, per the
+ * second fence_wait() argument) and frees its entry on success.
+ * Returns 0 when all fences signaled, or the first fence_wait() error;
+ * on error the failing entry (and any not yet visited) stays in the
+ * table.
+ */
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       int i, r;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               r = fence_wait(e->fence, false);
+               if (r)
+                       return r;
+
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+       return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -164,9 +285,9 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                        return -EINVAL;
                }
 
-               if (count >= AMDGPU_NUM_SYNCS) {
+               if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
                        /* not enough room, wait manually */
-                       r = amdgpu_fence_wait(fence, false);
+                       r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
@@ -186,7 +307,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
                        /* signaling wasn't successful wait manually */
                        amdgpu_ring_undo(other);
-                       r = amdgpu_fence_wait(fence, false);
+                       r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
@@ -196,7 +317,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
                if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
                        /* waiting wasn't successful wait manually */
                        amdgpu_ring_undo(other);
-                       r = amdgpu_fence_wait(fence, false);
+                       r = fence_wait(&fence->base, false);
                        if (r)
                                return r;
                        continue;
@@ -220,15 +341,23 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
                      struct amdgpu_sync *sync,
-                     struct amdgpu_fence *fence)
+                     struct fence *fence)
 {
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
        unsigned i;
 
+       /* drop every per-context fence remembered in the hash table */
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               hash_del(&e->node);
+               fence_put(e->fence);
+               kfree(e);
+       }
+
        for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
                amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                amdgpu_fence_unref(&sync->sync_to[i]);
 
-       amdgpu_fence_unref(&sync->last_vm_update);
+       /* last_vm_update is a generic fence reference now */
+       fence_put(sync->last_vm_update);