]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
drm/amdgpu: add amd_sched_job_recovery
authorChunming Zhou <David1.Zhou@amd.com>
Wed, 29 Jun 2016 07:23:55 +0000 (15:23 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 7 Jul 2016 19:06:15 +0000 (15:06 -0400)
Recover hardware jobs (re-submit them to the rings) after a GPU reset.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

index cf2d64bef0dfda2705580c3ad7ccae7ef0243148..70ff09d10885f3d19828233f6bddcc1e227add5c 100644 (file)
@@ -32,6 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -395,6 +396,38 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
        spin_unlock(&sched->job_list_lock);
 }
 
+/**
+ * amd_sched_job_recovery - re-submit pending jobs after a GPU reset
+ * @sched: scheduler instance whose ring_mirror_list holds the jobs
+ *
+ * Walks the ring mirror list under job_list_lock and re-runs every job
+ * that was still in flight when the hardware was reset, reattaching the
+ * scheduler's completion callback to each new hardware fence.
+ */
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+       struct amd_sched_job *s_job;
+       int r;
+
+       spin_lock(&sched->job_list_lock);
+       /* Re-arm the timeout handler on the oldest pending job, if any. */
+       s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+                                        struct amd_sched_job, node);
+       if (s_job)
+               schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+       /* Re-submit every mirrored job to the hardware. */
+       list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+               struct amd_sched_fence *s_fence = s_job->s_fence;
+               struct fence *fence = sched->ops->run_job(s_job);
+               if (fence) {
+                       /* Track the new HW fence as the scheduler fence's
+                        * parent; take a reference for that link.
+                        * NOTE(review): any previous s_fence->parent is
+                        * overwritten without a fence_put here — confirm
+                        * the reset path already dropped it.
+                        */
+                       s_fence->parent = fence_get(fence);
+                       r = fence_add_callback(fence, &s_fence->cb,
+                                              amd_sched_process_job);
+                       /* -ENOENT: fence already signaled, so complete
+                        * the job immediately instead of via callback.
+                        */
+                       if (r == -ENOENT)
+                               amd_sched_process_job(fence, &s_fence->cb);
+                       else if (r)
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
+                       /* Drop run_job's reference; the parent link above
+                        * holds its own.
+                        */
+                       fence_put(fence);
+               } else {
+                       /* run_job produced no HW fence: signal completion
+                        * with a NULL fence so the job does not hang.
+                        */
+                       DRM_ERROR("Failed to run job!\n");
+                       amd_sched_process_job(NULL, &s_fence->cb);
+               }
+       }
+       spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * Submit a job to the job queue
  *
index fdcd8fbf5e266c5223231ba462e630b604b9545b..7cbbbfb502ef1342caa2a5b36aa970368fed5055 100644 (file)
@@ -154,4 +154,5 @@ int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_sched_entity *entity,
                       void *owner);
 void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 #endif