git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
drm/amdgpu: fix coding style in the scheduler v2
authorChristian König <christian.koenig@amd.com>
Wed, 18 May 2016 07:43:07 +0000 (09:43 +0200)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 7 Jul 2016 18:50:50 +0000 (14:50 -0400)
v2: fix even more

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c

index c16248cee7799221799c15a1dc707c83fd2b295f..f5ac01db287bda7b2e2bc0894db05b8510979bff 100644 (file)
@@ -320,7 +320,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 }
 
 static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-       struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+       struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                cb_free_job);
+
        schedule_work(&job->work_free_job);
 }
 
@@ -341,7 +343,8 @@ void amd_sched_job_finish(struct amd_sched_job *s_job)
                                                struct amd_sched_job, node);
 
                if (next) {
-                       INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+                       INIT_DELAYED_WORK(&next->work_tdr,
+                                         s_job->timeout_callback);
                        amd_sched_job_get(next);
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
                }
@@ -353,7 +356,8 @@ void amd_sched_job_begin(struct amd_sched_job *s_job)
        struct amd_gpu_scheduler *sched = s_job->sched;
 
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-               list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
+           list_first_entry_or_null(&sched->ring_mirror_list,
+                                    struct amd_sched_job, node) == s_job)
        {
                INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
                amd_sched_job_get(s_job);
@@ -374,7 +378,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 
        sched_job->use_sched = 1;
        fence_add_callback(&sched_job->s_fence->base,
-                                       &sched_job->cb_free_job, amd_sched_free_job);
+                          &sched_job->cb_free_job, amd_sched_free_job);
        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
@@ -382,11 +386,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-                                               struct amd_gpu_scheduler *sched,
-                                               struct amd_sched_entity *entity,
-                                               void (*timeout_cb)(struct work_struct *work),
-                                               void (*free_cb)(struct kref *refcount),
-                                               void *owner, struct fence **fence)
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void (*timeout_cb)(struct work_struct *work),
+                      void (*free_cb)(struct kref *refcount),
+                      void *owner, struct fence **fence)
 {
        INIT_LIST_HEAD(&job->node);
        kref_init(&job->refcount);
@@ -504,7 +508,8 @@ static int amd_sched_main(void *param)
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
-                               DRM_ERROR("fence add callback failed (%d)\n", r);
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
index 070095a9433c3f9e2aac2a25484e21169e462329..690ae4b0c67354e0ba63649c67f60e866de56040 100644 (file)
@@ -94,7 +94,8 @@ struct amd_sched_job {
 extern const struct fence_ops amd_sched_fence_ops;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-       struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+       struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
+                                                  base);
 
        if (__f->base.ops == &amd_sched_fence_ops)
                return __f;
@@ -154,21 +155,23 @@ struct amd_sched_fence *amd_sched_fence_create(
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
-                                       struct amd_gpu_scheduler *sched,
-                                       struct amd_sched_entity *entity,
-                                       void (*timeout_cb)(struct work_struct *work),
-                                       void (*free_cb)(struct kref* refcount),
-                                       void *owner, struct fence **fence);
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void (*timeout_cb)(struct work_struct *work),
+                      void (*free_cb)(struct kref* refcount),
+                      void *owner, struct fence **fence);
 void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                                               struct amd_sched_job *s_job);
+                               struct amd_sched_job *s_job);
 void amd_sched_job_finish(struct amd_sched_job *s_job);
 void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
+static inline void amd_sched_job_get(struct amd_sched_job *job)
+{
        if (job)
                kref_get(&job->refcount);
 }
 
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
+static inline void amd_sched_job_put(struct amd_sched_job *job)
+{
        if (job)
                kref_put(&job->refcount, job->free_callback);
 }
index 2a732c4903755db7d3838f9b6c8ae00ee2804309..6bdc9b7169d2946744714e233284751071d3aaa1 100644 (file)
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                              void *owner)
 {
        struct amd_sched_fence *fence = NULL;
        unsigned seq;
@@ -38,12 +39,12 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 
        INIT_LIST_HEAD(&fence->scheduled_cb);
        fence->owner = owner;
-       fence->sched = s_entity->sched;
+       fence->sched = entity->sched;
        spin_lock_init(&fence->lock);
 
-       seq = atomic_inc_return(&s_entity->fence_seq);
+       seq = atomic_inc_return(&entity->fence_seq);
        fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                  s_entity->fence_context, seq);
+                  entity->fence_context, seq);
 
        return fence;
 }