/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

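/**
 * Add an entity to the tail of a run queue's entity list
 *
 * @rq          The run queue
 * @entity      The entity to add
 */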
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

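/**
 * Remove an entity from a run queue
 *
 * @rq          The run queue
 * @entity      The entity to remove; rq->current_entity is cleared if it
 *              points at this entity
 */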
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;
        struct amd_sched_job *sched_job;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        sched_job = amd_sched_entity_pop_job(entity);
                        if (sched_job) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return sched_job;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                sched_job = amd_sched_entity_pop_job(entity);
                if (sched_job) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return sched_job;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(1);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);

        return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini; consume the
         * already queued IBs before tearing the entity down.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

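/**
 * Fence callback: a dependency of the entity has signaled, so clear it and
 * wake up the scheduler so the entity's jobs can be considered again.
 */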
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

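/**
 * Peek at the next job of an entity without removing it from the queue.
 *
 * Returns NULL if the entity has no job queued, or if the job still has an
 * unsignaled dependency; in the latter case a callback is installed so the
 * scheduler gets woken up once the dependency signals.
 */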
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (ACCESS_ONCE(entity->dependency))
                return NULL;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job))) {

                if (fence_add_callback(entity->dependency, &entity->cb,
                                       amd_sched_entity_wakeup))
                        fence_put(entity->dependency);
                else
                        return NULL;
        }

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                        sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first)
                amd_sched_wakeup(sched_job->sched);

        return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job           The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;
        struct amd_sched_fence *fence = amd_sched_fence_create(
                entity, sched_job->owner);

        if (!fence)
                return -ENOMEM;

        fence_get(&fence->base);
        sched_job->s_fence = fence;

        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
        trace_amd_sched_job(sched_job);
        return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *sched_job;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
        if (sched_job == NULL)
                sched_job = amd_sched_rq_select_job(&sched->sched_rq);

        return sched_job;
}

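/**
 * Fence callback: a job finished on the hardware. Drop it from the hardware
 * run queue count, signal the scheduler fence and wake up the scheduler
 * thread so it can push the next job.
 */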
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
}

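/**
 * Main loop of the scheduler kernel thread: wait until a job is ready, hand
 * it to the backend with run_job() and register a completion callback on the
 * returned hardware fence, then pop the job off its entity's queue.
 */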
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                        kthread_should_stop() ||
                        (sched_job = amd_sched_select_job(sched)));

                if (!sched_job)
                        continue;

                entity = sched_job->s_entity;
                s_fence = sched_job->s_fence;
                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Max number of hw submissions that can be in flight.
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, const char *name)
{
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        atomic_set(&sched->hw_rq_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
}
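
/*
 * Illustrative usage sketch (not part of this file): roughly how a driver
 * wires up the scheduler. The my_* identifiers and the ring/ctx structures
 * below are assumptions made for the example, not real symbols.
 *
 *      static struct amd_sched_backend_ops my_ops = {
 *              .dependency = my_dependency,    // next fence the job waits on
 *              .run_job    = my_run_job,       // submit the job to the HW ring
 *      };
 *
 *      // One scheduler per HW ring, limited to 16 in-flight submissions.
 *      amd_sched_init(&ring->sched, &my_ops, 16, "my_ring");
 *
 *      // One entity per client context, with a queue of up to 32 jobs.
 *      amd_sched_entity_init(&ring->sched, &ctx->entity,
 *                            &ring->sched.sched_rq, 32);
 *
 *      // Submission path: push a prepared job; the scheduler thread runs it
 *      // once its dependencies have signaled and the HW queue has room.
 *      amd_sched_entity_push_job(job);
 *
 *      // Teardown: drain the entity first, then stop the scheduler thread.
 *      amd_sched_entity_fini(&ring->sched, &ctx->entity);
 *      amd_sched_fini(&ring->sched);
 */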