/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select the next entity from a run queue, using a round-robin policy.
 * It may return the same entity as the current one if that is the only
 * entity in the queue with pending jobs. Returns NULL if nothing is
 * available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (!kfifo_is_empty(&entity->job_queue)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return rq->current_entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (!kfifo_is_empty(&entity->job_queue)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return rq->current_entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

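/*
 * An illustrative walk-through of the round-robin pick above (entity names
 * are hypothetical): with entities A, B and C on the run queue in that
 * order and rq->current_entity == A, the first loop resumes the scan at B;
 * if B and C have empty job queues, the second loop starts over from the
 * list head and stops once it has checked A again, returning A if it has
 * pending jobs and NULL otherwise.
 */
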
/**
 * Initialize a context entity, used by the scheduler when submitting to a
 * HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, a negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        char name[20];

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        entity->belongto_rq = rq;
        entity->scheduler = sched;
        init_waitqueue_head(&entity->wait_queue);
        entity->fence_context = fence_context_alloc(1);
        snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
        memcpy(entity->name, name, 20);
        entity->need_wakeup = false;
        if (kfifo_alloc(&entity->job_queue,
                        jobs * sizeof(void *),
                        GFP_KERNEL))
                return -EINVAL;

        spin_lock_init(&entity->queue_lock);
        atomic_set(&entity->fence_seq, 0);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);
        return 0;
}

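/*
 * A minimal usage sketch for the entity API; the wrapper struct, function
 * names and queue size below are hypothetical, not part of this driver. A
 * client embeds an amd_sched_entity in its per-context state, initializes
 * it against one of the scheduler's run queues, and tears it down with
 * amd_sched_entity_fini() once it stops submitting work:
 *
 *      struct my_ctx {
 *              struct amd_sched_entity entity;
 *      };
 *
 *      int my_ctx_init(struct amd_gpu_scheduler *sched, struct my_ctx *ctx)
 *      {
 *              return amd_sched_entity_init(sched, &ctx->entity,
 *                                           &sched->sched_rq, 32);
 *      }
 *
 *      void my_ctx_fini(struct amd_gpu_scheduler *sched, struct my_ctx *ctx)
 *      {
 *              amd_sched_entity_fini(sched, &ctx->entity);
 *      }
 *
 * Kernel-internal submissions would use &sched->kernel_rq instead, which
 * amd_sched_select_context() checks first.
 */
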
/**
 * Query if an entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->scheduler == sched &&
                entity->belongto_rq != NULL;
}

/**
 * Check if an entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return the remaining timeout in jiffies if the entity went idle, 0 if the
 * wait timed out.
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->belongto_rq;
        long r;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return 0;
        entity->need_wakeup = true;
        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        r = wait_event_timeout(entity->wait_queue,
                amd_sched_entity_is_idle(entity),
                msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));

        if (r <= 0)
                DRM_INFO("Entity %p is in waiting state during fini\n",
                         entity);

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
        return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * If the entity's job queue is full, this function blocks until the
 * scheduler has consumed some of the queued commands.
 *
 * Return 0 on success, -EINVAL if no scheduler fence could be created.
 */
int amd_sched_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_fence *fence =
                amd_sched_fence_create(sched_job->s_entity);
        if (!fence)
                return -EINVAL;
        fence_get(&fence->base);
        sched_job->s_fence = fence;
        while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
                                   &sched_job, sizeof(void *),
                                   &sched_job->s_entity->queue_lock) !=
               sizeof(void *)) {
                /*
                 * The current context has used up all of its IB slots;
                 * wait here, or check whether the GPU is hung.
                 */
                schedule();
        }
        /* the first job wakes up the scheduler */
        if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
                wake_up_interruptible(&sched_job->sched->wait_queue);
        return 0;
}

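/*
 * A rough submission sketch; the allocation and the surrounding context
 * (sched, ctx, r) are hypothetical, and a real backend would embed
 * amd_sched_job in its own job structure. The caller points the job at the
 * target entity and scheduler, then pushes it:
 *
 *      struct amd_sched_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
 *
 *      if (!job)
 *              return -ENOMEM;
 *      job->sched = sched;
 *      job->s_entity = &ctx->entity;
 *      r = amd_sched_push_job(job);
 *      if (r)
 *              return r;
 */
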
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *tmp;

        if (!amd_sched_ready(sched))
                return NULL;

        /* The kernel run queue has higher priority than the normal run queue */
        tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
        if (tmp == NULL)
                tmp = amd_sched_rq_select_entity(&sched->sched_rq);

        return tmp;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *sched_job =
                container_of(cb, struct amd_sched_job, cb);
        struct amd_gpu_scheduler *sched;

        sched = sched_job->sched;
        amd_sched_fence_signal(sched_job->s_fence);
        atomic_dec(&sched->hw_rq_count);
        fence_put(&sched_job->s_fence->base);
        sched->ops->process_job(sched, sched_job);
        wake_up_interruptible(&sched->wait_queue);
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *c_entity = NULL;
                struct amd_sched_job *job;
                struct fence *fence;

                wait_event_interruptible(sched->wait_queue,
                        kthread_should_stop() ||
                        (c_entity = amd_sched_select_context(sched)));

                if (!c_entity)
                        continue;

                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
                r = 0;
                if (sched->ops->prepare_job)
                        r = sched->ops->prepare_job(sched, c_entity, job);
                if (!r)
                        atomic_inc(&sched->hw_rq_count);
                mutex_lock(&sched->sched_lock);
                fence = sched->ops->run_job(sched, c_entity, job);
                if (fence) {
                        r = fence_add_callback(fence, &job->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                }
                mutex_unlock(&sched->sched_lock);

                if (c_entity->need_wakeup) {
                        c_entity->need_wakeup = false;
                        wake_up(&c_entity->wait_queue);
                }
        }
        return 0;
}

/**
 * Create a gpu scheduler
 *
 * @device        The device context for this scheduler
 * @ops           The backend operations for this scheduler.
 * @ring          The scheduler is per ring; this is the ring id.
 * @granularity   The minimum unit, in ms, at which the scheduler schedules.
 * @preemption    Indicates whether this ring supports preemption, 0 means no.
 * @hw_submission The max number of jobs that can be in flight on the HW.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
                                           struct amd_sched_backend_ops *ops,
                                           unsigned ring,
                                           unsigned granularity,
                                           unsigned preemption,
                                           unsigned hw_submission)
{
        struct amd_gpu_scheduler *sched;
        char name[20];

        sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
        if (!sched)
                return NULL;

        sched->device = device;
        sched->ops = ops;
        sched->granularity = granularity;
        sched->ring_id = ring;
        sched->preemption = preemption;
        sched->hw_submission_limit = hw_submission;
        snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
        mutex_init(&sched->sched_lock);
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);

        init_waitqueue_head(&sched->wait_queue);
        atomic_set(&sched->hw_rq_count, 0);
        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
                kfree(sched);
                return NULL;
        }

        return sched;
}

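/*
 * A creation/teardown sketch; the ops table, callbacks and numeric values
 * here are hypothetical placeholders for what a backend (e.g. amdgpu)
 * would wire up per ring. The backend supplies prepare_job, run_job and
 * process_job callbacks and chooses how many jobs may be in flight on the
 * hardware at once:
 *
 *      static struct amd_sched_backend_ops my_ops = {
 *              .prepare_job = my_prepare_job,
 *              .run_job     = my_run_job,
 *              .process_job = my_process_job,
 *      };
 *
 *      sched = amd_sched_create(adev, &my_ops, ring_id, 1, 0, 16);
 *      if (!sched)
 *              return -ENOMEM;
 *      ...
 *      amd_sched_destroy(sched);
 */
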
/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 *
 * Return 0 on success.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
        kfree(sched);
        return 0;
}