/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>

#include "gpu_scheduler.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *job;

	spin_lock(&rq->lock);

	/* Continue the scan after the last serviced entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			job = amd_sched_entity_pop_job(entity);
			if (job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return job;
			}
		}
	}

	/* Wrap around, stopping once we are back at the start */
	list_for_each_entry(entity, &rq->entities, list) {
		job = amd_sched_entity_pop_job(entity);
		if (job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return job;
		}
		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);
	return NULL;
}

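/*
 * Worked example (hypothetical entities): with A, B and C on the list and
 * rq->current_entity == B, the first loop polls C; the second loop wraps to
 * A and stops after reaching B again.  Whichever entity first yields a job
 * becomes current_entity, so the next selection starts after it and no busy
 * entity can starve the others.
 */
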
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}

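/*
 * Usage sketch, not compiled in: how a driver-side context might pair
 * amd_sched_entity_init() with amd_sched_entity_fini().  The function name,
 * the caller-owned entity and the queue depth of 32 are illustrative
 * assumptions, not part of this API.
 */
#if 0
static int example_ctx_init(struct amd_gpu_scheduler *sched,
			    struct amd_sched_entity *entity)
{
	int r;

	/* Back the entity's kfifo with room for 32 queued jobs */
	r = amd_sched_entity_init(sched, entity, &sched->sched_rq, 32);
	if (r)
		return r;

	/* ... submit work with amd_sched_entity_push_job() ... */

	/* Drains remaining jobs, removes the entity and frees the kfifo */
	amd_sched_entity_fini(sched, entity);
	return 0;
}
#endif
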
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb(); /* make sure we see the latest fifo state from the producer */
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume
	 * the existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);

	entity->dependency = NULL;
	fence_put(f); /* drop the reference taken on the dependency */
	amd_sched_wakeup(entity->scheduler);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->scheduler;
	struct amd_sched_job *job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(job))) {
		/* already signaled dependencies are dropped, otherwise
		 * wait for the wakeup callback */
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @job		The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
	struct amd_sched_entity *entity = job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
		first = true;
	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->scheduler->job_scheduled,
		   amd_sched_entity_in(sched_job));

	return 0;
}

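/*
 * Usage sketch, not compiled in: a hypothetical submission path.  The job is
 * assumed to be allocated and its backend fields (including owner) filled in
 * by the driver; only the scheduler-facing fields are shown.
 */
#if 0
static int example_submit(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_job *job)
{
	job->sched = sched;
	job->s_entity = entity;

	/* Blocks until the job fits into the entity's job queue */
	return amd_sched_entity_push_job(job);
}
#endif
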
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (job == NULL)
		job = amd_sched_rq_select_job(&sched->sched_rq);

	return job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched_job);
	wake_up_interruptible(&sched->wake_up_worker);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(job = amd_sched_select_job(sched)));

		if (!job)
			continue;

		entity = job->s_entity;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				/* fence was already signaled */
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}

		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
		WARN_ON(count != sizeof(job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops		The backend operations for this scheduler.
 * @ring	The ring id for the scheduler.
 * @hw_submission	The max number of in-flight hw submissions.
 * @priv	Private driver data for the backend.
 *
 * Return the pointer to the scheduler on success, otherwise NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * Returns 0 on success.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}
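
/*
 * Lifecycle sketch, not compiled in: one scheduler per ring.  The ops table
 * contents, ring index 0, submission limit 16 and the NULL priv argument are
 * illustrative assumptions.
 */
#if 0
static struct amd_sched_backend_ops example_ops = {
	/* .dependency, .run_job and .process_job are supplied by the driver */
};

static void example_lifecycle(void)
{
	struct amd_gpu_scheduler *sched;

	sched = amd_sched_create(&example_ops, 0, 16, NULL);
	if (!sched)
		return;

	/* ... init entities against sched->sched_rq and push jobs ... */

	amd_sched_destroy(sched);
}
#endif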