/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>

#include "gpu_scheduler.h"
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
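/*
 * Note: rq->lock protects both the entity list and rq->current_entity;
 * the run queue helpers below take and release it internally, so callers
 * must not hold it.
 */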
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select the next entity from a specified run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * runnable one in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
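/*
 * The scan above is deliberately two-pass: list_for_each_entry_continue()
 * only visits the entities behind rq->current_entity, and the second, full
 * scan wraps around from the list head, stopping once it reaches
 * current_entity again.  With entities A -> B -> C and current_entity == B,
 * the visit order is C, A, B.
 */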
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
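/*
 * hw_rq_count is incremented in amd_sched_main() when a job is handed to
 * the backend and decremented from the fence callback in
 * amd_sched_process_job(), so the check above bounds the number of jobs
 * in flight on the hardware to hw_submission_limit.
 */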
/**
 * Select the next entity containing real IB submissions.
 */
static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *wake_entity = NULL;
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	if (sched->current_entity && (sched->current_entity != tmp))
		wake_entity = sched->current_entity;
	sched->current_entity = tmp;
	if (wake_entity && wake_entity->need_wakeup)
		wake_up(&wake_entity->wait_queue);
	return tmp;
}
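/*
 * Waking the previously scheduled entity matters for teardown:
 * amd_sched_entity_fini() sets need_wakeup and sleeps on
 * entity->wait_queue until the entity is no longer current and its
 * job queue has drained.
 */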
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	char name[20];

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	spin_lock_init(&entity->lock);
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	init_waitqueue_head(&entity->wait_emit);
	entity->fence_context = fence_context_alloc(1);
	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
	memcpy(entity->name, name, sizeof(name));
	entity->need_wakeup = false;
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}
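/*
 * Typical client usage (illustrative sketch only; "my_entity" and the
 * queue depth of 32 are placeholders, not values taken from a driver):
 *
 *	struct amd_sched_entity my_entity;
 *
 *	r = amd_sched_entity_init(sched, &my_entity, &sched->sched_rq, 32);
 *	if (r)
 *		return r;
 *	...submit work with amd_sched_push_job()...
 *	amd_sched_entity_fini(sched, &my_entity);
 */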
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
					  struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}
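/*
 * Note: the idle check below runs without taking any lock; it relies on
 * the wait_event_timeout() caller in amd_sched_entity_fini() re-evaluating
 * the condition every time the entity's wait queue is woken.
 */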
static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
				   struct amd_sched_entity *entity)
{
	/*
	 * Idle means no pending IBs, and the entity is not
	 * currently being used.
	 */
	return sched->current_entity != entity &&
	       kfifo_is_empty(&entity->job_queue);
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return 0 on success, negative error code on failure
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	int r = 0;
	struct amd_sched_rq *rq = entity->belongto_rq;

	if (!is_context_entity_initialized(sched, entity))
		return 0;
	entity->need_wakeup = true;
	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs
	 */
	r = wait_event_timeout(
		entity->wait_queue,
		is_context_entity_idle(sched, entity),
		msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
		) ? 0 : -1;

	if (r) {
		if (entity->is_pending)
			DRM_INFO("Entity %p is in waiting state during fini, "
				 "all pending ibs will be canceled.\n",
				 entity);
	}

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
	return r;
}
/**
 * Submit a normal job to the job queue
 *
 * @sched_job	The pointer to the job to submit (carries its entity and
 *		scheduler)
 *
 * return 0 on success, -EINVAL if no scheduler fence could be created.
 * If the entity's job queue is full the call blocks until the scheduler
 * has consumed some queued commands.
 */
int amd_sched_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_fence *fence =
		amd_sched_fence_create(sched_job->s_entity);

	if (!fence)
		return -EINVAL;
	fence_get(&fence->base);
	sched_job->s_fence = fence;
	while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
				   &sched_job, sizeof(void *),
				   &sched_job->s_entity->queue_lock) !=
	       sizeof(void *)) {
		/*
		 * Current context used up all its IB slots;
		 * wait here, or need to check whether GPU is hung
		 */
		schedule();
	}
	/* first job wakes up the scheduler */
	if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
		wake_up_interruptible(&sched_job->sched->wait_queue);
	return 0;
}
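/**
 * Fence callback, invoked when the hardware fence attached to a job
 * signals.  Signals the scheduler fence, frees up a hardware submission
 * slot and kicks the scheduler thread.
 */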
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}
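/**
 * The main scheduler thread: waits until the hardware can accept more
 * work and a runnable entity exists, pops one job from that entity's
 * queue, hands it to the backend and connects the returned hardware
 * fence to amd_sched_process_job().
 */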
static int amd_sched_main(void *param)
{
	int r;
	struct amd_sched_job *job;
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_sched_entity *c_entity = NULL;
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 amd_sched_ready(sched) &&
					 (c_entity = select_context(sched)));
		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;
		r = 0;
		if (sched->ops->prepare_job)
			r = sched->ops->prepare_job(sched, c_entity, job);
		if (!r)
			atomic_inc(&sched->hw_rq_count);
		mutex_lock(&sched->sched_lock);
		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		}
		mutex_unlock(&sched->sched_lock);
	}
	return 0;
}
/**
 * Create a gpu scheduler
 *
 * @device	The device context for this scheduler
 * @ops		The backend operations for this scheduler.
 * @ring	The scheduler is per ring, this is its ring id.
 * @granularity	The minimum time unit, in ms, at which the scheduler
 *		schedules.
 * @preemption	Indicates whether this ring supports preemption, 0 means no.
 * @hw_submission	The max number of jobs in flight on the hardware.
 *
 * return the pointer to scheduler for success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
					   struct amd_sched_backend_ops *ops,
					   unsigned ring,
					   unsigned granularity,
					   unsigned preemption,
					   unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;
	char name[20];

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->device = device;
	sched->ops = ops;
	sched->granularity = granularity;
	sched->ring_id = ring;
	sched->preemption = preemption;
	sched->hw_submission_limit = hw_submission;
	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
	mutex_init(&sched->sched_lock);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wait_queue);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_create(amd_sched_main, sched, name);
	if (!IS_ERR(sched->thread)) {
		wake_up_process(sched->thread);
		return sched;
	}

	DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
	kfree(sched);
	return NULL;
}
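/*
 * Illustrative creation sketch (the backend ops table and the submission
 * depth of 16 are placeholders, not values from a real driver):
 *
 *	struct amd_gpu_scheduler *sched;
 *
 *	sched = amd_sched_create(adev, &my_backend_ops, ring->idx,
 *				 granularity, preemption, 16);
 *	if (!sched)
 *		return -ENOMEM;
 *	...
 *	amd_sched_destroy(sched);
 */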
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * return 0 on success
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}