/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
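/*
 * With CIK_HPD_EOP_BYTES_LOG2 == 11, each pipe gets a
 * 1 << 11 = 2048-byte EOP queue buffer in GTT.
 */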
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.first_compute_pipe;
}
unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}
static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* Kaveri KFD VMIDs start from VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}
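/*
 * Bit i of vmid_bitmap corresponds to hardware VMID
 * (i + KFD_VMID_START_OFFSET); with the offset of 8 noted above, bit 0
 * maps to VMID 8. A set bit means the VMID is free, so clearing it
 * marks the VMID as in use and setting it again (in deallocate_vmid
 * below) releases it.
 */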
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/* a VMID is allocated lazily, when the process gets its first queue */
	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
			pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}
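/*
 * "Horizontal" allocation spreads HQDs round-robin across pipes: the
 * search starts at next_pipe_to_allocate and advances it after every
 * successful allocation, so consecutive queues land on different pipes
 * before any pipe's remaining HQD slots are reused. With four pipes,
 * for example, the first four queues would typically each take queue
 * slot 0 on pipes 0 through 3.
 */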
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe, q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);
	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}
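/*
 * Under HWS scheduling the updated MQD only takes effect once the
 * runlist is rebuilt, which is presumably why update_queue() re-runs
 * execute_queues_cpsch() for every policy except NO_HWS.
 */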
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	/* MQD managers are created lazily and cached per type */
	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	/* the process must have no remaining queues at this point */
	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
						ATC_VMID_PASID_MAPPING_VALID;
	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
						vmid);
}
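/*
 * A pasid of 0 writes a zero mapping, i.e. it invalidates the
 * VMID-to-PASID entry; any other value is tagged with
 * ATC_VMID_PASID_MAPPING_VALID so the ATC treats it as a live mapping.
 */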
int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */
	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
					&dqm->pipeline_mem);
	if (err) {
		pr_err("kfd: error allocating vidmem for %d pipes\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL) {
		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		/*
		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
		 * space in GTT for pipelines we don't initialize
		 */
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}
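/*
 * Worked example for the HPD size encoding above: the hardware takes
 * log2(size in DWORDs) - 1. With CIK_HPD_EOP_BYTES = 2048 bytes =
 * 512 DWORDs, log2(512) - 1 = 8, which is exactly
 * CIK_HPD_EOP_BYTES_LOG2 - 3 (the -3 folds together the /4
 * bytes-per-DWORD and the -1).
 */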
static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
	return retval;
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}
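/*
 * All three bitmaps start with every bit set, i.e. every resource
 * free: (1 << QUEUES_PER_PIPE) - 1 marks each pipe's HQD slots as
 * available, and likewise for the VMID and SDMA queue bitmaps. If
 * QUEUES_PER_PIPE is 8, for instance, each pipe starts as 0xff.
 */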
static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
				struct qcm_process_device *qpd)
{
	uint32_t value = SDMA_ATC;

	if (q->process->is_32bit_user_mode)
		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
							qpd_to_pdd(qpd)));
	q->properties.sdma_vm_addr = value;
}
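/*
 * The SDMA VM value selects the address-translation mode per queue:
 * 32-bit processes get SDMA_VA_PTR32 plus the 32-bit shared aperture
 * bases, while 64-bit processes get the shared base encoded as a
 * nybble. ATC (address translation via the IOMMU) is enabled in both
 * cases.
 */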
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:    %d\n", q->sdma_id);
	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	init_sdma_vm(dqm, q, qpd);
	return 0;
}
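/*
 * The flat sdma_id is decomposed into a per-engine queue id and an
 * engine id. With two engines and two queues per engine (the CIK
 * configuration), sdma_id 0..3 maps to (engine 0, queue 0),
 * (engine 0, queue 1), (engine 1, queue 0) and (engine 1, queue 1).
 */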
/*
 * Device Queue Manager implementation for cp scheduler
 */
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
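/*
 * Illustration of the mask arithmetic above (assuming, for example,
 * PIPE_PER_ME_CP_SCHEDULING == 3 and QUEUES_PER_PIPE == 8):
 * queue_num = 24 and queue_mask covers bits 0..23; the shift by
 * first_pipe * QUEUES_PER_PIPE then positions the mask at the first
 * HQD owned by amdkfd rather than by the graphics driver.
 */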
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	/* preempt all running queues before removing the kernel queue */
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}
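/*
 * Round-robin between the two SDMA engines. Note that the static
 * counter is global rather than per-device, so the alternation is
 * shared across all DQM instances.
 */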
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	/* under HWS the hardware scheduler assigns VMIDs, not the driver */
	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}
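/*
 * The wait is a cooperative poll: schedule() yields the CPU between
 * reads of the fence word, and time_after() keeps the deadline
 * comparison correct even when jiffies wraps around.
 */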
static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
			sdma_engine);
}
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (dqm->active_runlist == false)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* the wait is bounded by the queue-preemption timeout */
	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
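/*
 * Preemption handshake used above: the fence lives in GART memory
 * visible to both CPU and GPU. The driver primes it with
 * KFD_FENCE_INIT, asks the CP to write KFD_FENCE_COMPLETED via
 * pm_send_query_status() once the unmap has drained, and then polls
 * from the CPU side with fence_wait_timeout() until the value flips or
 * the preemption timeout expires.
 */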
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist\n");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
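/*
 * Illustrative values (not taken from the driver): base = 0x10000 and
 * size = 0x20000 give limit = 0x2FFFF. Then
 * base & APE1_FIXED_BITS_MASK == 0 and
 * limit & APE1_FIXED_BITS_MASK == 0xFFFF == APE1_LIMIT_ALIGNMENT, so
 * the pair passes the checks in set_cache_memory_policy() below and is
 * programmed as sh_mem_ape1_base = 0x1, sh_mem_ape1_limit = 0x2.
 */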
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */
		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}
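/*
 * Note the split between the two vtables populated above: dqm->ops
 * selects the scheduling mode (HWS vs. no-HWS) while
 * dqm->ops_asic_specific hooks the ASIC family (CIK vs. VI), so the
 * two choices vary independently. Several ops (update_queue,
 * get_mqd_manager, register/unregister, uninitialize) are shared by
 * both scheduling modes.
 */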
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}