/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/VCE/vce_4_0_offset.h"
#include "vega10/VCE/vce_4_0_default.h"
#include "vega10/VCE/vce_4_0_sh_mask.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_sh_mask.h"

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V4_0_FW_SIZE	(384 * 1024)
#define VCE_V4_0_STACK_SIZE	(64 * 1024)
#define VCE_V4_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v4_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
	else if (ring == &adev->vce.ring[1])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
}

/**
 * vce_v4_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring == &adev->vce.ring[0])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
	else if (ring == &adev->vce.ring[1])
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}

/**
 * vce_v4_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring == &adev->vce.ring[0])
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
			lower_32_bits(ring->wptr));
	else if (ring == &adev->vce.ring[1])
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
			lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
			lower_32_bits(ring->wptr));
}

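/**
 * vce_v4_0_firmware_loaded - wait for the VCE firmware to boot
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports that the firmware is loaded,
 * toggling the ECPU soft reset between retry rounds as a recovery
 * attempt.  Returns 0 on success or a timeout error otherwise.
 */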
static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status =
				RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
				VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
				~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

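/**
 * vce_v4_0_mmsch_start - kick off MMSCH initialization (SR-IOV)
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table to hand to the MM scheduler
 *
 * Point the MM scheduler at the init descriptor table, ring its
 * mailbox and wait for the response before clearing the doorbell.
 */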
static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);

	/* 4, set resp to zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);

	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);

	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);

	return 0;
}

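/**
 * vce_v4_0_sriov_start - build the MMSCH init table and start VCE (SR-IOV)
 *
 * @adev: amdgpu_device pointer
 *
 * Under SR-IOV the guest does not program VCE registers directly; the
 * MC_RESUME and start sequence is instead written as a table of
 * register commands that the MM scheduler replays on our behalf.
 */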
static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
			header->vce_table_offset = header->header_size;
		else
			header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;

		init_table += header->vce_table_offset;

		ring = &adev->vce.ring[0];
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
					    lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
					    upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
					    ring->ring_size / 4);

		/* BEGIN OF MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
						    adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
						    adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
						    adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
						    adev->vce.gpu_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
						    adev->vce.gpu_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
						    adev->vce.gpu_addr >> 8);
		}

		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
		size = VCE_V4_0_FW_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
					    offset & 0x7FFFFFFF);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

		offset += size;
		size = VCE_V4_0_STACK_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
					    offset & 0x7FFFFFFF);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

		offset += size;
		size = VCE_V4_0_DATA_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
					    offset & 0x7FFFFFFF);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
						   0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

		/* end of MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
						   ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
						   ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);

		MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);

		/* clear BUSY flag */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   ~VCE_STATUS__JOB_BUSY_MASK, 0);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->vce_table_size = table_size;

		return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
	}

	return -EINVAL; /* already initialized ? */
}

/**
 * vce_v4_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->vce.ring[0];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);

	ring = &adev->vce.ring[1];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);

	ring = &adev->vce.ring[2];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);

	vce_v4_0_mc_resume(adev);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
			~VCE_STATUS__JOB_BUSY_MASK);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	r = vce_v4_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

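/**
 * vce_v4_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the VCPU, hold the ECPU in soft reset and clear the busy flag.
 */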
static int vce_v4_0_stop(struct amdgpu_device *adev)
{

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);

	/* hold on ECPU */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	/* Set Clock-Gating off */
	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
	 *	vce_v4_0_set_vce_sw_clock_gating(adev, false);
	 */

	return 0;
}

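/**
 * vce_v4_0_early_init - set up ring and irq functions
 *
 * @handle: amdgpu_device pointer
 *
 * Pick the number of rings (one under SR-IOV) and install the
 * ring/irq function tables.
 */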
static int vce_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) /* currently only VCE0 is supported under SR-IOV */
		adev->vce.num_rings = 1;
	else
		adev->vce.num_rings = 3;

	vce_v4_0_set_ring_funcs(adev);
	vce_v4_0_set_irq_funcs(adev);

	return 0;
}

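/**
 * vce_v4_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the interrupt source, allocate the firmware/stack/data BO,
 * set up doorbells for SR-IOV and initialize the rings.
 */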
static int vce_v4_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	unsigned size;
	int r, i;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
	if (r)
		return r;

	size  = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		size += VCE_V4_0_FW_SIZE;

	r = amdgpu_vce_sw_init(adev, size);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);

		adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->vce.saved_bo)
			return -ENOMEM;

		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCE firmware\n");
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		if (amdgpu_sriov_vf(adev)) {
			/* DOORBELL only works under SRIOV */
			ring->use_doorbell = true;
			if (i == 0)
				ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
			else if (i == 1)
				ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
			else
				ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
		}
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}

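/**
 * vce_v4_0_sw_fini - software fini
 *
 * @handle: amdgpu_device pointer
 *
 * Free the MM table and the saved firmware BO, then tear down the
 * common VCE state.
 */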
static int vce_v4_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* free MM table */
	amdgpu_virt_free_mm_table(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		kfree(adev->vce.saved_bo);
		adev->vce.saved_bo = NULL;
	}

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

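/**
 * vce_v4_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Start the block (via MMSCH under SR-IOV) and test all rings.
 */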
static int vce_v4_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		r = vce_v4_0_sriov_start(adev);
	else
		r = vce_v4_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

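/**
 * vce_v4_0_hw_fini - hardware fini
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the block (bare metal only) and mark all rings as not ready.
 */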
static int vce_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (!amdgpu_sriov_vf(adev)) {
		/* vce_v4_0_wait_for_idle(handle); */
		vce_v4_0_stop(adev);
	} else {
		/* full access mode, so don't touch any VCE register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	return 0;
}

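/**
 * vce_v4_0_suspend - save firmware state and stop VCE
 *
 * @handle: amdgpu_device pointer
 *
 * With PSP loading, snapshot the VCPU BO before powering down, since
 * its contents are lost across suspend.
 */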
static int vce_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_fromio(adev->vce.saved_bo, ptr, size);
	}

	r = vce_v4_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

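/**
 * vce_v4_0_resume - restore firmware state and restart VCE
 *
 * @handle: amdgpu_device pointer
 *
 * Mirror of vce_v4_0_suspend(): restore the saved VCPU BO (PSP
 * loading) or reload the firmware, then re-init the hardware.
 */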
static int vce_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_toio(ptr, adev->vce.saved_bo, size);
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	return vce_v4_0_hw_init(adev);
}

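/**
 * vce_v4_0_mc_resume - program VCE memory controller registers
 *
 * @adev: amdgpu_device pointer
 *
 * Tell the VCE memory controller where the firmware, stack and data
 * regions live, and enable the system interrupt.
 */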
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t offset, size;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
	} else {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
			(adev->vce.gpu_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
			(adev->vce.gpu_addr >> 40) & 0xff);
	}

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V4_0_FW_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
	size = VCE_V4_0_STACK_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
	offset += size;
	size = VCE_V4_0_DATA_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
			VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

#if 0	/* unused: kept for reference, see the NULL entries in vce_v4_0_ip_funcs */
static bool vce_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v4_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v4_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for the 1st instance, 0x10 for the 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3..6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v4_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v4_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_suspend(adev);
}

static int vce_v4_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_resume(adev);
}

static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}

static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v4_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (gated) {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data |= 0x3f;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
				0x8;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x10000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data &= ~0x3f;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
				VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
				0x8);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	}

	vce_v4_0_override_vce_clock_gating(adev, false);
}

static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
		(adev->asic_type == CHIP_TONGA) ||
		(adev->asic_type == CHIP_FIJI))
		vce_v4_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
		}

		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v4_0_stop()? */
		return 0;
	else
		return vce_v4_0_start(adev);
}
#endif

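/**
 * vce_v4_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write the ring commands to execute the indirect buffer.
 */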
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

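/**
 * vce_v4_0_ring_emit_fence - emit a fence command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to signal
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */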
static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
}

static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCE_CMD_END);
}

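/**
 * vce_v4_0_emit_vm_flush - flush the VM TLB via the VCE ring
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM id to flush
 * @pd_addr: page directory address
 *
 * Update the page table base for @vm_id, then request and wait for a
 * TLB flush on the ring's VM invalidation engine.
 */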
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
			 unsigned int vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
	pd_addr |= AMDGPU_PTE_VALID;

	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
	amdgpu_ring_write(ring, upper_32_bits(pd_addr));

	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
	amdgpu_ring_write(ring, lower_32_bits(pd_addr));

	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, lower_32_bits(pd_addr));

	/* flush TLB */
	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
	amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
	amdgpu_ring_write(ring, req);

	/* wait for flush */
	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, 1 << vm_id);
}

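/**
 * vce_v4_0_set_interrupt_state - toggle the VCE system interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type (unused, there is a single type)
 * @state: enable or disable
 *
 * Enable or disable the VCE trap interrupt in VCE_SYS_INT_EN.
 */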
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
			~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

	return 0;
}

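/**
 * vce_v4_0_process_interrupt - handle a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Acknowledge the trap interrupt and process the fence for the ring
 * identified by the source data.
 */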
static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
			VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
			~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

const struct amd_ip_funcs vce_v4_0_ip_funcs = {
	.name = "vce_v4_0",
	.early_init = vce_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v4_0_sw_init,
	.sw_fini = vce_v4_0_sw_fini,
	.hw_init = vce_v4_0_hw_init,
	.hw_fini = vce_v4_0_hw_fini,
	.suspend = vce_v4_0_suspend,
	.resume = vce_v4_0_resume,
	.is_idle = NULL /* vce_v4_0_is_idle */,
	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
	.soft_reset = NULL /* vce_v4_0_soft_reset */,
	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
	.set_clockgating_state = vce_v4_0_set_clockgating_state,
	.set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0x3f,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vce_v4_0_ring_get_rptr,
	.get_wptr = vce_v4_0_ring_get_wptr,
	.set_wptr = vce_v4_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		17 + /* vce_v4_0_emit_vm_flush */
		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
		1, /* vce_v4_0_ring_insert_end */
	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
	.emit_ib = vce_v4_0_ring_emit_ib,
	.emit_vm_flush = vce_v4_0_emit_vm_flush,
	.emit_fence = vce_v4_0_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vce_v4_0_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
	DRM_INFO("VCE enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
	.set = vce_v4_0_set_interrupt_state,
	.process = vce_v4_0_process_interrupt,
};

static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v4_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v4_0_ip_funcs,
};