/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

#define GPU_PAS_ID 13

#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
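
/*
 * The zap shader is a signed firmware image that lets the GPU be switched
 * out of secure mode. Loading it is a three step dance: parse the split
 * MDT image with the qcom_mdt_loader helper, stage it in the reserved
 * memory region owned by the zap device, and ask the secure world over SCM
 * to authenticate and boot it. GPU_PAS_ID identifies the GPU to the secure
 * world's Peripheral Authentication Service.
 */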
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
        const struct firmware *fw;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region = NULL;
        int ret;

        /* Request the MDT file for the firmware */
        ret = request_firmware(&fw, fwname, dev);
        if (ret) {
                DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
                return ret;
        }

        /* Figure out how much memory we need */
        mem_size = qcom_mdt_get_size(fw);
        if (mem_size < 0) {
                ret = mem_size;
                goto out;
        }

        /* Allocate memory for the firmware image */
        mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
        if (!mem_region) {
                ret = -ENOMEM;
                goto out;
        }

        /* Load the rest of the MDT */
        ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
                mem_size);
        if (ret)
                goto out;

        /* Send the image to the secure world */
        ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
        if (ret)
                DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
        release_firmware(fw);

        return ret;
}
#else
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
        return -ENODEV;
}
#endif

static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
        unsigned int i, ibs = 0;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
                        /* fall through */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->fence->seqno);

        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
        OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
        OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
        OUT_RING(ring, submit->fence->seqno);

        gpu->funcs->flush(gpu);
}
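
/*
 * Hardware clock gating (HWCG) setup for the A530: each entry is a
 * register/value pair tuning the gating, hysteresis and delay behavior of
 * one hardware block. The whole table is written out verbatim by
 * _a5xx_enable_hwcg() below.
 */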
static const struct a5xx_hwcg a530_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
        {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
        {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct {
        int (*test)(struct adreno_gpu *gpu);
        const struct a5xx_hwcg *regs;
        unsigned int count;
} a5xx_hwcg_regs[] = {
        { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
};

static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
                const struct a5xx_hwcg *regs, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                gpu_write(gpu, regs[i].offset, regs[i].value);

        gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
        gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
}

static void a5xx_enable_hwcg(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
                if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
                        _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
                                a5xx_hwcg_regs[i].count);
                        return;
                }
        }
}
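
/*
 * CP_ME_INIT configures the microengine with eight dwords of setup flags.
 * It is submitted here, before any other work, right after the microcode
 * has been loaded and the engine has been started.
 */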
static int a5xx_me_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_ringbuffer *ring = gpu->rb;

        OUT_PKT7(ring, CP_ME_INIT, 8);

        OUT_RING(ring, 0x0000002F);

        /* Enable multiple hardware contexts */
        OUT_RING(ring, 0x00000003);

        /* Enable error detection */
        OUT_RING(ring, 0x20000000);

        /* Don't enable header dump */
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        /* Specify workarounds for various microcode issues */
        if (adreno_is_a530(adreno_gpu)) {
                /* Workaround for token end syncs
                 * Force a WFI after every direct-render 3D mode draw and every
                 * 2D mode 3 draw
                 */
                OUT_RING(ring, 0x0000000B);
        } else {
                /* No workarounds enabled */
                OUT_RING(ring, 0x00000000);
        }

        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        gpu->funcs->flush(gpu);

        return a5xx_idle(gpu) ? 0 : -EINVAL;
}
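
/*
 * Copy a microcode image (PM4 or PFP) into a GEM object that the CP can
 * fetch from. The first dword of the firmware file holds what appears to
 * be a version word rather than instructions, which is why both the
 * allocation and the memcpy below skip the first four bytes.
 */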
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
        const struct firmware *fw, u64 *iova)
{
        struct drm_device *drm = gpu->dev;
        struct drm_gem_object *bo;
        void *ptr;

        bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
        if (IS_ERR(bo))
                return bo;

        ptr = msm_gem_get_vaddr(bo);
        if (!ptr) {
                drm_gem_object_unreference(bo);
                return ERR_PTR(-ENOMEM);
        }

        if (iova) {
                int ret = msm_gem_get_iova(bo, gpu->aspace, iova);

                if (ret) {
                        drm_gem_object_unreference(bo);
                        return ERR_PTR(ret);
                }
        }

        memcpy(ptr, &fw->data[4], fw->size - 4);

        msm_gem_put_vaddr(bo);

        return bo;
}

static int a5xx_ucode_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;

        if (!a5xx_gpu->pm4_bo) {
                a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
                        &a5xx_gpu->pm4_iova);

                if (IS_ERR(a5xx_gpu->pm4_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pm4_bo);
                        a5xx_gpu->pm4_bo = NULL;
                        dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
                                ret);
                        return ret;
                }
        }

        if (!a5xx_gpu->pfp_bo) {
                a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
                        &a5xx_gpu->pfp_iova);

                if (IS_ERR(a5xx_gpu->pfp_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pfp_bo);
                        a5xx_gpu->pfp_bo = NULL;
                        dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
                                ret);
                        return ret;
                }
        }

        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
                REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

        gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
                REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

        return 0;
}
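
/*
 * Once the zap shader has been loaded into the secure world one time,
 * later power cycles only need to ask the remote processor to re-run it;
 * SCM_GPU_ZAP_SHADER_RESUME is the remote state token for that request.
 */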
#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
        int ret;

        ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
        if (ret)
                DRM_ERROR("%s: zap-shader resume failed: %d\n",
                        gpu->name, ret);

        return ret;
}

/* Set up a child device to "own" the zap shader */
static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
{
        struct device_node *node;
        int ret;

        if (dev->parent)
                return 0;

        /* Find the sub-node for the zap shader */
        node = of_get_child_by_name(parent->of_node, "zap-shader");
        if (!node) {
                DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
                return -ENODEV;
        }

        dev->parent = parent;
        dev->of_node = node;
        dev_set_name(dev, "adreno_zap_shader");

        ret = device_register(dev);
        if (ret) {
                DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
                return ret;
        }

        ret = of_reserved_mem_device_init(dev);
        if (ret) {
                DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
                device_unregister(dev);
        }

        return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
        static bool loaded;
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct platform_device *pdev = a5xx_gpu->pdev;
        int ret;

        /*
         * If the zap shader is already loaded into memory we just need to kick
         * the remote processor to reinitialize it
         */
        if (loaded)
                return a5xx_zap_shader_resume(gpu);

        /* We need SCM to be able to load the firmware */
        if (!qcom_scm_is_available()) {
                DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
                return -EPROBE_DEFER;
        }

        /* Each GPU has a target specific zap shader firmware name to use */
        if (!adreno_gpu->info->zapfw) {
                DRM_DEV_ERROR(&pdev->dev,
                        "Zap shader firmware file not specified for this target\n");
                return -ENODEV;
        }

        ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);

        if (!ret)
                ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
                        adreno_gpu->info->zapfw);

        loaded = !ret;

        return ret;
}
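
/*
 * A5XX_INT_MASK below unmasks exactly the interrupts that a5xx_irq() knows
 * how to handle: the RBBM error sources, CP hardware errors, retire
 * timestamps (CACHE_FLUSH_TS), UCHE out-of-bounds accesses and GPMU
 * voltage droop events.
 */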
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
        A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
        A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
        A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
        A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
        A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

static int a5xx_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int ret;

        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

        /* Make all blocks contribute to the GPU BUSY perf counter */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

        /* Enable RBBM error reporting bits */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
                /*
                 * Mask out the activity signals from RB1-3 to avoid false
                 * positives
                 */
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
                        0xF0000000);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
                        0xFFFFFFFF);
        }

        /* Enable fault detection */
        gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
                (1 << 30) | 0xFFFF);

        /* Turn on performance counters */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

        /* Increase VFD cache access so LRZ and other data gets evicted less */
        gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

        /* Disable L2 bypass in the UCHE */
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

        /* Set the GMEM VA range (0 to gpu->gmem) */
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
                0x00100000 + adreno_gpu->gmem - 1);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

        gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
        gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
        gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

        gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
                gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

        gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);

        /* Enable USE_RETENTION_FLOPS */
        gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

        /* Enable ME/PFP split notification */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

        /* Enable HWCG */
        a5xx_enable_hwcg(gpu);

        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

        /* Set the highest bank bit */
        gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
        gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

        /* Protect registers from the CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

        /* RBBM */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

        /* Content protect */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                        16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

        /* CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

        /* RB */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

        /* VPC */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

        /* UCHE */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

        if (adreno_is_a530(adreno_gpu))
                gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
                        ADRENO_PROTECT_RW(0x10000, 0x8000));

        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
        /*
         * Disable the trusted memory range - we don't actually support secure
         * memory rendering at this point in time and we don't want to block
         * off part of the virtual memory space.
         */
        gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

        /* Load the GPMU firmware before starting the HW init */
        a5xx_gpmu_ucode_init(gpu);

        ret = adreno_hw_init(gpu);
        if (ret)
                return ret;

        ret = a5xx_ucode_init(gpu);
        if (ret)
                return ret;

        /* Enable the interrupts we handle; everything else stays masked */
        gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

        /* Clear ME_HALT to start the micro engine */
        gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
        ret = a5xx_me_init(gpu);
        if (ret)
                return ret;

        ret = a5xx_power_init(gpu);
        if (ret)
                return ret;

        /*
         * Send a pipeline event stat to get misbehaving counters to start
         * ticking correctly
         */
        if (adreno_is_a530(adreno_gpu)) {
                OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb, 0x0F);

                gpu->funcs->flush(gpu);
                if (!a5xx_idle(gpu))
                        return -EINVAL;
        }

        /*
         * Try to load a zap shader into the secure world. If successful
         * we can use the CP to switch out of secure mode. If not then we
         * have no recourse but to try to switch ourselves out manually. If we
         * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
         * be blocked and a permissions violation will soon follow.
         */
        ret = a5xx_zap_shader_init(gpu);
        if (!ret) {
                OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb, 0x00000000);

                gpu->funcs->flush(gpu);
                if (!a5xx_idle(gpu))
                        return -EINVAL;
        } else {
                /* Print a warning so if we die, we know why */
                dev_warn_once(gpu->dev->dev,
                        "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
                gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
        }

        return 0;
}
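
/*
 * Recovery: dump the CP scratch registers for post-mortem debugging, then
 * pulse RBBM_SW_RESET_CMD to soft reset the GPU before handing control to
 * the common adreno recovery code.
 */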
static void a5xx_recover(struct msm_gpu *gpu)
{
        int i;

        adreno_dump_info(gpu);

        for (i = 0; i < 8; i++) {
                printk("CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
        }

        if (hang_debug)
                a5xx_dump(gpu);

        /* The read back ensures the reset lands before it is deasserted */
        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
        gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
        adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        DBG("%s", gpu->name);

        if (a5xx_gpu->zap_dev.parent)
                device_unregister(&a5xx_gpu->zap_dev);

        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
                        msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
        }

        if (a5xx_gpu->pfp_bo) {
                if (a5xx_gpu->pfp_iova)
                        msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
        }

        if (a5xx_gpu->gpmu_bo) {
                if (a5xx_gpu->gpmu_iova)
                        msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }

        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
}

static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
        if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
                return false;

        /*
         * Nearly every abnormality ends up pausing the GPU and triggering a
         * fault so we can safely just watch for this one interrupt to fire
         */
        return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
                A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

bool a5xx_idle(struct msm_gpu *gpu)
{
        /* wait for CP to drain ringbuffer: */
        if (!adreno_idle(gpu))
                return false;

        if (spin_until(_a5xx_check_idle(gpu))) {
                DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
                        gpu->name, __builtin_return_address(0),
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                        gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
                return false;
        }

        return true;
}

static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
        struct msm_gpu *gpu = arg;

        pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
                        iova, flags,
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

        return -EFAULT;
}

static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

        if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
                u32 val;

                gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

                /*
                 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
                 * read it twice
                 */
                gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
                val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

                dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
                        val);
        }

        if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
                dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
                        gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

        if (status & A5XX_CP_INT_CP_DMA_ERROR)
                dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");

        if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
                u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

                dev_err_ratelimited(gpu->dev->dev,
                        "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
                        val & (1 << 24) ? "WRITE" : "READ",
                        (val & 0xFFFFF) >> 2, val);
        }

        if (status & A5XX_CP_INT_CP_AHB_ERROR) {
                u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
                const char *access[16] = { "reserved", "reserved",
                        "timestamp lo", "timestamp hi", "pfp read", "pfp write",
                        "", "", "me read", "me write", "", "", "crashdump read",
                        "crashdump write" };

                dev_err_ratelimited(gpu->dev->dev,
                        "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
                        status & 0xFFFFF, access[(status >> 24) & 0xF],
                        (status & (1 << 31)), status);
        }
}

static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
        if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
                u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

                dev_err_ratelimited(gpu->dev->dev,
                        "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
                        val & (1 << 28) ? "WRITE" : "READ",
                        (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
                        (val >> 24) & 0xF);

                /* Clear the error */
                gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

                /* Clear the interrupt */
                gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                        A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
        }

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}

static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
        /* The HI register holds bits 63:32 of the faulting address */
        uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;

        addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

        dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
                addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
        dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}

#define RBBM_ERROR_MASK \
        (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
        A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
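
/*
 * Top level interrupt handler. Note the ordering constraint: every
 * interrupt except RBBM_AHB_ERROR is acked up front; the AHB error is only
 * cleared in a5xx_rbbm_err_irq() after the error source itself has been
 * cleared, otherwise the interrupt storms.
 */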
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

        /*
         * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
         * before the source is cleared the interrupt will storm.
         */
        gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

        /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
        if (status & RBBM_ERROR_MASK)
                a5xx_rbbm_err_irq(gpu, status);

        if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
                a5xx_cp_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
                a5xx_uche_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
                a5xx_gpmu_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
                msm_gpu_retire(gpu);

        return IRQ_HANDLED;
}

static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
                REG_A5XX_CP_RB_RPTR_ADDR_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};
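
/*
 * Inclusive {start, end} register ranges, terminated by ~0, that the
 * common adreno code walks when dumping GPU state.
 */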
static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
        0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
        0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
        0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
        0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
        0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
        0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
        0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
        0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
        0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
        0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
        0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
        0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
        0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
        0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
        0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
        0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
        0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
        0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
        0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
        0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
        0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
        0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
        0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
        0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
        0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
        ~0
};

static void a5xx_dump(struct msm_gpu *gpu)
{
        dev_info(gpu->dev->dev, "status:   %08x\n",
                gpu_read(gpu, REG_A5XX_RBBM_STATUS));
        adreno_dump(gpu);
}

static int a5xx_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        /* Turn on the core power */
        ret = msm_gpu_pm_resume(gpu);
        if (ret)
                return ret;

        /* Turn on the RBCCU domain first to limit the chances of voltage droop */
        gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

        /* Wait 3 usecs before polling */
        udelay(3);

        ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
                (1 << 20), (1 << 20));
        if (ret) {
                DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
                        gpu->name,
                        gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
                return ret;
        }

        /* Turn on the SP domain */
        gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
        ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
                (1 << 20), (1 << 20));
        if (ret)
                DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
                        gpu->name);

        return ret;
}

static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
        /* Clear the VBIF pipe before shutting down */
        gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
        spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

        gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

        /*
         * Reset the VBIF before power collapse to avoid issues with stale
         * FIFO entries
         */
        gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
        gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

        return msm_gpu_pm_suspend(gpu);
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
        *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
                REG_A5XX_RBBM_PERFCTR_CP_0_HI);

        return 0;
}

#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
        seq_printf(m, "status:   %08x\n",
                gpu_read(gpu, REG_A5XX_RBBM_STATUS));
        adreno_show(gpu, m);
}
#endif

static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .hw_init = a5xx_hw_init,
                .pm_suspend = a5xx_pm_suspend,
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
                .last_fence = adreno_last_fence,
                .submit = a5xx_submit,
                .flush = adreno_flush,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
                .show = a5xx_show,
#endif
        },
        .get_timestamp = a5xx_get_timestamp,
};

struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
        struct a5xx_gpu *a5xx_gpu = NULL;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
        int ret;

        if (!pdev) {
                dev_err(dev->dev, "No A5XX device is defined\n");
                return ERR_PTR(-ENXIO);
        }

        a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
        if (!a5xx_gpu)
                return ERR_PTR(-ENOMEM);

        adreno_gpu = &a5xx_gpu->base;
        gpu = &adreno_gpu->base;

        a5xx_gpu->pdev = pdev;
        adreno_gpu->registers = a5xx_registers;
        adreno_gpu->reg_offsets = a5xx_register_offsets;

        a5xx_gpu->lm_leakage = 0x4E001A;

        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
        }

        if (gpu->aspace)
                msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
                        a5xx_fault_handler);

        return gpu;
}