/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"
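/* Firmware Names */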
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
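/* MMIO offsets of the two display CRTC register blocks, relative to CRTC0 */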
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}
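/* nothing to program here for these parts; returning 0 leaves the UVD
 * clocks alone and keeps the common clock interface happy */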
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	return 0;
}
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
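/*
 * Pick the power state / clock mode the dynamic PM code should switch to
 * next, based on the planned action (minimum, downclock, upclock, default)
 * and the number of active crtcs.  The power state array is ordered low to
 * high; states flagged single-display-only are skipped when more than one
 * crtc is active.
 */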
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
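/*
 * Build the PM profile table for RS780-class IGPs.  Which power state
 * index backs each profile (default/low/mid/high, single- and multi-head)
 * depends on how many power states the vbios exposes.
 */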
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
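/*
 * Same idea for the rest of the r6xx/r7xx family: map each PM profile to
 * power state and clock mode indices, preferring battery-type states on
 * mobility parts and performance-type states otherwise.
 */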
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
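/* report whether the graphics engine is idle */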
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
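/*
 * Program the HPD interrupt polarity from the current sense state, so the
 * next interrupt fires on the opposite transition (plug vs. unplug).
 */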
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
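/* enable the hpd pins used by the attached connectors and set their polarity */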
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking
			 * the aux dp channel on imac; this helps (but does not
			 * completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
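/* disable the hpd pins used by the attached connectors */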
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
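/* AGP variant of the setup above: program the VM caches/TLB but leave all
 * page-table contexts disabled, since the AGP aperture does its own
 * remapping.
 */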
static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
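/* RS780/RS880 MC registers are accessed indirectly via the MC_INDEX/MC_DATA pair */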
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
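/*
 * Program the MC apertures (system, VRAM, AGP) to match the layout chosen
 * in r600_vram_gtt_location(), with the MC stopped and the VGA renderer
 * locked out.
 */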
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same offset in the GPU address space as in the
 * CPU (PCI) address space, as some GPUs seem to have issues when VRAM is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then limit the VRAM size to the aperture.
 *
 * If we are using AGP, place VRAM adjacent to the AGP aperture; the GPU
 * needs them to form one contiguous range so that it can catch accesses
 * outside of them.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						(unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
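/* tell the vbios, via a BIOS scratch register, whether the GPU engine is hung */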
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}
static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
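/**
 * r600_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by r600_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */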
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
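/*
 * Distribute the enabled render backends over the rendering pipes, skipping
 * the RBs in disabled_rb_mask, and return the packed backend map expected by
 * GB_TILING_CONFIG.
 */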
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
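/* per-asic gfx setup: tiling config, backend remap, and default CP/SQ state */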
1692 static void r600_gpu_init(struct radeon_device *rdev)
1696 u32 cc_rb_backend_disable;
1697 u32 cc_gc_shader_pipe_config;
1701 u32 sq_gpr_resource_mgmt_1 = 0;
1702 u32 sq_gpr_resource_mgmt_2 = 0;
1703 u32 sq_thread_resource_mgmt = 0;
1704 u32 sq_stack_resource_mgmt_1 = 0;
1705 u32 sq_stack_resource_mgmt_2 = 0;
1706 u32 disabled_rb_mask;
1708 rdev->config.r600.tiling_group_size = 256;
1709 switch (rdev->family) {
1711 rdev->config.r600.max_pipes = 4;
1712 rdev->config.r600.max_tile_pipes = 8;
1713 rdev->config.r600.max_simds = 4;
1714 rdev->config.r600.max_backends = 4;
1715 rdev->config.r600.max_gprs = 256;
1716 rdev->config.r600.max_threads = 192;
1717 rdev->config.r600.max_stack_entries = 256;
1718 rdev->config.r600.max_hw_contexts = 8;
1719 rdev->config.r600.max_gs_threads = 16;
1720 rdev->config.r600.sx_max_export_size = 128;
1721 rdev->config.r600.sx_max_export_pos_size = 16;
1722 rdev->config.r600.sx_max_export_smx_size = 128;
1723 rdev->config.r600.sq_num_cf_insts = 2;
1727 rdev->config.r600.max_pipes = 2;
1728 rdev->config.r600.max_tile_pipes = 2;
1729 rdev->config.r600.max_simds = 3;
1730 rdev->config.r600.max_backends = 1;
1731 rdev->config.r600.max_gprs = 128;
1732 rdev->config.r600.max_threads = 192;
1733 rdev->config.r600.max_stack_entries = 128;
1734 rdev->config.r600.max_hw_contexts = 8;
1735 rdev->config.r600.max_gs_threads = 4;
1736 rdev->config.r600.sx_max_export_size = 128;
1737 rdev->config.r600.sx_max_export_pos_size = 16;
1738 rdev->config.r600.sx_max_export_smx_size = 128;
1739 rdev->config.r600.sq_num_cf_insts = 2;
1745 rdev->config.r600.max_pipes = 1;
1746 rdev->config.r600.max_tile_pipes = 1;
1747 rdev->config.r600.max_simds = 2;
1748 rdev->config.r600.max_backends = 1;
1749 rdev->config.r600.max_gprs = 128;
1750 rdev->config.r600.max_threads = 192;
1751 rdev->config.r600.max_stack_entries = 128;
1752 rdev->config.r600.max_hw_contexts = 4;
1753 rdev->config.r600.max_gs_threads = 4;
1754 rdev->config.r600.sx_max_export_size = 128;
1755 rdev->config.r600.sx_max_export_pos_size = 16;
1756 rdev->config.r600.sx_max_export_smx_size = 128;
1757 rdev->config.r600.sq_num_cf_insts = 1;
1760 rdev->config.r600.max_pipes = 4;
1761 rdev->config.r600.max_tile_pipes = 4;
1762 rdev->config.r600.max_simds = 4;
1763 rdev->config.r600.max_backends = 4;
1764 rdev->config.r600.max_gprs = 192;
1765 rdev->config.r600.max_threads = 192;
1766 rdev->config.r600.max_stack_entries = 256;
1767 rdev->config.r600.max_hw_contexts = 8;
1768 rdev->config.r600.max_gs_threads = 16;
1769 rdev->config.r600.sx_max_export_size = 128;
1770 rdev->config.r600.sx_max_export_pos_size = 16;
1771 rdev->config.r600.sx_max_export_smx_size = 128;
1772 rdev->config.r600.sq_num_cf_insts = 2;
1778 /* Initialize HDP */
1779 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1780 WREG32((0x2c14 + j), 0x00000000);
1781 WREG32((0x2c18 + j), 0x00000000);
1782 WREG32((0x2c1c + j), 0x00000000);
1783 WREG32((0x2c20 + j), 0x00000000);
1784 WREG32((0x2c24 + j), 0x00000000);
1787 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1791 ramcfg = RREG32(RAMCFG);
1792 switch (rdev->config.r600.max_tile_pipes) {
1794 tiling_config |= PIPE_TILING(0);
1797 tiling_config |= PIPE_TILING(1);
1800 tiling_config |= PIPE_TILING(2);
1803 tiling_config |= PIPE_TILING(3);
1808 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1809 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1810 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1811 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1813 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1815 tiling_config |= ROW_TILING(3);
1816 tiling_config |= SAMPLE_SPLIT(3);
1818 tiling_config |= ROW_TILING(tmp);
1819 tiling_config |= SAMPLE_SPLIT(tmp);
1821 tiling_config |= BANK_SWAPS(1);
1823 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1824 tmp = R6XX_MAX_BACKENDS -
1825 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1826 if (tmp < rdev->config.r600.max_backends) {
1827 rdev->config.r600.max_backends = tmp;
1830 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1831 tmp = R6XX_MAX_PIPES -
1832 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1833 if (tmp < rdev->config.r600.max_pipes) {
1834 rdev->config.r600.max_pipes = tmp;
1836 tmp = R6XX_MAX_SIMDS -
1837 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1838 if (tmp < rdev->config.r600.max_simds) {
1839 rdev->config.r600.max_simds = tmp;
1842 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1843 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1844 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1845 R6XX_MAX_BACKENDS, disabled_rb_mask);
1846 tiling_config |= tmp << 16;
1847 rdev->config.r600.backend_map = tmp;
1849 rdev->config.r600.tile_config = tiling_config;
1850 WREG32(GB_TILING_CONFIG, tiling_config);
1851 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1852 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1853 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1855 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1856 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1857 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
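/*
 * Illustrative arithmetic for the two VGT writes above (the inactive
 * pipe count is an assumed example value): if 4 of the R6XX_MAX_PIPES (8)
 * quad pipes are flagged inactive in cc_gc_shader_pipe_config,
 *
 *   tmp = 8 - 4 = 4
 *   VGT_OUT_DEALLOC_CNTL        <- tmp * 4       = 16
 *   VGT_VERTEX_REUSE_BLOCK_CNTL <- (tmp * 4) - 2 = 14
 *
 * so the vertex reuse depth always sits two entries below the
 * deallocation distance.
 */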
1859 /* Setup some CP states */
1860 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1861 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1863 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1864 SYNC_WALKER | SYNC_ALIGNER));
1865 /* Setup various GPU states */
1866 if (rdev->family == CHIP_RV670)
1867 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1869 tmp = RREG32(SX_DEBUG_1);
1870 tmp |= SMX_EVENT_RELEASE;
1871 if (rdev->family > CHIP_R600)
1872 tmp |= ENABLE_NEW_SMX_ADDRESS;
1873 WREG32(SX_DEBUG_1, tmp);
1875 if (((rdev->family) == CHIP_R600) ||
1876 ((rdev->family) == CHIP_RV630) ||
1877 ((rdev->family) == CHIP_RV610) ||
1878 ((rdev->family) == CHIP_RV620) ||
1879 ((rdev->family) == CHIP_RS780) ||
1880 ((rdev->family) == CHIP_RS880)) {
1881 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1883 WREG32(DB_DEBUG, 0);
1885 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1886 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1888 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1889 WREG32(VGT_NUM_INSTANCES, 0);
1891 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1892 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1894 tmp = RREG32(SQ_MS_FIFO_SIZES);
1895 if (((rdev->family) == CHIP_RV610) ||
1896 ((rdev->family) == CHIP_RV620) ||
1897 ((rdev->family) == CHIP_RS780) ||
1898 ((rdev->family) == CHIP_RS880)) {
1899 tmp = (CACHE_FIFO_SIZE(0xa) |
1900 FETCH_FIFO_HIWATER(0xa) |
1901 DONE_FIFO_HIWATER(0xe0) |
1902 ALU_UPDATE_FIFO_HIWATER(0x8));
1903 } else if (((rdev->family) == CHIP_R600) ||
1904 ((rdev->family) == CHIP_RV630)) {
1905 tmp &= ~DONE_FIFO_HIWATER(0xff);
1906 tmp |= DONE_FIFO_HIWATER(0x4);
1908 WREG32(SQ_MS_FIFO_SIZES, tmp);
1910 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1911 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1913 sq_config = RREG32(SQ_CONFIG);
1914 sq_config &= ~(PS_PRIO(3) |
1918 sq_config |= (DX9_CONSTS |
1925 if ((rdev->family) == CHIP_R600) {
1926 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1928 NUM_CLAUSE_TEMP_GPRS(4));
1929 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1931 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1932 NUM_VS_THREADS(48) |
1935 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1936 NUM_VS_STACK_ENTRIES(128));
1937 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1938 NUM_ES_STACK_ENTRIES(0));
1939 } else if (((rdev->family) == CHIP_RV610) ||
1940 ((rdev->family) == CHIP_RV620) ||
1941 ((rdev->family) == CHIP_RS780) ||
1942 ((rdev->family) == CHIP_RS880)) {
1943 /* no vertex cache */
1944 sq_config &= ~VC_ENABLE;
1946 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1948 NUM_CLAUSE_TEMP_GPRS(2));
1949 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1951 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1952 NUM_VS_THREADS(78) |
1954 NUM_ES_THREADS(31));
1955 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1956 NUM_VS_STACK_ENTRIES(40));
1957 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1958 NUM_ES_STACK_ENTRIES(16));
1959 } else if (((rdev->family) == CHIP_RV630) ||
1960 ((rdev->family) == CHIP_RV635)) {
1961 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1963 NUM_CLAUSE_TEMP_GPRS(2));
1964 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1966 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1967 NUM_VS_THREADS(78) |
1969 NUM_ES_THREADS(31));
1970 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1971 NUM_VS_STACK_ENTRIES(40));
1972 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1973 NUM_ES_STACK_ENTRIES(16));
1974 } else if ((rdev->family) == CHIP_RV670) {
1975 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1977 NUM_CLAUSE_TEMP_GPRS(2));
1978 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1980 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1981 NUM_VS_THREADS(78) |
1983 NUM_ES_THREADS(31));
1984 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1985 NUM_VS_STACK_ENTRIES(64));
1986 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1987 NUM_ES_STACK_ENTRIES(64));
1990 WREG32(SQ_CONFIG, sq_config);
1991 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1992 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1993 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1994 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1995 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
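/*
 * The defaults above are only a starting point. A sketch of how a 3D
 * driver might reprogram the GPR split (the values and the NUM_VS_GPRS
 * macro are illustrative assumptions; the hardware's exact partitioning
 * rules are not derived here):
 *
 *   sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(96) |
 *                             NUM_VS_GPRS(48) |
 *                             NUM_CLAUSE_TEMP_GPRS(4));
 *   WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
 */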
1997 if (((rdev->family) == CHIP_RV610) ||
1998 ((rdev->family) == CHIP_RV620) ||
1999 ((rdev->family) == CHIP_RS780) ||
2000 ((rdev->family) == CHIP_RS880)) {
2001 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2003 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2006 /* More default values. 2D/3D driver should adjust as needed */
2007 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2008 S1_X(0x4) | S1_Y(0xc)));
2009 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2010 S1_X(0x2) | S1_Y(0x2) |
2011 S2_X(0xa) | S2_Y(0x6) |
2012 S3_X(0x6) | S3_Y(0xa)));
2013 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2014 S1_X(0x4) | S1_Y(0xc) |
2015 S2_X(0x1) | S2_Y(0x6) |
2016 S3_X(0xa) | S3_Y(0xe)));
2017 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2018 S5_X(0x0) | S5_Y(0x0) |
2019 S6_X(0xb) | S6_Y(0x4) |
2020 S7_X(0x7) | S7_Y(0x8)));
2022 WREG32(VGT_STRMOUT_EN, 0);
2023 tmp = rdev->config.r600.max_pipes * 16;
2024 switch (rdev->family) {
2040 WREG32(VGT_ES_PER_GS, 128);
2041 WREG32(VGT_GS_PER_ES, tmp);
2042 WREG32(VGT_GS_PER_VS, 2);
2043 WREG32(VGT_GS_VERTEX_REUSE, 16);
2045 /* more default values. 2D/3D driver should adjust as needed */
2046 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2047 WREG32(VGT_STRMOUT_EN, 0);
2049 WREG32(PA_SC_MODE_CNTL, 0);
2050 WREG32(PA_SC_AA_CONFIG, 0);
2051 WREG32(PA_SC_LINE_STIPPLE, 0);
2052 WREG32(SPI_INPUT_Z, 0);
2053 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2054 WREG32(CB_COLOR7_FRAG, 0);
2056 /* Clear render buffer base addresses */
2057 WREG32(CB_COLOR0_BASE, 0);
2058 WREG32(CB_COLOR1_BASE, 0);
2059 WREG32(CB_COLOR2_BASE, 0);
2060 WREG32(CB_COLOR3_BASE, 0);
2061 WREG32(CB_COLOR4_BASE, 0);
2062 WREG32(CB_COLOR5_BASE, 0);
2063 WREG32(CB_COLOR6_BASE, 0);
2064 WREG32(CB_COLOR7_BASE, 0);
2065 WREG32(CB_COLOR7_FRAG, 0);
2067 switch (rdev->family) {
2072 tmp = TC_L2_SIZE(8);
2076 tmp = TC_L2_SIZE(4);
2079 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2082 tmp = TC_L2_SIZE(0);
2085 WREG32(TC_CNTL, tmp);
2087 tmp = RREG32(HDP_HOST_PATH_CNTL);
2088 WREG32(HDP_HOST_PATH_CNTL, tmp);
2090 tmp = RREG32(ARB_POP);
2091 tmp |= ENABLE_TC128;
2092 WREG32(ARB_POP, tmp);
2094 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2095 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2097 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2098 WREG32(VC_ENHANCE, 0);
2103 * Indirect registers accessor
2105 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2107 unsigned long flags;
2110 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2111 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2112 (void)RREG32(PCIE_PORT_INDEX);
2113 r = RREG32(PCIE_PORT_DATA);
2114 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2118 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2120 unsigned long flags;
2122 spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2123 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2124 (void)RREG32(PCIE_PORT_INDEX);
2125 WREG32(PCIE_PORT_DATA, (v));
2126 (void)RREG32(PCIE_PORT_DATA);
2127 spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
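/*
 * Example (illustrative only; the register and bit names below are made
 * up) of a read-modify-write through the indirect PCIE port accessors
 * above:
 *
 *   u32 v = r600_pciep_rreg(rdev, SOME_PCIEP_REG);
 *   v |= SOME_BIT;
 *   r600_pciep_wreg(rdev, SOME_PCIEP_REG, v);
 *
 * The index/data pair is protected by pciep_idx_lock, so each accessor
 * call is atomic with respect to other indirect accesses, but a RMW
 * sequence like this as a whole is not.
 */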
2133 void r600_cp_stop(struct radeon_device *rdev)
2135 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2136 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2137 WREG32(SCRATCH_UMSK, 0);
2138 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2141 int r600_init_microcode(struct radeon_device *rdev)
2143 const char *chip_name;
2144 const char *rlc_chip_name;
2145 const char *smc_chip_name = "RV770";
2146 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2152 switch (rdev->family) {
2155 rlc_chip_name = "R600";
2158 chip_name = "RV610";
2159 rlc_chip_name = "R600";
2162 chip_name = "RV630";
2163 rlc_chip_name = "R600";
2166 chip_name = "RV620";
2167 rlc_chip_name = "R600";
2170 chip_name = "RV635";
2171 rlc_chip_name = "R600";
2174 chip_name = "RV670";
2175 rlc_chip_name = "R600";
2179 chip_name = "RS780";
2180 rlc_chip_name = "R600";
2183 chip_name = "RV770";
2184 rlc_chip_name = "R700";
2185 smc_chip_name = "RV770";
2186 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2189 chip_name = "RV730";
2190 rlc_chip_name = "R700";
2191 smc_chip_name = "RV730";
2192 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2195 chip_name = "RV710";
2196 rlc_chip_name = "R700";
2197 smc_chip_name = "RV710";
2198 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2201 chip_name = "RV730";
2202 rlc_chip_name = "R700";
2203 smc_chip_name = "RV740";
2204 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2207 chip_name = "CEDAR";
2208 rlc_chip_name = "CEDAR";
2209 smc_chip_name = "CEDAR";
2210 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2213 chip_name = "REDWOOD";
2214 rlc_chip_name = "REDWOOD";
2215 smc_chip_name = "REDWOOD";
2216 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2219 chip_name = "JUNIPER";
2220 rlc_chip_name = "JUNIPER";
2221 smc_chip_name = "JUNIPER";
2222 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2226 chip_name = "CYPRESS";
2227 rlc_chip_name = "CYPRESS";
2228 smc_chip_name = "CYPRESS";
2229 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2233 rlc_chip_name = "SUMO";
2237 rlc_chip_name = "SUMO";
2240 chip_name = "SUMO2";
2241 rlc_chip_name = "SUMO";
2246 if (rdev->family >= CHIP_CEDAR) {
2247 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2248 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2249 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2250 } else if (rdev->family >= CHIP_RV770) {
2251 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2252 me_req_size = R700_PM4_UCODE_SIZE * 4;
2253 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2255 pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2256 me_req_size = R600_PM4_UCODE_SIZE * 12;
2257 rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2260 DRM_INFO("Loading %s Microcode\n", chip_name);
2262 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2263 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2266 if (rdev->pfp_fw->size != pfp_req_size) {
2268 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2269 rdev->pfp_fw->size, fw_name);
2274 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2275 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2278 if (rdev->me_fw->size != me_req_size) {
2280 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2281 rdev->me_fw->size, fw_name);
2285 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2286 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2289 if (rdev->rlc_fw->size != rlc_req_size) {
2291 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2292 rdev->rlc_fw->size, fw_name);
2296 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2297 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2298 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2301 "smc: error loading firmware \"%s\"\n",
2303 release_firmware(rdev->smc_fw);
2304 rdev->smc_fw = NULL;
2306 } else if (rdev->smc_fw->size != smc_req_size) {
2308 "smc: Bogus length %zu in firmware \"%s\"\n",
2309 rdev->smc_fw->size, fw_name);
2318 "r600_cp: Failed to load firmware \"%s\"\n",
2320 release_firmware(rdev->pfp_fw);
2321 rdev->pfp_fw = NULL;
2322 release_firmware(rdev->me_fw);
2324 release_firmware(rdev->rlc_fw);
2325 rdev->rlc_fw = NULL;
2326 release_firmware(rdev->smc_fw);
2327 rdev->smc_fw = NULL;
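/*
 * Typical usage, mirroring r600_startup() below: load once, keep the
 * blobs cached in rdev for later CP/RLC/SMC programming.
 *
 *   if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 *       r = r600_init_microcode(rdev);
 *       if (r) {
 *           DRM_ERROR("Failed to load firmware!\n");
 *           return r;
 *       }
 *   }
 */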
2332 static int r600_cp_load_microcode(struct radeon_device *rdev)
2334 const __be32 *fw_data;
2337 if (!rdev->me_fw || !rdev->pfp_fw)
2346 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2349 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2350 RREG32(GRBM_SOFT_RESET);
2352 WREG32(GRBM_SOFT_RESET, 0);
2354 WREG32(CP_ME_RAM_WADDR, 0);
2356 fw_data = (const __be32 *)rdev->me_fw->data;
2357 WREG32(CP_ME_RAM_WADDR, 0);
2358 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2359 WREG32(CP_ME_RAM_DATA,
2360 be32_to_cpup(fw_data++));
2362 fw_data = (const __be32 *)rdev->pfp_fw->data;
2363 WREG32(CP_PFP_UCODE_ADDR, 0);
2364 for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2365 WREG32(CP_PFP_UCODE_DATA,
2366 be32_to_cpup(fw_data++));
2368 WREG32(CP_PFP_UCODE_ADDR, 0);
2369 WREG32(CP_ME_RAM_WADDR, 0);
2370 WREG32(CP_ME_RAM_RADDR, 0);
2374 int r600_cp_start(struct radeon_device *rdev)
2376 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2380 r = radeon_ring_lock(rdev, ring, 7);
2382 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2385 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2386 radeon_ring_write(ring, 0x1);
2387 if (rdev->family >= CHIP_RV770) {
2388 radeon_ring_write(ring, 0x0);
2389 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2391 radeon_ring_write(ring, 0x3);
2392 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2394 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2395 radeon_ring_write(ring, 0);
2396 radeon_ring_write(ring, 0);
2397 radeon_ring_unlock_commit(rdev, ring);
2400 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
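/*
 * Read back from the writes above, the seven committed dwords form one
 * PACKET3_ME_INITIALIZE sequence:
 *
 *   header, 0x1,
 *   0x0 (rv770 and newer) or 0x3 (r600 class),
 *   max_hw_contexts - 1,
 *   PACKET3_ME_INITIALIZE_DEVICE_ID(1), 0, 0
 *
 * Only the third dword and the hw context count vary by family.
 */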
2404 int r600_cp_resume(struct radeon_device *rdev)
2406 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2412 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2413 RREG32(GRBM_SOFT_RESET);
2415 WREG32(GRBM_SOFT_RESET, 0);
2417 /* Set ring buffer size */
2418 rb_bufsz = order_base_2(ring->ring_size / 8);
2419 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2421 tmp |= BUF_SWAP_32BIT;
2423 WREG32(CP_RB_CNTL, tmp);
2424 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2426 /* Set the write pointer delay */
2427 WREG32(CP_RB_WPTR_DELAY, 0);
2429 /* Initialize the ring buffer's read and write pointers */
2430 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2431 WREG32(CP_RB_RPTR_WR, 0);
2433 WREG32(CP_RB_WPTR, ring->wptr);
2435 /* set the wb address whether it's enabled or not */
2436 WREG32(CP_RB_RPTR_ADDR,
2437 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2438 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2439 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2441 if (rdev->wb.enabled)
2442 WREG32(SCRATCH_UMSK, 0xff);
2444 tmp |= RB_NO_UPDATE;
2445 WREG32(SCRATCH_UMSK, 0);
2449 WREG32(CP_RB_CNTL, tmp);
2451 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2452 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2454 ring->rptr = RREG32(CP_RB_RPTR);
2456 r600_cp_start(rdev);
2458 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2460 ring->ready = false;
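/*
 * Worked example for the CP_RB_CNTL value built in r600_cp_resume(),
 * assuming the default 1 MB GFX ring and a RADEON_GPU_PAGE_SIZE of 4096:
 *
 *   rb_bufsz = order_base_2(1048576 / 8)         = 17
 *   tmp      = (order_base_2(4096 / 8) << 8) | 17
 *            = (9 << 8) | 17                     = 0x911
 *
 * with RB_NO_UPDATE OR'ed in when write-back is disabled.
 */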
2466 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2471 /* Align ring size */
2472 rb_bufsz = order_base_2(ring_size / 8);
2473 ring_size = (1 << (rb_bufsz + 1)) * 4;
2474 ring->ring_size = ring_size;
2475 ring->align_mask = 16 - 1;
2477 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2478 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2480 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2481 ring->rptr_save_reg = 0;
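/*
 * Example of the size alignment performed above: for the 1 MB GFX ring
 * requested from r600_init(),
 *
 *   rb_bufsz  = order_base_2(1048576 / 8) = 17
 *   ring_size = (1 << 18) * 4             = 1048576
 *
 * so a power-of-two request comes back unchanged, while any other size
 * is rounded up to the next power of two.
 */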
2486 void r600_cp_fini(struct radeon_device *rdev)
2488 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2490 radeon_ring_fini(rdev, ring);
2491 radeon_scratch_free(rdev, ring->rptr_save_reg);
2495 * GPU scratch registers helpers function.
2497 void r600_scratch_init(struct radeon_device *rdev)
2501 rdev->scratch.num_reg = 7;
2502 rdev->scratch.reg_base = SCRATCH_REG0;
2503 for (i = 0; i < rdev->scratch.num_reg; i++) {
2504 rdev->scratch.free[i] = true;
2505 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2509 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2516 r = radeon_scratch_get(rdev, &scratch);
2518 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2521 WREG32(scratch, 0xCAFEDEAD);
2522 r = radeon_ring_lock(rdev, ring, 3);
2524 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2525 radeon_scratch_free(rdev, scratch);
2528 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2529 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2530 radeon_ring_write(ring, 0xDEADBEEF);
2531 radeon_ring_unlock_commit(rdev, ring);
2532 for (i = 0; i < rdev->usec_timeout; i++) {
2533 tmp = RREG32(scratch);
2534 if (tmp == 0xDEADBEEF)
2538 if (i < rdev->usec_timeout) {
2539 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2541 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2542 ring->idx, scratch, tmp);
2545 radeon_scratch_free(rdev, scratch);
2550 * CP fences/semaphores
2553 void r600_fence_ring_emit(struct radeon_device *rdev,
2554 struct radeon_fence *fence)
2556 struct radeon_ring *ring = &rdev->ring[fence->ring];
2558 if (rdev->wb.use_event) {
2559 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2560 /* flush read cache over gart */
2561 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2562 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2563 PACKET3_VC_ACTION_ENA |
2564 PACKET3_SH_ACTION_ENA);
2565 radeon_ring_write(ring, 0xFFFFFFFF);
2566 radeon_ring_write(ring, 0);
2567 radeon_ring_write(ring, 10); /* poll interval */
2568 /* EVENT_WRITE_EOP - flush caches, send int */
2569 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2570 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2571 radeon_ring_write(ring, addr & 0xffffffff);
2572 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2573 radeon_ring_write(ring, fence->seq);
2574 radeon_ring_write(ring, 0);
2576 /* flush read cache over gart */
2577 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2578 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2579 PACKET3_VC_ACTION_ENA |
2580 PACKET3_SH_ACTION_ENA);
2581 radeon_ring_write(ring, 0xFFFFFFFF);
2582 radeon_ring_write(ring, 0);
2583 radeon_ring_write(ring, 10); /* poll interval */
2584 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2585 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2586 /* wait for 3D idle clean */
2587 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2588 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2589 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2590 /* Emit fence sequence & fire IRQ */
2591 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2592 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2593 radeon_ring_write(ring, fence->seq);
2594 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2595 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2596 radeon_ring_write(ring, RB_INT_STAT);
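/*
 * In the EVENT_WRITE_EOP path above, DATA_SEL(1)/INT_SEL(2) select (per
 * the r6xx PM4 documentation; stated here as an assumption) "write a
 * 32-bit fence value" and "interrupt when the write is confirmed", so a
 * single packet both stores fence->seq at addr and raises the EOP
 * interrupt that r600_irq_process() handles as src_id 181.
 */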
2600 void r600_semaphore_ring_emit(struct radeon_device *rdev,
2601 struct radeon_ring *ring,
2602 struct radeon_semaphore *semaphore,
2605 uint64_t addr = semaphore->gpu_addr;
2606 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2608 if (rdev->family < CHIP_CAYMAN)
2609 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2611 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2612 radeon_ring_write(ring, addr & 0xffffffff);
2613 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
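/*
 * Sketch of how the two halves pair up (illustrative only; real callers
 * go through radeon_semaphore_sync_rings()): ring_a signals the semaphore
 * address and ring_b stalls until it is signalled:
 *
 *   r600_semaphore_ring_emit(rdev, ring_a, sem, false);   (signal)
 *   r600_semaphore_ring_emit(rdev, ring_b, sem, true);    (wait)
 */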
2617 * r600_copy_cpdma - copy pages using the CP DMA engine
2619 * @rdev: radeon_device pointer
2620 * @src_offset: src GPU address
2621 * @dst_offset: dst GPU address
2622 * @num_gpu_pages: number of GPU pages to xfer
2623 * @fence: radeon fence object
2625 * Copy GPU pages using the CP DMA engine (r6xx+).
2626 * Used by the radeon ttm implementation to move pages if
2627 * registered as the asic copy callback.
2629 int r600_copy_cpdma(struct radeon_device *rdev,
2630 uint64_t src_offset, uint64_t dst_offset,
2631 unsigned num_gpu_pages,
2632 struct radeon_fence **fence)
2634 struct radeon_semaphore *sem = NULL;
2635 int ring_index = rdev->asic->copy.blit_ring_index;
2636 struct radeon_ring *ring = &rdev->ring[ring_index];
2637 u32 size_in_bytes, cur_size_in_bytes, tmp;
2641 r = radeon_semaphore_create(rdev, &sem);
2643 DRM_ERROR("radeon: moving bo (%d).\n", r);
2647 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2648 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2649 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2651 DRM_ERROR("radeon: moving bo (%d).\n", r);
2652 radeon_semaphore_free(rdev, &sem, NULL);
2656 if (radeon_fence_need_sync(*fence, ring->idx)) {
2657 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2659 radeon_fence_note_sync(*fence, ring->idx);
2661 radeon_semaphore_free(rdev, &sem, NULL);
2664 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2665 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2666 radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2667 for (i = 0; i < num_loops; i++) {
2668 cur_size_in_bytes = size_in_bytes;
2669 if (cur_size_in_bytes > 0x1fffff)
2670 cur_size_in_bytes = 0x1fffff;
2671 size_in_bytes -= cur_size_in_bytes;
2672 tmp = upper_32_bits(src_offset) & 0xff;
2673 if (size_in_bytes == 0)
2674 tmp |= PACKET3_CP_DMA_CP_SYNC;
2675 radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2676 radeon_ring_write(ring, src_offset & 0xffffffff);
2677 radeon_ring_write(ring, tmp);
2678 radeon_ring_write(ring, dst_offset & 0xffffffff);
2679 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2680 radeon_ring_write(ring, cur_size_in_bytes);
2681 src_offset += cur_size_in_bytes;
2682 dst_offset += cur_size_in_bytes;
2684 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2685 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2686 radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2688 r = radeon_fence_emit(rdev, fence, ring->idx);
2690 radeon_ring_unlock_undo(rdev, ring);
2694 radeon_ring_unlock_commit(rdev, ring);
2695 radeon_semaphore_free(rdev, &sem, *fence);
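/*
 * Worked example for the chunking above: copying 1024 GPU pages gives
 * size_in_bytes = 1024 << 12 = 4194304 bytes, and with the 0x1fffff
 * (2097151-byte) per-packet limit,
 *
 *   num_loops = DIV_ROUND_UP(4194304, 0x1fffff) = 3
 *
 * so the ring is locked for num_loops * 6 + 24 = 42 dwords up front.
 */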
2700 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2701 uint32_t tiling_flags, uint32_t pitch,
2702 uint32_t offset, uint32_t obj_size)
2704 /* FIXME: implement */
2708 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2710 /* FIXME: implement */
2713 static int r600_startup(struct radeon_device *rdev)
2715 struct radeon_ring *ring;
2718 /* enable pcie gen2 link */
2719 r600_pcie_gen2_enable(rdev);
2721 /* scratch needs to be initialized before MC */
2722 r = r600_vram_scratch_init(rdev);
2726 r600_mc_program(rdev);
2728 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2729 r = r600_init_microcode(rdev);
2731 DRM_ERROR("Failed to load firmware!\n");
2736 if (rdev->flags & RADEON_IS_AGP) {
2737 r600_agp_enable(rdev);
2739 r = r600_pcie_gart_enable(rdev);
2743 r600_gpu_init(rdev);
2745 /* allocate wb buffer */
2746 r = radeon_wb_init(rdev);
2750 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2752 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2756 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2758 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2763 if (!rdev->irq.installed) {
2764 r = radeon_irq_kms_init(rdev);
2769 r = r600_irq_init(rdev);
2771 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2772 radeon_irq_kms_fini(rdev);
2777 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2778 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2779 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2784 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2785 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2786 DMA_RB_RPTR, DMA_RB_WPTR,
2787 DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2791 r = r600_cp_load_microcode(rdev);
2794 r = r600_cp_resume(rdev);
2798 r = r600_dma_resume(rdev);
2802 r = radeon_ib_pool_init(rdev);
2804 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2808 r = r600_audio_init(rdev);
2810 DRM_ERROR("radeon: audio init failed\n");
2817 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2821 temp = RREG32(CONFIG_CNTL);
2822 if (!state) {
2828 WREG32(CONFIG_CNTL, temp);
2831 int r600_resume(struct radeon_device *rdev)
2835 /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2836 * posting will perform the tasks necessary to bring the GPU back into a good state.
2840 atom_asic_init(rdev->mode_info.atom_context);
2842 rdev->accel_working = true;
2843 r = r600_startup(rdev);
2845 DRM_ERROR("r600 startup failed on resume\n");
2846 rdev->accel_working = false;
2853 int r600_suspend(struct radeon_device *rdev)
2855 r600_audio_fini(rdev);
2857 r600_dma_stop(rdev);
2858 r600_irq_suspend(rdev);
2859 radeon_wb_disable(rdev);
2860 r600_pcie_gart_disable(rdev);
2865 /* The plan is to move initialization into this function and use
2866 * helper functions so that radeon_device_init does pretty much
2867 * nothing more than call asic-specific functions. This should
2868 * also allow us to remove a bunch of callback functions
2871 int r600_init(struct radeon_device *rdev)
2875 if (r600_debugfs_mc_info_init(rdev)) {
2876 DRM_ERROR("Failed to register debugfs file for mc !\n");
2879 if (!radeon_get_bios(rdev)) {
2880 if (ASIC_IS_AVIVO(rdev))
2883 /* Must be an ATOMBIOS */
2884 if (!rdev->is_atom_bios) {
2885 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2888 r = radeon_atombios_init(rdev);
2891 /* Post card if necessary */
2892 if (!radeon_card_posted(rdev)) {
2894 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2897 DRM_INFO("GPU not posted. posting now...\n");
2898 atom_asic_init(rdev->mode_info.atom_context);
2900 /* Initialize scratch registers */
2901 r600_scratch_init(rdev);
2902 /* Initialize surface registers */
2903 radeon_surface_init(rdev);
2904 /* Initialize clocks */
2905 radeon_get_clock_info(rdev->ddev);
2907 r = radeon_fence_driver_init(rdev);
2910 if (rdev->flags & RADEON_IS_AGP) {
2911 r = radeon_agp_init(rdev);
2913 radeon_agp_disable(rdev);
2915 r = r600_mc_init(rdev);
2918 /* Memory manager */
2919 r = radeon_bo_init(rdev);
2923 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2924 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2926 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
2927 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
2929 rdev->ih.ring_obj = NULL;
2930 r600_ih_ring_init(rdev, 64 * 1024);
2932 r = r600_pcie_gart_init(rdev);
2936 rdev->accel_working = true;
2937 r = r600_startup(rdev);
2939 dev_err(rdev->dev, "disabling GPU acceleration\n");
2941 r600_dma_fini(rdev);
2942 r600_irq_fini(rdev);
2943 radeon_wb_fini(rdev);
2944 radeon_ib_pool_fini(rdev);
2945 radeon_irq_kms_fini(rdev);
2946 r600_pcie_gart_fini(rdev);
2947 rdev->accel_working = false;
2953 void r600_fini(struct radeon_device *rdev)
2955 r600_audio_fini(rdev);
2957 r600_dma_fini(rdev);
2958 r600_irq_fini(rdev);
2959 radeon_wb_fini(rdev);
2960 radeon_ib_pool_fini(rdev);
2961 radeon_irq_kms_fini(rdev);
2962 r600_pcie_gart_fini(rdev);
2963 r600_vram_scratch_fini(rdev);
2964 radeon_agp_fini(rdev);
2965 radeon_gem_fini(rdev);
2966 radeon_fence_driver_fini(rdev);
2967 radeon_bo_fini(rdev);
2968 radeon_atombios_fini(rdev);
2977 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2979 struct radeon_ring *ring = &rdev->ring[ib->ring];
2982 if (ring->rptr_save_reg) {
2983 next_rptr = ring->wptr + 3 + 4;
2984 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2985 radeon_ring_write(ring, ((ring->rptr_save_reg -
2986 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2987 radeon_ring_write(ring, next_rptr);
2988 } else if (rdev->wb.enabled) {
2989 next_rptr = ring->wptr + 5 + 4;
2990 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
2991 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2992 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
2993 radeon_ring_write(ring, next_rptr);
2994 radeon_ring_write(ring, 0);
2997 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2998 radeon_ring_write(ring,
3002 (ib->gpu_addr & 0xFFFFFFFC));
3003 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3004 radeon_ring_write(ring, ib->length_dw);
3007 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3009 struct radeon_ib ib;
3015 r = radeon_scratch_get(rdev, &scratch);
3017 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3020 WREG32(scratch, 0xCAFEDEAD);
3021 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3023 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3026 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3027 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3028 ib.ptr[2] = 0xDEADBEEF;
3030 r = radeon_ib_schedule(rdev, &ib, NULL);
3032 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3035 r = radeon_fence_wait(ib.fence, false);
3037 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3040 for (i = 0; i < rdev->usec_timeout; i++) {
3041 tmp = RREG32(scratch);
3042 if (tmp == 0xDEADBEEF)
3046 if (i < rdev->usec_timeout) {
3047 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3049 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3054 radeon_ib_free(rdev, &ib);
3056 radeon_scratch_free(rdev, scratch);
3063 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
3064 * much the same as the CP ring buffer, but in reverse. Rather than the CPU
3065 * writing to the ring and the GPU consuming, the GPU writes to the ring
3066 * and the host consumes. As the host irq handler processes interrupts, it
3067 * increments the rptr. When the rptr catches up with the wptr, all the
3068 * current interrupts have been processed.
3071 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3075 /* Align ring size */
3076 rb_bufsz = order_base_2(ring_size / 4);
3077 ring_size = (1 << rb_bufsz) * 4;
3078 rdev->ih.ring_size = ring_size;
3079 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
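/*
 * Example for the 64 KB IH ring set up from r600_init():
 *
 *   rb_bufsz  = order_base_2(65536 / 4) = 14
 *   ring_size = (1 << 14) * 4           = 65536
 *   ptr_mask  = 65536 - 1               = 0xffff
 *
 * rptr/wptr are byte offsets, so the mask wraps them inside the ring.
 */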
3083 int r600_ih_ring_alloc(struct radeon_device *rdev)
3087 /* Allocate ring buffer */
3088 if (rdev->ih.ring_obj == NULL) {
3089 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3091 RADEON_GEM_DOMAIN_GTT,
3092 NULL, &rdev->ih.ring_obj);
3094 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3097 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3098 if (unlikely(r != 0))
3100 r = radeon_bo_pin(rdev->ih.ring_obj,
3101 RADEON_GEM_DOMAIN_GTT,
3102 &rdev->ih.gpu_addr);
3104 radeon_bo_unreserve(rdev->ih.ring_obj);
3105 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3108 r = radeon_bo_kmap(rdev->ih.ring_obj,
3109 (void **)&rdev->ih.ring);
3110 radeon_bo_unreserve(rdev->ih.ring_obj);
3112 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3119 void r600_ih_ring_fini(struct radeon_device *rdev)
3122 if (rdev->ih.ring_obj) {
3123 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3124 if (likely(r == 0)) {
3125 radeon_bo_kunmap(rdev->ih.ring_obj);
3126 radeon_bo_unpin(rdev->ih.ring_obj);
3127 radeon_bo_unreserve(rdev->ih.ring_obj);
3129 radeon_bo_unref(&rdev->ih.ring_obj);
3130 rdev->ih.ring = NULL;
3131 rdev->ih.ring_obj = NULL;
3135 void r600_rlc_stop(struct radeon_device *rdev)
3138 if ((rdev->family >= CHIP_RV770) &&
3139 (rdev->family <= CHIP_RV740)) {
3140 /* r7xx asics need to soft reset RLC before halting */
3141 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3142 RREG32(SRBM_SOFT_RESET);
3144 WREG32(SRBM_SOFT_RESET, 0);
3145 RREG32(SRBM_SOFT_RESET);
3148 WREG32(RLC_CNTL, 0);
3151 static void r600_rlc_start(struct radeon_device *rdev)
3153 WREG32(RLC_CNTL, RLC_ENABLE);
3156 static int r600_rlc_resume(struct radeon_device *rdev)
3159 const __be32 *fw_data;
3164 r600_rlc_stop(rdev);
3166 WREG32(RLC_HB_CNTL, 0);
3168 WREG32(RLC_HB_BASE, 0);
3169 WREG32(RLC_HB_RPTR, 0);
3170 WREG32(RLC_HB_WPTR, 0);
3171 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3172 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3173 WREG32(RLC_MC_CNTL, 0);
3174 WREG32(RLC_UCODE_CNTL, 0);
3176 fw_data = (const __be32 *)rdev->rlc_fw->data;
3177 if (rdev->family >= CHIP_RV770) {
3178 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3179 WREG32(RLC_UCODE_ADDR, i);
3180 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3183 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3184 WREG32(RLC_UCODE_ADDR, i);
3185 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3188 WREG32(RLC_UCODE_ADDR, 0);
3190 r600_rlc_start(rdev);
3195 static void r600_enable_interrupts(struct radeon_device *rdev)
3197 u32 ih_cntl = RREG32(IH_CNTL);
3198 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3200 ih_cntl |= ENABLE_INTR;
3201 ih_rb_cntl |= IH_RB_ENABLE;
3202 WREG32(IH_CNTL, ih_cntl);
3203 WREG32(IH_RB_CNTL, ih_rb_cntl);
3204 rdev->ih.enabled = true;
3207 void r600_disable_interrupts(struct radeon_device *rdev)
3209 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3210 u32 ih_cntl = RREG32(IH_CNTL);
3212 ih_rb_cntl &= ~IH_RB_ENABLE;
3213 ih_cntl &= ~ENABLE_INTR;
3214 WREG32(IH_RB_CNTL, ih_rb_cntl);
3215 WREG32(IH_CNTL, ih_cntl);
3216 /* set rptr, wptr to 0 */
3217 WREG32(IH_RB_RPTR, 0);
3218 WREG32(IH_RB_WPTR, 0);
3219 rdev->ih.enabled = false;
3223 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3227 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3228 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3229 WREG32(DMA_CNTL, tmp);
3230 WREG32(GRBM_INT_CNTL, 0);
3231 WREG32(DxMODE_INT_MASK, 0);
3232 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3233 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3234 if (ASIC_IS_DCE3(rdev)) {
3235 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3236 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3237 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3238 WREG32(DC_HPD1_INT_CONTROL, tmp);
3239 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3240 WREG32(DC_HPD2_INT_CONTROL, tmp);
3241 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3242 WREG32(DC_HPD3_INT_CONTROL, tmp);
3243 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3244 WREG32(DC_HPD4_INT_CONTROL, tmp);
3245 if (ASIC_IS_DCE32(rdev)) {
3246 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3247 WREG32(DC_HPD5_INT_CONTROL, tmp);
3248 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3249 WREG32(DC_HPD6_INT_CONTROL, tmp);
3250 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3251 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3252 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3253 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3255 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3256 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3257 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3258 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3261 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3262 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3263 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3264 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3265 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3266 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3267 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3268 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3269 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3270 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3271 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3272 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3276 int r600_irq_init(struct radeon_device *rdev)
3280 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3283 ret = r600_ih_ring_alloc(rdev);
3288 r600_disable_interrupts(rdev);
3291 if (rdev->family >= CHIP_CEDAR)
3292 ret = evergreen_rlc_resume(rdev);
3294 ret = r600_rlc_resume(rdev);
3296 r600_ih_ring_fini(rdev);
3300 /* setup interrupt control */
3301 /* set dummy read address to ring address */
3302 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3303 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3304 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3305 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3307 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3308 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3309 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3310 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3312 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3313 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3315 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3316 IH_WPTR_OVERFLOW_CLEAR |
3319 if (rdev->wb.enabled)
3320 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3322 /* set the writeback address whether it's enabled or not */
3323 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3324 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3326 WREG32(IH_RB_CNTL, ih_rb_cntl);
3328 /* set rptr, wptr to 0 */
3329 WREG32(IH_RB_RPTR, 0);
3330 WREG32(IH_RB_WPTR, 0);
3332 /* Default settings for IH_CNTL (disabled at first) */
3333 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3334 /* RPTR_REARM only works if msi's are enabled */
3335 if (rdev->msi_enabled)
3336 ih_cntl |= RPTR_REARM;
3337 WREG32(IH_CNTL, ih_cntl);
3339 /* force the active interrupt state to all disabled */
3340 if (rdev->family >= CHIP_CEDAR)
3341 evergreen_disable_interrupt_state(rdev);
3343 r600_disable_interrupt_state(rdev);
3345 /* at this point everything should be setup correctly to enable master */
3346 pci_set_master(rdev->pdev);
3349 r600_enable_interrupts(rdev);
3354 void r600_irq_suspend(struct radeon_device *rdev)
3356 r600_irq_disable(rdev);
3357 r600_rlc_stop(rdev);
3360 void r600_irq_fini(struct radeon_device *rdev)
3362 r600_irq_suspend(rdev);
3363 r600_ih_ring_fini(rdev);
3366 int r600_irq_set(struct radeon_device *rdev)
3368 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3370 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3371 u32 grbm_int_cntl = 0;
3373 u32 d1grph = 0, d2grph = 0;
3375 u32 thermal_int = 0;
3377 if (!rdev->irq.installed) {
3378 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3381 /* don't enable anything if the ih is disabled */
3382 if (!rdev->ih.enabled) {
3383 r600_disable_interrupts(rdev);
3384 /* force the active interrupt state to all disabled */
3385 r600_disable_interrupt_state(rdev);
3389 if (ASIC_IS_DCE3(rdev)) {
3390 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3391 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3392 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3393 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3394 if (ASIC_IS_DCE32(rdev)) {
3395 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3396 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3397 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3398 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3400 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3401 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3404 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3405 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3406 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3407 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3408 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3411 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3413 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3414 thermal_int = RREG32(CG_THERMAL_INT) &
3415 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3416 } else if (rdev->family >= CHIP_RV770) {
3417 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3418 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3420 if (rdev->irq.dpm_thermal) {
3421 DRM_DEBUG("dpm thermal\n");
3422 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3425 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3426 DRM_DEBUG("r600_irq_set: sw int\n");
3427 cp_int_cntl |= RB_INT_ENABLE;
3428 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3431 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3432 DRM_DEBUG("r600_irq_set: sw int dma\n");
3433 dma_cntl |= TRAP_ENABLE;
3436 if (rdev->irq.crtc_vblank_int[0] ||
3437 atomic_read(&rdev->irq.pflip[0])) {
3438 DRM_DEBUG("r600_irq_set: vblank 0\n");
3439 mode_int |= D1MODE_VBLANK_INT_MASK;
3441 if (rdev->irq.crtc_vblank_int[1] ||
3442 atomic_read(&rdev->irq.pflip[1])) {
3443 DRM_DEBUG("r600_irq_set: vblank 1\n");
3444 mode_int |= D2MODE_VBLANK_INT_MASK;
3446 if (rdev->irq.hpd[0]) {
3447 DRM_DEBUG("r600_irq_set: hpd 1\n");
3448 hpd1 |= DC_HPDx_INT_EN;
3450 if (rdev->irq.hpd[1]) {
3451 DRM_DEBUG("r600_irq_set: hpd 2\n");
3452 hpd2 |= DC_HPDx_INT_EN;
3454 if (rdev->irq.hpd[2]) {
3455 DRM_DEBUG("r600_irq_set: hpd 3\n");
3456 hpd3 |= DC_HPDx_INT_EN;
3458 if (rdev->irq.hpd[3]) {
3459 DRM_DEBUG("r600_irq_set: hpd 4\n");
3460 hpd4 |= DC_HPDx_INT_EN;
3462 if (rdev->irq.hpd[4]) {
3463 DRM_DEBUG("r600_irq_set: hpd 5\n");
3464 hpd5 |= DC_HPDx_INT_EN;
3466 if (rdev->irq.hpd[5]) {
3467 DRM_DEBUG("r600_irq_set: hpd 6\n");
3468 hpd6 |= DC_HPDx_INT_EN;
3470 if (rdev->irq.afmt[0]) {
3471 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3472 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3474 if (rdev->irq.afmt[1]) {
3475 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3476 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3479 WREG32(CP_INT_CNTL, cp_int_cntl);
3480 WREG32(DMA_CNTL, dma_cntl);
3481 WREG32(DxMODE_INT_MASK, mode_int);
3482 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3483 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3484 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3485 if (ASIC_IS_DCE3(rdev)) {
3486 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3487 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3488 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3489 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3490 if (ASIC_IS_DCE32(rdev)) {
3491 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3492 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3493 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3494 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3496 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3497 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3500 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3501 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3502 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3503 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3504 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3506 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3507 WREG32(CG_THERMAL_INT, thermal_int);
3508 } else if (rdev->family >= CHIP_RV770) {
3509 WREG32(RV770_CG_THERMAL_INT, thermal_int);
3515 static void r600_irq_ack(struct radeon_device *rdev)
3519 if (ASIC_IS_DCE3(rdev)) {
3520 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3521 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3522 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3523 if (ASIC_IS_DCE32(rdev)) {
3524 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3525 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3527 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3528 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3531 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3532 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3533 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3534 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3535 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3537 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3538 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3540 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3541 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3542 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3543 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3544 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3545 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3546 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3547 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3548 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3549 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3550 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3551 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3552 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3553 if (ASIC_IS_DCE3(rdev)) {
3554 tmp = RREG32(DC_HPD1_INT_CONTROL);
3555 tmp |= DC_HPDx_INT_ACK;
3556 WREG32(DC_HPD1_INT_CONTROL, tmp);
3558 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3559 tmp |= DC_HPDx_INT_ACK;
3560 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3563 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3564 if (ASIC_IS_DCE3(rdev)) {
3565 tmp = RREG32(DC_HPD2_INT_CONTROL);
3566 tmp |= DC_HPDx_INT_ACK;
3567 WREG32(DC_HPD2_INT_CONTROL, tmp);
3569 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3570 tmp |= DC_HPDx_INT_ACK;
3571 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3574 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3575 if (ASIC_IS_DCE3(rdev)) {
3576 tmp = RREG32(DC_HPD3_INT_CONTROL);
3577 tmp |= DC_HPDx_INT_ACK;
3578 WREG32(DC_HPD3_INT_CONTROL, tmp);
3580 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3581 tmp |= DC_HPDx_INT_ACK;
3582 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3585 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3586 tmp = RREG32(DC_HPD4_INT_CONTROL);
3587 tmp |= DC_HPDx_INT_ACK;
3588 WREG32(DC_HPD4_INT_CONTROL, tmp);
3590 if (ASIC_IS_DCE32(rdev)) {
3591 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3592 tmp = RREG32(DC_HPD5_INT_CONTROL);
3593 tmp |= DC_HPDx_INT_ACK;
3594 WREG32(DC_HPD5_INT_CONTROL, tmp);
3596 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3597 tmp = RREG32(DC_HPD6_INT_CONTROL);
3598 tmp |= DC_HPDx_INT_ACK;
3599 WREG32(DC_HPD6_INT_CONTROL, tmp);
3601 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3602 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3603 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3604 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3606 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3607 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3608 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3609 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3612 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3613 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3614 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3615 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3617 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3618 if (ASIC_IS_DCE3(rdev)) {
3619 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3620 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3621 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3623 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3624 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3625 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3631 void r600_irq_disable(struct radeon_device *rdev)
3633 r600_disable_interrupts(rdev);
3634 /* Wait and acknowledge irq */
3637 r600_disable_interrupt_state(rdev);
3640 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3644 if (rdev->wb.enabled)
3645 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3647 wptr = RREG32(IH_RB_WPTR);
3649 if (wptr & RB_OVERFLOW) {
3650 /* When a ring buffer overflow happens, start parsing interrupts
3651 * from the last vector that was not overwritten (wptr + 16). Hopefully
3652 * this allows us to catch up.
3654 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3655 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3656 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3657 tmp = RREG32(IH_RB_CNTL);
3658 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3659 WREG32(IH_RB_CNTL, tmp);
3661 return (wptr & rdev->ih.ptr_mask);
3665 * Each IV ring entry is 128 bits:
3666 * [7:0] - interrupt source id
3668 * [59:32] - interrupt source data
3669 * [127:60] - reserved
3671 * The basic interrupt vector entries
3672 * are decoded as follows:
3673 * src_id src_data description
3678 * 19 0 FP Hot plug detection A
3679 * 19 1 FP Hot plug detection B
3680 * 19 2 DAC A auto-detection
3681 * 19 3 DAC B auto-detection
3687 * 181 - EOP Interrupt
3690 * Note, these are based on r600 and may need to be
3691 * adjusted or added to on newer asics
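/*
 * Decoding example matching the loop in r600_irq_process() below: each
 * 16-byte vector is read as little-endian dwords, so for a CP EOP
 * interrupt the ring would contain (src_data value illustrative):
 *
 *   ring[i]     & 0xff      = 181  (src_id, CP EOP)
 *   ring[i + 1] & 0xfffffff = 0    (src_data)
 *
 * and rptr advances by 16 bytes per vector.
 */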
3694 int r600_irq_process(struct radeon_device *rdev)
3698 u32 src_id, src_data;
3700 bool queue_hotplug = false;
3701 bool queue_hdmi = false;
3702 bool queue_thermal = false;
3704 if (!rdev->ih.enabled || rdev->shutdown)
3707 /* No MSIs, need a dummy read to flush PCI DMAs */
3708 if (!rdev->msi_enabled)
3711 wptr = r600_get_ih_wptr(rdev);
3714 /* is somebody else already processing irqs? */
3715 if (atomic_xchg(&rdev->ih.lock, 1))
3718 rptr = rdev->ih.rptr;
3719 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3721 /* Order reading of wptr vs. reading of IH ring data */
3724 /* display interrupts */
3727 while (rptr != wptr) {
3728 /* wptr/rptr are in bytes! */
3729 ring_index = rptr / 4;
3730 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3731 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3734 case 1: /* D1 vblank/vline */
3736 case 0: /* D1 vblank */
3737 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3738 if (rdev->irq.crtc_vblank_int[0]) {
3739 drm_handle_vblank(rdev->ddev, 0);
3740 rdev->pm.vblank_sync = true;
3741 wake_up(&rdev->irq.vblank_queue);
3743 if (atomic_read(&rdev->irq.pflip[0]))
3744 radeon_crtc_handle_flip(rdev, 0);
3745 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3746 DRM_DEBUG("IH: D1 vblank\n");
3749 case 1: /* D1 vline */
3750 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3751 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3752 DRM_DEBUG("IH: D1 vline\n");
3756 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3760 case 5: /* D2 vblank/vline */
3762 case 0: /* D2 vblank */
3763 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3764 if (rdev->irq.crtc_vblank_int[1]) {
3765 drm_handle_vblank(rdev->ddev, 1);
3766 rdev->pm.vblank_sync = true;
3767 wake_up(&rdev->irq.vblank_queue);
3769 if (atomic_read(&rdev->irq.pflip[1]))
3770 radeon_crtc_handle_flip(rdev, 1);
3771 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3772 DRM_DEBUG("IH: D2 vblank\n");
3775 case 1: /* D1 vline */
3776 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3777 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3778 DRM_DEBUG("IH: D2 vline\n");
3782 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3786 case 19: /* HPD/DAC hotplug */
3789 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3790 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3791 queue_hotplug = true;
3792 DRM_DEBUG("IH: HPD1\n");
3796 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3797 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3798 queue_hotplug = true;
3799 DRM_DEBUG("IH: HPD2\n");
3803 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3804 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3805 queue_hotplug = true;
3806 DRM_DEBUG("IH: HPD3\n");
3810 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3811 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3812 queue_hotplug = true;
3813 DRM_DEBUG("IH: HPD4\n");
3817 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3818 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3819 queue_hotplug = true;
3820 DRM_DEBUG("IH: HPD5\n");
3824 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3825 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3826 queue_hotplug = true;
3827 DRM_DEBUG("IH: HPD6\n");
3831 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3838 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3839 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3841 DRM_DEBUG("IH: HDMI0\n");
3845 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3846 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3848 DRM_DEBUG("IH: HDMI1\n");
3852 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3856 case 176: /* CP_INT in ring buffer */
3857 case 177: /* CP_INT in IB1 */
3858 case 178: /* CP_INT in IB2 */
3859 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3860 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3862 case 181: /* CP EOP event */
3863 DRM_DEBUG("IH: CP EOP\n");
3864 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3866 case 224: /* DMA trap event */
3867 DRM_DEBUG("IH: DMA trap\n");
3868 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3870 case 230: /* thermal low to high */
3871 DRM_DEBUG("IH: thermal low to high\n");
3872 rdev->pm.dpm.thermal.high_to_low = false;
3873 queue_thermal = true;
3875 case 231: /* thermal high to low */
3876 DRM_DEBUG("IH: thermal high to low\n");
3877 rdev->pm.dpm.thermal.high_to_low = true;
3878 queue_thermal = true;
3880 case 233: /* GUI IDLE */
3881 DRM_DEBUG("IH: GUI idle\n");
3884 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3888 /* wptr/rptr are in bytes! */
3890 rptr &= rdev->ih.ptr_mask;
3893 schedule_work(&rdev->hotplug_work);
3895 schedule_work(&rdev->audio_work);
3896 if (queue_thermal && rdev->pm.dpm_enabled)
3897 schedule_work(&rdev->pm.dpm.thermal.work);
3898 rdev->ih.rptr = rptr;
3899 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3900 atomic_set(&rdev->ih.lock, 0);
3902 /* make sure wptr hasn't changed while processing */
3903 wptr = r600_get_ih_wptr(rdev);
3913 #if defined(CONFIG_DEBUG_FS)
3915 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3917 struct drm_info_node *node = (struct drm_info_node *) m->private;
3918 struct drm_device *dev = node->minor->dev;
3919 struct radeon_device *rdev = dev->dev_private;
3921 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3922 DREG32_SYS(m, rdev, VM_L2_STATUS);
3926 static struct drm_info_list r600_mc_info_list[] = {
3927 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3931 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3933 #if defined(CONFIG_DEBUG_FS)
3934 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3941 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3942 * @rdev: radeon device structure
3943 * @bo: buffer object struct which userspace is waiting for idle
3945 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
3946 * performed through the ring buffer; this leads to corruption in rendering,
3947 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
3948 * perform the HDP flush directly by writing the register through MMIO.
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
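/**
 * r600_set_pcie_lanes - program a new PCIE link width
 *
 * @rdev: radeon device structure
 * @lanes: requested lane count (0/1/2/4/8/12/16)
 *
 * Asks the link controller to retrain at the requested width by
 * programming LC_LINK_WIDTH_CNTL and setting RECONFIG_NOW. Silently
 * does nothing on IGP, non-PCIE and x2 parts.
 */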
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
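/**
 * r600_get_pcie_lanes - report the current PCIE link width
 *
 * @rdev: radeon device structure
 *
 * Reads the trained width back from LC_LINK_WIDTH_CNTL. Returns the lane
 * count (1/2/4/8/12/16), or 0 for IGP, non-PCIE and x2 parts where the
 * query does not apply.
 */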
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
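/**
 * r600_pcie_gen2_enable - try to switch the PCIE link to gen 2 speeds
 *
 * @rdev: radeon device structure
 *
 * No-op unless the chip, the slot and the radeon.pcie_gen2 module
 * parameter all allow it. The 55 nm parts (RV670/RV620/RV635) need an
 * extra width renegotiation dance before the speed change is requested.
 */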
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}
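	/*
	 * Only attempt the speed change if the link partner has ever
	 * advertised gen 2 support; otherwise leave the link alone and
	 * just pin the current width (see the else branch below).
	 */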
	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);
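		/*
		 * The 55 nm parts get LC_POINT_7_PLUS_EN cleared here, while
		 * newer asics instead drop the target link speed override
		 * before the gen 2 strap is set below.
		 */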
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
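/*
 * Writing RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running counter into
 * the LSB/MSB pair so the two 32 bit halves read back consistently, and the
 * mutex keeps concurrent snapshots from clobbering each other's latch. This
 * is what backs the RADEON_INFO_TIMESTAMP query (an assumption about the
 * ioctl plumbing, via the asic get_gpu_clock_counter hook).
 */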