/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
55 MODULE_FIRMWARE("radeon/R600_pfp.bin");
56 MODULE_FIRMWARE("radeon/R600_me.bin");
57 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV610_me.bin");
59 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
60 MODULE_FIRMWARE("radeon/RV630_me.bin");
61 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
62 MODULE_FIRMWARE("radeon/RV620_me.bin");
63 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
64 MODULE_FIRMWARE("radeon/RV635_me.bin");
65 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
66 MODULE_FIRMWARE("radeon/RV670_me.bin");
67 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
68 MODULE_FIRMWARE("radeon/RS780_me.bin");
69 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
70 MODULE_FIRMWARE("radeon/RV770_me.bin");
71 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
72 MODULE_FIRMWARE("radeon/RV730_me.bin");
73 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
74 MODULE_FIRMWARE("radeon/RV710_me.bin");
75 MODULE_FIRMWARE("radeon/R600_rlc.bin");
76 MODULE_FIRMWARE("radeon/R700_rlc.bin");
77 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
78 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
79 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
80 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
81 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
82 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
83 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
84 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
85 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
86 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
87 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
88 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
89 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
90 MODULE_FIRMWARE("radeon/PALM_me.bin");
91 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
92 MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
93 MODULE_FIRMWARE("radeon/SUMO_me.bin");
94 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
95 MODULE_FIRMWARE("radeon/SUMO2_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
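/**
 * r600_pm_get_dynpm_state - look up the dynamic PM state indices
 *
 * @rdev: radeon_device pointer
 *
 * Select the requested power state and clock mode index for dynamic
 * power management based on the planned action (minimum, downclock,
 * upclock, default), the number of active crtcs, and the ordering of
 * the power state array (low to high, default first).
 */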
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}
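/**
 * rs780_pm_init_profile - set up the default power profiles (RS780/RS880)
 *
 * @rdev: radeon_device pointer
 *
 * Fill the dpms on/off power state and clock mode indices for each
 * power profile (default, low/mid/high single-head, low/mid/high
 * multi-head), chosen according to how many power states the vbios
 * exposes (2, 3, or more).
 */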
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
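/**
 * r600_pm_init_profile - set up the default power profiles (r6xx)
 *
 * @rdev: radeon_device pointer
 *
 * Same as above for the remaining r6xx parts: R600 itself only gets the
 * default state for every profile, while the other asics pick battery
 * or performance states via radeon_pm_get_type_index() depending on
 * mobility and the number of power states.
 */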
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
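/**
 * r600_pm_misc - program the non-clock parts of a power state
 *
 * @rdev: radeon_device pointer
 *
 * Apply the side effects of the requested power state; currently this
 * only programs the VDDC voltage through the atombios tables when it
 * differs from the current one.
 */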
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
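/**
 * r600_hpd_init - enable the hotplug detect pins
 *
 * @rdev: radeon_device pointer
 *
 * Program the HPD control registers for every connector that has an
 * hpd pin assigned, set the interrupt polarity, and enable the hpd
 * interrupts (eDP/LVDS connectors are skipped, see the comment below).
 */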
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imac; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
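/*
 * R600 PCIE GART
 */
/**
 * r600_pcie_gart_tlb_flush - flush the PCIE GART TLB
 *
 * @rdev: radeon_device pointer
 *
 * Flush the HDP cache so page table updates hit VRAM, then invalidate
 * the VM context covering the GTT range and wait for the request to
 * complete.
 */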
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug. Write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
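/**
 * r600_mc_wait_for_idle - wait for the memory controller to go idle
 *
 * @rdev: radeon_device pointer
 *
 * Poll the MC busy bits in SRBM_STATUS for up to rdev->usec_timeout
 * microseconds. Returns 0 if the MC went idle, -1 on timeout.
 */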
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
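/**
 * r600_mc_program - program the memory controller address ranges
 *
 * @rdev: radeon_device pointer
 *
 * Stop the MC, lock out VGA aperture accesses, program the system
 * aperture, FB location and AGP aperture registers, then resume the MC
 * and disable the VGA renderer so it cannot scribble over our objects.
 */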
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same address in the GPU address space as it
 * has in the CPU (PCI) address space, as some GPUs have issues when the
 * aperture is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture;
 * the GPU needs them to be contiguous from its point of view so that
 * it can catch accesses outside of both ranges.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling
 * this function on an AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
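/**
 * r600_mc_init - read back the VRAM configuration
 *
 * @rdev: radeon_device pointer
 *
 * Determine the memory channel width and count from RAMCFG/CHMAP,
 * read the VRAM size from CONFIG_MEMSIZE, place VRAM and GTT in the
 * GPU address space, and refresh the bandwidth info.
 */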
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
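/**
 * r600_vram_scratch_init - allocate the VRAM scratch page
 *
 * @rdev: radeon_device pointer
 *
 * Create, pin and kmap one GPU page in VRAM. It is used as the default
 * system aperture address (see r600_mc_program() above) so that stray
 * MC requests land somewhere harmless.
 */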
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
/* We don't check that the GPU really needs a reset; we simply do the
 * reset. It's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
		S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
		S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
		S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
		S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
		S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
		S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
		S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
		S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
		S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
		S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
		S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
		S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
		S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
		S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return;

	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));

	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);

	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
}
static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
{
	u32 tmp;

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		return;

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	/* Reset dma */
	if (rdev->family >= CHIP_RV770)
		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
	else
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}
static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		reset_mask &= ~RADEON_RESET_DMA;

	if (reset_mask == 0)
		return 0;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
		r600_gpu_soft_reset_gfx(rdev);

	if (reset_mask & RADEON_RESET_DMA)
		r600_gpu_soft_reset_dma(rdev);

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (r6xx-evergreen).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 dma_status_reg;

	dma_status_reg = RREG32(DMA_STATUS_REG);
	if (dma_status_reg & DMA_IDLE) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
					  RADEON_RESET_COMPUTE |
					  RADEON_RESET_DMA));
}
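/**
 * r6xx_remap_render_backend - build the render backend remap value
 *
 * @rdev: radeon_device pointer
 * @tiling_pipe_num: number of tiling pipes
 * @max_rb_num: max RBs (render backends) for this asic
 * @total_max_rb_num: max RBs for the whole family
 * @disabled_rb_mask: mask of disabled RBs
 *
 * Distribute the rendering pipes over the enabled render backends,
 * packing one backend index per pipe into the returned remap value
 * (2 bits per entry on r6xx/r7xx, 4 bits on evergreen and newer).
 */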
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
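/**
 * r600_gpu_init - program the 3D engine defaults
 *
 * @rdev: radeon_device pointer
 *
 * Set up the per-family pipe/SIMD/backend configuration, tiling mode,
 * render backend map, SQ resource split and the various default state
 * registers that the 2D/3D drivers build on.
 */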
static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R6XX_MAX_BACKENDS -
		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
	if (tmp < rdev->config.r600.max_backends) {
		rdev->config.r600.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = R6XX_MAX_PIPES -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.r600.max_pipes) {
		rdev->config.r600.max_pipes = tmp;
	}
	tmp = R6XX_MAX_SIMDS -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.r600.max_simds) {
		rdev->config.r600.max_simds = tmp;
	}

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1918 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1919 WREG32(VC_ENHANCE, 0);
 * Indirect register accessors
1926 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1930 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1931 (void)RREG32(PCIE_PORT_INDEX);
1932 r = RREG32(PCIE_PORT_DATA);
1936 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1938 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1939 (void)RREG32(PCIE_PORT_INDEX);
1940 WREG32(PCIE_PORT_DATA, (v));
1941 (void)RREG32(PCIE_PORT_DATA);
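
/*
 * Illustrative sketch only (not part of the driver): a typical
 * read-modify-write through the index/data accessors above. The helper
 * name and its use are hypothetical; it just shows how the two
 * accessors compose.
 */
static inline void r600_pciep_rmw_sketch(struct radeon_device *rdev,
					 u32 reg, u32 clr, u32 set)
{
	u32 v = r600_pciep_rreg(rdev, reg);	/* latch current value */

	v = (v & ~clr) | set;			/* clear, then set bits */
	r600_pciep_wreg(rdev, reg, v);		/* write it back */
}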
1947 void r600_cp_stop(struct radeon_device *rdev)
1949 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1950 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1951 WREG32(SCRATCH_UMSK, 0);
1952 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1955 int r600_init_microcode(struct radeon_device *rdev)
1957 struct platform_device *pdev;
1958 const char *chip_name;
1959 const char *rlc_chip_name;
1960 size_t pfp_req_size, me_req_size, rlc_req_size;
1966 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1969 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1973 switch (rdev->family) {
1976 rlc_chip_name = "R600";
1979 chip_name = "RV610";
1980 rlc_chip_name = "R600";
1983 chip_name = "RV630";
1984 rlc_chip_name = "R600";
1987 chip_name = "RV620";
1988 rlc_chip_name = "R600";
1991 chip_name = "RV635";
1992 rlc_chip_name = "R600";
1995 chip_name = "RV670";
1996 rlc_chip_name = "R600";
2000 chip_name = "RS780";
2001 rlc_chip_name = "R600";
2004 chip_name = "RV770";
2005 rlc_chip_name = "R700";
2009 chip_name = "RV730";
2010 rlc_chip_name = "R700";
2013 chip_name = "RV710";
2014 rlc_chip_name = "R700";
2017 chip_name = "CEDAR";
2018 rlc_chip_name = "CEDAR";
2021 chip_name = "REDWOOD";
2022 rlc_chip_name = "REDWOOD";
2025 chip_name = "JUNIPER";
2026 rlc_chip_name = "JUNIPER";
2030 chip_name = "CYPRESS";
2031 rlc_chip_name = "CYPRESS";
2035 rlc_chip_name = "SUMO";
2039 rlc_chip_name = "SUMO";
2042 chip_name = "SUMO2";
2043 rlc_chip_name = "SUMO";
2048 if (rdev->family >= CHIP_CEDAR) {
2049 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2050 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2051 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2052 } else if (rdev->family >= CHIP_RV770) {
2053 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2054 me_req_size = R700_PM4_UCODE_SIZE * 4;
2055 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2057 pfp_req_size = PFP_UCODE_SIZE * 4;
2058 me_req_size = PM4_UCODE_SIZE * 12;
2059 rlc_req_size = RLC_UCODE_SIZE * 4;
2062 DRM_INFO("Loading %s Microcode\n", chip_name);
2064 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2065 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2068 if (rdev->pfp_fw->size != pfp_req_size) {
2070 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2071 rdev->pfp_fw->size, fw_name);
2076 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2077 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2080 if (rdev->me_fw->size != me_req_size) {
2082 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2083 rdev->me_fw->size, fw_name);
2087 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2088 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2091 if (rdev->rlc_fw->size != rlc_req_size) {
2093 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2094 rdev->rlc_fw->size, fw_name);
2099 platform_device_unregister(pdev);
2104 "r600_cp: Failed to load firmware \"%s\"\n",
2106 release_firmware(rdev->pfp_fw);
2107 rdev->pfp_fw = NULL;
2108 release_firmware(rdev->me_fw);
2110 release_firmware(rdev->rlc_fw);
2111 rdev->rlc_fw = NULL;
2116 static int r600_cp_load_microcode(struct radeon_device *rdev)
2118 const __be32 *fw_data;
2121 if (!rdev->me_fw || !rdev->pfp_fw)
2130 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2133 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2134 RREG32(GRBM_SOFT_RESET);
2136 WREG32(GRBM_SOFT_RESET, 0);
2138 WREG32(CP_ME_RAM_WADDR, 0);
2140 fw_data = (const __be32 *)rdev->me_fw->data;
2141 WREG32(CP_ME_RAM_WADDR, 0);
2142 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2143 WREG32(CP_ME_RAM_DATA,
2144 be32_to_cpup(fw_data++));
2146 fw_data = (const __be32 *)rdev->pfp_fw->data;
2147 WREG32(CP_PFP_UCODE_ADDR, 0);
2148 for (i = 0; i < PFP_UCODE_SIZE; i++)
2149 WREG32(CP_PFP_UCODE_DATA,
2150 be32_to_cpup(fw_data++));
2152 WREG32(CP_PFP_UCODE_ADDR, 0);
2153 WREG32(CP_ME_RAM_WADDR, 0);
2154 WREG32(CP_ME_RAM_RADDR, 0);
2158 int r600_cp_start(struct radeon_device *rdev)
2160 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2164 r = radeon_ring_lock(rdev, ring, 7);
2166 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2169 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2170 radeon_ring_write(ring, 0x1);
2171 if (rdev->family >= CHIP_RV770) {
2172 radeon_ring_write(ring, 0x0);
2173 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2175 radeon_ring_write(ring, 0x3);
2176 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2178 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2179 radeon_ring_write(ring, 0);
2180 radeon_ring_write(ring, 0);
2181 radeon_ring_unlock_commit(rdev, ring);
2184 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2188 int r600_cp_resume(struct radeon_device *rdev)
2190 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2196 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2197 RREG32(GRBM_SOFT_RESET);
2199 WREG32(GRBM_SOFT_RESET, 0);
2201 /* Set ring buffer size */
2202 rb_bufsz = drm_order(ring->ring_size / 8);
2203 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2205 tmp |= BUF_SWAP_32BIT;
2207 WREG32(CP_RB_CNTL, tmp);
2208 WREG32(CP_SEM_WAIT_TIMER, 0x0);
2210 /* Set the write pointer delay */
2211 WREG32(CP_RB_WPTR_DELAY, 0);
2213 /* Initialize the ring buffer's read and write pointers */
2214 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2215 WREG32(CP_RB_RPTR_WR, 0);
2217 WREG32(CP_RB_WPTR, ring->wptr);
2219 /* set the wb address whether it's enabled or not */
2220 WREG32(CP_RB_RPTR_ADDR,
2221 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2222 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2223 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2225 if (rdev->wb.enabled)
2226 WREG32(SCRATCH_UMSK, 0xff);
2228 tmp |= RB_NO_UPDATE;
2229 WREG32(SCRATCH_UMSK, 0);
2233 WREG32(CP_RB_CNTL, tmp);
2235 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2236 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2238 ring->rptr = RREG32(CP_RB_RPTR);
2240 r600_cp_start(rdev);
2242 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2244 ring->ready = false;
2250 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2255 /* Align ring size */
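	/* Worked example (illustrative): for a 1 MiB request,
	 * drm_order(1048576 / 8) == 17 and the aligned size is
	 * (1 << 18) * 4 == 1 MiB, i.e. power-of-two sizes pass through
	 * unchanged while other sizes are rounded up to the next power
	 * of two.
	 */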
2256 rb_bufsz = drm_order(ring_size / 8);
2257 ring_size = (1 << (rb_bufsz + 1)) * 4;
2258 ring->ring_size = ring_size;
2259 ring->align_mask = 16 - 1;
2261 if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2262 r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2264 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2265 ring->rptr_save_reg = 0;
2270 void r600_cp_fini(struct radeon_device *rdev)
2272 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2274 radeon_ring_fini(rdev, ring);
2275 radeon_scratch_free(rdev, ring->rptr_save_reg);
2280 * Starting with R600, the GPU has an asynchronous
2281 * DMA engine. The programming model is very similar
2282 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
2285 * It supports copying data, writing embedded data,
2286 * solid fills, and a number of other things. It also
2287 * has support for tiling/detiling of buffers.
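
/*
 * Illustrative sketch only: every async-DMA packet starts with a single
 * header dword. The field layout below mirrors the DMA_PACKET() helper
 * used later in this file, but treat the exact bit positions as an
 * assumption and consult r600d.h for the authoritative encoding.
 */
static inline u32 r600_dma_hdr_sketch(u32 cmd, u32 tiled, u32 swap, u32 count)
{
	return ((cmd & 0xF) << 28) |	/* packet opcode (write/copy/fence/...) */
	       ((tiled & 0x1) << 23) |	/* tiled vs. linear addressing */
	       ((swap & 0x1) << 22) |	/* byte swap for big-endian hosts */
	       (count & 0xFFFF);	/* payload count in dwords */
}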
2290 * r600_dma_stop - stop the async dma engine
2292 * @rdev: radeon_device pointer
2294 * Stop the async dma engine (r6xx-evergreen).
2296 void r600_dma_stop(struct radeon_device *rdev)
2298 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2300 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2302 rb_cntl &= ~DMA_RB_ENABLE;
2303 WREG32(DMA_RB_CNTL, rb_cntl);
2305 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2309 * r600_dma_resume - setup and start the async dma engine
2311 * @rdev: radeon_device pointer
2313 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2314 * Returns 0 for success, error for failure.
2316 int r600_dma_resume(struct radeon_device *rdev)
2318 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2319 u32 rb_cntl, dma_cntl, ib_cntl;
2324 if (rdev->family >= CHIP_RV770)
2325 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2327 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2328 RREG32(SRBM_SOFT_RESET);
2330 WREG32(SRBM_SOFT_RESET, 0);
2332 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2333 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2335 /* Set ring buffer size in dwords */
2336 rb_bufsz = drm_order(ring->ring_size / 4);
2337 rb_cntl = rb_bufsz << 1;
2339 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2341 WREG32(DMA_RB_CNTL, rb_cntl);
2343 /* Initialize the ring buffer's read and write pointers */
2344 WREG32(DMA_RB_RPTR, 0);
2345 WREG32(DMA_RB_WPTR, 0);
2347 /* set the wb address whether it's enabled or not */
2348 WREG32(DMA_RB_RPTR_ADDR_HI,
2349 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2350 WREG32(DMA_RB_RPTR_ADDR_LO,
2351 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2353 if (rdev->wb.enabled)
2354 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2356 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2358 /* enable DMA IBs */
2359 ib_cntl = DMA_IB_ENABLE;
2361 ib_cntl |= DMA_IB_SWAP_ENABLE;
2363 WREG32(DMA_IB_CNTL, ib_cntl);
2365 dma_cntl = RREG32(DMA_CNTL);
2366 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2367 WREG32(DMA_CNTL, dma_cntl);
2369 if (rdev->family >= CHIP_RV770)
2370 WREG32(DMA_MODE, 1);
2373 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2375 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2377 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2381 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2383 ring->ready = false;
2387 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2393 * r600_dma_fini - tear down the async dma engine
2395 * @rdev: radeon_device pointer
2397 * Stop the async dma engine and free the ring (r6xx-evergreen).
2399 void r600_dma_fini(struct radeon_device *rdev)
2401 r600_dma_stop(rdev);
2402 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
 * GPU scratch register helper functions.
2408 void r600_scratch_init(struct radeon_device *rdev)
2412 rdev->scratch.num_reg = 7;
2413 rdev->scratch.reg_base = SCRATCH_REG0;
2414 for (i = 0; i < rdev->scratch.num_reg; i++) {
2415 rdev->scratch.free[i] = true;
2416 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2420 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2427 r = radeon_scratch_get(rdev, &scratch);
2429 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2432 WREG32(scratch, 0xCAFEDEAD);
2433 r = radeon_ring_lock(rdev, ring, 3);
2435 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2436 radeon_scratch_free(rdev, scratch);
2439 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2440 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2441 radeon_ring_write(ring, 0xDEADBEEF);
2442 radeon_ring_unlock_commit(rdev, ring);
2443 for (i = 0; i < rdev->usec_timeout; i++) {
2444 tmp = RREG32(scratch);
2445 if (tmp == 0xDEADBEEF)
2449 if (i < rdev->usec_timeout) {
2450 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2452 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2453 ring->idx, scratch, tmp);
2456 radeon_scratch_free(rdev, scratch);
2461 * r600_dma_ring_test - simple async dma engine test
2463 * @rdev: radeon_device pointer
2464 * @ring: radeon_ring structure holding ring information
 * Test the DMA engine by using it to write a
 * value to memory (r6xx-SI).
2468 * Returns 0 for success, error for failure.
2470 int r600_dma_ring_test(struct radeon_device *rdev,
2471 struct radeon_ring *ring)
2475 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2479 DRM_ERROR("invalid vram scratch pointer\n");
2486 r = radeon_ring_lock(rdev, ring, 4);
2488 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2491 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2492 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2493 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2494 radeon_ring_write(ring, 0xDEADBEEF);
2495 radeon_ring_unlock_commit(rdev, ring);
2497 for (i = 0; i < rdev->usec_timeout; i++) {
2499 if (tmp == 0xDEADBEEF)
2504 if (i < rdev->usec_timeout) {
2505 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2507 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2515 * CP fences/semaphores
2518 void r600_fence_ring_emit(struct radeon_device *rdev,
2519 struct radeon_fence *fence)
2521 struct radeon_ring *ring = &rdev->ring[fence->ring];
2523 if (rdev->wb.use_event) {
2524 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2525 /* flush read cache over gart */
2526 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2527 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2528 PACKET3_VC_ACTION_ENA |
2529 PACKET3_SH_ACTION_ENA);
2530 radeon_ring_write(ring, 0xFFFFFFFF);
2531 radeon_ring_write(ring, 0);
2532 radeon_ring_write(ring, 10); /* poll interval */
2533 /* EVENT_WRITE_EOP - flush caches, send int */
2534 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2535 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2536 radeon_ring_write(ring, addr & 0xffffffff);
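		/* DATA_SEL(1) selects a 32-bit data write (the fence seq
		 * below) and INT_SEL(2) an interrupt once that write is
		 * confirmed; the field meanings follow our reading of the
		 * EVENT_WRITE_EOP packet encoding.
		 */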
2537 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2538 radeon_ring_write(ring, fence->seq);
2539 radeon_ring_write(ring, 0);
2541 /* flush read cache over gart */
2542 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2543 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2544 PACKET3_VC_ACTION_ENA |
2545 PACKET3_SH_ACTION_ENA);
2546 radeon_ring_write(ring, 0xFFFFFFFF);
2547 radeon_ring_write(ring, 0);
2548 radeon_ring_write(ring, 10); /* poll interval */
2549 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2550 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2551 /* wait for 3D idle clean */
2552 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2553 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2554 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2555 /* Emit fence sequence & fire IRQ */
2556 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2557 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2558 radeon_ring_write(ring, fence->seq);
2559 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2560 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2561 radeon_ring_write(ring, RB_INT_STAT);
2565 void r600_semaphore_ring_emit(struct radeon_device *rdev,
2566 struct radeon_ring *ring,
2567 struct radeon_semaphore *semaphore,
2570 uint64_t addr = semaphore->gpu_addr;
2571 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2573 if (rdev->family < CHIP_CAYMAN)
2574 sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2576 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2577 radeon_ring_write(ring, addr & 0xffffffff);
2578 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2582 * DMA fences/semaphores
2586 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
2588 * @rdev: radeon_device pointer
2589 * @fence: radeon fence object
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
2595 void r600_dma_fence_ring_emit(struct radeon_device *rdev,
2596 struct radeon_fence *fence)
2598 struct radeon_ring *ring = &rdev->ring[fence->ring];
2599 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2601 /* write the fence */
2602 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
2603 radeon_ring_write(ring, addr & 0xfffffffc);
2604 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
2605 radeon_ring_write(ring, lower_32_bits(fence->seq));
2606 /* generate an interrupt */
2607 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
2611 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
2613 * @rdev: radeon_device pointer
2614 * @ring: radeon_ring structure holding ring information
2615 * @semaphore: radeon semaphore object
2616 * @emit_wait: wait or signal semaphore
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
2621 void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
2622 struct radeon_ring *ring,
2623 struct radeon_semaphore *semaphore,
2626 u64 addr = semaphore->gpu_addr;
2627 u32 s = emit_wait ? 0 : 1;
2629 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
2630 radeon_ring_write(ring, addr & 0xfffffffc);
2631 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2634 int r600_copy_blit(struct radeon_device *rdev,
2635 uint64_t src_offset,
2636 uint64_t dst_offset,
2637 unsigned num_gpu_pages,
2638 struct radeon_fence **fence)
2640 struct radeon_semaphore *sem = NULL;
2641 struct radeon_sa_bo *vb = NULL;
2644 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
2648 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
2649 r600_blit_done_copy(rdev, fence, vb, sem);
2654 * r600_copy_dma - copy pages using the DMA engine
2656 * @rdev: radeon_device pointer
2657 * @src_offset: src GPU address
2658 * @dst_offset: dst GPU address
2659 * @num_gpu_pages: number of GPU pages to xfer
2660 * @fence: radeon fence object
 * Copy GPU pages using the DMA engine (r6xx).
2663 * Used by the radeon ttm implementation to move pages if
2664 * registered as the asic copy callback.
2666 int r600_copy_dma(struct radeon_device *rdev,
2667 uint64_t src_offset, uint64_t dst_offset,
2668 unsigned num_gpu_pages,
2669 struct radeon_fence **fence)
2671 struct radeon_semaphore *sem = NULL;
2672 int ring_index = rdev->asic->copy.dma_ring_index;
2673 struct radeon_ring *ring = &rdev->ring[ring_index];
2674 u32 size_in_dw, cur_size_in_dw;
2678 r = radeon_semaphore_create(rdev, &sem);
2680 DRM_ERROR("radeon: moving bo (%d).\n", r);
2684 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
2685 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
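	/* A single DMA_PACKET_COPY moves at most 0xFFFE dwords (~256 KiB),
	 * so larger transfers are split across num_loops packets; the lock
	 * below reserves 4 dwords per copy packet plus 8 spare dwords for
	 * the semaphore sync and fence packets.
	 */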
2686 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
2688 DRM_ERROR("radeon: moving bo (%d).\n", r);
2689 radeon_semaphore_free(rdev, &sem, NULL);
2693 if (radeon_fence_need_sync(*fence, ring->idx)) {
2694 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2696 radeon_fence_note_sync(*fence, ring->idx);
2698 radeon_semaphore_free(rdev, &sem, NULL);
2701 for (i = 0; i < num_loops; i++) {
2702 cur_size_in_dw = size_in_dw;
2703 if (cur_size_in_dw > 0xFFFE)
2704 cur_size_in_dw = 0xFFFE;
2705 size_in_dw -= cur_size_in_dw;
2706 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
2707 radeon_ring_write(ring, dst_offset & 0xfffffffc);
2708 radeon_ring_write(ring, src_offset & 0xfffffffc);
2709 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
2710 (upper_32_bits(src_offset) & 0xff)));
2711 src_offset += cur_size_in_dw * 4;
2712 dst_offset += cur_size_in_dw * 4;
2715 r = radeon_fence_emit(rdev, fence, ring->idx);
2717 radeon_ring_unlock_undo(rdev, ring);
2721 radeon_ring_unlock_commit(rdev, ring);
2722 radeon_semaphore_free(rdev, &sem, *fence);
2727 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2728 uint32_t tiling_flags, uint32_t pitch,
2729 uint32_t offset, uint32_t obj_size)
2731 /* FIXME: implement */
2735 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2737 /* FIXME: implement */
2740 static int r600_startup(struct radeon_device *rdev)
2742 struct radeon_ring *ring;
2745 /* enable pcie gen2 link */
2746 r600_pcie_gen2_enable(rdev);
2748 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2749 r = r600_init_microcode(rdev);
2751 DRM_ERROR("Failed to load firmware!\n");
2756 r = r600_vram_scratch_init(rdev);
2760 r600_mc_program(rdev);
2761 if (rdev->flags & RADEON_IS_AGP) {
2762 r600_agp_enable(rdev);
2764 r = r600_pcie_gart_enable(rdev);
2768 r600_gpu_init(rdev);
2769 r = r600_blit_init(rdev);
2771 r600_blit_fini(rdev);
2772 rdev->asic->copy.copy = NULL;
2773 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2776 /* allocate wb buffer */
2777 r = radeon_wb_init(rdev);
2781 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2783 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2787 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2789 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2794 r = r600_irq_init(rdev);
2796 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2797 radeon_irq_kms_fini(rdev);
2802 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2803 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2804 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2805 0, 0xfffff, RADEON_CP_PACKET2);
2809 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2810 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2811 DMA_RB_RPTR, DMA_RB_WPTR,
2812 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2816 r = r600_cp_load_microcode(rdev);
2819 r = r600_cp_resume(rdev);
2823 r = r600_dma_resume(rdev);
2827 r = radeon_ib_pool_init(rdev);
2829 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2833 r = r600_audio_init(rdev);
2835 DRM_ERROR("radeon: audio init failed\n");
2842 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2846 temp = RREG32(CONFIG_CNTL);
	if (!state) {
2853 WREG32(CONFIG_CNTL, temp);
2856 int r600_resume(struct radeon_device *rdev)
	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into a good state.
	 */
2865 atom_asic_init(rdev->mode_info.atom_context);
2867 rdev->accel_working = true;
2868 r = r600_startup(rdev);
2870 DRM_ERROR("r600 startup failed on resume\n");
2871 rdev->accel_working = false;
2878 int r600_suspend(struct radeon_device *rdev)
2880 r600_audio_fini(rdev);
2882 r600_dma_stop(rdev);
2883 r600_irq_suspend(rdev);
2884 radeon_wb_disable(rdev);
2885 r600_pcie_gart_disable(rdev);
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more
 * than call ASIC-specific functions. This should also allow
 * removing a bunch of per-ASIC callback functions.
 */
2896 int r600_init(struct radeon_device *rdev)
2900 if (r600_debugfs_mc_info_init(rdev)) {
2901 DRM_ERROR("Failed to register debugfs file for mc !\n");
2904 if (!radeon_get_bios(rdev)) {
2905 if (ASIC_IS_AVIVO(rdev))
2908 /* Must be an ATOMBIOS */
2909 if (!rdev->is_atom_bios) {
2910 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2913 r = radeon_atombios_init(rdev);
2916 /* Post card if necessary */
2917 if (!radeon_card_posted(rdev)) {
2919 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2922 DRM_INFO("GPU not posted. posting now...\n");
2923 atom_asic_init(rdev->mode_info.atom_context);
2925 /* Initialize scratch registers */
2926 r600_scratch_init(rdev);
2927 /* Initialize surface registers */
2928 radeon_surface_init(rdev);
2929 /* Initialize clocks */
2930 radeon_get_clock_info(rdev->ddev);
2932 r = radeon_fence_driver_init(rdev);
2935 if (rdev->flags & RADEON_IS_AGP) {
2936 r = radeon_agp_init(rdev);
2938 radeon_agp_disable(rdev);
2940 r = r600_mc_init(rdev);
2943 /* Memory manager */
2944 r = radeon_bo_init(rdev);
2948 r = radeon_irq_kms_init(rdev);
2952 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2953 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2955 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
2956 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
2958 rdev->ih.ring_obj = NULL;
2959 r600_ih_ring_init(rdev, 64 * 1024);
2961 r = r600_pcie_gart_init(rdev);
2965 rdev->accel_working = true;
2966 r = r600_startup(rdev);
2968 dev_err(rdev->dev, "disabling GPU acceleration\n");
2970 r600_dma_fini(rdev);
2971 r600_irq_fini(rdev);
2972 radeon_wb_fini(rdev);
2973 radeon_ib_pool_fini(rdev);
2974 radeon_irq_kms_fini(rdev);
2975 r600_pcie_gart_fini(rdev);
2976 rdev->accel_working = false;
2982 void r600_fini(struct radeon_device *rdev)
2984 r600_audio_fini(rdev);
2985 r600_blit_fini(rdev);
2987 r600_dma_fini(rdev);
2988 r600_irq_fini(rdev);
2989 radeon_wb_fini(rdev);
2990 radeon_ib_pool_fini(rdev);
2991 radeon_irq_kms_fini(rdev);
2992 r600_pcie_gart_fini(rdev);
2993 r600_vram_scratch_fini(rdev);
2994 radeon_agp_fini(rdev);
2995 radeon_gem_fini(rdev);
2996 radeon_fence_driver_fini(rdev);
2997 radeon_bo_fini(rdev);
2998 radeon_atombios_fini(rdev);
3007 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3009 struct radeon_ring *ring = &rdev->ring[ib->ring];
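	/* Publish the rptr value the CP will hold once this submission is
	 * consumed (3 dwords for the SET_CONFIG_REG variant or 5 for the
	 * MEM_WRITE variant, plus 4 for the IB packet itself), either to a
	 * scratch register or to the write-back page, so the ring state
	 * can later be inspected; sizes inferred from the packets below.
	 */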
3012 if (ring->rptr_save_reg) {
3013 next_rptr = ring->wptr + 3 + 4;
3014 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3015 radeon_ring_write(ring, ((ring->rptr_save_reg -
3016 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3017 radeon_ring_write(ring, next_rptr);
3018 } else if (rdev->wb.enabled) {
3019 next_rptr = ring->wptr + 5 + 4;
3020 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3021 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3022 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3023 radeon_ring_write(ring, next_rptr);
3024 radeon_ring_write(ring, 0);
3027 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3028 radeon_ring_write(ring,
3032 (ib->gpu_addr & 0xFFFFFFFC));
3033 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3034 radeon_ring_write(ring, ib->length_dw);
3037 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3039 struct radeon_ib ib;
3045 r = radeon_scratch_get(rdev, &scratch);
3047 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3050 WREG32(scratch, 0xCAFEDEAD);
3051 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3053 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3056 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3057 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3058 ib.ptr[2] = 0xDEADBEEF;
3060 r = radeon_ib_schedule(rdev, &ib, NULL);
3062 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3065 r = radeon_fence_wait(ib.fence, false);
3067 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3070 for (i = 0; i < rdev->usec_timeout; i++) {
3071 tmp = RREG32(scratch);
3072 if (tmp == 0xDEADBEEF)
3076 if (i < rdev->usec_timeout) {
3077 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3079 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3084 radeon_ib_free(rdev, &ib);
3086 radeon_scratch_free(rdev, scratch);
3091 * r600_dma_ib_test - test an IB on the DMA engine
3093 * @rdev: radeon_device pointer
3094 * @ring: radeon_ring structure holding ring information
3096 * Test a simple IB in the DMA ring (r6xx-SI).
3097 * Returns 0 on success, error on failure.
3099 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3101 struct radeon_ib ib;
3104 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3108 DRM_ERROR("invalid vram scratch pointer\n");
3115 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3117 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3121 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3122 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3123 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3124 ib.ptr[3] = 0xDEADBEEF;
3127 r = radeon_ib_schedule(rdev, &ib, NULL);
3129 radeon_ib_free(rdev, &ib);
3130 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3133 r = radeon_fence_wait(ib.fence, false);
3135 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3138 for (i = 0; i < rdev->usec_timeout; i++) {
3140 if (tmp == 0xDEADBEEF)
3144 if (i < rdev->usec_timeout) {
3145 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3147 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3150 radeon_ib_free(rdev, &ib);
3155 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3157 * @rdev: radeon_device pointer
3158 * @ib: IB object to schedule
3160 * Schedule an IB in the DMA ring (r6xx-r7xx).
3162 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3164 struct radeon_ring *ring = &rdev->ring[ib->ring];
3166 if (rdev->wb.enabled) {
3167 u32 next_rptr = ring->wptr + 4;
3168 while ((next_rptr & 7) != 5)
3171 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3172 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3173 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3174 radeon_ring_write(ring, next_rptr);
	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad with NOPs until (wptr & 7) == 5, so that the 3-dword
	 * indirect buffer packet below ends exactly on the boundary.
3180 while ((ring->wptr & 7) != 5)
3181 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3182 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3183 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3184 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
 * much the same as the CP ring buffer, but in reverse. Rather than the CPU
3193 * writing to the ring and the GPU consuming, the GPU writes to the ring
3194 * and host consumes. As the host irq handler processes interrupts, it
3195 * increments the rptr. When the rptr catches up with the wptr, all the
3196 * current interrupts have been processed.
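
/*
 * Illustrative sketch only: advancing the host-side read pointer over one
 * 16-byte IV entry, wrapping with ptr_mask, as r600_irq_process() does
 * further below. The helper name is hypothetical.
 */
static inline u32 r600_ih_advance_rptr_sketch(u32 rptr, u32 ptr_mask)
{
	/* rptr/wptr are byte offsets; each IV ring entry is 128 bits */
	return (rptr + 16) & ptr_mask;
}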
3199 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3203 /* Align ring size */
3204 rb_bufsz = drm_order(ring_size / 4);
3205 ring_size = (1 << rb_bufsz) * 4;
3206 rdev->ih.ring_size = ring_size;
3207 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3211 int r600_ih_ring_alloc(struct radeon_device *rdev)
3215 /* Allocate ring buffer */
3216 if (rdev->ih.ring_obj == NULL) {
3217 r = radeon_bo_create(rdev, rdev->ih.ring_size,
3219 RADEON_GEM_DOMAIN_GTT,
3220 NULL, &rdev->ih.ring_obj);
3222 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3225 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3226 if (unlikely(r != 0))
3228 r = radeon_bo_pin(rdev->ih.ring_obj,
3229 RADEON_GEM_DOMAIN_GTT,
3230 &rdev->ih.gpu_addr);
3232 radeon_bo_unreserve(rdev->ih.ring_obj);
3233 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3236 r = radeon_bo_kmap(rdev->ih.ring_obj,
3237 (void **)&rdev->ih.ring);
3238 radeon_bo_unreserve(rdev->ih.ring_obj);
3240 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3247 void r600_ih_ring_fini(struct radeon_device *rdev)
3250 if (rdev->ih.ring_obj) {
3251 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3252 if (likely(r == 0)) {
3253 radeon_bo_kunmap(rdev->ih.ring_obj);
3254 radeon_bo_unpin(rdev->ih.ring_obj);
3255 radeon_bo_unreserve(rdev->ih.ring_obj);
3257 radeon_bo_unref(&rdev->ih.ring_obj);
3258 rdev->ih.ring = NULL;
3259 rdev->ih.ring_obj = NULL;
3263 void r600_rlc_stop(struct radeon_device *rdev)
3266 if ((rdev->family >= CHIP_RV770) &&
3267 (rdev->family <= CHIP_RV740)) {
3268 /* r7xx asics need to soft reset RLC before halting */
3269 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3270 RREG32(SRBM_SOFT_RESET);
3272 WREG32(SRBM_SOFT_RESET, 0);
3273 RREG32(SRBM_SOFT_RESET);
3276 WREG32(RLC_CNTL, 0);
3279 static void r600_rlc_start(struct radeon_device *rdev)
3281 WREG32(RLC_CNTL, RLC_ENABLE);
3284 static int r600_rlc_init(struct radeon_device *rdev)
3287 const __be32 *fw_data;
3292 r600_rlc_stop(rdev);
3294 WREG32(RLC_HB_CNTL, 0);
3296 if (rdev->family == CHIP_ARUBA) {
3297 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
3298 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
3300 if (rdev->family <= CHIP_CAYMAN) {
3301 WREG32(RLC_HB_BASE, 0);
3302 WREG32(RLC_HB_RPTR, 0);
3303 WREG32(RLC_HB_WPTR, 0);
3305 if (rdev->family <= CHIP_CAICOS) {
3306 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3307 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3309 WREG32(RLC_MC_CNTL, 0);
3310 WREG32(RLC_UCODE_CNTL, 0);
3312 fw_data = (const __be32 *)rdev->rlc_fw->data;
3313 if (rdev->family >= CHIP_ARUBA) {
3314 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
3315 WREG32(RLC_UCODE_ADDR, i);
3316 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3318 } else if (rdev->family >= CHIP_CAYMAN) {
3319 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
3320 WREG32(RLC_UCODE_ADDR, i);
3321 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3323 } else if (rdev->family >= CHIP_CEDAR) {
3324 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
3325 WREG32(RLC_UCODE_ADDR, i);
3326 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3328 } else if (rdev->family >= CHIP_RV770) {
3329 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3330 WREG32(RLC_UCODE_ADDR, i);
3331 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3334 for (i = 0; i < RLC_UCODE_SIZE; i++) {
3335 WREG32(RLC_UCODE_ADDR, i);
3336 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3339 WREG32(RLC_UCODE_ADDR, 0);
3341 r600_rlc_start(rdev);
3346 static void r600_enable_interrupts(struct radeon_device *rdev)
3348 u32 ih_cntl = RREG32(IH_CNTL);
3349 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3351 ih_cntl |= ENABLE_INTR;
3352 ih_rb_cntl |= IH_RB_ENABLE;
3353 WREG32(IH_CNTL, ih_cntl);
3354 WREG32(IH_RB_CNTL, ih_rb_cntl);
3355 rdev->ih.enabled = true;
3358 void r600_disable_interrupts(struct radeon_device *rdev)
3360 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3361 u32 ih_cntl = RREG32(IH_CNTL);
3363 ih_rb_cntl &= ~IH_RB_ENABLE;
3364 ih_cntl &= ~ENABLE_INTR;
3365 WREG32(IH_RB_CNTL, ih_rb_cntl);
3366 WREG32(IH_CNTL, ih_cntl);
3367 /* set rptr, wptr to 0 */
3368 WREG32(IH_RB_RPTR, 0);
3369 WREG32(IH_RB_WPTR, 0);
3370 rdev->ih.enabled = false;
3374 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3378 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3379 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3380 WREG32(DMA_CNTL, tmp);
3381 WREG32(GRBM_INT_CNTL, 0);
3382 WREG32(DxMODE_INT_MASK, 0);
3383 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3384 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3385 if (ASIC_IS_DCE3(rdev)) {
3386 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3387 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3388 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3389 WREG32(DC_HPD1_INT_CONTROL, tmp);
3390 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3391 WREG32(DC_HPD2_INT_CONTROL, tmp);
3392 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3393 WREG32(DC_HPD3_INT_CONTROL, tmp);
3394 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3395 WREG32(DC_HPD4_INT_CONTROL, tmp);
3396 if (ASIC_IS_DCE32(rdev)) {
3397 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3398 WREG32(DC_HPD5_INT_CONTROL, tmp);
3399 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3400 WREG32(DC_HPD6_INT_CONTROL, tmp);
3401 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3402 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3403 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3404 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3406 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3407 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3408 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3409 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3412 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3413 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3414 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3415 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3416 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3417 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3418 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3419 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3420 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3421 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3422 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3423 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3427 int r600_irq_init(struct radeon_device *rdev)
3431 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3434 ret = r600_ih_ring_alloc(rdev);
3439 r600_disable_interrupts(rdev);
3442 ret = r600_rlc_init(rdev);
3444 r600_ih_ring_fini(rdev);
3448 /* setup interrupt control */
3449 /* set dummy read address to ring address */
3450 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3451 interrupt_cntl = RREG32(INTERRUPT_CNTL);
3452 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3453 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3455 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3456 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3457 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3458 WREG32(INTERRUPT_CNTL, interrupt_cntl);
3460 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3461 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
3463 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3464 IH_WPTR_OVERFLOW_CLEAR |
3467 if (rdev->wb.enabled)
3468 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3470 /* set the writeback address whether it's enabled or not */
3471 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3472 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3474 WREG32(IH_RB_CNTL, ih_rb_cntl);
3476 /* set rptr, wptr to 0 */
3477 WREG32(IH_RB_RPTR, 0);
3478 WREG32(IH_RB_WPTR, 0);
3480 /* Default settings for IH_CNTL (disabled at first) */
3481 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if MSIs are enabled */
3483 if (rdev->msi_enabled)
3484 ih_cntl |= RPTR_REARM;
3485 WREG32(IH_CNTL, ih_cntl);
3487 /* force the active interrupt state to all disabled */
3488 if (rdev->family >= CHIP_CEDAR)
3489 evergreen_disable_interrupt_state(rdev);
3491 r600_disable_interrupt_state(rdev);
	/* at this point everything should be set up correctly to enable master */
3494 pci_set_master(rdev->pdev);
3497 r600_enable_interrupts(rdev);
3502 void r600_irq_suspend(struct radeon_device *rdev)
3504 r600_irq_disable(rdev);
3505 r600_rlc_stop(rdev);
3508 void r600_irq_fini(struct radeon_device *rdev)
3510 r600_irq_suspend(rdev);
3511 r600_ih_ring_fini(rdev);
3514 int r600_irq_set(struct radeon_device *rdev)
3516 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3518 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3519 u32 grbm_int_cntl = 0;
3521 u32 d1grph = 0, d2grph = 0;
3524 if (!rdev->irq.installed) {
3525 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3528 /* don't enable anything if the ih is disabled */
3529 if (!rdev->ih.enabled) {
3530 r600_disable_interrupts(rdev);
3531 /* force the active interrupt state to all disabled */
3532 r600_disable_interrupt_state(rdev);
3536 if (ASIC_IS_DCE3(rdev)) {
3537 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3538 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3539 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3540 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3541 if (ASIC_IS_DCE32(rdev)) {
3542 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3543 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3544 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3545 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3547 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3548 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3551 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3552 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3553 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3554 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3555 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3557 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3559 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3560 DRM_DEBUG("r600_irq_set: sw int\n");
3561 cp_int_cntl |= RB_INT_ENABLE;
3562 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3565 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3566 DRM_DEBUG("r600_irq_set: sw int dma\n");
3567 dma_cntl |= TRAP_ENABLE;
3570 if (rdev->irq.crtc_vblank_int[0] ||
3571 atomic_read(&rdev->irq.pflip[0])) {
3572 DRM_DEBUG("r600_irq_set: vblank 0\n");
3573 mode_int |= D1MODE_VBLANK_INT_MASK;
3575 if (rdev->irq.crtc_vblank_int[1] ||
3576 atomic_read(&rdev->irq.pflip[1])) {
3577 DRM_DEBUG("r600_irq_set: vblank 1\n");
3578 mode_int |= D2MODE_VBLANK_INT_MASK;
3580 if (rdev->irq.hpd[0]) {
3581 DRM_DEBUG("r600_irq_set: hpd 1\n");
3582 hpd1 |= DC_HPDx_INT_EN;
3584 if (rdev->irq.hpd[1]) {
3585 DRM_DEBUG("r600_irq_set: hpd 2\n");
3586 hpd2 |= DC_HPDx_INT_EN;
3588 if (rdev->irq.hpd[2]) {
3589 DRM_DEBUG("r600_irq_set: hpd 3\n");
3590 hpd3 |= DC_HPDx_INT_EN;
3592 if (rdev->irq.hpd[3]) {
3593 DRM_DEBUG("r600_irq_set: hpd 4\n");
3594 hpd4 |= DC_HPDx_INT_EN;
3596 if (rdev->irq.hpd[4]) {
3597 DRM_DEBUG("r600_irq_set: hpd 5\n");
3598 hpd5 |= DC_HPDx_INT_EN;
3600 if (rdev->irq.hpd[5]) {
3601 DRM_DEBUG("r600_irq_set: hpd 6\n");
3602 hpd6 |= DC_HPDx_INT_EN;
3604 if (rdev->irq.afmt[0]) {
3605 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3606 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3608 if (rdev->irq.afmt[1]) {
3609 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3610 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3613 WREG32(CP_INT_CNTL, cp_int_cntl);
3614 WREG32(DMA_CNTL, dma_cntl);
3615 WREG32(DxMODE_INT_MASK, mode_int);
3616 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3617 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3618 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3619 if (ASIC_IS_DCE3(rdev)) {
3620 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3621 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3622 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3623 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3624 if (ASIC_IS_DCE32(rdev)) {
3625 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3626 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3627 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3628 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3630 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3631 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3634 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3635 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3636 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3637 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3638 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3644 static void r600_irq_ack(struct radeon_device *rdev)
3648 if (ASIC_IS_DCE3(rdev)) {
3649 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3650 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3651 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3652 if (ASIC_IS_DCE32(rdev)) {
3653 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3654 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3656 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3657 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3660 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3661 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3662 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3663 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3664 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3666 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3667 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3669 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3670 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3671 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3672 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3673 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3674 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3675 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3676 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3677 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3678 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3679 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3680 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3681 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3682 if (ASIC_IS_DCE3(rdev)) {
3683 tmp = RREG32(DC_HPD1_INT_CONTROL);
3684 tmp |= DC_HPDx_INT_ACK;
3685 WREG32(DC_HPD1_INT_CONTROL, tmp);
3687 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3688 tmp |= DC_HPDx_INT_ACK;
3689 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3692 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3693 if (ASIC_IS_DCE3(rdev)) {
3694 tmp = RREG32(DC_HPD2_INT_CONTROL);
3695 tmp |= DC_HPDx_INT_ACK;
3696 WREG32(DC_HPD2_INT_CONTROL, tmp);
3698 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3699 tmp |= DC_HPDx_INT_ACK;
3700 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3703 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3704 if (ASIC_IS_DCE3(rdev)) {
3705 tmp = RREG32(DC_HPD3_INT_CONTROL);
3706 tmp |= DC_HPDx_INT_ACK;
3707 WREG32(DC_HPD3_INT_CONTROL, tmp);
3709 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3710 tmp |= DC_HPDx_INT_ACK;
3711 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3714 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3715 tmp = RREG32(DC_HPD4_INT_CONTROL);
3716 tmp |= DC_HPDx_INT_ACK;
3717 WREG32(DC_HPD4_INT_CONTROL, tmp);
3719 if (ASIC_IS_DCE32(rdev)) {
3720 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3721 tmp = RREG32(DC_HPD5_INT_CONTROL);
3722 tmp |= DC_HPDx_INT_ACK;
3723 WREG32(DC_HPD5_INT_CONTROL, tmp);
3725 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
3727 tmp |= DC_HPDx_INT_ACK;
3728 WREG32(DC_HPD6_INT_CONTROL, tmp);
3730 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3731 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3732 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3733 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3735 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3736 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3737 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3738 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3741 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3742 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3743 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3744 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3746 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3747 if (ASIC_IS_DCE3(rdev)) {
3748 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3749 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3750 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3752 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3753 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3754 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3760 void r600_irq_disable(struct radeon_device *rdev)
3762 r600_disable_interrupts(rdev);
3763 /* Wait and acknowledge irq */
3766 r600_disable_interrupt_state(rdev);
3769 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3773 if (rdev->wb.enabled)
3774 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3776 wptr = RREG32(IH_RB_WPTR);
3778 if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last non-overwritten vector (wptr + 16). Hopefully
		 * this will allow us to catch up.
		 */
3783 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3784 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
3785 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3786 tmp = RREG32(IH_RB_CNTL);
3787 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3788 WREG32(IH_RB_CNTL, tmp);
3790 return (wptr & rdev->ih.ptr_mask);
3794 * Each IV ring entry is 128 bits:
3795 * [7:0] - interrupt source id
3797 * [59:32] - interrupt source data
3798 * [127:60] - reserved
3800 * The basic interrupt vector entries
3801 * are decoded as follows:
3802 * src_id src_data description
3807 * 19 0 FP Hot plug detection A
3808 * 19 1 FP Hot plug detection B
3809 * 19 2 DAC A auto-detection
3810 * 19 3 DAC B auto-detection
3816 * 181 - EOP Interrupt
3819 * Note, these are based on r600 and may need to be
3820 * adjusted or added to on newer asics
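
/*
 * Illustrative sketch only: decoding one IV entry into src_id/src_data,
 * matching the bit layout documented above and the open-coded version in
 * r600_irq_process() below. The helper name is hypothetical.
 */
static inline void r600_iv_decode_sketch(const volatile u32 *ring, u32 ring_index,
					 u32 *src_id, u32 *src_data)
{
	*src_id = le32_to_cpu(ring[ring_index]) & 0xff;			/* bits [7:0] */
	*src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;	/* bits [59:32] */
}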
3823 int r600_irq_process(struct radeon_device *rdev)
3827 u32 src_id, src_data;
3829 bool queue_hotplug = false;
3830 bool queue_hdmi = false;
3832 if (!rdev->ih.enabled || rdev->shutdown)
3835 /* No MSIs, need a dummy read to flush PCI DMAs */
3836 if (!rdev->msi_enabled)
3839 wptr = r600_get_ih_wptr(rdev);
3842 /* is somebody else already processing irqs? */
3843 if (atomic_xchg(&rdev->ih.lock, 1))
3846 rptr = rdev->ih.rptr;
3847 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3849 /* Order reading of wptr vs. reading of IH ring data */
3852 /* display interrupts */
3855 while (rptr != wptr) {
3856 /* wptr/rptr are in bytes! */
3857 ring_index = rptr / 4;
3858 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3859 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3862 case 1: /* D1 vblank/vline */
3864 case 0: /* D1 vblank */
3865 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3866 if (rdev->irq.crtc_vblank_int[0]) {
3867 drm_handle_vblank(rdev->ddev, 0);
3868 rdev->pm.vblank_sync = true;
3869 wake_up(&rdev->irq.vblank_queue);
3871 if (atomic_read(&rdev->irq.pflip[0]))
3872 radeon_crtc_handle_flip(rdev, 0);
3873 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3874 DRM_DEBUG("IH: D1 vblank\n");
3877 case 1: /* D1 vline */
3878 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3879 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3880 DRM_DEBUG("IH: D1 vline\n");
3884 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3888 case 5: /* D2 vblank/vline */
3890 case 0: /* D2 vblank */
3891 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3892 if (rdev->irq.crtc_vblank_int[1]) {
3893 drm_handle_vblank(rdev->ddev, 1);
3894 rdev->pm.vblank_sync = true;
3895 wake_up(&rdev->irq.vblank_queue);
3897 if (atomic_read(&rdev->irq.pflip[1]))
3898 radeon_crtc_handle_flip(rdev, 1);
3899 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3900 DRM_DEBUG("IH: D2 vblank\n");
		case 1: /* D2 vline */
3904 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3905 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3906 DRM_DEBUG("IH: D2 vline\n");
3910 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3914 case 19: /* HPD/DAC hotplug */
3917 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3918 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3919 queue_hotplug = true;
3920 DRM_DEBUG("IH: HPD1\n");
3924 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3925 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3926 queue_hotplug = true;
3927 DRM_DEBUG("IH: HPD2\n");
3931 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3932 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3933 queue_hotplug = true;
3934 DRM_DEBUG("IH: HPD3\n");
3938 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3939 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3940 queue_hotplug = true;
3941 DRM_DEBUG("IH: HPD4\n");
3945 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3946 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3947 queue_hotplug = true;
3948 DRM_DEBUG("IH: HPD5\n");
3952 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3953 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3954 queue_hotplug = true;
3955 DRM_DEBUG("IH: HPD6\n");
3959 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
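	/* hotplug and HDMI audio handling can sleep (e.g. DDC probing),
	 * so it is deferred to process context via work items rather
	 * than handled in the interrupt path itself.
	 */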
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);
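	/* another invocation may have bailed out on the atomic_xchg above
	 * while we were processing, so any vectors written in the meantime
	 * must be picked up here or they would be lost.
	 */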
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to rendering
 * corruption (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
 * To avoid this we perform the HDP flush directly via an MMIO
 * register write.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug.  Write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));
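	/* kicking RECONFIG_NOW starts the width change in hardware; while
	 * the link retrains, reads of the profile register below appear
	 * to return all 1s, so poll until it reads back sanely.
	 */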
	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);
}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}
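	/* only attempt the speed change if the link partner has ever
	 * advertised gen2 support; otherwise just lock in the current
	 * configuration by disabling upconfig in the else branch below.
	 */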
	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);
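		/* the 0x4088 accesses above presumably reach the PCIe
		 * LINK_CNTL2 config register through the MM_CFGREGS
		 * window; a target link speed of 0x2 selects 5.0 GT/s
		 * (gen2) per the PCIe spec encoding.
		 */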
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}

/**
 * r600_get_gpu_clock - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
	uint64_t clock;

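	/* writing RLC_CAPTURE_GPU_CLOCK_COUNT presumably latches both
	 * counter halves so the two 32-bit reads below are mutually
	 * consistent; the mutex keeps concurrent callers from
	 * clobbering each other's latched value.
	 */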
	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}