/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
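/* Headers assumed for this excerpt, matching the rest of the radeon KMS
 * driver: core DRM, the radeon device structures, and the AVIVO register
 * definitions. */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"

/* All delays below are in milliseconds: how often the dynpm idle worker
 * polls, how long a load change must persist before the clocks are actually
 * switched, and how long to wait for a vblank or for the GPU to go idle. */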
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static void radeon_pm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
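
/* Kick out any CPU mappings of buffers that live in VRAM (GEM objects, the
 * GART table, stolen VGA memory, the r600 blit shader) so nothing touches
 * VRAM while the memory clock is being changed. */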
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}

	if (rdev->gart.table.vram.robj)
		ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);

	if (rdev->stollen_vga_memory)
		ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);

	if (rdev->r600_blit.shader_obj)
		ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
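
/* Perform the actual reclock: pick the target state (for dynamic switches),
 * wait for the GPU to go idle, unmap VRAM buffers, program the new clocks
 * around vblank, and refresh the display watermarks. */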
static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
{
	int i;

	if (!static_switch)
		radeon_get_power_state(rdev, rdev->pm.planned_action);

	mutex_lock(&rdev->ddev->struct_mutex);
	mutex_lock(&rdev->vram_mutex);
	mutex_lock(&rdev->cp.mutex);

	/* wait for GPU idle */
	rdev->pm.gui_idle = false;
	rdev->irq.gui_idle = true;
	radeon_irq_set(rdev);
	wait_event_interruptible_timeout(
		rdev->irq.idle_queue, rdev->pm.gui_idle,
		msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);

	radeon_unmap_vram_bos(rdev);

	if (!static_switch) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev, static_switch);

	if (!static_switch) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.planned_action = PM_ACTION_NONE;

	mutex_unlock(&rdev->cp.mutex);
	mutex_unlock(&rdev->vram_mutex);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
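
/* sysfs "power_state": reading returns the current state as
 * "<power state index>.<clock mode index>". */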
static ssize_t radeon_get_power_state_static(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%d.%d\n", rdev->pm.current_power_state_index,
			rdev->pm.current_clock_mode_index);
}
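
/* Writing "<state>.<mode>" to "power_state" disables dynpm and forces that
 * combination, e.g. "echo 2.1 > power_state" in the device's sysfs directory
 * requests power state 2, clock mode 1.  The request is rejected for
 * single-display-only states when more than one head is active. */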
static ssize_t radeon_set_power_state_static(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf,
					     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int ps, cm;

	if (sscanf(buf, "%u.%u", &ps, &cm) != 2) {
		DRM_ERROR("Invalid power state!\n");
		return count;
	}

	mutex_lock(&rdev->pm.mutex);
	if ((ps >= 0) && (ps < rdev->pm.num_power_states) &&
	    (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) {
		if ((rdev->pm.active_crtc_count > 1) &&
		    (rdev->pm.power_state[ps].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)) {
			DRM_ERROR("Invalid power state for multi-head: %d.%d\n", ps, cm);
		} else {
			/* disable dynpm and switch to the requested state */
			rdev->pm.state = PM_STATE_DISABLED;
			rdev->pm.planned_action = PM_ACTION_NONE;
			rdev->pm.requested_power_state_index = ps;
			rdev->pm.requested_clock_mode_index = cm;
			radeon_pm_set_clocks(rdev, true);
		}
	} else
		DRM_ERROR("Invalid power state: %d.%d\n", ps, cm);
	mutex_unlock(&rdev->pm.mutex);

	return count;
}
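
/* sysfs "dynpm": writing "0" disables dynamic power management, "1" enables
 * it (only honoured when more than one power state is available). */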
static ssize_t radeon_get_dynpm(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(rdev->pm.state == PM_STATE_DISABLED) ? "disabled" : "enabled");
}
static ssize_t radeon_set_dynpm(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int tmp = simple_strtoul(buf, NULL, 10);

	if (tmp == 0) {
		/* update power mode info */
		radeon_pm_compute_clocks(rdev);
		/* disable dynpm */
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.state = PM_STATE_DISABLED;
		rdev->pm.planned_action = PM_ACTION_NONE;
		mutex_unlock(&rdev->pm.mutex);
		DRM_INFO("radeon: dynamic power management disabled\n");
	} else if (tmp == 1) {
		if (rdev->pm.num_power_states > 1) {
			/* enable dynpm */
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_DEFAULT;
			radeon_get_power_state(rdev, rdev->pm.planned_action);
			mutex_unlock(&rdev->pm.mutex);
			/* update power mode info */
			radeon_pm_compute_clocks(rdev);
			DRM_INFO("radeon: dynamic power management enabled\n");
		} else
			DRM_ERROR("dynpm not valid on this system\n");
	} else
		DRM_ERROR("Invalid setting: %d\n", tmp);

	return count;
}
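
/* Both attributes are world-readable and root-writable; they are registered
 * in radeon_pm_init() and removed again in radeon_pm_fini(). */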
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, radeon_get_power_state_static, radeon_set_power_state_static);
static DEVICE_ATTR(dynpm, S_IRUGO | S_IWUSR, radeon_get_dynpm, radeon_set_dynpm);

/* indexed by rdev->pm.state */
static const char *pm_state_names[4] = {
	"PM_STATE_DISABLED", "PM_STATE_MINIMUM", "PM_STATE_PAUSED", "PM_STATE_ACTIVE"
};

/* indexed by power_state[i].type */
static const char *pm_state_types[5] = {
	"Default", "Powersave", "Battery", "Balanced", "Performance"
};
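
/* Dump the power state tables to the kernel log at init.  Clock values are
 * stored in units of 10 kHz, hence the "* 10" when printing kHz. */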
static void radeon_print_power_mode_info(struct radeon_device *rdev)
{
	int i, j;
	bool is_default;

	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.default_power_state_index == i)
			is_default = true;
		else
			is_default = false;
		DRM_INFO("State %d %s %s\n", i,
			 pm_state_types[rdev->pm.power_state[i].type],
			 is_default ? "(default)" : "");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].pcie_lanes);
		if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
			DRM_INFO("\tSingle display only\n");
		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
			if (rdev->flags & RADEON_IS_IGP)
				DRM_INFO("\t\t%d engine: %d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
			else
				DRM_INFO("\t\t%d engine/memory: %d/%d\n",
					 j,
					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
		}
	}
}
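
/* If any CRTC is active, wait (with a timeout) for the next vblank interrupt
 * so clock changes can be applied during the blanking period. */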
void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
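
/* One-time PM setup: parse the power state tables from the video BIOS,
 * register the debugfs/sysfs interfaces and the idle worker, and start dynpm
 * paused if the radeon_dynpm module option asks for it. */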
int radeon_pm_init(struct radeon_device *rdev)
{
	rdev->pm.state = PM_STATE_DISABLED;
	rdev->pm.planned_action = PM_ACTION_NONE;
	rdev->pm.can_upclock = true;
	rdev->pm.can_downclock = true;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_print_power_mode_info(rdev);
	}

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for PM!\n");
	}

	/* where's the best place to put this? */
	device_create_file(rdev->dev, &dev_attr_power_state);
	device_create_file(rdev->dev, &dev_attr_dynpm);

	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

	if ((radeon_dynpm != -1 && radeon_dynpm) && (rdev->pm.num_power_states > 1)) {
		rdev->pm.state = PM_STATE_PAUSED;
		DRM_INFO("radeon: dynamic power management enabled\n");
	}

	DRM_INFO("radeon: power management initialized\n");

	return 0;
}
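
/* Tear-down: stop the idle worker and restore sane clocks (or the defaults
 * if a static state was forced), then remove the sysfs files and the PM i2c
 * bus. */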
void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.state != PM_STATE_DISABLED) {
		/* cancel work */
		cancel_delayed_work_sync(&rdev->pm.idle_work);
		/* reset default clocks */
		rdev->pm.state = PM_STATE_DISABLED;
		rdev->pm.planned_action = PM_ACTION_DEFAULT;
		radeon_pm_set_clocks(rdev, false);
	} else if ((rdev->pm.current_power_state_index !=
		    rdev->pm.default_power_state_index) ||
		   (rdev->pm.current_clock_mode_index != 0)) {
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.requested_clock_mode_index = 0;
		mutex_lock(&rdev->pm.mutex);
		radeon_pm_set_clocks(rdev, true);
		mutex_unlock(&rdev->pm.mutex);
	}

	device_remove_file(rdev->dev, &dev_attr_power_state);
	device_remove_file(rdev->dev, &dev_attr_dynpm);

	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}
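
/* Recount the active CRTCs and move the dynpm state machine accordingly:
 * pause reclocking with multiple heads, run the idle worker with one head,
 * and drop to minimum clocks when no head is enabled. */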
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.state == PM_STATE_DISABLED)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.active_crtc_count > 1) {
		if (rdev->pm.state == PM_STATE_ACTIVE) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_PAUSED;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev, false);

			DRM_DEBUG("radeon: dynamic power management deactivated\n");
		}
	} else if (rdev->pm.active_crtc_count == 1) {
		/* TODO: Increase clocks if needed for current mode */

		if (rdev->pm.state == PM_STATE_MINIMUM) {
			rdev->pm.state = PM_STATE_ACTIVE;
			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
			radeon_pm_set_clocks(rdev, false);

			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
		} else if (rdev->pm.state == PM_STATE_PAUSED) {
			rdev->pm.state = PM_STATE_ACTIVE;
			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
			DRM_DEBUG("radeon: dynamic power management activated\n");
		}
	} else { /* count == 0 */
		if (rdev->pm.state != PM_STATE_MINIMUM) {
			cancel_delayed_work(&rdev->pm.idle_work);

			rdev->pm.state = PM_STATE_MINIMUM;
			rdev->pm.planned_action = PM_ACTION_MINIMUM;
			radeon_pm_set_clocks(rdev, false);
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}
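
/* Report whether the active CRTC(s) are currently in their vertical blanking
 * interval, using the per-generation CRTC status/position registers. */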
bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	bool in_vbl = true;

	if (ASIC_IS_DCE4(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 2)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 3)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 4)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 5)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
		}
		if (position < vbl && position > 1)
			in_vbl = false;
	} else {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
	}

	if (position < vbl && position > 1)
		in_vbl = false;

	return in_vbl;
}

bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}
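
/* Periodic dynpm worker: sample how many emitted fences are still pending
 * and plan an upclock (busy) or downclock (idle); the planned change is
 * applied once RADEON_RECLOCK_DELAY_MS has elapsed. */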
static void radeon_pm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
			    pm.idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.state == PM_STATE_ACTIVE) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.can_upclock) {
				rdev->pm.planned_action =
					PM_ACTION_UPCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
				rdev->pm.planned_action = PM_ACTION_NONE;
			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
				   rdev->pm.can_downclock) {
				rdev->pm.planned_action =
					PM_ACTION_DOWNCLOCK;
				rdev->pm.action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		if (rdev->pm.planned_action != PM_ACTION_NONE &&
		    jiffies > rdev->pm.action_timeout) {
			radeon_pm_set_clocks(rdev, false);
		}
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
			   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
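
/* Debugfs: expose the PM state and current clocks as "radeon_pm_info",
 * typically found under /sys/kernel/debug/dri/<minor>/. */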
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
	/* clocks are stored in units of 10 kHz; the trailing "0" in the
	 * format string converts them to kHz for display */
	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}