/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static const char *radeon_pm_state_type_name[5] = {
	"Default",
	"Powersave",
	"Battery",
	"Balanced",
	"Performance",
};

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

#define ACPI_AC_CLASS "ac_adapter"

#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_DEBUG_DRIVER("pm: AC\n");
		else
			DRM_DEBUG_DRIVER("pm: DC\n");

		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
				mutex_lock(&rdev->pm.mutex);
				radeon_pm_update_profile(rdev);
				radeon_pm_set_clocks(rdev);
				mutex_unlock(&rdev->pm.mutex);
			}
		}
	}

	return NOTIFY_OK;
}
#endif
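/* Map the user-selected profile (default/auto/low/mid/high) to an index
 * into the profile table, choosing the single- vs. multi-head variant
 * from the active crtc count (and AC vs. DC power for "auto"), then
 * latch the requested power state and clock mode for the dpms on/off case.
 */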
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_MID:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}
}

static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}
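/* Program the requested power state: clamp sclk/mclk to the defaults,
 * reprogram voltage/pcie lanes on the correct side of the clock change
 * (upvolt before raising clocks, downvolt after lowering them), and only
 * reclock while the GUI is idle and, for dynpm, inside the vblank period.
 */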
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;
	bool misc_after = false;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->clock.default_sclk)
			sclk = rdev->clock.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->clock.default_mclk)
			mclk = rdev->clock.default_mclk;

		/* upvolt before raising clocks, downvolt after lowering clocks */
		if (sclk < rdev->pm.current_sclk)
			misc_after = true;

		radeon_sync_with_vblank(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			if (!radeon_pm_in_vbl(rdev))
				return;
		}

		radeon_pm_prepare(rdev);

		if (!misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
		}

		if (misc_after)
			/* voltage, pcie lanes, etc. */
			radeon_pm_misc(rdev);

		radeon_pm_finish(rdev);

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}

static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i;

	mutex_lock(&rdev->ddev->struct_mutex);
	mutex_lock(&rdev->vram_mutex);
	mutex_lock(&rdev->cp.mutex);

	/* gui idle int has issues on older chips it seems */
	if (rdev->family >= CHIP_R600) {
		if (rdev->irq.installed) {
			/* wait for GPU idle */
			rdev->pm.gui_idle = false;
			rdev->irq.gui_idle = true;
			radeon_irq_set(rdev);
			wait_event_interruptible_timeout(
				rdev->irq.idle_queue, rdev->pm.gui_idle,
				msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
			rdev->irq.gui_idle = false;
			radeon_irq_set(rdev);
		}
	} else {
		if (rdev->cp.ready) {
			struct radeon_fence *fence;
			radeon_ring_alloc(rdev, 64);
			radeon_fence_create(rdev, &fence);
			radeon_fence_emit(rdev, fence);
			radeon_ring_commit(rdev);
			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);
		}
	}
	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->cp.mutex);
	mutex_unlock(&rdev->vram_mutex);
	mutex_unlock(&rdev->ddev->struct_mutex);
}
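/* Dump all power states and their clock modes parsed from the BIOS
 * tables to the debug log (clocks are stored in units of 10 kHz).
 */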
static void radeon_pm_print_states(struct radeon_device *rdev)
{
	int i, j;
	struct radeon_power_state *power_state;
	struct radeon_pm_clock_info *clock_info;

	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
	for (i = 0; i < rdev->pm.num_power_states; i++) {
		power_state = &rdev->pm.power_state[i];
		DRM_DEBUG_DRIVER("State %d: %s\n", i,
				 radeon_pm_state_type_name[power_state->type]);
		if (i == rdev->pm.default_power_state_index)
			DRM_DEBUG_DRIVER("\tDefault");
		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
			DRM_DEBUG_DRIVER("\tSingle display only\n");
		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
		for (j = 0; j < power_state->num_clock_modes; j++) {
			clock_info = &(power_state->clock_info[j]);
			if (rdev->flags & RADEON_IS_IGP)
				DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
			else
				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
						 j,
						 clock_info->sclk * 10,
						 clock_info->mclk * 10,
						 clock_info->voltage.voltage,
						 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
		}
	}
}

static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_MID) ? "mid" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("mid", buf, strlen("mid")) == 0)
			rdev->pm.profile = PM_PROFILE_MID;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			DRM_ERROR("invalid power profile!\n");
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
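/* sysfs store for power_method: switching to "dynpm" starts in the
 * paused state until clocks are recomputed; switching to "profile"
 * cancels the dynpm idle work, flushing the workqueue only after
 * dropping the pm mutex since the work handler takes that mutex too.
 */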
"dynpm" : "profile"); 385 } 386 387 static ssize_t radeon_set_pm_method(struct device *dev, 388 struct device_attribute *attr, 389 const char *buf, 390 size_t count) 391 { 392 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 393 struct radeon_device *rdev = ddev->dev_private; 394 395 396 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 397 mutex_lock(&rdev->pm.mutex); 398 rdev->pm.pm_method = PM_METHOD_DYNPM; 399 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 400 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 401 mutex_unlock(&rdev->pm.mutex); 402 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 403 bool flush_wq = false; 404 405 mutex_lock(&rdev->pm.mutex); 406 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 407 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 408 flush_wq = true; 409 } 410 /* disable dynpm */ 411 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 412 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 413 rdev->pm.pm_method = PM_METHOD_PROFILE; 414 mutex_unlock(&rdev->pm.mutex); 415 if (flush_wq) 416 flush_workqueue(rdev->wq); 417 } else { 418 DRM_ERROR("invalid power method!\n"); 419 goto fail; 420 } 421 radeon_pm_compute_clocks(rdev); 422 fail: 423 return count; 424 } 425 426 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 427 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 428 429 static ssize_t radeon_hwmon_show_temp(struct device *dev, 430 struct device_attribute *attr, 431 char *buf) 432 { 433 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 434 struct radeon_device *rdev = ddev->dev_private; 435 u32 temp; 436 437 switch (rdev->pm.int_thermal_type) { 438 case THERMAL_TYPE_RV6XX: 439 temp = rv6xx_get_temp(rdev); 440 break; 441 case THERMAL_TYPE_RV770: 442 temp = rv770_get_temp(rdev); 443 break; 444 case THERMAL_TYPE_EVERGREEN: 445 temp = evergreen_get_temp(rdev); 446 break; 447 default: 448 temp = 0; 449 break; 450 } 451 452 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 453 } 454 455 static ssize_t radeon_hwmon_show_name(struct device *dev, 456 struct device_attribute *attr, 457 char *buf) 458 { 459 return sprintf(buf, "radeon\n"); 460 } 461 462 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 463 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 464 465 static struct attribute *hwmon_attributes[] = { 466 &sensor_dev_attr_temp1_input.dev_attr.attr, 467 &sensor_dev_attr_name.dev_attr.attr, 468 NULL 469 }; 470 471 static const struct attribute_group hwmon_attrgroup = { 472 .attrs = hwmon_attributes, 473 }; 474 475 static int radeon_hwmon_init(struct radeon_device *rdev) 476 { 477 int err = 0; 478 479 rdev->pm.int_hwmon_dev = NULL; 480 481 switch (rdev->pm.int_thermal_type) { 482 case THERMAL_TYPE_RV6XX: 483 case THERMAL_TYPE_RV770: 484 case THERMAL_TYPE_EVERGREEN: 485 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 486 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 487 err = PTR_ERR(rdev->pm.int_hwmon_dev); 488 dev_err(rdev->dev, 489 "Unable to register hwmon device: %d\n", err); 490 break; 491 } 492 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); 493 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, 494 &hwmon_attrgroup); 495 if (err) { 496 dev_err(rdev->dev, 497 "Unable to create hwmon sysfs file: %d\n", err); 498 hwmon_device_unregister(rdev->dev); 499 } 500 break; 501 default: 502 break; 503 } 504 505 return err; 506 } 507 508 static void radeon_hwmon_fini(struct 
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
	if (rdev->pm.int_hwmon_dev) {
		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
	}
}

void radeon_pm_suspend(struct radeon_device *rdev)
{
	bool flush_wq = false;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
		flush_wq = true;
	}
	mutex_unlock(&rdev->pm.mutex);
	if (flush_wq)
		flush_workqueue(rdev->wq);
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	/* asic init will reset the default power state */
	mutex_lock(&rdev->pm.mutex);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
				   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	mutex_unlock(&rdev->pm.mutex);
	radeon_pm_compute_clocks(rdev);
}
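/* One-time pm setup: default to the profile method, pull the power
 * state tables from the ATOM or combios ROM, register the internal
 * thermal sensor, and (when more than one power state exists) expose
 * the sysfs/debugfs controls and the dynpm idle worker.
 */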
int radeon_pm_init(struct radeon_device *rdev)
{
	int ret;

	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.profile = PM_PROFILE_DEFAULT;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_print_states(rdev);
		radeon_pm_init_profile(rdev);
	}

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;
	if (rdev->pm.num_power_states > 1) {
		/* where's the best place to put these? */
		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
		if (ret)
			DRM_ERROR("failed to create device file for power profile\n");
		ret = device_create_file(rdev->dev, &dev_attr_power_method);
		if (ret)
			DRM_ERROR("failed to create device file for power method\n");

#ifdef CONFIG_ACPI
		rdev->acpi_nb.notifier_call = radeon_acpi_event;
		register_acpi_notifier(&rdev->acpi_nb);
#endif
		INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		bool flush_wq = false;

		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* cancel work */
			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
			flush_wq = true;
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);
		if (flush_wq)
			flush_workqueue(rdev->wq);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#ifdef CONFIG_ACPI
		unregister_acpi_notifier(&rdev->acpi_nb);
#endif
	}

	radeon_hwmon_fini(rdev);
	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}
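/* Re-evaluate clocking after a display configuration change: recount
 * the active crtcs, then either reapply the current profile or, for
 * dynpm, pause (multiple heads), resume (single head), or drop to
 * minimum clocks (no heads).
 */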
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
			    &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	bool in_vbl = true;

	if (ASIC_IS_DCE4(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 2)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 3)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 4)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 5)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
		}
	} else {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
	}

	if (position < vbl && position > 1)
		in_vbl = false;

	return in_vbl;
}
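/* Debug helper around radeon_pm_in_vbl(): log when a reclock is about
 * to start or finish outside of the vblank period.
 */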
"exit" : "entry"); 790 return in_vbl; 791 } 792 793 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 794 { 795 struct radeon_device *rdev; 796 int resched; 797 rdev = container_of(work, struct radeon_device, 798 pm.dynpm_idle_work.work); 799 800 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 801 mutex_lock(&rdev->pm.mutex); 802 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 803 unsigned long irq_flags; 804 int not_processed = 0; 805 806 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 807 if (!list_empty(&rdev->fence_drv.emited)) { 808 struct list_head *ptr; 809 list_for_each(ptr, &rdev->fence_drv.emited) { 810 /* count up to 3, that's enought info */ 811 if (++not_processed >= 3) 812 break; 813 } 814 } 815 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 816 817 if (not_processed >= 3) { /* should upclock */ 818 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 819 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 820 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 821 rdev->pm.dynpm_can_upclock) { 822 rdev->pm.dynpm_planned_action = 823 DYNPM_ACTION_UPCLOCK; 824 rdev->pm.dynpm_action_timeout = jiffies + 825 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 826 } 827 } else if (not_processed == 0) { /* should downclock */ 828 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 829 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 830 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 831 rdev->pm.dynpm_can_downclock) { 832 rdev->pm.dynpm_planned_action = 833 DYNPM_ACTION_DOWNCLOCK; 834 rdev->pm.dynpm_action_timeout = jiffies + 835 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 836 } 837 } 838 839 /* Note, radeon_pm_set_clocks is called with static_switch set 840 * to false since we want to wait for vbl to avoid flicker. 841 */ 842 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 843 jiffies > rdev->pm.dynpm_action_timeout) { 844 radeon_pm_get_dynpm_state(rdev); 845 radeon_pm_set_clocks(rdev); 846 } 847 848 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 849 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 850 } 851 mutex_unlock(&rdev->pm.mutex); 852 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 853 } 854 855 /* 856 * Debugfs info 857 */ 858 #if defined(CONFIG_DEBUG_FS) 859 860 static int radeon_debugfs_pm_info(struct seq_file *m, void *data) 861 { 862 struct drm_info_node *node = (struct drm_info_node *) m->private; 863 struct drm_device *dev = node->minor->dev; 864 struct radeon_device *rdev = dev->dev_private; 865 866 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 867 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 868 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 869 if (rdev->asic->get_memory_clock) 870 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 871 if (rdev->pm.current_vddc) 872 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 873 if (rdev->asic->get_pcie_lanes) 874 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 875 876 return 0; 877 } 878 879 static struct drm_info_list radeon_pm_info_list[] = { 880 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL}, 881 }; 882 #endif 883 884 static int radeon_debugfs_pm_init(struct radeon_device *rdev) 885 { 886 #if defined(CONFIG_DEBUG_FS) 887 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 888 #else 889 return 0; 890 #endif 891 } 892