/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/seq_file.h>

#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);
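
/*
 * DIDT (dI/dt) droop-control configuration for the SQ, DB, TD and TCP
 * blocks.  kv_program_pt_config_registers() below consumes each entry as
 * { offset, mask, shift, value, type }: 'value' is shifted into the masked
 * field of the register at 'offset', in the register space selected by
 * 'type' (SMC indirect, DIDT indirect, or plain MMIO).
 */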
static const struct kv_pt_config_reg didt_config_kv[] = {
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}
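
/*
 * CAC (capacitance * activity) power estimation in the SMU.
 * pi->cac_enabled tracks the current state so the disable message is
 * only sent when CAC was actually enabled.
 */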
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
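
/*
 * Freezing SCLK DPM parks the SMU on the current level while the
 * graphics level table is rewritten; kv_dpm_set_power_state() freezes,
 * uploads the new settings, then unfreezes.
 */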
static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}
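
/*
 * DFS bypass: clocks close to one of a few known frequencies map to a
 * fixed bypass divider instead of the DFS.  Clock values here appear to
 * be in 10 kHz units (e.g. 40000 == 400 MHz), matching the rest of the
 * radeon DPM code.
 */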
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
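
/*
 * The kv_populate_*_table() helpers below all follow the same pattern:
 * build the per-level array from the ATOM clock/voltage dependency
 * table, then copy the level count, the sampling interval and the level
 * array into the SMU DPM table in SMC RAM.
 */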
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;

}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
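
/* Note: unlike UVD/VCE/SAMU above, ACP levels are not cut off at
 * pi->high_voltage_t. */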
static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
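
/* ULV (ultra-low voltage) is toggled with a single SMU message. */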
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 thermal_int;

	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
	if (enable)
		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
}
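
/*
 * DPM bring-up: locate the DPM table and soft registers via the firmware
 * header, program the bootup state and level tables, then enable the
 * individual features (thermal throttling, voltage scaling, ULV, DIDT,
 * CAC) in sequence, failing fast on the first error.
 */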
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);

	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
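
/*
 * Power-gating helpers: on the gate path the block's DPM is disabled
 * (and the engine stopped, where there is one to stop) before the SMU
 * powers it off; the ungate path reverses that order.
 */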
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ?  */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
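
/*
 * Push the boot level's ClkBypassCntl byte into the SMU graphics level
 * table; zero when the new state does not need DFS bypass.
 */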
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
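
/*
 * State switch: on Kabini/Mullins the levels are forced to the lowest
 * valid entry around the table update and then unforced; on Kaveri,
 * SCLK DPM is frozen, the new settings uploaded, then unfrozen.
 */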
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}
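
/* Seed the boot power level from the bootup sclk and NB voltage index
 * reported in sys_info. */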
static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}
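
/*
 * Adjust the requested state to platform limits: raise per-level sclks
 * to the minimum (and to the VCE requirement while encoding), cap levels
 * that exceed high_voltage_t, pin all levels to ~75% of max sclk in
 * stable-p-state mode, and choose the NB p-state hints.
 */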
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		/* no entry at or below the 75% target: fall back to the lowest entry */
		if (i < 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
			  new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

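/*
 * The helpers below fill in the per-level fields of the SMU7 graphics
 * level table (deep sleep divider, GNB slow, NB p-state force, display
 * watermark) before it is uploaded to the SMC.  The table is kept in SMC
 * (big endian) byte order, hence the be32_to_cpu() on SclkFrequency.
 */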
static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

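/*
 * Build the graphics DPM levels from the vddc dependency table when the
 * VBIOS provides one, otherwise from the sclk/voltage mapping table.
 * Entries above the high voltage threshold are dropped, and every level
 * starts out disabled until kv_enable_new_levels() turns the valid range
 * back on.
 */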
static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

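/*
 * Program the thermal interrupt thresholds.  DIG_THERM_INTH/INTL take the
 * temperature in degrees C biased by +49, so the millidegree limits are
 * divided by 1000 and rebased before being written.
 */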
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

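/*
 * ATOM powerplay table parsing.  The unions below overlay the various
 * table revisions found in the VBIOS; the code here only ever dereferences
 * the pplib layouts and the SUMO flavor of the clock info entries.
 */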
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

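/*
 * Walk the ATOM state array and build the radeon_ps list: one kv_ps per
 * state and one kv_pl per DPM level, capped at
 * SUMO_MAX_HARDWARE_POWERLEVELS.  The VCE states are filled in afterwards
 * from the shared clock info array.
 */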
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			/* don't leak the states parsed so far on a malformed table */
			for (j = 0; j < i; j++)
				kfree(rdev->pm.dpm.ps[j].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -EINVAL;
		}
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			for (j = 0; j < i; j++)
				kfree(rdev->pm.dpm.ps[j].ps_priv);
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

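/*
 * One-time DPM setup: allocate the private power info, parse the platform
 * caps and the VBIOS tables, and choose defaults for the caps_* feature
 * flags used by the rest of the driver.
 */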
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		return 0;
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		return sclk;
	}
}

u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}

void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}

void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}

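/*
 * Report the requested state's lowest/highest sclk.  These APUs carve
 * their memory out of system RAM, so mclk is always reported as the
 * bootup UMA clock.
 */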
u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}