/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/* Dynamic power management (DPM) for Kaveri/Kabini/Mullins (KV) APUs. */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include "radeon_asic.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5	/* highest valid deep-sleep sclk divider id */
#define KV_MINIMUM_ENGINE_CLOCK		800	/* engine clock floor; assumed 10 kHz units -- TODO confirm */
#define SMC_RAM_END			0x40000	/* first address past the SMC SRAM window */

/* Forward declarations for helpers defined later in this file. */
static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

/* RLC safe-mode and clockgating helpers implemented in cik.c. */
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

/*
 * Local CAC (leakage monitoring) configuration tables.
 * Per kv_program_local_cac_table() below, each entry is
 * { block_id, signal_id (used as a per-block signal count), t };
 * a block_id of 0xffffffff terminates the table.
 */
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

/*
 * Per-block CAC control register descriptions; field order follows the
 * usage in kv_program_local_cac_table():
 * { cntl, block_mask, block_shift, signal_mask, signal_shift,
 *   t_mask, t_shift, enable_mask, enable_shift } -- TODO confirm against
 * struct kv_lcac_config_reg in kv_dpm.h.
 */
static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

/*
 * DIDT (di/dt power containment) register programming table.
 * Each entry is { offset, mask, shift, value, type } as consumed by
 * kv_program_pt_config_registers(); 0xFFFFFFFF terminates the table.
 * The four groups (0x0x, 0x2x, 0x4x, 0x6x offsets) presumably correspond
 * to the SQ/DB/TD/TCP DIDT banks toggled in kv_do_enable_didt() --
 * TODO confirm against cikd.h register definitions.
 */
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

/* Return the KV-specific state embedded in a generic radeon power state. */
static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

/* Return the KV-specific power info hung off the device's DPM state. */
static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
/*
 * Program one local CAC table: for every table entry, write one register
 * value per signal (signal_id is the signal count for that block_id).
 * Currently compiled out.
 */
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

/*
 * Program a 0xFFFFFFFF-terminated table of power-containment config
 * registers.  KV_CONFIGREG_CACHE entries do not touch hardware; they only
 * accumulate bits into "cache", which is OR'd into the next non-cache
 * register write and then cleared.  Register access width depends on the
 * entry type (SMC-indirect, DIDT-indirect, or direct MMIO; direct offsets
 * are dword indices, hence the << 2).
 *
 * Returns 0 on success, -EINVAL if cac_config_regs is NULL.
 */
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

/*
 * Set or clear DIDT_CTRL_EN in each ramping block's CTRL0 register,
 * but only for the blocks enabled in the power info caps.
 */
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

/*
 * Enable/disable DIDT power containment.  The config table is programmed
 * (on enable) and the enable bits toggled while the RLC is held in safe
 * mode.  A no-op returning 0 if no ramping block is enabled.
 */
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
/*
 * Clear the per-block CAC override registers and program every local CAC
 * table.  Currently compiled out along with kv_program_local_cac_table().
 */
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

/*
 * Ask the SMU to enable/disable CAC and mirror the result in
 * pi->cac_enabled.  Note the disable path deliberately ignores the SMU
 * message result and still returns 0.
 */
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

454 static int kv_process_firmware_header(struct radeon_device *rdev) 455 { 456 struct kv_power_info *pi = kv_get_pi(rdev); 457 u32 tmp; 458 int ret; 459 460 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION + 461 offsetof(SMU7_Firmware_Header, DpmTable), 462 &tmp, pi->sram_end); 463 464 if (ret == 0) 465 pi->dpm_table_start = tmp; 466 467 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION + 468 offsetof(SMU7_Firmware_Header, SoftRegisters), 469 &tmp, pi->sram_end); 470 471 if (ret == 0) 472 pi->soft_regs_start = tmp; 473 474 return ret; 475 } 476 477 static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev) 478 { 479 struct kv_power_info *pi = kv_get_pi(rdev); 480 int ret; 481 482 pi->graphics_voltage_change_enable = 1; 483 484 ret = kv_copy_bytes_to_smc(rdev, 485 pi->dpm_table_start + 486 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 487 &pi->graphics_voltage_change_enable, 488 sizeof(u8), pi->sram_end); 489 490 return ret; 491 } 492 493 static int kv_set_dpm_interval(struct radeon_device *rdev) 494 { 495 struct kv_power_info *pi = kv_get_pi(rdev); 496 int ret; 497 498 pi->graphics_interval = 1; 499 500 ret = kv_copy_bytes_to_smc(rdev, 501 pi->dpm_table_start + 502 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 503 &pi->graphics_interval, 504 sizeof(u8), pi->sram_end); 505 506 return ret; 507 } 508 509 static int kv_set_dpm_boot_state(struct radeon_device *rdev) 510 { 511 struct kv_power_info *pi = kv_get_pi(rdev); 512 int ret; 513 514 ret = kv_copy_bytes_to_smc(rdev, 515 pi->dpm_table_start + 516 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 517 &pi->graphics_boot_level, 518 sizeof(u8), pi->sram_end); 519 520 return ret; 521 } 522 523 static void kv_program_vc(struct radeon_device *rdev) 524 { 525 WREG32_SMC(CG_FTV_0, 0x3FFFC100); 526 } 527 528 static void kv_clear_vc(struct radeon_device *rdev) 529 { 530 WREG32_SMC(CG_FTV_0, 0); 531 } 532 533 static int kv_set_divider_value(struct radeon_device 
*rdev, 534 u32 index, u32 sclk) 535 { 536 struct kv_power_info *pi = kv_get_pi(rdev); 537 struct atom_clock_dividers dividers; 538 int ret; 539 540 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 541 sclk, false, ÷rs); 542 if (ret) 543 return ret; 544 545 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 546 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 547 548 return 0; 549 } 550 551 static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, 552 struct sumo_vid_mapping_table *vid_mapping_table, 553 u32 vid_2bit) 554 { 555 struct radeon_clock_voltage_dependency_table *vddc_sclk_table = 556 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 557 u32 i; 558 559 if (vddc_sclk_table && vddc_sclk_table->count) { 560 if (vid_2bit < vddc_sclk_table->count) 561 return vddc_sclk_table->entries[vid_2bit].v; 562 else 563 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 564 } else { 565 for (i = 0; i < vid_mapping_table->num_entries; i++) { 566 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 567 return vid_mapping_table->entries[i].vid_7bit; 568 } 569 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 570 } 571 } 572 573 static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, 574 struct sumo_vid_mapping_table *vid_mapping_table, 575 u32 vid_7bit) 576 { 577 struct radeon_clock_voltage_dependency_table *vddc_sclk_table = 578 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 579 u32 i; 580 581 if (vddc_sclk_table && vddc_sclk_table->count) { 582 for (i = 0; i < vddc_sclk_table->count; i++) { 583 if (vddc_sclk_table->entries[i].v == vid_7bit) 584 return i; 585 } 586 return vddc_sclk_table->count - 1; 587 } else { 588 for (i = 0; i < vid_mapping_table->num_entries; i++) { 589 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 590 return vid_mapping_table->entries[i].vid_2bit; 591 } 592 593 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 594 } 
595 } 596 597 static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, 598 u16 voltage) 599 { 600 return 6200 - (voltage * 25); 601 } 602 603 static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, 604 u32 vid_2bit) 605 { 606 struct kv_power_info *pi = kv_get_pi(rdev); 607 u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, 608 &pi->sys_info.vid_mapping_table, 609 vid_2bit); 610 611 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); 612 } 613 614 615 static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid) 616 { 617 struct kv_power_info *pi = kv_get_pi(rdev); 618 619 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 620 pi->graphics_level[index].MinVddNb = 621 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid)); 622 623 return 0; 624 } 625 626 static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at) 627 { 628 struct kv_power_info *pi = kv_get_pi(rdev); 629 630 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 631 632 return 0; 633 } 634 635 static void kv_dpm_power_level_enable(struct radeon_device *rdev, 636 u32 index, bool enable) 637 { 638 struct kv_power_info *pi = kv_get_pi(rdev); 639 640 pi->graphics_level[index].EnabledForActivity = enable ? 
1 : 0; 641 } 642 643 static void kv_start_dpm(struct radeon_device *rdev) 644 { 645 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 646 647 tmp |= GLOBAL_PWRMGT_EN; 648 WREG32_SMC(GENERAL_PWRMGT, tmp); 649 650 kv_smc_dpm_enable(rdev, true); 651 } 652 653 static void kv_stop_dpm(struct radeon_device *rdev) 654 { 655 kv_smc_dpm_enable(rdev, false); 656 } 657 658 static void kv_start_am(struct radeon_device *rdev) 659 { 660 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL); 661 662 sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); 663 sclk_pwrmgt_cntl |= DYNAMIC_PM_EN; 664 665 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 666 } 667 668 static void kv_reset_am(struct radeon_device *rdev) 669 { 670 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL); 671 672 sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT); 673 674 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 675 } 676 677 static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze) 678 { 679 return kv_notify_message_to_smu(rdev, freeze ? 
680 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 681 } 682 683 static int kv_force_lowest_valid(struct radeon_device *rdev) 684 { 685 return kv_force_dpm_lowest(rdev); 686 } 687 688 static int kv_unforce_levels(struct radeon_device *rdev) 689 { 690 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 691 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 692 else 693 return kv_set_enabled_levels(rdev); 694 } 695 696 static int kv_update_sclk_t(struct radeon_device *rdev) 697 { 698 struct kv_power_info *pi = kv_get_pi(rdev); 699 u32 low_sclk_interrupt_t = 0; 700 int ret = 0; 701 702 if (pi->caps_sclk_throttle_low_notification) { 703 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 704 705 ret = kv_copy_bytes_to_smc(rdev, 706 pi->dpm_table_start + 707 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 708 (u8 *)&low_sclk_interrupt_t, 709 sizeof(u32), pi->sram_end); 710 } 711 return ret; 712 } 713 714 static int kv_program_bootup_state(struct radeon_device *rdev) 715 { 716 struct kv_power_info *pi = kv_get_pi(rdev); 717 u32 i; 718 struct radeon_clock_voltage_dependency_table *table = 719 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 720 721 if (table && table->count) { 722 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 723 if (table->entries[i].clk == pi->boot_pl.sclk) 724 break; 725 } 726 727 pi->graphics_boot_level = (u8)i; 728 kv_dpm_power_level_enable(rdev, i, true); 729 } else { 730 struct sumo_sclk_voltage_mapping_table *table = 731 &pi->sys_info.sclk_voltage_mapping_table; 732 733 if (table->num_max_dpm_entries == 0) 734 return -EINVAL; 735 736 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 737 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 738 break; 739 } 740 741 pi->graphics_boot_level = (u8)i; 742 kv_dpm_power_level_enable(rdev, i, true); 743 } 744 return 0; 745 } 746 747 static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev) 748 { 749 struct 
kv_power_info *pi = kv_get_pi(rdev); 750 int ret; 751 752 pi->graphics_therm_throttle_enable = 1; 753 754 ret = kv_copy_bytes_to_smc(rdev, 755 pi->dpm_table_start + 756 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 757 &pi->graphics_therm_throttle_enable, 758 sizeof(u8), pi->sram_end); 759 760 return ret; 761 } 762 763 static int kv_upload_dpm_settings(struct radeon_device *rdev) 764 { 765 struct kv_power_info *pi = kv_get_pi(rdev); 766 int ret; 767 768 ret = kv_copy_bytes_to_smc(rdev, 769 pi->dpm_table_start + 770 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 771 (u8 *)&pi->graphics_level, 772 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 773 pi->sram_end); 774 775 if (ret) 776 return ret; 777 778 ret = kv_copy_bytes_to_smc(rdev, 779 pi->dpm_table_start + 780 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 781 &pi->graphics_dpm_level_count, 782 sizeof(u8), pi->sram_end); 783 784 return ret; 785 } 786 787 static u32 kv_get_clock_difference(u32 a, u32 b) 788 { 789 return (a >= b) ? 
a - b : b - a; 790 } 791 792 static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk) 793 { 794 struct kv_power_info *pi = kv_get_pi(rdev); 795 u32 value; 796 797 if (pi->caps_enable_dfs_bypass) { 798 if (kv_get_clock_difference(clk, 40000) < 200) 799 value = 3; 800 else if (kv_get_clock_difference(clk, 30000) < 200) 801 value = 2; 802 else if (kv_get_clock_difference(clk, 20000) < 200) 803 value = 7; 804 else if (kv_get_clock_difference(clk, 15000) < 200) 805 value = 6; 806 else if (kv_get_clock_difference(clk, 10000) < 200) 807 value = 8; 808 else 809 value = 0; 810 } else { 811 value = 0; 812 } 813 814 return value; 815 } 816 817 static int kv_populate_uvd_table(struct radeon_device *rdev) 818 { 819 struct kv_power_info *pi = kv_get_pi(rdev); 820 struct radeon_uvd_clock_voltage_dependency_table *table = 821 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 822 struct atom_clock_dividers dividers; 823 int ret; 824 u32 i; 825 826 if (table == NULL || table->count == 0) 827 return 0; 828 829 pi->uvd_level_count = 0; 830 for (i = 0; i < table->count; i++) { 831 if (pi->high_voltage_t && 832 (pi->high_voltage_t < table->entries[i].v)) 833 break; 834 835 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 836 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 837 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 838 839 pi->uvd_level[i].VClkBypassCntl = 840 (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk); 841 pi->uvd_level[i].DClkBypassCntl = 842 (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk); 843 844 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 845 table->entries[i].vclk, false, ÷rs); 846 if (ret) 847 return ret; 848 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 849 850 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 851 table->entries[i].dclk, false, ÷rs); 852 if (ret) 853 return ret; 854 pi->uvd_level[i].DclkDivider = 
(u8)dividers.post_div; 855 856 pi->uvd_level_count++; 857 } 858 859 ret = kv_copy_bytes_to_smc(rdev, 860 pi->dpm_table_start + 861 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 862 (u8 *)&pi->uvd_level_count, 863 sizeof(u8), pi->sram_end); 864 if (ret) 865 return ret; 866 867 pi->uvd_interval = 1; 868 869 ret = kv_copy_bytes_to_smc(rdev, 870 pi->dpm_table_start + 871 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 872 &pi->uvd_interval, 873 sizeof(u8), pi->sram_end); 874 if (ret) 875 return ret; 876 877 ret = kv_copy_bytes_to_smc(rdev, 878 pi->dpm_table_start + 879 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 880 (u8 *)&pi->uvd_level, 881 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 882 pi->sram_end); 883 884 return ret; 885 886 } 887 888 static int kv_populate_vce_table(struct radeon_device *rdev) 889 { 890 struct kv_power_info *pi = kv_get_pi(rdev); 891 int ret; 892 u32 i; 893 struct radeon_vce_clock_voltage_dependency_table *table = 894 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 895 struct atom_clock_dividers dividers; 896 897 if (table == NULL || table->count == 0) 898 return 0; 899 900 pi->vce_level_count = 0; 901 for (i = 0; i < table->count; i++) { 902 if (pi->high_voltage_t && 903 pi->high_voltage_t < table->entries[i].v) 904 break; 905 906 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 907 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 908 909 pi->vce_level[i].ClkBypassCntl = 910 (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk); 911 912 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 913 table->entries[i].evclk, false, ÷rs); 914 if (ret) 915 return ret; 916 pi->vce_level[i].Divider = (u8)dividers.post_div; 917 918 pi->vce_level_count++; 919 } 920 921 ret = kv_copy_bytes_to_smc(rdev, 922 pi->dpm_table_start + 923 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 924 (u8 *)&pi->vce_level_count, 925 sizeof(u8), 926 pi->sram_end); 927 if (ret) 928 return ret; 929 930 pi->vce_interval 
= 1; 931 932 ret = kv_copy_bytes_to_smc(rdev, 933 pi->dpm_table_start + 934 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 935 (u8 *)&pi->vce_interval, 936 sizeof(u8), 937 pi->sram_end); 938 if (ret) 939 return ret; 940 941 ret = kv_copy_bytes_to_smc(rdev, 942 pi->dpm_table_start + 943 offsetof(SMU7_Fusion_DpmTable, VceLevel), 944 (u8 *)&pi->vce_level, 945 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 946 pi->sram_end); 947 948 return ret; 949 } 950 951 static int kv_populate_samu_table(struct radeon_device *rdev) 952 { 953 struct kv_power_info *pi = kv_get_pi(rdev); 954 struct radeon_clock_voltage_dependency_table *table = 955 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 956 struct atom_clock_dividers dividers; 957 int ret; 958 u32 i; 959 960 if (table == NULL || table->count == 0) 961 return 0; 962 963 pi->samu_level_count = 0; 964 for (i = 0; i < table->count; i++) { 965 if (pi->high_voltage_t && 966 pi->high_voltage_t < table->entries[i].v) 967 break; 968 969 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 970 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 971 972 pi->samu_level[i].ClkBypassCntl = 973 (u8)kv_get_clk_bypass(rdev, table->entries[i].clk); 974 975 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 976 table->entries[i].clk, false, ÷rs); 977 if (ret) 978 return ret; 979 pi->samu_level[i].Divider = (u8)dividers.post_div; 980 981 pi->samu_level_count++; 982 } 983 984 ret = kv_copy_bytes_to_smc(rdev, 985 pi->dpm_table_start + 986 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 987 (u8 *)&pi->samu_level_count, 988 sizeof(u8), 989 pi->sram_end); 990 if (ret) 991 return ret; 992 993 pi->samu_interval = 1; 994 995 ret = kv_copy_bytes_to_smc(rdev, 996 pi->dpm_table_start + 997 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 998 (u8 *)&pi->samu_interval, 999 sizeof(u8), 1000 pi->sram_end); 1001 if (ret) 1002 return ret; 1003 1004 ret = kv_copy_bytes_to_smc(rdev, 1005 
pi->dpm_table_start + 1006 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1007 (u8 *)&pi->samu_level, 1008 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1009 pi->sram_end); 1010 if (ret) 1011 return ret; 1012 1013 return ret; 1014 } 1015 1016 1017 static int kv_populate_acp_table(struct radeon_device *rdev) 1018 { 1019 struct kv_power_info *pi = kv_get_pi(rdev); 1020 struct radeon_clock_voltage_dependency_table *table = 1021 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1022 struct atom_clock_dividers dividers; 1023 int ret; 1024 u32 i; 1025 1026 if (table == NULL || table->count == 0) 1027 return 0; 1028 1029 pi->acp_level_count = 0; 1030 for (i = 0; i < table->count; i++) { 1031 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1032 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1033 1034 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 1035 table->entries[i].clk, false, ÷rs); 1036 if (ret) 1037 return ret; 1038 pi->acp_level[i].Divider = (u8)dividers.post_div; 1039 1040 pi->acp_level_count++; 1041 } 1042 1043 ret = kv_copy_bytes_to_smc(rdev, 1044 pi->dpm_table_start + 1045 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1046 (u8 *)&pi->acp_level_count, 1047 sizeof(u8), 1048 pi->sram_end); 1049 if (ret) 1050 return ret; 1051 1052 pi->acp_interval = 1; 1053 1054 ret = kv_copy_bytes_to_smc(rdev, 1055 pi->dpm_table_start + 1056 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1057 (u8 *)&pi->acp_interval, 1058 sizeof(u8), 1059 pi->sram_end); 1060 if (ret) 1061 return ret; 1062 1063 ret = kv_copy_bytes_to_smc(rdev, 1064 pi->dpm_table_start + 1065 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1066 (u8 *)&pi->acp_level, 1067 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1068 pi->sram_end); 1069 if (ret) 1070 return ret; 1071 1072 return ret; 1073 } 1074 1075 static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev) 1076 { 1077 struct kv_power_info *pi = kv_get_pi(rdev); 1078 u32 i; 
1079 struct radeon_clock_voltage_dependency_table *table = 1080 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1081 1082 if (table && table->count) { 1083 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1084 if (pi->caps_enable_dfs_bypass) { 1085 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1086 pi->graphics_level[i].ClkBypassCntl = 3; 1087 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1088 pi->graphics_level[i].ClkBypassCntl = 2; 1089 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1090 pi->graphics_level[i].ClkBypassCntl = 7; 1091 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) 1092 pi->graphics_level[i].ClkBypassCntl = 6; 1093 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) 1094 pi->graphics_level[i].ClkBypassCntl = 8; 1095 else 1096 pi->graphics_level[i].ClkBypassCntl = 0; 1097 } else { 1098 pi->graphics_level[i].ClkBypassCntl = 0; 1099 } 1100 } 1101 } else { 1102 struct sumo_sclk_voltage_mapping_table *table = 1103 &pi->sys_info.sclk_voltage_mapping_table; 1104 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1105 if (pi->caps_enable_dfs_bypass) { 1106 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1107 pi->graphics_level[i].ClkBypassCntl = 3; 1108 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1109 pi->graphics_level[i].ClkBypassCntl = 2; 1110 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1111 pi->graphics_level[i].ClkBypassCntl = 7; 1112 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1113 pi->graphics_level[i].ClkBypassCntl = 6; 1114 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1115 pi->graphics_level[i].ClkBypassCntl = 8; 1116 else 1117 pi->graphics_level[i].ClkBypassCntl = 0; 1118 } else { 1119 pi->graphics_level[i].ClkBypassCntl = 0; 1120 } 1121 } 1122 } 1123 } 1124 
/* Enable or disable ULV mode via an SMC message. */
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

/* Invalidate the cached ACP boot level (0xff) so the next
 * kv_update_acp_boot_level() call sees a mismatch and re-sends the
 * enabled-mask message to the SMC.
 */
static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

/* Record @rps as the current power state.  Both the generic radeon_ps and
 * the KV-private kv_ps are copied by value, and ps_priv is re-pointed at
 * the private copy so the pair stays self-consistent.
 */
static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

/* Same as kv_update_current_ps() but for the requested (pending) state. */
static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

/* Toggle BAPM in the SMC.  No-op unless BAPM was enabled at init time;
 * a failure is only logged, not propagated.
 */
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

/* Bring up dynamic power management: process the SMC firmware header,
 * build and upload the DPM tables, then enable the individual DPM
 * features in sequence.  Returns 0 on success or the failing step's
 * error code.
 */
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
1196 if (ret) { 1197 DRM_ERROR("kv_populate_uvd_table failed\n"); 1198 return ret; 1199 } 1200 ret = kv_populate_vce_table(rdev); 1201 if (ret) { 1202 DRM_ERROR("kv_populate_vce_table failed\n"); 1203 return ret; 1204 } 1205 ret = kv_populate_samu_table(rdev); 1206 if (ret) { 1207 DRM_ERROR("kv_populate_samu_table failed\n"); 1208 return ret; 1209 } 1210 ret = kv_populate_acp_table(rdev); 1211 if (ret) { 1212 DRM_ERROR("kv_populate_acp_table failed\n"); 1213 return ret; 1214 } 1215 kv_program_vc(rdev); 1216 #if 0 1217 kv_initialize_hardware_cac_manager(rdev); 1218 #endif 1219 kv_start_am(rdev); 1220 if (pi->enable_auto_thermal_throttling) { 1221 ret = kv_enable_auto_thermal_throttling(rdev); 1222 if (ret) { 1223 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1224 return ret; 1225 } 1226 } 1227 ret = kv_enable_dpm_voltage_scaling(rdev); 1228 if (ret) { 1229 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1230 return ret; 1231 } 1232 ret = kv_set_dpm_interval(rdev); 1233 if (ret) { 1234 DRM_ERROR("kv_set_dpm_interval failed\n"); 1235 return ret; 1236 } 1237 ret = kv_set_dpm_boot_state(rdev); 1238 if (ret) { 1239 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1240 return ret; 1241 } 1242 ret = kv_enable_ulv(rdev, true); 1243 if (ret) { 1244 DRM_ERROR("kv_enable_ulv failed\n"); 1245 return ret; 1246 } 1247 kv_start_dpm(rdev); 1248 ret = kv_enable_didt(rdev, true); 1249 if (ret) { 1250 DRM_ERROR("kv_enable_didt failed\n"); 1251 return ret; 1252 } 1253 ret = kv_enable_smc_cac(rdev, true); 1254 if (ret) { 1255 DRM_ERROR("kv_enable_smc_cac failed\n"); 1256 return ret; 1257 } 1258 1259 kv_reset_acp_boot_level(rdev); 1260 1261 ret = kv_smc_bapm_enable(rdev, false); 1262 if (ret) { 1263 DRM_ERROR("kv_smc_bapm_enable failed\n"); 1264 return ret; 1265 } 1266 1267 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); 1268 1269 return ret; 1270 } 1271 1272 int kv_dpm_late_enable(struct radeon_device *rdev) 1273 { 1274 int ret = 0; 1275 1276 if (rdev->irq.installed && 
1277 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1278 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1279 if (ret) { 1280 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1281 return ret; 1282 } 1283 rdev->irq.dpm_thermal = true; 1284 radeon_irq_set(rdev); 1285 } 1286 1287 /* powerdown unused blocks for now */ 1288 kv_dpm_powergate_acp(rdev, true); 1289 kv_dpm_powergate_samu(rdev, true); 1290 kv_dpm_powergate_vce(rdev, true); 1291 kv_dpm_powergate_uvd(rdev, true); 1292 1293 return ret; 1294 } 1295 1296 void kv_dpm_disable(struct radeon_device *rdev) 1297 { 1298 kv_smc_bapm_enable(rdev, false); 1299 1300 if (rdev->family == CHIP_MULLINS) 1301 kv_enable_nb_dpm(rdev, false); 1302 1303 /* powerup blocks */ 1304 kv_dpm_powergate_acp(rdev, false); 1305 kv_dpm_powergate_samu(rdev, false); 1306 kv_dpm_powergate_vce(rdev, false); 1307 kv_dpm_powergate_uvd(rdev, false); 1308 1309 kv_enable_smc_cac(rdev, false); 1310 kv_enable_didt(rdev, false); 1311 kv_clear_vc(rdev); 1312 kv_stop_dpm(rdev); 1313 kv_enable_ulv(rdev, false); 1314 kv_reset_am(rdev); 1315 1316 kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); 1317 } 1318 1319 #if 0 1320 static int kv_write_smc_soft_register(struct radeon_device *rdev, 1321 u16 reg_offset, u32 value) 1322 { 1323 struct kv_power_info *pi = kv_get_pi(rdev); 1324 1325 return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset, 1326 (u8 *)&value, sizeof(u16), pi->sram_end); 1327 } 1328 1329 static int kv_read_smc_soft_register(struct radeon_device *rdev, 1330 u16 reg_offset, u32 *value) 1331 { 1332 struct kv_power_info *pi = kv_get_pi(rdev); 1333 1334 return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset, 1335 value, pi->sram_end); 1336 } 1337 #endif 1338 1339 static void kv_init_sclk_t(struct radeon_device *rdev) 1340 { 1341 struct kv_power_info *pi = kv_get_pi(rdev); 1342 1343 pi->low_sclk_interrupt_t = 0; 1344 } 1345 1346 static int kv_init_fps_limits(struct 
radeon_device *rdev) 1347 { 1348 struct kv_power_info *pi = kv_get_pi(rdev); 1349 int ret = 0; 1350 1351 if (pi->caps_fps) { 1352 u16 tmp; 1353 1354 tmp = 45; 1355 pi->fps_high_t = cpu_to_be16(tmp); 1356 ret = kv_copy_bytes_to_smc(rdev, 1357 pi->dpm_table_start + 1358 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1359 (u8 *)&pi->fps_high_t, 1360 sizeof(u16), pi->sram_end); 1361 1362 tmp = 30; 1363 pi->fps_low_t = cpu_to_be16(tmp); 1364 1365 ret = kv_copy_bytes_to_smc(rdev, 1366 pi->dpm_table_start + 1367 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1368 (u8 *)&pi->fps_low_t, 1369 sizeof(u16), pi->sram_end); 1370 1371 } 1372 return ret; 1373 } 1374 1375 static void kv_init_powergate_state(struct radeon_device *rdev) 1376 { 1377 struct kv_power_info *pi = kv_get_pi(rdev); 1378 1379 pi->uvd_power_gated = false; 1380 pi->vce_power_gated = false; 1381 pi->samu_power_gated = false; 1382 pi->acp_power_gated = false; 1383 1384 } 1385 1386 static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 1387 { 1388 return kv_notify_message_to_smu(rdev, enable ? 1389 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1390 } 1391 1392 static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable) 1393 { 1394 return kv_notify_message_to_smu(rdev, enable ? 1395 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1396 } 1397 1398 static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable) 1399 { 1400 return kv_notify_message_to_smu(rdev, enable ? 1401 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1402 } 1403 1404 static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable) 1405 { 1406 return kv_notify_message_to_smu(rdev, enable ? 
1407 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1408 } 1409 1410 static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) 1411 { 1412 struct kv_power_info *pi = kv_get_pi(rdev); 1413 struct radeon_uvd_clock_voltage_dependency_table *table = 1414 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1415 int ret; 1416 u32 mask; 1417 1418 if (!gate) { 1419 if (table->count) 1420 pi->uvd_boot_level = table->count - 1; 1421 else 1422 pi->uvd_boot_level = 0; 1423 1424 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1425 mask = 1 << pi->uvd_boot_level; 1426 } else { 1427 mask = 0x1f; 1428 } 1429 1430 ret = kv_copy_bytes_to_smc(rdev, 1431 pi->dpm_table_start + 1432 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1433 (uint8_t *)&pi->uvd_boot_level, 1434 sizeof(u8), pi->sram_end); 1435 if (ret) 1436 return ret; 1437 1438 kv_send_msg_to_smc_with_parameter(rdev, 1439 PPSMC_MSG_UVDDPM_SetEnabledMask, 1440 mask); 1441 } 1442 1443 return kv_enable_uvd_dpm(rdev, !gate); 1444 } 1445 1446 static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk) 1447 { 1448 u8 i; 1449 struct radeon_vce_clock_voltage_dependency_table *table = 1450 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1451 1452 for (i = 0; i < table->count; i++) { 1453 if (table->entries[i].evclk >= evclk) 1454 break; 1455 } 1456 1457 return i; 1458 } 1459 1460 static int kv_update_vce_dpm(struct radeon_device *rdev, 1461 struct radeon_ps *radeon_new_state, 1462 struct radeon_ps *radeon_current_state) 1463 { 1464 struct kv_power_info *pi = kv_get_pi(rdev); 1465 struct radeon_vce_clock_voltage_dependency_table *table = 1466 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1467 int ret; 1468 1469 if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) { 1470 kv_dpm_powergate_vce(rdev, false); 1471 /* turn the clocks on when encoding */ 1472 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); 1473 if (pi->caps_stable_p_state) 1474 pi->vce_boot_level = 
table->count - 1; 1475 else 1476 pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk); 1477 1478 ret = kv_copy_bytes_to_smc(rdev, 1479 pi->dpm_table_start + 1480 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1481 (u8 *)&pi->vce_boot_level, 1482 sizeof(u8), 1483 pi->sram_end); 1484 if (ret) 1485 return ret; 1486 1487 if (pi->caps_stable_p_state) 1488 kv_send_msg_to_smc_with_parameter(rdev, 1489 PPSMC_MSG_VCEDPM_SetEnabledMask, 1490 (1 << pi->vce_boot_level)); 1491 1492 kv_enable_vce_dpm(rdev, true); 1493 } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) { 1494 kv_enable_vce_dpm(rdev, false); 1495 /* turn the clocks off when not encoding */ 1496 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); 1497 kv_dpm_powergate_vce(rdev, true); 1498 } 1499 1500 return 0; 1501 } 1502 1503 static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate) 1504 { 1505 struct kv_power_info *pi = kv_get_pi(rdev); 1506 struct radeon_clock_voltage_dependency_table *table = 1507 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1508 int ret; 1509 1510 if (!gate) { 1511 if (pi->caps_stable_p_state) 1512 pi->samu_boot_level = table->count - 1; 1513 else 1514 pi->samu_boot_level = 0; 1515 1516 ret = kv_copy_bytes_to_smc(rdev, 1517 pi->dpm_table_start + 1518 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1519 (u8 *)&pi->samu_boot_level, 1520 sizeof(u8), 1521 pi->sram_end); 1522 if (ret) 1523 return ret; 1524 1525 if (pi->caps_stable_p_state) 1526 kv_send_msg_to_smc_with_parameter(rdev, 1527 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1528 (1 << pi->samu_boot_level)); 1529 } 1530 1531 return kv_enable_samu_dpm(rdev, !gate); 1532 } 1533 1534 static u8 kv_get_acp_boot_level(struct radeon_device *rdev) 1535 { 1536 u8 i; 1537 struct radeon_clock_voltage_dependency_table *table = 1538 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1539 1540 for (i = 0; i < table->count; i++) { 1541 if (table->entries[i].clk >= 0) /* XXX */ 1542 break; 
1543 } 1544 1545 if (i >= table->count) 1546 i = table->count - 1; 1547 1548 return i; 1549 } 1550 1551 static void kv_update_acp_boot_level(struct radeon_device *rdev) 1552 { 1553 struct kv_power_info *pi = kv_get_pi(rdev); 1554 u8 acp_boot_level; 1555 1556 if (!pi->caps_stable_p_state) { 1557 acp_boot_level = kv_get_acp_boot_level(rdev); 1558 if (acp_boot_level != pi->acp_boot_level) { 1559 pi->acp_boot_level = acp_boot_level; 1560 kv_send_msg_to_smc_with_parameter(rdev, 1561 PPSMC_MSG_ACPDPM_SetEnabledMask, 1562 (1 << pi->acp_boot_level)); 1563 } 1564 } 1565 } 1566 1567 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) 1568 { 1569 struct kv_power_info *pi = kv_get_pi(rdev); 1570 struct radeon_clock_voltage_dependency_table *table = 1571 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1572 int ret; 1573 1574 if (!gate) { 1575 if (pi->caps_stable_p_state) 1576 pi->acp_boot_level = table->count - 1; 1577 else 1578 pi->acp_boot_level = kv_get_acp_boot_level(rdev); 1579 1580 ret = kv_copy_bytes_to_smc(rdev, 1581 pi->dpm_table_start + 1582 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1583 (u8 *)&pi->acp_boot_level, 1584 sizeof(u8), 1585 pi->sram_end); 1586 if (ret) 1587 return ret; 1588 1589 if (pi->caps_stable_p_state) 1590 kv_send_msg_to_smc_with_parameter(rdev, 1591 PPSMC_MSG_ACPDPM_SetEnabledMask, 1592 (1 << pi->acp_boot_level)); 1593 } 1594 1595 return kv_enable_acp_dpm(rdev, !gate); 1596 } 1597 1598 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) 1599 { 1600 struct kv_power_info *pi = kv_get_pi(rdev); 1601 1602 if (pi->uvd_power_gated == gate) 1603 return; 1604 1605 pi->uvd_power_gated = gate; 1606 1607 if (gate) { 1608 if (pi->caps_uvd_pg) { 1609 uvd_v1_0_stop(rdev); 1610 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false); 1611 } 1612 kv_update_uvd_dpm(rdev, gate); 1613 if (pi->caps_uvd_pg) 1614 kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF); 1615 } else { 1616 if (pi->caps_uvd_pg) { 1617 
kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON); 1618 uvd_v4_2_resume(rdev); 1619 uvd_v1_0_start(rdev); 1620 cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true); 1621 } 1622 kv_update_uvd_dpm(rdev, gate); 1623 } 1624 } 1625 1626 static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate) 1627 { 1628 struct kv_power_info *pi = kv_get_pi(rdev); 1629 1630 if (pi->vce_power_gated == gate) 1631 return; 1632 1633 pi->vce_power_gated = gate; 1634 1635 if (gate) { 1636 if (pi->caps_vce_pg) { 1637 /* XXX do we need a vce_v1_0_stop() ? */ 1638 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF); 1639 } 1640 } else { 1641 if (pi->caps_vce_pg) { 1642 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON); 1643 vce_v2_0_resume(rdev); 1644 vce_v1_0_start(rdev); 1645 } 1646 } 1647 } 1648 1649 static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate) 1650 { 1651 struct kv_power_info *pi = kv_get_pi(rdev); 1652 1653 if (pi->samu_power_gated == gate) 1654 return; 1655 1656 pi->samu_power_gated = gate; 1657 1658 if (gate) { 1659 kv_update_samu_dpm(rdev, true); 1660 if (pi->caps_samu_pg) 1661 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF); 1662 } else { 1663 if (pi->caps_samu_pg) 1664 kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON); 1665 kv_update_samu_dpm(rdev, false); 1666 } 1667 } 1668 1669 static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) 1670 { 1671 struct kv_power_info *pi = kv_get_pi(rdev); 1672 1673 if (pi->acp_power_gated == gate) 1674 return; 1675 1676 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 1677 return; 1678 1679 pi->acp_power_gated = gate; 1680 1681 if (gate) { 1682 kv_update_acp_dpm(rdev, true); 1683 if (pi->caps_acp_pg) 1684 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF); 1685 } else { 1686 if (pi->caps_acp_pg) 1687 kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON); 1688 kv_update_acp_dpm(rdev, false); 1689 } 1690 } 1691 1692 static void kv_set_valid_clock_range(struct 
radeon_device *rdev, 1693 struct radeon_ps *new_rps) 1694 { 1695 struct kv_ps *new_ps = kv_get_ps(new_rps); 1696 struct kv_power_info *pi = kv_get_pi(rdev); 1697 u32 i; 1698 struct radeon_clock_voltage_dependency_table *table = 1699 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1700 1701 if (table && table->count) { 1702 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1703 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1704 (i == (pi->graphics_dpm_level_count - 1))) { 1705 pi->lowest_valid = i; 1706 break; 1707 } 1708 } 1709 1710 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1711 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1712 break; 1713 } 1714 pi->highest_valid = i; 1715 1716 if (pi->lowest_valid > pi->highest_valid) { 1717 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1718 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1719 pi->highest_valid = pi->lowest_valid; 1720 else 1721 pi->lowest_valid = pi->highest_valid; 1722 } 1723 } else { 1724 struct sumo_sclk_voltage_mapping_table *table = 1725 &pi->sys_info.sclk_voltage_mapping_table; 1726 1727 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1728 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1729 i == (int)(pi->graphics_dpm_level_count - 1)) { 1730 pi->lowest_valid = i; 1731 break; 1732 } 1733 } 1734 1735 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1736 if (table->entries[i].sclk_frequency <= 1737 new_ps->levels[new_ps->num_levels - 1].sclk) 1738 break; 1739 } 1740 pi->highest_valid = i; 1741 1742 if (pi->lowest_valid > pi->highest_valid) { 1743 if ((new_ps->levels[0].sclk - 1744 table->entries[pi->highest_valid].sclk_frequency) > 1745 (table->entries[pi->lowest_valid].sclk_frequency - 1746 new_ps->levels[new_ps->num_levels -1].sclk)) 1747 pi->highest_valid = pi->lowest_valid; 1748 else 1749 pi->lowest_valid = pi->highest_valid; 1750 } 1751 } 1752 } 

/* Write the boot level's ClkBypassCntl byte (or 0 when the new state does
 * not need DFS bypass) into the matching GraphicsLevel entry in SMC RAM.
 * No-op unless DFS bypass is supported.
 */
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

/* Enable/disable north-bridge DPM, tracking the current state in
 * pi->nb_dpm_enabled so the SMC message is only sent on an actual
 * transition (and only if NB DPM is supported at all).
 */
static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

/* Force the DPM level selection: highest, lowest, or automatic.  On
 * success the requested level is recorded in rdev->pm.dpm.forced_level.
 */
int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

/* Copy the requested state and apply the KV adjustment rules before the
 * actual state switch.
 */
int kv_dpm_pre_set_power_state(struct radeon_device
*rdev) 1825 { 1826 struct kv_power_info *pi = kv_get_pi(rdev); 1827 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; 1828 struct radeon_ps *new_ps = &requested_ps; 1829 1830 kv_update_requested_ps(rdev, new_ps); 1831 1832 kv_apply_state_adjust_rules(rdev, 1833 &pi->requested_rps, 1834 &pi->current_rps); 1835 1836 return 0; 1837 } 1838 1839 int kv_dpm_set_power_state(struct radeon_device *rdev) 1840 { 1841 struct kv_power_info *pi = kv_get_pi(rdev); 1842 struct radeon_ps *new_ps = &pi->requested_rps; 1843 struct radeon_ps *old_ps = &pi->current_rps; 1844 int ret; 1845 1846 if (pi->bapm_enable) { 1847 ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); 1848 if (ret) { 1849 DRM_ERROR("kv_smc_bapm_enable failed\n"); 1850 return ret; 1851 } 1852 } 1853 1854 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 1855 if (pi->enable_dpm) { 1856 kv_set_valid_clock_range(rdev, new_ps); 1857 kv_update_dfs_bypass_settings(rdev, new_ps); 1858 ret = kv_calculate_ds_divider(rdev); 1859 if (ret) { 1860 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1861 return ret; 1862 } 1863 kv_calculate_nbps_level_settings(rdev); 1864 kv_calculate_dpm_settings(rdev); 1865 kv_force_lowest_valid(rdev); 1866 kv_enable_new_levels(rdev); 1867 kv_upload_dpm_settings(rdev); 1868 kv_program_nbps_index_settings(rdev, new_ps); 1869 kv_unforce_levels(rdev); 1870 kv_set_enabled_levels(rdev); 1871 kv_force_lowest_valid(rdev); 1872 kv_unforce_levels(rdev); 1873 1874 ret = kv_update_vce_dpm(rdev, new_ps, old_ps); 1875 if (ret) { 1876 DRM_ERROR("kv_update_vce_dpm failed\n"); 1877 return ret; 1878 } 1879 kv_update_sclk_t(rdev); 1880 if (rdev->family == CHIP_MULLINS) 1881 kv_enable_nb_dpm(rdev, true); 1882 } 1883 } else { 1884 if (pi->enable_dpm) { 1885 kv_set_valid_clock_range(rdev, new_ps); 1886 kv_update_dfs_bypass_settings(rdev, new_ps); 1887 ret = kv_calculate_ds_divider(rdev); 1888 if (ret) { 1889 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1890 return ret; 1891 } 1892 
kv_calculate_nbps_level_settings(rdev); 1893 kv_calculate_dpm_settings(rdev); 1894 kv_freeze_sclk_dpm(rdev, true); 1895 kv_upload_dpm_settings(rdev); 1896 kv_program_nbps_index_settings(rdev, new_ps); 1897 kv_freeze_sclk_dpm(rdev, false); 1898 kv_set_enabled_levels(rdev); 1899 ret = kv_update_vce_dpm(rdev, new_ps, old_ps); 1900 if (ret) { 1901 DRM_ERROR("kv_update_vce_dpm failed\n"); 1902 return ret; 1903 } 1904 kv_update_acp_boot_level(rdev); 1905 kv_update_sclk_t(rdev); 1906 kv_enable_nb_dpm(rdev, true); 1907 } 1908 } 1909 1910 return 0; 1911 } 1912 1913 void kv_dpm_post_set_power_state(struct radeon_device *rdev) 1914 { 1915 struct kv_power_info *pi = kv_get_pi(rdev); 1916 struct radeon_ps *new_ps = &pi->requested_rps; 1917 1918 kv_update_current_ps(rdev, new_ps); 1919 } 1920 1921 void kv_dpm_setup_asic(struct radeon_device *rdev) 1922 { 1923 sumo_take_smu_control(rdev, true); 1924 kv_init_powergate_state(rdev); 1925 kv_init_sclk_t(rdev); 1926 } 1927 1928 void kv_dpm_reset_asic(struct radeon_device *rdev) 1929 { 1930 struct kv_power_info *pi = kv_get_pi(rdev); 1931 1932 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 1933 kv_force_lowest_valid(rdev); 1934 kv_init_graphics_levels(rdev); 1935 kv_program_bootup_state(rdev); 1936 kv_upload_dpm_settings(rdev); 1937 kv_force_lowest_valid(rdev); 1938 kv_unforce_levels(rdev); 1939 } else { 1940 kv_init_graphics_levels(rdev); 1941 kv_program_bootup_state(rdev); 1942 kv_freeze_sclk_dpm(rdev, true); 1943 kv_upload_dpm_settings(rdev); 1944 kv_freeze_sclk_dpm(rdev, false); 1945 kv_set_enabled_level(rdev, pi->graphics_boot_level); 1946 } 1947 } 1948 1949 //XXX use sumo_dpm_display_configuration_changed 1950 1951 static void kv_construct_max_power_limits_table(struct radeon_device *rdev, 1952 struct radeon_clock_and_voltage_limits *table) 1953 { 1954 struct kv_power_info *pi = kv_get_pi(rdev); 1955 1956 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 1957 int idx = 
pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 1958 table->sclk = 1959 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 1960 table->vddc = 1961 kv_convert_2bit_index_to_voltage(rdev, 1962 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 1963 } 1964 1965 table->mclk = pi->sys_info.nbp_memory_clock[0]; 1966 } 1967 1968 static void kv_patch_voltage_values(struct radeon_device *rdev) 1969 { 1970 int i; 1971 struct radeon_uvd_clock_voltage_dependency_table *uvd_table = 1972 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1973 struct radeon_vce_clock_voltage_dependency_table *vce_table = 1974 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1975 struct radeon_clock_voltage_dependency_table *samu_table = 1976 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1977 struct radeon_clock_voltage_dependency_table *acp_table = 1978 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1979 1980 if (uvd_table->count) { 1981 for (i = 0; i < uvd_table->count; i++) 1982 uvd_table->entries[i].v = 1983 kv_convert_8bit_index_to_voltage(rdev, 1984 uvd_table->entries[i].v); 1985 } 1986 1987 if (vce_table->count) { 1988 for (i = 0; i < vce_table->count; i++) 1989 vce_table->entries[i].v = 1990 kv_convert_8bit_index_to_voltage(rdev, 1991 vce_table->entries[i].v); 1992 } 1993 1994 if (samu_table->count) { 1995 for (i = 0; i < samu_table->count; i++) 1996 samu_table->entries[i].v = 1997 kv_convert_8bit_index_to_voltage(rdev, 1998 samu_table->entries[i].v); 1999 } 2000 2001 if (acp_table->count) { 2002 for (i = 0; i < acp_table->count; i++) 2003 acp_table->entries[i].v = 2004 kv_convert_8bit_index_to_voltage(rdev, 2005 acp_table->entries[i].v); 2006 } 2007 2008 } 2009 2010 static void kv_construct_boot_state(struct radeon_device *rdev) 2011 { 2012 struct kv_power_info *pi = kv_get_pi(rdev); 2013 2014 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2015 pi->boot_pl.vddc_index = 
pi->sys_info.bootup_nb_voltage_index; 2016 pi->boot_pl.ds_divider_index = 0; 2017 pi->boot_pl.ss_divider_index = 0; 2018 pi->boot_pl.allow_gnb_slow = 1; 2019 pi->boot_pl.force_nbp_state = 0; 2020 pi->boot_pl.display_wm = 0; 2021 pi->boot_pl.vce_wm = 0; 2022 } 2023 2024 static int kv_force_dpm_highest(struct radeon_device *rdev) 2025 { 2026 int ret; 2027 u32 enable_mask, i; 2028 2029 ret = kv_dpm_get_enable_mask(rdev, &enable_mask); 2030 if (ret) 2031 return ret; 2032 2033 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2034 if (enable_mask & (1 << i)) 2035 break; 2036 } 2037 2038 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2039 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2040 else 2041 return kv_set_enabled_level(rdev, i); 2042 } 2043 2044 static int kv_force_dpm_lowest(struct radeon_device *rdev) 2045 { 2046 int ret; 2047 u32 enable_mask, i; 2048 2049 ret = kv_dpm_get_enable_mask(rdev, &enable_mask); 2050 if (ret) 2051 return ret; 2052 2053 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2054 if (enable_mask & (1 << i)) 2055 break; 2056 } 2057 2058 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2059 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2060 else 2061 return kv_set_enabled_level(rdev, i); 2062 } 2063 2064 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 2065 u32 sclk, u32 min_sclk_in_sr) 2066 { 2067 struct kv_power_info *pi = kv_get_pi(rdev); 2068 u32 i; 2069 u32 temp; 2070 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? 
2071 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK; 2072 2073 if (sclk < min) 2074 return 0; 2075 2076 if (!pi->caps_sclk_ds) 2077 return 0; 2078 2079 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2080 temp = sclk / sumo_get_sleep_divider_from_id(i); 2081 if (temp >= min) 2082 break; 2083 } 2084 2085 return (u8)i; 2086 } 2087 2088 static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit) 2089 { 2090 struct kv_power_info *pi = kv_get_pi(rdev); 2091 struct radeon_clock_voltage_dependency_table *table = 2092 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2093 int i; 2094 2095 if (table && table->count) { 2096 for (i = table->count - 1; i >= 0; i--) { 2097 if (pi->high_voltage_t && 2098 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <= 2099 pi->high_voltage_t)) { 2100 *limit = i; 2101 return 0; 2102 } 2103 } 2104 } else { 2105 struct sumo_sclk_voltage_mapping_table *table = 2106 &pi->sys_info.sclk_voltage_mapping_table; 2107 2108 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2109 if (pi->high_voltage_t && 2110 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <= 2111 pi->high_voltage_t)) { 2112 *limit = i; 2113 return 0; 2114 } 2115 } 2116 } 2117 2118 *limit = 0; 2119 return 0; 2120 } 2121 2122 static void kv_apply_state_adjust_rules(struct radeon_device *rdev, 2123 struct radeon_ps *new_rps, 2124 struct radeon_ps *old_rps) 2125 { 2126 struct kv_ps *ps = kv_get_ps(new_rps); 2127 struct kv_power_info *pi = kv_get_pi(rdev); 2128 u32 min_sclk = 10000; /* ??? 
*/ 2129 u32 sclk, mclk = 0; 2130 int i, limit; 2131 bool force_high; 2132 struct radeon_clock_voltage_dependency_table *table = 2133 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2134 u32 stable_p_state_sclk = 0; 2135 struct radeon_clock_and_voltage_limits *max_limits = 2136 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2137 2138 if (new_rps->vce_active) { 2139 new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 2140 new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; 2141 } else { 2142 new_rps->evclk = 0; 2143 new_rps->ecclk = 0; 2144 } 2145 2146 mclk = max_limits->mclk; 2147 sclk = min_sclk; 2148 2149 if (pi->caps_stable_p_state) { 2150 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2151 2152 for (i = table->count - 1; i >= 0; i++) { 2153 if (stable_p_state_sclk >= table->entries[i].clk) { 2154 stable_p_state_sclk = table->entries[i].clk; 2155 break; 2156 } 2157 } 2158 2159 if (i > 0) 2160 stable_p_state_sclk = table->entries[0].clk; 2161 2162 sclk = stable_p_state_sclk; 2163 } 2164 2165 if (new_rps->vce_active) { 2166 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) 2167 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; 2168 } 2169 2170 ps->need_dfs_bypass = true; 2171 2172 for (i = 0; i < ps->num_levels; i++) { 2173 if (ps->levels[i].sclk < sclk) 2174 ps->levels[i].sclk = sclk; 2175 } 2176 2177 if (table && table->count) { 2178 for (i = 0; i < ps->num_levels; i++) { 2179 if (pi->high_voltage_t && 2180 (pi->high_voltage_t < 2181 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) { 2182 kv_get_high_voltage_limit(rdev, &limit); 2183 ps->levels[i].sclk = table->entries[limit].clk; 2184 } 2185 } 2186 } else { 2187 struct sumo_sclk_voltage_mapping_table *table = 2188 &pi->sys_info.sclk_voltage_mapping_table; 2189 2190 for (i = 0; i < ps->num_levels; i++) { 2191 if (pi->high_voltage_t && 2192 (pi->high_voltage_t < 2193 kv_convert_8bit_index_to_voltage(rdev, 
ps->levels[i].vddc_index))) { 2194 kv_get_high_voltage_limit(rdev, &limit); 2195 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2196 } 2197 } 2198 } 2199 2200 if (pi->caps_stable_p_state) { 2201 for (i = 0; i < ps->num_levels; i++) { 2202 ps->levels[i].sclk = stable_p_state_sclk; 2203 } 2204 } 2205 2206 pi->video_start = new_rps->dclk || new_rps->vclk || 2207 new_rps->evclk || new_rps->ecclk; 2208 2209 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2210 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2211 pi->battery_state = true; 2212 else 2213 pi->battery_state = false; 2214 2215 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 2216 ps->dpm0_pg_nb_ps_lo = 0x1; 2217 ps->dpm0_pg_nb_ps_hi = 0x0; 2218 ps->dpmx_nb_ps_lo = 0x1; 2219 ps->dpmx_nb_ps_hi = 0x0; 2220 } else { 2221 ps->dpm0_pg_nb_ps_lo = 0x3; 2222 ps->dpm0_pg_nb_ps_hi = 0x0; 2223 ps->dpmx_nb_ps_lo = 0x3; 2224 ps->dpmx_nb_ps_hi = 0x0; 2225 2226 if (pi->sys_info.nb_dpm_enable) { 2227 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2228 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || 2229 pi->disable_nb_ps3_in_battery; 2230 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2231 ps->dpm0_pg_nb_ps_hi = 0x2; 2232 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2233 ps->dpmx_nb_ps_hi = 0x2; 2234 } 2235 } 2236 } 2237 2238 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev, 2239 u32 index, bool enable) 2240 { 2241 struct kv_power_info *pi = kv_get_pi(rdev); 2242 2243 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2244 } 2245 2246 static int kv_calculate_ds_divider(struct radeon_device *rdev) 2247 { 2248 struct kv_power_info *pi = kv_get_pi(rdev); 2249 u32 sclk_in_sr = 10000; /* ??? 
*/ 2250 u32 i; 2251 2252 if (pi->lowest_valid > pi->highest_valid) 2253 return -EINVAL; 2254 2255 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2256 pi->graphics_level[i].DeepSleepDivId = 2257 kv_get_sleep_divider_id_from_clock(rdev, 2258 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2259 sclk_in_sr); 2260 } 2261 return 0; 2262 } 2263 2264 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) 2265 { 2266 struct kv_power_info *pi = kv_get_pi(rdev); 2267 u32 i; 2268 bool force_high; 2269 struct radeon_clock_and_voltage_limits *max_limits = 2270 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2271 u32 mclk = max_limits->mclk; 2272 2273 if (pi->lowest_valid > pi->highest_valid) 2274 return -EINVAL; 2275 2276 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { 2277 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2278 pi->graphics_level[i].GnbSlow = 1; 2279 pi->graphics_level[i].ForceNbPs1 = 0; 2280 pi->graphics_level[i].UpH = 0; 2281 } 2282 2283 if (!pi->sys_info.nb_dpm_enable) 2284 return 0; 2285 2286 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2287 (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2288 2289 if (force_high) { 2290 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2291 pi->graphics_level[i].GnbSlow = 0; 2292 } else { 2293 if (pi->battery_state) 2294 pi->graphics_level[0].ForceNbPs1 = 1; 2295 2296 pi->graphics_level[1].GnbSlow = 0; 2297 pi->graphics_level[2].GnbSlow = 0; 2298 pi->graphics_level[3].GnbSlow = 0; 2299 pi->graphics_level[4].GnbSlow = 0; 2300 } 2301 } else { 2302 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2303 pi->graphics_level[i].GnbSlow = 1; 2304 pi->graphics_level[i].ForceNbPs1 = 0; 2305 pi->graphics_level[i].UpH = 0; 2306 } 2307 2308 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2309 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2310 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2311 if (pi->lowest_valid != 
pi->highest_valid) 2312 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2313 } 2314 } 2315 return 0; 2316 } 2317 2318 static int kv_calculate_dpm_settings(struct radeon_device *rdev) 2319 { 2320 struct kv_power_info *pi = kv_get_pi(rdev); 2321 u32 i; 2322 2323 if (pi->lowest_valid > pi->highest_valid) 2324 return -EINVAL; 2325 2326 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2327 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2328 2329 return 0; 2330 } 2331 2332 static void kv_init_graphics_levels(struct radeon_device *rdev) 2333 { 2334 struct kv_power_info *pi = kv_get_pi(rdev); 2335 u32 i; 2336 struct radeon_clock_voltage_dependency_table *table = 2337 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2338 2339 if (table && table->count) { 2340 u32 vid_2bit; 2341 2342 pi->graphics_dpm_level_count = 0; 2343 for (i = 0; i < table->count; i++) { 2344 if (pi->high_voltage_t && 2345 (pi->high_voltage_t < 2346 kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v))) 2347 break; 2348 2349 kv_set_divider_value(rdev, i, table->entries[i].clk); 2350 vid_2bit = kv_convert_vid7_to_vid2(rdev, 2351 &pi->sys_info.vid_mapping_table, 2352 table->entries[i].v); 2353 kv_set_vid(rdev, i, vid_2bit); 2354 kv_set_at(rdev, i, pi->at[i]); 2355 kv_dpm_power_level_enabled_for_throttle(rdev, i, true); 2356 pi->graphics_dpm_level_count++; 2357 } 2358 } else { 2359 struct sumo_sclk_voltage_mapping_table *table = 2360 &pi->sys_info.sclk_voltage_mapping_table; 2361 2362 pi->graphics_dpm_level_count = 0; 2363 for (i = 0; i < table->num_max_dpm_entries; i++) { 2364 if (pi->high_voltage_t && 2365 pi->high_voltage_t < 2366 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit)) 2367 break; 2368 2369 kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency); 2370 kv_set_vid(rdev, i, table->entries[i].vid_2bit); 2371 kv_set_at(rdev, i, pi->at[i]); 2372 kv_dpm_power_level_enabled_for_throttle(rdev, i, true); 2373 
pi->graphics_dpm_level_count++; 2374 } 2375 } 2376 2377 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2378 kv_dpm_power_level_enable(rdev, i, false); 2379 } 2380 2381 static void kv_enable_new_levels(struct radeon_device *rdev) 2382 { 2383 struct kv_power_info *pi = kv_get_pi(rdev); 2384 u32 i; 2385 2386 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2387 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2388 kv_dpm_power_level_enable(rdev, i, true); 2389 } 2390 } 2391 2392 static int kv_set_enabled_level(struct radeon_device *rdev, u32 level) 2393 { 2394 u32 new_mask = (1 << level); 2395 2396 return kv_send_msg_to_smc_with_parameter(rdev, 2397 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2398 new_mask); 2399 } 2400 2401 static int kv_set_enabled_levels(struct radeon_device *rdev) 2402 { 2403 struct kv_power_info *pi = kv_get_pi(rdev); 2404 u32 i, new_mask = 0; 2405 2406 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2407 new_mask |= (1 << i); 2408 2409 return kv_send_msg_to_smc_with_parameter(rdev, 2410 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2411 new_mask); 2412 } 2413 2414 static void kv_program_nbps_index_settings(struct radeon_device *rdev, 2415 struct radeon_ps *new_rps) 2416 { 2417 struct kv_ps *new_ps = kv_get_ps(new_rps); 2418 struct kv_power_info *pi = kv_get_pi(rdev); 2419 u32 nbdpmconfig1; 2420 2421 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2422 return; 2423 2424 if (pi->sys_info.nb_dpm_enable) { 2425 nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1); 2426 nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK | 2427 DpmXNbPsLo_MASK | DpmXNbPsHi_MASK); 2428 nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) | 2429 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) | 2430 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) | 2431 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi)); 2432 WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1); 2433 } 2434 } 2435 2436 static int kv_set_thermal_temperature_range(struct radeon_device *rdev, 2437 int min_temp, int max_temp) 2438 { 2439 int low_temp 
= 0 * 1000; 2440 int high_temp = 255 * 1000; 2441 u32 tmp; 2442 2443 if (low_temp < min_temp) 2444 low_temp = min_temp; 2445 if (high_temp > max_temp) 2446 high_temp = max_temp; 2447 if (high_temp < low_temp) { 2448 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2449 return -EINVAL; 2450 } 2451 2452 tmp = RREG32_SMC(CG_THERMAL_INT_CTRL); 2453 tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK); 2454 tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) | 2455 DIG_THERM_INTL(49 + (low_temp / 1000))); 2456 WREG32_SMC(CG_THERMAL_INT_CTRL, tmp); 2457 2458 rdev->pm.dpm.thermal.min_temp = low_temp; 2459 rdev->pm.dpm.thermal.max_temp = high_temp; 2460 2461 return 0; 2462 } 2463 2464 union igp_info { 2465 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2466 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2467 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2468 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2469 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2470 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2471 }; 2472 2473 static int kv_parse_sys_info_table(struct radeon_device *rdev) 2474 { 2475 struct kv_power_info *pi = kv_get_pi(rdev); 2476 struct radeon_mode_info *mode_info = &rdev->mode_info; 2477 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2478 union igp_info *igp_info; 2479 u8 frev, crev; 2480 u16 data_offset; 2481 int i; 2482 2483 if (atom_parse_data_header(mode_info->atom_context, index, NULL, 2484 &frev, &crev, &data_offset)) { 2485 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2486 data_offset); 2487 2488 if (crev != 8) { 2489 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2490 return -EINVAL; 2491 } 2492 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2493 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2494 pi->sys_info.bootup_nb_voltage_index = 2495 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2496 if (igp_info->info_8.ucHtcTmpLmt == 
0) 2497 pi->sys_info.htc_tmp_lmt = 203; 2498 else 2499 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2500 if (igp_info->info_8.ucHtcHystLmt == 0) 2501 pi->sys_info.htc_hyst_lmt = 5; 2502 else 2503 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2504 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2505 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2506 } 2507 2508 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2509 pi->sys_info.nb_dpm_enable = true; 2510 else 2511 pi->sys_info.nb_dpm_enable = false; 2512 2513 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2514 pi->sys_info.nbp_memory_clock[i] = 2515 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2516 pi->sys_info.nbp_n_clock[i] = 2517 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2518 } 2519 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2520 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2521 pi->caps_enable_dfs_bypass = true; 2522 2523 sumo_construct_sclk_voltage_mapping_table(rdev, 2524 &pi->sys_info.sclk_voltage_mapping_table, 2525 igp_info->info_8.sAvail_SCLK); 2526 2527 sumo_construct_vid_mapping_table(rdev, 2528 &pi->sys_info.vid_mapping_table, 2529 igp_info->info_8.sAvail_SCLK); 2530 2531 kv_construct_max_power_limits_table(rdev, 2532 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2533 } 2534 return 0; 2535 } 2536 2537 union power_info { 2538 struct _ATOM_POWERPLAY_INFO info; 2539 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2540 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2541 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2542 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2543 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2544 }; 2545 2546 union pplib_clock_info { 2547 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2548 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2549 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2550 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2551 }; 2552 2553 union pplib_power_state { 2554 struct _ATOM_PPLIB_STATE v1; 2555 struct 
_ATOM_PPLIB_STATE_V2 v2; 2556 }; 2557 2558 static void kv_patch_boot_state(struct radeon_device *rdev, 2559 struct kv_ps *ps) 2560 { 2561 struct kv_power_info *pi = kv_get_pi(rdev); 2562 2563 ps->num_levels = 1; 2564 ps->levels[0] = pi->boot_pl; 2565 } 2566 2567 static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev, 2568 struct radeon_ps *rps, 2569 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2570 u8 table_rev) 2571 { 2572 struct kv_ps *ps = kv_get_ps(rps); 2573 2574 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2575 rps->class = le16_to_cpu(non_clock_info->usClassification); 2576 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2577 2578 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2579 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2580 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2581 } else { 2582 rps->vclk = 0; 2583 rps->dclk = 0; 2584 } 2585 2586 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2587 rdev->pm.dpm.boot_ps = rps; 2588 kv_patch_boot_state(rdev, ps); 2589 } 2590 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2591 rdev->pm.dpm.uvd_ps = rps; 2592 } 2593 2594 static void kv_parse_pplib_clock_info(struct radeon_device *rdev, 2595 struct radeon_ps *rps, int index, 2596 union pplib_clock_info *clock_info) 2597 { 2598 struct kv_power_info *pi = kv_get_pi(rdev); 2599 struct kv_ps *ps = kv_get_ps(rps); 2600 struct kv_pl *pl = &ps->levels[index]; 2601 u32 sclk; 2602 2603 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2604 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2605 pl->sclk = sclk; 2606 pl->vddc_index = clock_info->sumo.vddcIndex; 2607 2608 ps->num_levels = index + 1; 2609 2610 if (pi->caps_sclk_ds) { 2611 pl->ds_divider_index = 5; 2612 pl->ss_divider_index = 5; 2613 } 2614 } 2615 2616 static int kv_parse_power_table(struct radeon_device *rdev) 2617 { 2618 struct radeon_mode_info *mode_info = &rdev->mode_info; 2619 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2620 union 
pplib_power_state *power_state; 2621 int i, j, k, non_clock_array_index, clock_array_index; 2622 union pplib_clock_info *clock_info; 2623 struct _StateArray *state_array; 2624 struct _ClockInfoArray *clock_info_array; 2625 struct _NonClockInfoArray *non_clock_info_array; 2626 union power_info *power_info; 2627 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2628 u16 data_offset; 2629 u8 frev, crev; 2630 u8 *power_state_offset; 2631 struct kv_ps *ps; 2632 2633 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 2634 &frev, &crev, &data_offset)) 2635 return -EINVAL; 2636 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2637 2638 state_array = (struct _StateArray *) 2639 (mode_info->atom_context->bios + data_offset + 2640 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2641 clock_info_array = (struct _ClockInfoArray *) 2642 (mode_info->atom_context->bios + data_offset + 2643 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2644 non_clock_info_array = (struct _NonClockInfoArray *) 2645 (mode_info->atom_context->bios + data_offset + 2646 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2647 2648 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * 2649 state_array->ucNumEntries, GFP_KERNEL); 2650 if (!rdev->pm.dpm.ps) 2651 return -ENOMEM; 2652 power_state_offset = (u8 *)state_array->states; 2653 for (i = 0; i < state_array->ucNumEntries; i++) { 2654 u8 *idx; 2655 power_state = (union pplib_power_state *)power_state_offset; 2656 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2657 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2658 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2659 if (!rdev->pm.power_state[i].clock_info) 2660 return -EINVAL; 2661 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2662 if (ps == NULL) { 2663 kfree(rdev->pm.dpm.ps); 2664 return -ENOMEM; 2665 } 2666 rdev->pm.dpm.ps[i].ps_priv = ps; 2667 k = 0; 2668 idx = (u8 
*)&power_state->v2.clockInfoIndex[0]; 2669 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2670 clock_array_index = idx[j]; 2671 if (clock_array_index >= clock_info_array->ucNumEntries) 2672 continue; 2673 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2674 break; 2675 clock_info = (union pplib_clock_info *) 2676 ((u8 *)&clock_info_array->clockInfo[0] + 2677 (clock_array_index * clock_info_array->ucEntrySize)); 2678 kv_parse_pplib_clock_info(rdev, 2679 &rdev->pm.dpm.ps[i], k, 2680 clock_info); 2681 k++; 2682 } 2683 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 2684 non_clock_info, 2685 non_clock_info_array->ucEntrySize); 2686 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2687 } 2688 rdev->pm.dpm.num_ps = state_array->ucNumEntries; 2689 2690 /* fill in the vce power states */ 2691 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { 2692 u32 sclk; 2693 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; 2694 clock_info = (union pplib_clock_info *) 2695 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2696 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2697 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2698 rdev->pm.dpm.vce_states[i].sclk = sclk; 2699 rdev->pm.dpm.vce_states[i].mclk = 0; 2700 } 2701 2702 return 0; 2703 } 2704 2705 int kv_dpm_init(struct radeon_device *rdev) 2706 { 2707 struct kv_power_info *pi; 2708 int ret, i; 2709 2710 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2711 if (pi == NULL) 2712 return -ENOMEM; 2713 rdev->pm.dpm.priv = pi; 2714 2715 ret = r600_get_platform_caps(rdev); 2716 if (ret) 2717 return ret; 2718 2719 ret = r600_parse_extended_power_table(rdev); 2720 if (ret) 2721 return ret; 2722 2723 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2724 pi->at[i] = TRINITY_AT_DFLT; 2725 2726 pi->sram_end = SMC_RAM_END; 2727 2728 pi->enable_nb_dpm = true; 2729 2730 pi->caps_power_containment = true; 2731 pi->caps_cac = true; 2732 pi->enable_didt = false; 2733 if 
(pi->enable_didt) { 2734 pi->caps_sq_ramping = true; 2735 pi->caps_db_ramping = true; 2736 pi->caps_td_ramping = true; 2737 pi->caps_tcp_ramping = true; 2738 } 2739 2740 pi->caps_sclk_ds = true; 2741 pi->enable_auto_thermal_throttling = true; 2742 pi->disable_nb_ps3_in_battery = false; 2743 if (radeon_bapm == 0) 2744 pi->bapm_enable = false; 2745 else 2746 pi->bapm_enable = true; 2747 pi->voltage_drop_t = 0; 2748 pi->caps_sclk_throttle_low_notification = false; 2749 pi->caps_fps = false; /* true? */ 2750 pi->caps_uvd_pg = true; 2751 pi->caps_uvd_dpm = true; 2752 pi->caps_vce_pg = false; /* XXX true */ 2753 pi->caps_samu_pg = false; 2754 pi->caps_acp_pg = false; 2755 pi->caps_stable_p_state = false; 2756 2757 ret = kv_parse_sys_info_table(rdev); 2758 if (ret) 2759 return ret; 2760 2761 kv_patch_voltage_values(rdev); 2762 kv_construct_boot_state(rdev); 2763 2764 ret = kv_parse_power_table(rdev); 2765 if (ret) 2766 return ret; 2767 2768 pi->enable_dpm = true; 2769 2770 return 0; 2771 } 2772 2773 void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 2774 struct seq_file *m) 2775 { 2776 struct kv_power_info *pi = kv_get_pi(rdev); 2777 u32 current_index = 2778 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> 2779 CURR_SCLK_INDEX_SHIFT; 2780 u32 sclk, tmp; 2781 u16 vddc; 2782 2783 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2784 seq_printf(m, "invalid dpm profile %d\n", current_index); 2785 } else { 2786 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2787 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2788 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT; 2789 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp); 2790 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2791 current_index, sclk, vddc); 2792 } 2793 } 2794 2795 void kv_dpm_print_power_state(struct radeon_device *rdev, 2796 struct radeon_ps *rps) 2797 { 2798 int i; 2799 struct kv_ps *ps = kv_get_ps(rps); 2800 2801 
r600_dpm_print_class_info(rps->class, rps->class2); 2802 r600_dpm_print_cap_info(rps->caps); 2803 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2804 for (i = 0; i < ps->num_levels; i++) { 2805 struct kv_pl *pl = &ps->levels[i]; 2806 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2807 i, pl->sclk, 2808 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index)); 2809 } 2810 r600_dpm_print_ps_status(rdev, rps); 2811 } 2812 2813 void kv_dpm_fini(struct radeon_device *rdev) 2814 { 2815 int i; 2816 2817 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 2818 kfree(rdev->pm.dpm.ps[i].ps_priv); 2819 } 2820 kfree(rdev->pm.dpm.ps); 2821 kfree(rdev->pm.dpm.priv); 2822 r600_free_extended_power_table(rdev); 2823 } 2824 2825 void kv_dpm_display_configuration_changed(struct radeon_device *rdev) 2826 { 2827 2828 } 2829 2830 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low) 2831 { 2832 struct kv_power_info *pi = kv_get_pi(rdev); 2833 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2834 2835 if (low) 2836 return requested_state->levels[0].sclk; 2837 else 2838 return requested_state->levels[requested_state->num_levels - 1].sclk; 2839 } 2840 2841 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low) 2842 { 2843 struct kv_power_info *pi = kv_get_pi(rdev); 2844 2845 return pi->sys_info.bootup_uma_clk; 2846 } 2847 2848