1 /* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include "amdgpu.h" 25 #include "amdgpu_pm.h" 26 #include "cikd.h" 27 #include "atom.h" 28 #include "amdgpu_atombios.h" 29 #include "amdgpu_dpm.h" 30 #include "kv_dpm.h" 31 #include "gfx_v7_0.h" 32 #include <linux/seq_file.h> 33 34 #include "smu/smu_7_0_0_d.h" 35 #include "smu/smu_7_0_0_sh_mask.h" 36 37 #include "gca/gfx_7_2_d.h" 38 #include "gca/gfx_7_2_sh_mask.h" 39 #include "legacy_dpm.h" 40 41 #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 42 #define KV_MINIMUM_ENGINE_CLOCK 800 43 #define SMC_RAM_END 0x40000 44 45 static const struct amd_pm_funcs kv_dpm_funcs; 46 47 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 48 static int kv_enable_nb_dpm(struct amdgpu_device *adev, 49 bool enable); 50 static void kv_init_graphics_levels(struct amdgpu_device *adev); 51 static int kv_calculate_ds_divider(struct amdgpu_device *adev); 52 static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); 53 static int kv_calculate_dpm_settings(struct amdgpu_device *adev); 54 static void kv_enable_new_levels(struct amdgpu_device *adev); 55 static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 56 struct amdgpu_ps *new_rps); 57 static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); 58 static int kv_set_enabled_levels(struct amdgpu_device *adev); 59 static int kv_force_dpm_highest(struct amdgpu_device *adev); 60 static int kv_force_dpm_lowest(struct amdgpu_device *adev); 61 static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 62 struct amdgpu_ps *new_rps, 63 struct amdgpu_ps *old_rps); 64 static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 65 int min_temp, int max_temp); 66 static int kv_init_fps_limits(struct amdgpu_device *adev); 67 68 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 69 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 70 71 72 static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, 73 struct sumo_vid_mapping_table *vid_mapping_table, 74 u32 vid_2bit) 75 { 76 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 77 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 78 u32 i; 79 80 if (vddc_sclk_table && vddc_sclk_table->count) { 81 if (vid_2bit < vddc_sclk_table->count) 82 return vddc_sclk_table->entries[vid_2bit].v; 83 else 84 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; 85 } else { 86 for (i = 0; i < vid_mapping_table->num_entries; i++) { 87 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) 88 return vid_mapping_table->entries[i].vid_7bit; 89 } 90 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; 91 } 92 } 93 94 static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, 95 struct sumo_vid_mapping_table *vid_mapping_table, 96 u32 vid_7bit) 97 { 98 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = 99 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 100 u32 i; 101 102 if (vddc_sclk_table && vddc_sclk_table->count) { 103 for (i = 0; i < vddc_sclk_table->count; i++) { 104 if (vddc_sclk_table->entries[i].v == vid_7bit) 105 return i; 106 } 107 return vddc_sclk_table->count - 1; 108 } else { 109 for (i = 0; i < vid_mapping_table->num_entries; i++) { 110 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) 111 return vid_mapping_table->entries[i].vid_2bit; 112 } 113 114 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; 115 } 116 } 117 118 static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) 119 { 120 /* This bit selects who handles display phy powergating. 121 * Clear the bit to let atom handle it. 122 * Set it to let the driver handle it. 123 * For now we just let atom handle it. 124 */ 125 #if 0 126 u32 v = RREG32(mmDOUT_SCRATCH3); 127 128 if (enable) 129 v |= 0x4; 130 else 131 v &= 0xFFFFFFFB; 132 133 WREG32(mmDOUT_SCRATCH3, v); 134 #endif 135 } 136 137 static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, 138 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, 139 ATOM_AVAILABLE_SCLK_LIST *table) 140 { 141 u32 i; 142 u32 n = 0; 143 u32 prev_sclk = 0; 144 145 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 146 if (table[i].ulSupportedSCLK > prev_sclk) { 147 sclk_voltage_mapping_table->entries[n].sclk_frequency = 148 table[i].ulSupportedSCLK; 149 sclk_voltage_mapping_table->entries[n].vid_2bit = 150 table[i].usVoltageIndex; 151 prev_sclk = table[i].ulSupportedSCLK; 152 n++; 153 } 154 } 155 156 sclk_voltage_mapping_table->num_max_dpm_entries = n; 157 } 158 159 static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, 160 struct sumo_vid_mapping_table *vid_mapping_table, 161 ATOM_AVAILABLE_SCLK_LIST *table) 162 { 163 u32 i, j; 164 165 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { 166 if (table[i].ulSupportedSCLK != 0) { 167 if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES) 168 continue; 169 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = 170 table[i].usVoltageID; 171 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = 172 table[i].usVoltageIndex; 173 } 174 } 175 176 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { 177 if (vid_mapping_table->entries[i].vid_7bit == 0) { 178 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { 179 if (vid_mapping_table->entries[j].vid_7bit != 0) { 180 vid_mapping_table->entries[i] = 181 vid_mapping_table->entries[j]; 182 vid_mapping_table->entries[j].vid_7bit = 0; 183 break; 184 } 185 } 186 187 if (j == SUMO_MAX_NUMBER_VOLTAGES) 188 break; 189 } 190 } 191 192 vid_mapping_table->num_entries = i; 193 } 194 195 #if 0 196 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { 197 { 0, 4, 1 }, 198 { 1, 4, 1 }, 199 { 2, 5, 1 }, 200 { 3, 4, 2 }, 201 { 4, 1, 1 }, 202 { 5, 5, 2 }, 203 { 6, 6, 1 }, 204 { 7, 9, 2 }, 205 { 0xffffffff } 206 }; 207 208 static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = { 209 { 0, 4, 1 }, 210 { 0xffffffff } 211 }; 212 213 static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = { 214 { 0, 4, 1 }, 215 { 0xffffffff } 216 }; 217 218 static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = { 219 { 0, 4, 1 }, 220 { 0xffffffff } 221 }; 222 223 static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = { 224 { 0, 4, 1 }, 225 { 0xffffffff } 226 }; 227 228 static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = { 229 { 0, 4, 1 }, 230 { 1, 4, 1 }, 231 { 2, 5, 1 }, 232 { 3, 4, 1 }, 233 { 4, 1, 1 }, 234 { 5, 5, 1 }, 235 { 6, 6, 1 }, 236 { 7, 9, 1 }, 237 { 8, 4, 1 }, 238 { 9, 2, 1 }, 239 { 10, 3, 1 }, 240 { 11, 6, 1 }, 241 { 12, 8, 2 }, 242 { 13, 1, 1 }, 243 { 14, 2, 1 }, 244 { 15, 3, 1 }, 245 { 16, 1, 1 }, 246 { 17, 4, 1 }, 247 { 18, 3, 1 }, 248 { 19, 1, 1 }, 249 { 20, 8, 1 }, 250 { 21, 5, 1 }, 251 { 22, 1, 1 }, 252 { 23, 1, 1 }, 253 { 24, 4, 1 }, 254 { 27, 6, 1 }, 255 { 28, 1, 1 }, 256 { 0xffffffff } 257 }; 258 259 static const struct kv_lcac_config_reg sx0_cac_config_reg[] = { 260 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 261 }; 262 263 static const struct kv_lcac_config_reg mc0_cac_config_reg[] = { 264 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 265 }; 266 267 static const struct kv_lcac_config_reg mc1_cac_config_reg[] = { 268 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 269 }; 270 271 static const struct kv_lcac_config_reg mc2_cac_config_reg[] = { 272 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 273 }; 274 275 static const struct kv_lcac_config_reg mc3_cac_config_reg[] = { 276 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 277 }; 278 279 static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { 280 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 281 }; 282 #endif 283 284 static const struct kv_pt_config_reg didt_config_kv[] = { 285 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 286 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 287 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 288 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 289 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 290 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 291 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 292 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 293 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 294 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 295 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 296 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 297 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 298 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 299 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 300 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 301 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 302 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 303 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 304 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 305 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 306 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 307 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 308 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 309 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 310 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 311 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 312 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 313 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 314 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 315 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 316 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 317 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 318 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 319 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 320 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 321 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 322 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 323 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 324 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 325 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 326 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 327 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 328 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 329 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 330 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 331 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 332 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 333 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 334 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 335 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 336 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 337 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 338 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 339 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 340 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 341 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 342 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 343 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 344 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 345 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 346 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 347 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 348 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, 349 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, 350 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, 351 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, 352 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, 353 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, 354 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 355 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, 356 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, 357 { 0xFFFFFFFF } 358 }; 359 360 static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) 361 { 362 struct kv_ps *ps = rps->ps_priv; 363 364 return ps; 365 } 366 367 static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) 368 { 369 struct kv_power_info *pi = adev->pm.dpm.priv; 370 371 return pi; 372 } 373 374 #if 0 375 static void kv_program_local_cac_table(struct amdgpu_device *adev, 376 const struct kv_lcac_config_values *local_cac_table, 377 const struct kv_lcac_config_reg *local_cac_reg) 378 { 379 u32 i, count, data; 380 const struct kv_lcac_config_values *values = local_cac_table; 381 382 while (values->block_id != 0xffffffff) { 383 count = values->signal_id; 384 for (i = 0; i < count; i++) { 385 data = ((values->block_id << local_cac_reg->block_shift) & 386 local_cac_reg->block_mask); 387 data |= ((i << local_cac_reg->signal_shift) & 388 local_cac_reg->signal_mask); 389 data |= ((values->t << local_cac_reg->t_shift) & 390 local_cac_reg->t_mask); 391 data |= ((1 << local_cac_reg->enable_shift) & 392 local_cac_reg->enable_mask); 393 WREG32_SMC(local_cac_reg->cntl, data); 394 } 395 values++; 396 } 397 } 398 #endif 399 400 static int kv_program_pt_config_registers(struct amdgpu_device *adev, 401 const struct kv_pt_config_reg *cac_config_regs) 402 { 403 const struct kv_pt_config_reg *config_regs = cac_config_regs; 404 u32 data; 405 u32 cache = 0; 406 407 if (config_regs == NULL) 408 return -EINVAL; 409 410 while (config_regs->offset != 0xFFFFFFFF) { 411 if (config_regs->type == KV_CONFIGREG_CACHE) { 412 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 413 } else { 414 switch (config_regs->type) { 415 case KV_CONFIGREG_SMC_IND: 416 data = RREG32_SMC(config_regs->offset); 417 break; 418 case KV_CONFIGREG_DIDT_IND: 419 data = RREG32_DIDT(config_regs->offset); 420 break; 421 default: 422 data = RREG32(config_regs->offset); 423 break; 424 } 425 426 data &= ~config_regs->mask; 427 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 428 data |= cache; 429 cache = 0; 430 431 switch (config_regs->type) { 432 case KV_CONFIGREG_SMC_IND: 433 WREG32_SMC(config_regs->offset, data); 434 break; 435 case KV_CONFIGREG_DIDT_IND: 436 WREG32_DIDT(config_regs->offset, data); 437 break; 438 default: 439 WREG32(config_regs->offset, data); 440 break; 441 } 442 } 443 config_regs++; 444 } 445 446 return 0; 447 } 448 449 static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) 450 { 451 struct kv_power_info *pi = kv_get_pi(adev); 452 u32 data; 453 454 if (pi->caps_sq_ramping) { 455 data = RREG32_DIDT(ixDIDT_SQ_CTRL0); 456 if (enable) 457 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 458 else 459 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; 460 WREG32_DIDT(ixDIDT_SQ_CTRL0, data); 461 } 462 463 if (pi->caps_db_ramping) { 464 data = RREG32_DIDT(ixDIDT_DB_CTRL0); 465 if (enable) 466 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 467 else 468 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; 469 WREG32_DIDT(ixDIDT_DB_CTRL0, data); 470 } 471 472 if (pi->caps_td_ramping) { 473 data = RREG32_DIDT(ixDIDT_TD_CTRL0); 474 if (enable) 475 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 476 else 477 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; 478 WREG32_DIDT(ixDIDT_TD_CTRL0, data); 479 } 480 481 if (pi->caps_tcp_ramping) { 482 data = RREG32_DIDT(ixDIDT_TCP_CTRL0); 483 if (enable) 484 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 485 else 486 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; 487 WREG32_DIDT(ixDIDT_TCP_CTRL0, data); 488 } 489 } 490 491 static int kv_enable_didt(struct amdgpu_device *adev, bool enable) 492 { 493 struct kv_power_info *pi = kv_get_pi(adev); 494 int ret; 495 496 if (pi->caps_sq_ramping || 497 pi->caps_db_ramping || 498 pi->caps_td_ramping || 499 pi->caps_tcp_ramping) { 500 amdgpu_gfx_rlc_enter_safe_mode(adev, 0); 501 502 if (enable) { 503 ret = kv_program_pt_config_registers(adev, didt_config_kv); 504 if (ret) { 505 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 506 return ret; 507 } 508 } 509 510 kv_do_enable_didt(adev, enable); 511 512 amdgpu_gfx_rlc_exit_safe_mode(adev, 0); 513 } 514 515 return 0; 516 } 517 518 #if 0 519 static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) 520 { 521 struct kv_power_info *pi = kv_get_pi(adev); 522 523 if (pi->caps_cac) { 524 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); 525 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); 526 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); 527 528 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); 529 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); 530 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); 531 532 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); 533 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); 534 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); 535 536 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); 537 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); 538 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); 539 540 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); 541 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); 542 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); 543 544 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); 545 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); 546 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); 547 } 548 } 549 #endif 550 551 static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) 552 { 553 struct kv_power_info *pi = kv_get_pi(adev); 554 int ret = 0; 555 556 if (pi->caps_cac) { 557 if (enable) { 558 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); 559 if (ret) 560 pi->cac_enabled = false; 561 else 562 pi->cac_enabled = true; 563 } else if (pi->cac_enabled) { 564 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); 565 pi->cac_enabled = false; 566 } 567 } 568 569 return ret; 570 } 571 572 static int kv_process_firmware_header(struct amdgpu_device *adev) 573 { 574 struct kv_power_info *pi = kv_get_pi(adev); 575 u32 tmp; 576 int ret; 577 578 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 579 offsetof(SMU7_Firmware_Header, DpmTable), 580 &tmp, pi->sram_end); 581 582 if (ret == 0) 583 pi->dpm_table_start = tmp; 584 585 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + 586 offsetof(SMU7_Firmware_Header, SoftRegisters), 587 &tmp, pi->sram_end); 588 589 if (ret == 0) 590 pi->soft_regs_start = tmp; 591 592 return ret; 593 } 594 595 static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) 596 { 597 struct kv_power_info *pi = kv_get_pi(adev); 598 int ret; 599 600 pi->graphics_voltage_change_enable = 1; 601 602 ret = amdgpu_kv_copy_bytes_to_smc(adev, 603 pi->dpm_table_start + 604 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), 605 &pi->graphics_voltage_change_enable, 606 sizeof(u8), pi->sram_end); 607 608 return ret; 609 } 610 611 static int kv_set_dpm_interval(struct amdgpu_device *adev) 612 { 613 struct kv_power_info *pi = kv_get_pi(adev); 614 int ret; 615 616 pi->graphics_interval = 1; 617 618 ret = amdgpu_kv_copy_bytes_to_smc(adev, 619 pi->dpm_table_start + 620 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), 621 &pi->graphics_interval, 622 sizeof(u8), pi->sram_end); 623 624 return ret; 625 } 626 627 static int kv_set_dpm_boot_state(struct amdgpu_device *adev) 628 { 629 struct kv_power_info *pi = kv_get_pi(adev); 630 int ret; 631 632 ret = amdgpu_kv_copy_bytes_to_smc(adev, 633 pi->dpm_table_start + 634 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), 635 &pi->graphics_boot_level, 636 sizeof(u8), pi->sram_end); 637 638 return ret; 639 } 640 641 static void kv_program_vc(struct amdgpu_device *adev) 642 { 643 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); 644 } 645 646 static void kv_clear_vc(struct amdgpu_device *adev) 647 { 648 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 649 } 650 651 static int kv_set_divider_value(struct amdgpu_device *adev, 652 u32 index, u32 sclk) 653 { 654 struct kv_power_info *pi = kv_get_pi(adev); 655 struct atom_clock_dividers dividers; 656 int ret; 657 658 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 659 sclk, false, ÷rs); 660 if (ret) 661 return ret; 662 663 pi->graphics_level[index].SclkDid = (u8)dividers.post_div; 664 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); 665 666 return 0; 667 } 668 669 static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, 670 u16 voltage) 671 { 672 return 6200 - (voltage * 25); 673 } 674 675 static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, 676 u32 vid_2bit) 677 { 678 struct kv_power_info *pi = kv_get_pi(adev); 679 u32 vid_8bit = kv_convert_vid2_to_vid7(adev, 680 &pi->sys_info.vid_mapping_table, 681 vid_2bit); 682 683 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); 684 } 685 686 687 static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) 688 { 689 struct kv_power_info *pi = kv_get_pi(adev); 690 691 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; 692 pi->graphics_level[index].MinVddNb = 693 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); 694 695 return 0; 696 } 697 698 static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) 699 { 700 struct kv_power_info *pi = kv_get_pi(adev); 701 702 pi->graphics_level[index].AT = cpu_to_be16((u16)at); 703 704 return 0; 705 } 706 707 static void kv_dpm_power_level_enable(struct amdgpu_device *adev, 708 u32 index, bool enable) 709 { 710 struct kv_power_info *pi = kv_get_pi(adev); 711 712 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; 713 } 714 715 static void kv_start_dpm(struct amdgpu_device *adev) 716 { 717 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 718 719 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; 720 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 721 722 amdgpu_kv_smc_dpm_enable(adev, true); 723 } 724 725 static void kv_stop_dpm(struct amdgpu_device *adev) 726 { 727 amdgpu_kv_smc_dpm_enable(adev, false); 728 } 729 730 static void kv_start_am(struct amdgpu_device *adev) 731 { 732 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 733 734 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 735 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 736 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; 737 738 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 739 } 740 741 static void kv_reset_am(struct amdgpu_device *adev) 742 { 743 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 744 745 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | 746 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 747 748 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); 749 } 750 751 static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) 752 { 753 return amdgpu_kv_notify_message_to_smu(adev, freeze ? 754 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); 755 } 756 757 static int kv_force_lowest_valid(struct amdgpu_device *adev) 758 { 759 return kv_force_dpm_lowest(adev); 760 } 761 762 static int kv_unforce_levels(struct amdgpu_device *adev) 763 { 764 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 765 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); 766 else 767 return kv_set_enabled_levels(adev); 768 } 769 770 static int kv_update_sclk_t(struct amdgpu_device *adev) 771 { 772 struct kv_power_info *pi = kv_get_pi(adev); 773 u32 low_sclk_interrupt_t = 0; 774 int ret = 0; 775 776 if (pi->caps_sclk_throttle_low_notification) { 777 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 778 779 ret = amdgpu_kv_copy_bytes_to_smc(adev, 780 pi->dpm_table_start + 781 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), 782 (u8 *)&low_sclk_interrupt_t, 783 sizeof(u32), pi->sram_end); 784 } 785 return ret; 786 } 787 788 static int kv_program_bootup_state(struct amdgpu_device *adev) 789 { 790 struct kv_power_info *pi = kv_get_pi(adev); 791 u32 i; 792 struct amdgpu_clock_voltage_dependency_table *table = 793 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 794 795 if (table && table->count) { 796 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 797 if (table->entries[i].clk == pi->boot_pl.sclk) 798 break; 799 } 800 801 pi->graphics_boot_level = (u8)i; 802 kv_dpm_power_level_enable(adev, i, true); 803 } else { 804 struct sumo_sclk_voltage_mapping_table *table = 805 &pi->sys_info.sclk_voltage_mapping_table; 806 807 if (table->num_max_dpm_entries == 0) 808 return -EINVAL; 809 810 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 811 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 812 break; 813 } 814 815 pi->graphics_boot_level = (u8)i; 816 kv_dpm_power_level_enable(adev, i, true); 817 } 818 return 0; 819 } 820 821 static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) 822 { 823 struct kv_power_info *pi = kv_get_pi(adev); 824 int ret; 825 826 pi->graphics_therm_throttle_enable = 1; 827 828 ret = amdgpu_kv_copy_bytes_to_smc(adev, 829 pi->dpm_table_start + 830 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), 831 &pi->graphics_therm_throttle_enable, 832 sizeof(u8), pi->sram_end); 833 834 return ret; 835 } 836 837 static int kv_upload_dpm_settings(struct amdgpu_device *adev) 838 { 839 struct kv_power_info *pi = kv_get_pi(adev); 840 int ret; 841 842 ret = amdgpu_kv_copy_bytes_to_smc(adev, 843 pi->dpm_table_start + 844 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), 845 (u8 *)&pi->graphics_level, 846 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, 847 pi->sram_end); 848 849 if (ret) 850 return ret; 851 852 ret = amdgpu_kv_copy_bytes_to_smc(adev, 853 pi->dpm_table_start + 854 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), 855 &pi->graphics_dpm_level_count, 856 sizeof(u8), pi->sram_end); 857 858 return ret; 859 } 860 861 static u32 kv_get_clock_difference(u32 a, u32 b) 862 { 863 return (a >= b) ? a - b : b - a; 864 } 865 866 static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) 867 { 868 struct kv_power_info *pi = kv_get_pi(adev); 869 u32 value; 870 871 if (pi->caps_enable_dfs_bypass) { 872 if (kv_get_clock_difference(clk, 40000) < 200) 873 value = 3; 874 else if (kv_get_clock_difference(clk, 30000) < 200) 875 value = 2; 876 else if (kv_get_clock_difference(clk, 20000) < 200) 877 value = 7; 878 else if (kv_get_clock_difference(clk, 15000) < 200) 879 value = 6; 880 else if (kv_get_clock_difference(clk, 10000) < 200) 881 value = 8; 882 else 883 value = 0; 884 } else { 885 value = 0; 886 } 887 888 return value; 889 } 890 891 static int kv_populate_uvd_table(struct amdgpu_device *adev) 892 { 893 struct kv_power_info *pi = kv_get_pi(adev); 894 struct amdgpu_uvd_clock_voltage_dependency_table *table = 895 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 896 struct atom_clock_dividers dividers; 897 int ret; 898 u32 i; 899 900 if (table == NULL || table->count == 0) 901 return 0; 902 903 pi->uvd_level_count = 0; 904 for (i = 0; i < table->count; i++) { 905 if (pi->high_voltage_t && 906 (pi->high_voltage_t < table->entries[i].v)) 907 break; 908 909 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); 910 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); 911 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); 912 913 pi->uvd_level[i].VClkBypassCntl = 914 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); 915 pi->uvd_level[i].DClkBypassCntl = 916 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); 917 918 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 919 table->entries[i].vclk, false, ÷rs); 920 if (ret) 921 return ret; 922 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; 923 924 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 925 table->entries[i].dclk, false, ÷rs); 926 if (ret) 927 return ret; 928 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; 929 930 pi->uvd_level_count++; 931 } 932 933 ret = amdgpu_kv_copy_bytes_to_smc(adev, 934 pi->dpm_table_start + 935 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), 936 (u8 *)&pi->uvd_level_count, 937 sizeof(u8), pi->sram_end); 938 if (ret) 939 return ret; 940 941 pi->uvd_interval = 1; 942 943 ret = amdgpu_kv_copy_bytes_to_smc(adev, 944 pi->dpm_table_start + 945 offsetof(SMU7_Fusion_DpmTable, UVDInterval), 946 &pi->uvd_interval, 947 sizeof(u8), pi->sram_end); 948 if (ret) 949 return ret; 950 951 ret = amdgpu_kv_copy_bytes_to_smc(adev, 952 pi->dpm_table_start + 953 offsetof(SMU7_Fusion_DpmTable, UvdLevel), 954 (u8 *)&pi->uvd_level, 955 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, 956 pi->sram_end); 957 958 return ret; 959 960 } 961 962 static int kv_populate_vce_table(struct amdgpu_device *adev) 963 { 964 struct kv_power_info *pi = kv_get_pi(adev); 965 int ret; 966 u32 i; 967 struct amdgpu_vce_clock_voltage_dependency_table *table = 968 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 969 struct atom_clock_dividers dividers; 970 971 if (table == NULL || table->count == 0) 972 return 0; 973 974 pi->vce_level_count = 0; 975 for (i = 0; i < table->count; i++) { 976 if (pi->high_voltage_t && 977 pi->high_voltage_t < table->entries[i].v) 978 break; 979 980 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); 981 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 982 983 pi->vce_level[i].ClkBypassCntl = 984 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); 985 986 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 987 table->entries[i].evclk, false, ÷rs); 988 if (ret) 989 return ret; 990 pi->vce_level[i].Divider = (u8)dividers.post_div; 991 992 pi->vce_level_count++; 993 } 994 995 ret = amdgpu_kv_copy_bytes_to_smc(adev, 996 pi->dpm_table_start + 997 offsetof(SMU7_Fusion_DpmTable, VceLevelCount), 998 (u8 *)&pi->vce_level_count, 999 sizeof(u8), 1000 pi->sram_end); 1001 if (ret) 1002 return ret; 1003 1004 pi->vce_interval = 1; 1005 1006 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1007 pi->dpm_table_start + 1008 offsetof(SMU7_Fusion_DpmTable, VCEInterval), 1009 (u8 *)&pi->vce_interval, 1010 sizeof(u8), 1011 pi->sram_end); 1012 if (ret) 1013 return ret; 1014 1015 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1016 pi->dpm_table_start + 1017 offsetof(SMU7_Fusion_DpmTable, VceLevel), 1018 (u8 *)&pi->vce_level, 1019 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, 1020 pi->sram_end); 1021 1022 return ret; 1023 } 1024 1025 static int kv_populate_samu_table(struct amdgpu_device *adev) 1026 { 1027 struct kv_power_info *pi = kv_get_pi(adev); 1028 struct amdgpu_clock_voltage_dependency_table *table = 1029 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1030 struct atom_clock_dividers dividers; 1031 int ret; 1032 u32 i; 1033 1034 if (table == NULL || table->count == 0) 1035 return 0; 1036 1037 pi->samu_level_count = 0; 1038 for (i = 0; i < table->count; i++) { 1039 if (pi->high_voltage_t && 1040 pi->high_voltage_t < table->entries[i].v) 1041 break; 1042 1043 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1044 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1045 1046 pi->samu_level[i].ClkBypassCntl = 1047 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); 1048 1049 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1050 table->entries[i].clk, false, ÷rs); 1051 if (ret) 1052 return ret; 1053 pi->samu_level[i].Divider = (u8)dividers.post_div; 1054 1055 pi->samu_level_count++; 1056 } 1057 1058 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1059 pi->dpm_table_start + 1060 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), 1061 (u8 *)&pi->samu_level_count, 1062 sizeof(u8), 1063 pi->sram_end); 1064 if (ret) 1065 return ret; 1066 1067 pi->samu_interval = 1; 1068 1069 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1070 pi->dpm_table_start + 1071 offsetof(SMU7_Fusion_DpmTable, SAMUInterval), 1072 (u8 *)&pi->samu_interval, 1073 sizeof(u8), 1074 pi->sram_end); 1075 if (ret) 1076 return ret; 1077 1078 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1079 pi->dpm_table_start + 1080 offsetof(SMU7_Fusion_DpmTable, SamuLevel), 1081 (u8 *)&pi->samu_level, 1082 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, 1083 pi->sram_end); 1084 if (ret) 1085 return ret; 1086 1087 return ret; 1088 } 1089 1090 1091 static int kv_populate_acp_table(struct amdgpu_device *adev) 1092 { 1093 struct kv_power_info *pi = kv_get_pi(adev); 1094 struct amdgpu_clock_voltage_dependency_table *table = 1095 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1096 struct atom_clock_dividers dividers; 1097 int ret; 1098 u32 i; 1099 1100 if (table == NULL || table->count == 0) 1101 return 0; 1102 1103 pi->acp_level_count = 0; 1104 for (i = 0; i < table->count; i++) { 1105 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); 1106 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); 1107 1108 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, 1109 table->entries[i].clk, false, ÷rs); 1110 if (ret) 1111 return ret; 1112 pi->acp_level[i].Divider = (u8)dividers.post_div; 1113 1114 pi->acp_level_count++; 1115 } 1116 1117 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1118 pi->dpm_table_start + 1119 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), 1120 (u8 *)&pi->acp_level_count, 1121 sizeof(u8), 1122 pi->sram_end); 1123 if (ret) 1124 return ret; 1125 1126 pi->acp_interval = 1; 1127 1128 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1129 pi->dpm_table_start + 1130 offsetof(SMU7_Fusion_DpmTable, ACPInterval), 1131 (u8 *)&pi->acp_interval, 1132 sizeof(u8), 1133 pi->sram_end); 1134 if (ret) 1135 return ret; 1136 1137 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1138 pi->dpm_table_start + 1139 offsetof(SMU7_Fusion_DpmTable, AcpLevel), 1140 (u8 *)&pi->acp_level, 1141 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, 1142 pi->sram_end); 1143 if (ret) 1144 return ret; 1145 1146 return ret; 1147 } 1148 1149 static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) 1150 { 1151 struct kv_power_info *pi = kv_get_pi(adev); 1152 u32 i; 1153 struct amdgpu_clock_voltage_dependency_table *table = 1154 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1155 1156 if (table && table->count) { 1157 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1158 if (pi->caps_enable_dfs_bypass) { 1159 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) 1160 pi->graphics_level[i].ClkBypassCntl = 3; 1161 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) 1162 pi->graphics_level[i].ClkBypassCntl = 2; 1163 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) 1164 pi->graphics_level[i].ClkBypassCntl = 7; 1165 else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200) 1166 pi->graphics_level[i].ClkBypassCntl = 6; 1167 else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200) 1168 pi->graphics_level[i].ClkBypassCntl = 8; 1169 else 1170 pi->graphics_level[i].ClkBypassCntl = 0; 1171 } else { 1172 pi->graphics_level[i].ClkBypassCntl = 0; 1173 } 1174 } 1175 } else { 1176 struct sumo_sclk_voltage_mapping_table *table = 1177 &pi->sys_info.sclk_voltage_mapping_table; 1178 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1179 if (pi->caps_enable_dfs_bypass) { 1180 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) 1181 pi->graphics_level[i].ClkBypassCntl = 3; 1182 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) 1183 pi->graphics_level[i].ClkBypassCntl = 2; 1184 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) 1185 pi->graphics_level[i].ClkBypassCntl = 7; 1186 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) 1187 pi->graphics_level[i].ClkBypassCntl = 6; 1188 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) 1189 pi->graphics_level[i].ClkBypassCntl = 8; 1190 else 1191 pi->graphics_level[i].ClkBypassCntl = 0; 1192 } else { 1193 pi->graphics_level[i].ClkBypassCntl = 0; 1194 } 1195 } 1196 } 1197 } 1198 1199 static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) 1200 { 1201 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1202 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1203 } 1204 1205 static void kv_reset_acp_boot_level(struct amdgpu_device *adev) 1206 { 1207 struct kv_power_info *pi = kv_get_pi(adev); 1208 1209 pi->acp_boot_level = 0xff; 1210 } 1211 1212 static void kv_update_current_ps(struct amdgpu_device *adev, 1213 struct amdgpu_ps *rps) 1214 { 1215 struct kv_ps *new_ps = kv_get_ps(rps); 1216 struct kv_power_info *pi = kv_get_pi(adev); 1217 1218 pi->current_rps = *rps; 1219 pi->current_ps = *new_ps; 1220 pi->current_rps.ps_priv = &pi->current_ps; 1221 adev->pm.dpm.current_ps = &pi->current_rps; 1222 } 1223 1224 static void kv_update_requested_ps(struct amdgpu_device *adev, 1225 struct amdgpu_ps *rps) 1226 { 1227 struct kv_ps *new_ps = kv_get_ps(rps); 1228 struct kv_power_info *pi = kv_get_pi(adev); 1229 1230 pi->requested_rps = *rps; 1231 pi->requested_ps = *new_ps; 1232 pi->requested_rps.ps_priv = &pi->requested_ps; 1233 adev->pm.dpm.requested_ps = &pi->requested_rps; 1234 } 1235 1236 static void kv_dpm_enable_bapm(void *handle, bool enable) 1237 { 1238 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1239 struct kv_power_info *pi = kv_get_pi(adev); 1240 int ret; 1241 1242 if (pi->bapm_enable) { 1243 ret = amdgpu_kv_smc_bapm_enable(adev, enable); 1244 if (ret) 1245 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1246 } 1247 } 1248 1249 static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) 1250 { 1251 switch (sensor) { 1252 case THERMAL_TYPE_KV: 1253 return true; 1254 case THERMAL_TYPE_NONE: 1255 case THERMAL_TYPE_EXTERNAL: 1256 case THERMAL_TYPE_EXTERNAL_GPIO: 1257 default: 1258 return false; 1259 } 1260 } 1261 1262 static int kv_dpm_enable(struct amdgpu_device *adev) 1263 { 1264 struct kv_power_info *pi = kv_get_pi(adev); 1265 int ret; 1266 1267 ret = kv_process_firmware_header(adev); 1268 if (ret) { 1269 DRM_ERROR("kv_process_firmware_header failed\n"); 1270 return ret; 1271 } 1272 kv_init_fps_limits(adev); 1273 kv_init_graphics_levels(adev); 1274 ret = kv_program_bootup_state(adev); 1275 if (ret) { 1276 DRM_ERROR("kv_program_bootup_state failed\n"); 1277 return ret; 1278 } 1279 kv_calculate_dfs_bypass_settings(adev); 1280 ret = kv_upload_dpm_settings(adev); 1281 if (ret) { 1282 DRM_ERROR("kv_upload_dpm_settings failed\n"); 1283 return ret; 1284 } 1285 ret = kv_populate_uvd_table(adev); 1286 if (ret) { 1287 DRM_ERROR("kv_populate_uvd_table failed\n"); 1288 return ret; 1289 } 1290 ret = kv_populate_vce_table(adev); 1291 if (ret) { 1292 DRM_ERROR("kv_populate_vce_table failed\n"); 1293 return ret; 1294 } 1295 ret = kv_populate_samu_table(adev); 1296 if (ret) { 1297 DRM_ERROR("kv_populate_samu_table failed\n"); 1298 return ret; 1299 } 1300 ret = kv_populate_acp_table(adev); 1301 if (ret) { 1302 DRM_ERROR("kv_populate_acp_table failed\n"); 1303 return ret; 1304 } 1305 kv_program_vc(adev); 1306 #if 0 1307 kv_initialize_hardware_cac_manager(adev); 1308 #endif 1309 kv_start_am(adev); 1310 if (pi->enable_auto_thermal_throttling) { 1311 ret = kv_enable_auto_thermal_throttling(adev); 1312 if (ret) { 1313 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); 1314 return ret; 1315 } 1316 } 1317 ret = kv_enable_dpm_voltage_scaling(adev); 1318 if (ret) { 1319 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); 1320 return ret; 1321 } 1322 ret = kv_set_dpm_interval(adev); 1323 if (ret) { 1324 DRM_ERROR("kv_set_dpm_interval failed\n"); 1325 return ret; 1326 } 1327 ret = kv_set_dpm_boot_state(adev); 1328 if (ret) { 1329 DRM_ERROR("kv_set_dpm_boot_state failed\n"); 1330 return ret; 1331 } 1332 ret = kv_enable_ulv(adev, true); 1333 if (ret) { 1334 DRM_ERROR("kv_enable_ulv failed\n"); 1335 return ret; 1336 } 1337 kv_start_dpm(adev); 1338 ret = kv_enable_didt(adev, true); 1339 if (ret) { 1340 DRM_ERROR("kv_enable_didt failed\n"); 1341 return ret; 1342 } 1343 ret = kv_enable_smc_cac(adev, true); 1344 if (ret) { 1345 DRM_ERROR("kv_enable_smc_cac failed\n"); 1346 return ret; 1347 } 1348 1349 kv_reset_acp_boot_level(adev); 1350 1351 ret = amdgpu_kv_smc_bapm_enable(adev, false); 1352 if (ret) { 1353 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1354 return ret; 1355 } 1356 1357 if (adev->irq.installed && 1358 kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1359 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1360 if (ret) { 1361 DRM_ERROR("kv_set_thermal_temperature_range failed\n"); 1362 return ret; 1363 } 1364 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1365 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1366 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, 1367 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1368 } 1369 1370 return ret; 1371 } 1372 1373 static void kv_dpm_disable(struct amdgpu_device *adev) 1374 { 1375 struct kv_power_info *pi = kv_get_pi(adev); 1376 int err; 1377 1378 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1379 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1380 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1381 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 1382 1383 err = amdgpu_kv_smc_bapm_enable(adev, false); 1384 if (err) 1385 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1386 1387 if (adev->asic_type == CHIP_MULLINS) 1388 kv_enable_nb_dpm(adev, false); 1389 1390 /* powerup blocks */ 1391 kv_dpm_powergate_acp(adev, false); 1392 kv_dpm_powergate_samu(adev, false); 1393 if (pi->caps_vce_pg) /* power on the VCE block */ 1394 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1395 if (pi->caps_uvd_pg) /* power on the UVD block */ 1396 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1397 1398 kv_enable_smc_cac(adev, false); 1399 kv_enable_didt(adev, false); 1400 kv_clear_vc(adev); 1401 kv_stop_dpm(adev); 1402 kv_enable_ulv(adev, false); 1403 kv_reset_am(adev); 1404 1405 kv_update_current_ps(adev, adev->pm.dpm.boot_ps); 1406 } 1407 1408 #if 0 1409 static int kv_write_smc_soft_register(struct amdgpu_device *adev, 1410 u16 reg_offset, u32 value) 1411 { 1412 struct kv_power_info *pi = kv_get_pi(adev); 1413 1414 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, 1415 (u8 *)&value, sizeof(u16), pi->sram_end); 1416 } 1417 1418 static int kv_read_smc_soft_register(struct amdgpu_device *adev, 1419 u16 reg_offset, u32 *value) 1420 { 1421 struct kv_power_info *pi = kv_get_pi(adev); 1422 1423 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, 1424 value, pi->sram_end); 1425 } 1426 #endif 1427 1428 static void kv_init_sclk_t(struct amdgpu_device *adev) 1429 { 1430 struct kv_power_info *pi = kv_get_pi(adev); 1431 1432 pi->low_sclk_interrupt_t = 0; 1433 } 1434 1435 static int kv_init_fps_limits(struct amdgpu_device *adev) 1436 { 1437 struct kv_power_info *pi = kv_get_pi(adev); 1438 int ret = 0; 1439 1440 if (pi->caps_fps) { 1441 u16 tmp; 1442 1443 tmp = 45; 1444 pi->fps_high_t = cpu_to_be16(tmp); 1445 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1446 pi->dpm_table_start + 1447 offsetof(SMU7_Fusion_DpmTable, FpsHighT), 1448 (u8 *)&pi->fps_high_t, 1449 sizeof(u16), pi->sram_end); 1450 1451 tmp = 30; 1452 pi->fps_low_t = cpu_to_be16(tmp); 1453 1454 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1455 pi->dpm_table_start + 1456 offsetof(SMU7_Fusion_DpmTable, FpsLowT), 1457 (u8 *)&pi->fps_low_t, 1458 sizeof(u16), pi->sram_end); 1459 1460 } 1461 return ret; 1462 } 1463 1464 static void kv_init_powergate_state(struct amdgpu_device *adev) 1465 { 1466 struct kv_power_info *pi = kv_get_pi(adev); 1467 1468 pi->uvd_power_gated = false; 1469 pi->vce_power_gated = false; 1470 pi->samu_power_gated = false; 1471 pi->acp_power_gated = false; 1472 1473 } 1474 1475 static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 1476 { 1477 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1478 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); 1479 } 1480 1481 static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 1482 { 1483 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1484 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); 1485 } 1486 1487 static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) 1488 { 1489 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1490 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); 1491 } 1492 1493 static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) 1494 { 1495 return amdgpu_kv_notify_message_to_smu(adev, enable ? 1496 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); 1497 } 1498 1499 static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 1500 { 1501 struct kv_power_info *pi = kv_get_pi(adev); 1502 struct amdgpu_uvd_clock_voltage_dependency_table *table = 1503 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1504 int ret; 1505 u32 mask; 1506 1507 if (!gate) { 1508 if (table->count) 1509 pi->uvd_boot_level = table->count - 1; 1510 else 1511 pi->uvd_boot_level = 0; 1512 1513 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { 1514 mask = 1 << pi->uvd_boot_level; 1515 } else { 1516 mask = 0x1f; 1517 } 1518 1519 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1520 pi->dpm_table_start + 1521 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), 1522 (uint8_t *)&pi->uvd_boot_level, 1523 sizeof(u8), pi->sram_end); 1524 if (ret) 1525 return ret; 1526 1527 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1528 PPSMC_MSG_UVDDPM_SetEnabledMask, 1529 mask); 1530 } 1531 1532 return kv_enable_uvd_dpm(adev, !gate); 1533 } 1534 1535 static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) 1536 { 1537 u8 i; 1538 struct amdgpu_vce_clock_voltage_dependency_table *table = 1539 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1540 1541 for (i = 0; i < table->count; i++) { 1542 if (table->entries[i].evclk >= evclk) 1543 break; 1544 } 1545 1546 return i; 1547 } 1548 1549 static int kv_update_vce_dpm(struct amdgpu_device *adev, 1550 struct amdgpu_ps *amdgpu_new_state, 1551 struct amdgpu_ps *amdgpu_current_state) 1552 { 1553 struct kv_power_info *pi = kv_get_pi(adev); 1554 struct amdgpu_vce_clock_voltage_dependency_table *table = 1555 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1556 int ret; 1557 1558 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1559 if (pi->caps_stable_p_state) 1560 pi->vce_boot_level = table->count - 1; 1561 else 1562 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); 1563 1564 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1565 pi->dpm_table_start + 1566 offsetof(SMU7_Fusion_DpmTable, VceBootLevel), 1567 (u8 *)&pi->vce_boot_level, 1568 sizeof(u8), 1569 pi->sram_end); 1570 if (ret) 1571 return ret; 1572 1573 if (pi->caps_stable_p_state) 1574 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1575 PPSMC_MSG_VCEDPM_SetEnabledMask, 1576 (1 << pi->vce_boot_level)); 1577 kv_enable_vce_dpm(adev, true); 1578 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1579 kv_enable_vce_dpm(adev, false); 1580 } 1581 1582 return 0; 1583 } 1584 1585 static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) 1586 { 1587 struct kv_power_info *pi = kv_get_pi(adev); 1588 struct amdgpu_clock_voltage_dependency_table *table = 1589 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 1590 int ret; 1591 1592 if (!gate) { 1593 if (pi->caps_stable_p_state) 1594 pi->samu_boot_level = table->count - 1; 1595 else 1596 pi->samu_boot_level = 0; 1597 1598 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1599 pi->dpm_table_start + 1600 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), 1601 (u8 *)&pi->samu_boot_level, 1602 sizeof(u8), 1603 pi->sram_end); 1604 if (ret) 1605 return ret; 1606 1607 if (pi->caps_stable_p_state) 1608 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1609 PPSMC_MSG_SAMUDPM_SetEnabledMask, 1610 (1 << pi->samu_boot_level)); 1611 } 1612 1613 return kv_enable_samu_dpm(adev, !gate); 1614 } 1615 1616 static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) 1617 { 1618 return 0; 1619 } 1620 1621 static void kv_update_acp_boot_level(struct amdgpu_device *adev) 1622 { 1623 struct kv_power_info *pi = kv_get_pi(adev); 1624 u8 acp_boot_level; 1625 1626 if (!pi->caps_stable_p_state) { 1627 acp_boot_level = kv_get_acp_boot_level(adev); 1628 if (acp_boot_level != pi->acp_boot_level) { 1629 pi->acp_boot_level = acp_boot_level; 1630 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1631 PPSMC_MSG_ACPDPM_SetEnabledMask, 1632 (1 << pi->acp_boot_level)); 1633 } 1634 } 1635 } 1636 1637 static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) 1638 { 1639 struct kv_power_info *pi = kv_get_pi(adev); 1640 struct amdgpu_clock_voltage_dependency_table *table = 1641 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1642 int ret; 1643 1644 if (!gate) { 1645 if (pi->caps_stable_p_state) 1646 pi->acp_boot_level = table->count - 1; 1647 else 1648 pi->acp_boot_level = kv_get_acp_boot_level(adev); 1649 1650 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1651 pi->dpm_table_start + 1652 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), 1653 (u8 *)&pi->acp_boot_level, 1654 sizeof(u8), 1655 pi->sram_end); 1656 if (ret) 1657 return ret; 1658 1659 if (pi->caps_stable_p_state) 1660 amdgpu_kv_send_msg_to_smc_with_parameter(adev, 1661 PPSMC_MSG_ACPDPM_SetEnabledMask, 1662 (1 << pi->acp_boot_level)); 1663 } 1664 1665 return kv_enable_acp_dpm(adev, !gate); 1666 } 1667 1668 static void kv_dpm_powergate_uvd(void *handle, bool gate) 1669 { 1670 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1671 struct kv_power_info *pi = kv_get_pi(adev); 1672 1673 pi->uvd_power_gated = gate; 1674 1675 if (gate) { 1676 /* stop the UVD block */ 1677 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1678 AMD_PG_STATE_GATE); 1679 kv_update_uvd_dpm(adev, gate); 1680 if (pi->caps_uvd_pg) 1681 /* power off the UVD block */ 1682 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); 1683 } else { 1684 if (pi->caps_uvd_pg) 1685 /* power on the UVD block */ 1686 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); 1687 /* re-init the UVD block */ 1688 kv_update_uvd_dpm(adev, gate); 1689 1690 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 1691 AMD_PG_STATE_UNGATE); 1692 } 1693 } 1694 1695 static void kv_dpm_powergate_vce(void *handle, bool gate) 1696 { 1697 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1698 struct kv_power_info *pi = kv_get_pi(adev); 1699 1700 pi->vce_power_gated = gate; 1701 1702 if (gate) { 1703 /* stop the VCE block */ 1704 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1705 AMD_PG_STATE_GATE); 1706 kv_enable_vce_dpm(adev, false); 1707 if (pi->caps_vce_pg) /* power off the VCE block */ 1708 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1709 } else { 1710 if (pi->caps_vce_pg) /* power on the VCE block */ 1711 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1712 kv_enable_vce_dpm(adev, true); 1713 /* re-init the VCE block */ 1714 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1715 AMD_PG_STATE_UNGATE); 1716 } 1717 } 1718 1719 1720 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1721 { 1722 struct kv_power_info *pi = kv_get_pi(adev); 1723 1724 if (pi->samu_power_gated == gate) 1725 return; 1726 1727 pi->samu_power_gated = gate; 1728 1729 if (gate) { 1730 kv_update_samu_dpm(adev, true); 1731 if (pi->caps_samu_pg) 1732 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); 1733 } else { 1734 if (pi->caps_samu_pg) 1735 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); 1736 kv_update_samu_dpm(adev, false); 1737 } 1738 } 1739 1740 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) 1741 { 1742 struct kv_power_info *pi = kv_get_pi(adev); 1743 1744 if (pi->acp_power_gated == gate) 1745 return; 1746 1747 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 1748 return; 1749 1750 pi->acp_power_gated = gate; 1751 1752 if (gate) { 1753 kv_update_acp_dpm(adev, true); 1754 if (pi->caps_acp_pg) 1755 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); 1756 } else { 1757 if (pi->caps_acp_pg) 1758 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); 1759 kv_update_acp_dpm(adev, false); 1760 } 1761 } 1762 1763 static void kv_set_valid_clock_range(struct amdgpu_device *adev, 1764 struct amdgpu_ps *new_rps) 1765 { 1766 struct kv_ps *new_ps = kv_get_ps(new_rps); 1767 struct kv_power_info *pi = kv_get_pi(adev); 1768 u32 i; 1769 struct amdgpu_clock_voltage_dependency_table *table = 1770 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1771 1772 if (table && table->count) { 1773 for (i = 0; i < pi->graphics_dpm_level_count; i++) { 1774 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || 1775 (i == (pi->graphics_dpm_level_count - 1))) { 1776 pi->lowest_valid = i; 1777 break; 1778 } 1779 } 1780 1781 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1782 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1783 break; 1784 } 1785 pi->highest_valid = i; 1786 1787 if (pi->lowest_valid > pi->highest_valid) { 1788 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > 1789 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) 1790 pi->highest_valid = pi->lowest_valid; 1791 else 1792 pi->lowest_valid = pi->highest_valid; 1793 } 1794 } else { 1795 struct sumo_sclk_voltage_mapping_table *table = 1796 &pi->sys_info.sclk_voltage_mapping_table; 1797 1798 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { 1799 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || 1800 i == (int)(pi->graphics_dpm_level_count - 1)) { 1801 pi->lowest_valid = i; 1802 break; 1803 } 1804 } 1805 1806 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1807 if (table->entries[i].sclk_frequency <= 1808 new_ps->levels[new_ps->num_levels - 1].sclk) 1809 break; 1810 } 1811 pi->highest_valid = i; 1812 1813 if (pi->lowest_valid > pi->highest_valid) { 1814 if ((new_ps->levels[0].sclk - 1815 table->entries[pi->highest_valid].sclk_frequency) > 1816 (table->entries[pi->lowest_valid].sclk_frequency - 1817 new_ps->levels[new_ps->num_levels - 1].sclk)) 1818 pi->highest_valid = pi->lowest_valid; 1819 else 1820 pi->lowest_valid = pi->highest_valid; 1821 } 1822 } 1823 } 1824 1825 static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, 1826 struct amdgpu_ps *new_rps) 1827 { 1828 struct kv_ps *new_ps = kv_get_ps(new_rps); 1829 struct kv_power_info *pi = kv_get_pi(adev); 1830 int ret = 0; 1831 u8 clk_bypass_cntl; 1832 1833 if (pi->caps_enable_dfs_bypass) { 1834 clk_bypass_cntl = new_ps->need_dfs_bypass ? 1835 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; 1836 ret = amdgpu_kv_copy_bytes_to_smc(adev, 1837 (pi->dpm_table_start + 1838 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + 1839 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + 1840 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), 1841 &clk_bypass_cntl, 1842 sizeof(u8), pi->sram_end); 1843 } 1844 1845 return ret; 1846 } 1847 1848 static int kv_enable_nb_dpm(struct amdgpu_device *adev, 1849 bool enable) 1850 { 1851 struct kv_power_info *pi = kv_get_pi(adev); 1852 int ret = 0; 1853 1854 if (enable) { 1855 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { 1856 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); 1857 if (ret == 0) 1858 pi->nb_dpm_enabled = true; 1859 } 1860 } else { 1861 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { 1862 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); 1863 if (ret == 0) 1864 pi->nb_dpm_enabled = false; 1865 } 1866 } 1867 1868 return ret; 1869 } 1870 1871 static int kv_dpm_force_performance_level(void *handle, 1872 enum amd_dpm_forced_level level) 1873 { 1874 int ret; 1875 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1876 1877 if (level == AMD_DPM_FORCED_LEVEL_HIGH) { 1878 ret = kv_force_dpm_highest(adev); 1879 if (ret) 1880 return ret; 1881 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { 1882 ret = kv_force_dpm_lowest(adev); 1883 if (ret) 1884 return ret; 1885 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { 1886 ret = kv_unforce_levels(adev); 1887 if (ret) 1888 return ret; 1889 } 1890 1891 adev->pm.dpm.forced_level = level; 1892 1893 return 0; 1894 } 1895 1896 static int kv_dpm_pre_set_power_state(void *handle) 1897 { 1898 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1899 struct kv_power_info *pi = kv_get_pi(adev); 1900 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1901 struct amdgpu_ps *new_ps = &requested_ps; 1902 1903 kv_update_requested_ps(adev, new_ps); 1904 1905 kv_apply_state_adjust_rules(adev, 1906 &pi->requested_rps, 1907 &pi->current_rps); 1908 1909 return 0; 1910 } 1911 1912 static int kv_dpm_set_power_state(void *handle) 1913 { 1914 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1915 struct kv_power_info *pi = kv_get_pi(adev); 1916 struct amdgpu_ps *new_ps = &pi->requested_rps; 1917 struct amdgpu_ps *old_ps = &pi->current_rps; 1918 int ret; 1919 1920 if (pi->bapm_enable) { 1921 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); 1922 if (ret) { 1923 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); 1924 return ret; 1925 } 1926 } 1927 1928 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 1929 if (pi->enable_dpm) { 1930 kv_set_valid_clock_range(adev, new_ps); 1931 kv_update_dfs_bypass_settings(adev, new_ps); 1932 ret = kv_calculate_ds_divider(adev); 1933 if (ret) { 1934 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1935 return ret; 1936 } 1937 kv_calculate_nbps_level_settings(adev); 1938 kv_calculate_dpm_settings(adev); 1939 kv_force_lowest_valid(adev); 1940 kv_enable_new_levels(adev); 1941 kv_upload_dpm_settings(adev); 1942 kv_program_nbps_index_settings(adev, new_ps); 1943 kv_unforce_levels(adev); 1944 kv_set_enabled_levels(adev); 1945 kv_force_lowest_valid(adev); 1946 kv_unforce_levels(adev); 1947 1948 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1949 if (ret) { 1950 DRM_ERROR("kv_update_vce_dpm failed\n"); 1951 return ret; 1952 } 1953 kv_update_sclk_t(adev); 1954 if (adev->asic_type == CHIP_MULLINS) 1955 kv_enable_nb_dpm(adev, true); 1956 } 1957 } else { 1958 if (pi->enable_dpm) { 1959 kv_set_valid_clock_range(adev, new_ps); 1960 kv_update_dfs_bypass_settings(adev, new_ps); 1961 ret = kv_calculate_ds_divider(adev); 1962 if (ret) { 1963 DRM_ERROR("kv_calculate_ds_divider failed\n"); 1964 return ret; 1965 } 1966 kv_calculate_nbps_level_settings(adev); 1967 kv_calculate_dpm_settings(adev); 1968 kv_freeze_sclk_dpm(adev, true); 1969 kv_upload_dpm_settings(adev); 1970 kv_program_nbps_index_settings(adev, new_ps); 1971 kv_freeze_sclk_dpm(adev, false); 1972 kv_set_enabled_levels(adev); 1973 ret = kv_update_vce_dpm(adev, new_ps, old_ps); 1974 if (ret) { 1975 DRM_ERROR("kv_update_vce_dpm failed\n"); 1976 return ret; 1977 } 1978 kv_update_acp_boot_level(adev); 1979 kv_update_sclk_t(adev); 1980 kv_enable_nb_dpm(adev, true); 1981 } 1982 } 1983 1984 return 0; 1985 } 1986 1987 static void kv_dpm_post_set_power_state(void *handle) 1988 { 1989 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1990 struct kv_power_info *pi = kv_get_pi(adev); 1991 struct amdgpu_ps *new_ps = &pi->requested_rps; 1992 1993 kv_update_current_ps(adev, new_ps); 1994 } 1995 1996 static void kv_dpm_setup_asic(struct amdgpu_device *adev) 1997 { 1998 sumo_take_smu_control(adev, true); 1999 kv_init_powergate_state(adev); 2000 kv_init_sclk_t(adev); 2001 } 2002 2003 #if 0 2004 static void kv_dpm_reset_asic(struct amdgpu_device *adev) 2005 { 2006 struct kv_power_info *pi = kv_get_pi(adev); 2007 2008 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2009 kv_force_lowest_valid(adev); 2010 kv_init_graphics_levels(adev); 2011 kv_program_bootup_state(adev); 2012 kv_upload_dpm_settings(adev); 2013 kv_force_lowest_valid(adev); 2014 kv_unforce_levels(adev); 2015 } else { 2016 kv_init_graphics_levels(adev); 2017 kv_program_bootup_state(adev); 2018 kv_freeze_sclk_dpm(adev, true); 2019 kv_upload_dpm_settings(adev); 2020 kv_freeze_sclk_dpm(adev, false); 2021 kv_set_enabled_level(adev, pi->graphics_boot_level); 2022 } 2023 } 2024 #endif 2025 2026 static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, 2027 struct amdgpu_clock_and_voltage_limits *table) 2028 { 2029 struct kv_power_info *pi = kv_get_pi(adev); 2030 2031 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { 2032 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; 2033 table->sclk = 2034 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; 2035 table->vddc = 2036 kv_convert_2bit_index_to_voltage(adev, 2037 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); 2038 } 2039 2040 table->mclk = pi->sys_info.nbp_memory_clock[0]; 2041 } 2042 2043 static void kv_patch_voltage_values(struct amdgpu_device *adev) 2044 { 2045 int i; 2046 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = 2047 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 2048 struct amdgpu_vce_clock_voltage_dependency_table *vce_table = 2049 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2050 struct amdgpu_clock_voltage_dependency_table *samu_table = 2051 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; 2052 struct amdgpu_clock_voltage_dependency_table *acp_table = 2053 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 2054 2055 if (uvd_table->count) { 2056 for (i = 0; i < uvd_table->count; i++) 2057 uvd_table->entries[i].v = 2058 kv_convert_8bit_index_to_voltage(adev, 2059 uvd_table->entries[i].v); 2060 } 2061 2062 if (vce_table->count) { 2063 for (i = 0; i < vce_table->count; i++) 2064 vce_table->entries[i].v = 2065 kv_convert_8bit_index_to_voltage(adev, 2066 vce_table->entries[i].v); 2067 } 2068 2069 if (samu_table->count) { 2070 for (i = 0; i < samu_table->count; i++) 2071 samu_table->entries[i].v = 2072 kv_convert_8bit_index_to_voltage(adev, 2073 samu_table->entries[i].v); 2074 } 2075 2076 if (acp_table->count) { 2077 for (i = 0; i < acp_table->count; i++) 2078 acp_table->entries[i].v = 2079 kv_convert_8bit_index_to_voltage(adev, 2080 acp_table->entries[i].v); 2081 } 2082 2083 } 2084 2085 static void kv_construct_boot_state(struct amdgpu_device *adev) 2086 { 2087 struct kv_power_info *pi = kv_get_pi(adev); 2088 2089 pi->boot_pl.sclk = pi->sys_info.bootup_sclk; 2090 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; 2091 pi->boot_pl.ds_divider_index = 0; 2092 pi->boot_pl.ss_divider_index = 0; 2093 pi->boot_pl.allow_gnb_slow = 1; 2094 pi->boot_pl.force_nbp_state = 0; 2095 pi->boot_pl.display_wm = 0; 2096 pi->boot_pl.vce_wm = 0; 2097 } 2098 2099 static int kv_force_dpm_highest(struct amdgpu_device *adev) 2100 { 2101 int ret; 2102 u32 enable_mask, i; 2103 2104 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2105 if (ret) 2106 return ret; 2107 2108 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 2109 if (enable_mask & (1 << i)) 2110 break; 2111 } 2112 2113 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2114 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2115 else 2116 return kv_set_enabled_level(adev, i); 2117 } 2118 2119 static int kv_force_dpm_lowest(struct amdgpu_device *adev) 2120 { 2121 int ret; 2122 u32 enable_mask, i; 2123 2124 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); 2125 if (ret) 2126 return ret; 2127 2128 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2129 if (enable_mask & (1 << i)) 2130 break; 2131 } 2132 2133 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2134 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); 2135 else 2136 return kv_set_enabled_level(adev, i); 2137 } 2138 2139 static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2140 u32 sclk, u32 min_sclk_in_sr) 2141 { 2142 struct kv_power_info *pi = kv_get_pi(adev); 2143 u32 i; 2144 u32 temp; 2145 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); 2146 2147 if (sclk < min) 2148 return 0; 2149 2150 if (!pi->caps_sclk_ds) 2151 return 0; 2152 2153 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2154 temp = sclk >> i; 2155 if (temp >= min) 2156 break; 2157 } 2158 2159 return (u8)i; 2160 } 2161 2162 static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) 2163 { 2164 struct kv_power_info *pi = kv_get_pi(adev); 2165 struct amdgpu_clock_voltage_dependency_table *table = 2166 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2167 int i; 2168 2169 if (table && table->count) { 2170 for (i = table->count - 1; i >= 0; i--) { 2171 if (pi->high_voltage_t && 2172 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= 2173 pi->high_voltage_t)) { 2174 *limit = i; 2175 return 0; 2176 } 2177 } 2178 } else { 2179 struct sumo_sclk_voltage_mapping_table *table = 2180 &pi->sys_info.sclk_voltage_mapping_table; 2181 2182 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { 2183 if (pi->high_voltage_t && 2184 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= 2185 pi->high_voltage_t)) { 2186 *limit = i; 2187 return 0; 2188 } 2189 } 2190 } 2191 2192 *limit = 0; 2193 return 0; 2194 } 2195 2196 static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, 2197 struct amdgpu_ps *new_rps, 2198 struct amdgpu_ps *old_rps) 2199 { 2200 struct kv_ps *ps = kv_get_ps(new_rps); 2201 struct kv_power_info *pi = kv_get_pi(adev); 2202 u32 min_sclk = 10000; /* ??? */ 2203 u32 sclk, mclk = 0; 2204 int i, limit; 2205 bool force_high; 2206 struct amdgpu_clock_voltage_dependency_table *table = 2207 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2208 u32 stable_p_state_sclk = 0; 2209 struct amdgpu_clock_and_voltage_limits *max_limits = 2210 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2211 2212 if (new_rps->vce_active) { 2213 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; 2214 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; 2215 } else { 2216 new_rps->evclk = 0; 2217 new_rps->ecclk = 0; 2218 } 2219 2220 mclk = max_limits->mclk; 2221 sclk = min_sclk; 2222 2223 if (pi->caps_stable_p_state) { 2224 stable_p_state_sclk = (max_limits->sclk * 75) / 100; 2225 2226 for (i = table->count - 1; i >= 0; i--) { 2227 if (stable_p_state_sclk >= table->entries[i].clk) { 2228 stable_p_state_sclk = table->entries[i].clk; 2229 break; 2230 } 2231 } 2232 2233 if (i > 0) 2234 stable_p_state_sclk = table->entries[0].clk; 2235 2236 sclk = stable_p_state_sclk; 2237 } 2238 2239 if (new_rps->vce_active) { 2240 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) 2241 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; 2242 } 2243 2244 ps->need_dfs_bypass = true; 2245 2246 for (i = 0; i < ps->num_levels; i++) { 2247 if (ps->levels[i].sclk < sclk) 2248 ps->levels[i].sclk = sclk; 2249 } 2250 2251 if (table && table->count) { 2252 for (i = 0; i < ps->num_levels; i++) { 2253 if (pi->high_voltage_t && 2254 (pi->high_voltage_t < 2255 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2256 kv_get_high_voltage_limit(adev, &limit); 2257 ps->levels[i].sclk = table->entries[limit].clk; 2258 } 2259 } 2260 } else { 2261 struct sumo_sclk_voltage_mapping_table *table = 2262 &pi->sys_info.sclk_voltage_mapping_table; 2263 2264 for (i = 0; i < ps->num_levels; i++) { 2265 if (pi->high_voltage_t && 2266 (pi->high_voltage_t < 2267 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { 2268 kv_get_high_voltage_limit(adev, &limit); 2269 ps->levels[i].sclk = table->entries[limit].sclk_frequency; 2270 } 2271 } 2272 } 2273 2274 if (pi->caps_stable_p_state) { 2275 for (i = 0; i < ps->num_levels; i++) { 2276 ps->levels[i].sclk = stable_p_state_sclk; 2277 } 2278 } 2279 2280 pi->video_start = new_rps->dclk || new_rps->vclk || 2281 new_rps->evclk || new_rps->ecclk; 2282 2283 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 2284 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 2285 pi->battery_state = true; 2286 else 2287 pi->battery_state = false; 2288 2289 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2290 ps->dpm0_pg_nb_ps_lo = 0x1; 2291 ps->dpm0_pg_nb_ps_hi = 0x0; 2292 ps->dpmx_nb_ps_lo = 0x1; 2293 ps->dpmx_nb_ps_hi = 0x0; 2294 } else { 2295 ps->dpm0_pg_nb_ps_lo = 0x3; 2296 ps->dpm0_pg_nb_ps_hi = 0x0; 2297 ps->dpmx_nb_ps_lo = 0x3; 2298 ps->dpmx_nb_ps_hi = 0x0; 2299 2300 if (pi->sys_info.nb_dpm_enable) { 2301 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2302 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2303 pi->disable_nb_ps3_in_battery; 2304 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2305 ps->dpm0_pg_nb_ps_hi = 0x2; 2306 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; 2307 ps->dpmx_nb_ps_hi = 0x2; 2308 } 2309 } 2310 } 2311 2312 static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, 2313 u32 index, bool enable) 2314 { 2315 struct kv_power_info *pi = kv_get_pi(adev); 2316 2317 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; 2318 } 2319 2320 static int kv_calculate_ds_divider(struct amdgpu_device *adev) 2321 { 2322 struct kv_power_info *pi = kv_get_pi(adev); 2323 u32 sclk_in_sr = 10000; /* ??? */ 2324 u32 i; 2325 2326 if (pi->lowest_valid > pi->highest_valid) 2327 return -EINVAL; 2328 2329 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2330 pi->graphics_level[i].DeepSleepDivId = 2331 kv_get_sleep_divider_id_from_clock(adev, 2332 be32_to_cpu(pi->graphics_level[i].SclkFrequency), 2333 sclk_in_sr); 2334 } 2335 return 0; 2336 } 2337 2338 static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) 2339 { 2340 struct kv_power_info *pi = kv_get_pi(adev); 2341 u32 i; 2342 bool force_high; 2343 struct amdgpu_clock_and_voltage_limits *max_limits = 2344 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 2345 u32 mclk = max_limits->mclk; 2346 2347 if (pi->lowest_valid > pi->highest_valid) 2348 return -EINVAL; 2349 2350 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { 2351 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2352 pi->graphics_level[i].GnbSlow = 1; 2353 pi->graphics_level[i].ForceNbPs1 = 0; 2354 pi->graphics_level[i].UpH = 0; 2355 } 2356 2357 if (!pi->sys_info.nb_dpm_enable) 2358 return 0; 2359 2360 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2361 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2362 2363 if (force_high) { 2364 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2365 pi->graphics_level[i].GnbSlow = 0; 2366 } else { 2367 if (pi->battery_state) 2368 pi->graphics_level[0].ForceNbPs1 = 1; 2369 2370 pi->graphics_level[1].GnbSlow = 0; 2371 pi->graphics_level[2].GnbSlow = 0; 2372 pi->graphics_level[3].GnbSlow = 0; 2373 pi->graphics_level[4].GnbSlow = 0; 2374 } 2375 } else { 2376 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2377 pi->graphics_level[i].GnbSlow = 1; 2378 pi->graphics_level[i].ForceNbPs1 = 0; 2379 pi->graphics_level[i].UpH = 0; 2380 } 2381 2382 if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2383 pi->graphics_level[pi->lowest_valid].UpH = 0x28; 2384 pi->graphics_level[pi->lowest_valid].GnbSlow = 0; 2385 if (pi->lowest_valid != pi->highest_valid) 2386 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; 2387 } 2388 } 2389 return 0; 2390 } 2391 2392 static int kv_calculate_dpm_settings(struct amdgpu_device *adev) 2393 { 2394 struct kv_power_info *pi = kv_get_pi(adev); 2395 u32 i; 2396 2397 if (pi->lowest_valid > pi->highest_valid) 2398 return -EINVAL; 2399 2400 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2401 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; 2402 2403 return 0; 2404 } 2405 2406 static void kv_init_graphics_levels(struct amdgpu_device *adev) 2407 { 2408 struct kv_power_info *pi = kv_get_pi(adev); 2409 u32 i; 2410 struct amdgpu_clock_voltage_dependency_table *table = 2411 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2412 2413 if (table && table->count) { 2414 u32 vid_2bit; 2415 2416 pi->graphics_dpm_level_count = 0; 2417 for (i = 0; i < table->count; i++) { 2418 if (pi->high_voltage_t && 2419 (pi->high_voltage_t < 2420 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) 2421 break; 2422 2423 kv_set_divider_value(adev, i, table->entries[i].clk); 2424 vid_2bit = kv_convert_vid7_to_vid2(adev, 2425 &pi->sys_info.vid_mapping_table, 2426 table->entries[i].v); 2427 kv_set_vid(adev, i, vid_2bit); 2428 kv_set_at(adev, i, pi->at[i]); 2429 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2430 pi->graphics_dpm_level_count++; 2431 } 2432 } else { 2433 struct sumo_sclk_voltage_mapping_table *table = 2434 &pi->sys_info.sclk_voltage_mapping_table; 2435 2436 pi->graphics_dpm_level_count = 0; 2437 for (i = 0; i < table->num_max_dpm_entries; i++) { 2438 if (pi->high_voltage_t && 2439 pi->high_voltage_t < 2440 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) 2441 break; 2442 2443 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); 2444 kv_set_vid(adev, i, table->entries[i].vid_2bit); 2445 kv_set_at(adev, i, pi->at[i]); 2446 kv_dpm_power_level_enabled_for_throttle(adev, i, true); 2447 pi->graphics_dpm_level_count++; 2448 } 2449 } 2450 2451 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) 2452 kv_dpm_power_level_enable(adev, i, false); 2453 } 2454 2455 static void kv_enable_new_levels(struct amdgpu_device *adev) 2456 { 2457 struct kv_power_info *pi = kv_get_pi(adev); 2458 u32 i; 2459 2460 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { 2461 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2462 kv_dpm_power_level_enable(adev, i, true); 2463 } 2464 } 2465 2466 static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) 2467 { 2468 u32 new_mask = (1 << level); 2469 2470 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2471 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2472 new_mask); 2473 } 2474 2475 static int kv_set_enabled_levels(struct amdgpu_device *adev) 2476 { 2477 struct kv_power_info *pi = kv_get_pi(adev); 2478 u32 i, new_mask = 0; 2479 2480 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) 2481 new_mask |= (1 << i); 2482 2483 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, 2484 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2485 new_mask); 2486 } 2487 2488 static void kv_program_nbps_index_settings(struct amdgpu_device *adev, 2489 struct amdgpu_ps *new_rps) 2490 { 2491 struct kv_ps *new_ps = kv_get_ps(new_rps); 2492 struct kv_power_info *pi = kv_get_pi(adev); 2493 u32 nbdpmconfig1; 2494 2495 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) 2496 return; 2497 2498 if (pi->sys_info.nb_dpm_enable) { 2499 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); 2500 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | 2501 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | 2502 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | 2503 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); 2504 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | 2505 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | 2506 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | 2507 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); 2508 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); 2509 } 2510 } 2511 2512 static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, 2513 int min_temp, int max_temp) 2514 { 2515 int low_temp = 0 * 1000; 2516 int high_temp = 255 * 1000; 2517 u32 tmp; 2518 2519 if (low_temp < min_temp) 2520 low_temp = min_temp; 2521 if (high_temp > max_temp) 2522 high_temp = max_temp; 2523 if (high_temp < low_temp) { 2524 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 2525 return -EINVAL; 2526 } 2527 2528 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 2529 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | 2530 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); 2531 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | 2532 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); 2533 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); 2534 2535 adev->pm.dpm.thermal.min_temp = low_temp; 2536 adev->pm.dpm.thermal.max_temp = high_temp; 2537 2538 return 0; 2539 } 2540 2541 union igp_info { 2542 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 2543 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 2544 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; 2545 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; 2546 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; 2547 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; 2548 }; 2549 2550 static int kv_parse_sys_info_table(struct amdgpu_device *adev) 2551 { 2552 struct kv_power_info *pi = kv_get_pi(adev); 2553 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2554 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 2555 union igp_info *igp_info; 2556 u8 frev, crev; 2557 u16 data_offset; 2558 int i; 2559 2560 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2561 &frev, &crev, &data_offset)) { 2562 igp_info = (union igp_info *)(mode_info->atom_context->bios + 2563 data_offset); 2564 2565 if (crev != 8) { 2566 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); 2567 return -EINVAL; 2568 } 2569 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); 2570 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); 2571 pi->sys_info.bootup_nb_voltage_index = 2572 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); 2573 if (igp_info->info_8.ucHtcTmpLmt == 0) 2574 pi->sys_info.htc_tmp_lmt = 203; 2575 else 2576 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; 2577 if (igp_info->info_8.ucHtcHystLmt == 0) 2578 pi->sys_info.htc_hyst_lmt = 5; 2579 else 2580 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; 2581 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { 2582 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); 2583 } 2584 2585 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) 2586 pi->sys_info.nb_dpm_enable = true; 2587 else 2588 pi->sys_info.nb_dpm_enable = false; 2589 2590 for (i = 0; i < KV_NUM_NBPSTATES; i++) { 2591 pi->sys_info.nbp_memory_clock[i] = 2592 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); 2593 pi->sys_info.nbp_n_clock[i] = 2594 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2595 } 2596 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2597 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2598 pi->caps_enable_dfs_bypass = true; 2599 2600 sumo_construct_sclk_voltage_mapping_table(adev, 2601 &pi->sys_info.sclk_voltage_mapping_table, 2602 igp_info->info_8.sAvail_SCLK); 2603 2604 sumo_construct_vid_mapping_table(adev, 2605 &pi->sys_info.vid_mapping_table, 2606 igp_info->info_8.sAvail_SCLK); 2607 2608 kv_construct_max_power_limits_table(adev, 2609 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 2610 } 2611 return 0; 2612 } 2613 2614 union power_info { 2615 struct _ATOM_POWERPLAY_INFO info; 2616 struct _ATOM_POWERPLAY_INFO_V2 info_2; 2617 struct _ATOM_POWERPLAY_INFO_V3 info_3; 2618 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 2619 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 2620 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 2621 }; 2622 2623 union pplib_clock_info { 2624 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 2625 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 2626 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 2627 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 2628 }; 2629 2630 union pplib_power_state { 2631 struct _ATOM_PPLIB_STATE v1; 2632 struct _ATOM_PPLIB_STATE_V2 v2; 2633 }; 2634 2635 static void kv_patch_boot_state(struct amdgpu_device *adev, 2636 struct kv_ps *ps) 2637 { 2638 struct kv_power_info *pi = kv_get_pi(adev); 2639 2640 ps->num_levels = 1; 2641 ps->levels[0] = pi->boot_pl; 2642 } 2643 2644 static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, 2645 struct amdgpu_ps *rps, 2646 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 2647 u8 table_rev) 2648 { 2649 struct kv_ps *ps = kv_get_ps(rps); 2650 2651 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 2652 rps->class = le16_to_cpu(non_clock_info->usClassification); 2653 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 2654 2655 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2656 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2657 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2658 } else { 2659 rps->vclk = 0; 2660 rps->dclk = 0; 2661 } 2662 2663 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 2664 adev->pm.dpm.boot_ps = rps; 2665 kv_patch_boot_state(adev, ps); 2666 } 2667 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 2668 adev->pm.dpm.uvd_ps = rps; 2669 } 2670 2671 static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, 2672 struct amdgpu_ps *rps, int index, 2673 union pplib_clock_info *clock_info) 2674 { 2675 struct kv_power_info *pi = kv_get_pi(adev); 2676 struct kv_ps *ps = kv_get_ps(rps); 2677 struct kv_pl *pl = &ps->levels[index]; 2678 u32 sclk; 2679 2680 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2681 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2682 pl->sclk = sclk; 2683 pl->vddc_index = clock_info->sumo.vddcIndex; 2684 2685 ps->num_levels = index + 1; 2686 2687 if (pi->caps_sclk_ds) { 2688 pl->ds_divider_index = 5; 2689 pl->ss_divider_index = 5; 2690 } 2691 } 2692 2693 static int kv_parse_power_table(struct amdgpu_device *adev) 2694 { 2695 struct amdgpu_mode_info *mode_info = &adev->mode_info; 2696 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 2697 union pplib_power_state *power_state; 2698 int i, j, k, non_clock_array_index, clock_array_index; 2699 union pplib_clock_info *clock_info; 2700 struct _StateArray *state_array; 2701 struct _ClockInfoArray *clock_info_array; 2702 struct _NonClockInfoArray *non_clock_info_array; 2703 union power_info *power_info; 2704 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 2705 u16 data_offset; 2706 u8 frev, crev; 2707 u8 *power_state_offset; 2708 struct kv_ps *ps; 2709 2710 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 2711 &frev, &crev, &data_offset)) 2712 return -EINVAL; 2713 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2714 2715 amdgpu_add_thermal_controller(adev); 2716 2717 state_array = (struct _StateArray *) 2718 (mode_info->atom_context->bios + data_offset + 2719 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 2720 clock_info_array = (struct _ClockInfoArray *) 2721 (mode_info->atom_context->bios + data_offset + 2722 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 2723 non_clock_info_array = (struct _NonClockInfoArray *) 2724 (mode_info->atom_context->bios + data_offset + 2725 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 2726 2727 adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, 2728 sizeof(struct amdgpu_ps), 2729 GFP_KERNEL); 2730 if (!adev->pm.dpm.ps) 2731 return -ENOMEM; 2732 power_state_offset = (u8 *)state_array->states; 2733 for (i = 0; i < state_array->ucNumEntries; i++) { 2734 u8 *idx; 2735 power_state = (union pplib_power_state *)power_state_offset; 2736 non_clock_array_index = power_state->v2.nonClockInfoIndex; 2737 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2738 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 2739 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); 2740 if (ps == NULL) 2741 return -ENOMEM; 2742 adev->pm.dpm.ps[i].ps_priv = ps; 2743 k = 0; 2744 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 2745 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 2746 clock_array_index = idx[j]; 2747 if (clock_array_index >= clock_info_array->ucNumEntries) 2748 continue; 2749 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) 2750 break; 2751 clock_info = (union pplib_clock_info *) 2752 ((u8 *)&clock_info_array->clockInfo[0] + 2753 (clock_array_index * clock_info_array->ucEntrySize)); 2754 kv_parse_pplib_clock_info(adev, 2755 &adev->pm.dpm.ps[i], k, 2756 clock_info); 2757 k++; 2758 } 2759 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 2760 non_clock_info, 2761 non_clock_info_array->ucEntrySize); 2762 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 2763 } 2764 adev->pm.dpm.num_ps = state_array->ucNumEntries; 2765 2766 /* fill in the vce power states */ 2767 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 2768 u32 sclk; 2769 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 2770 clock_info = (union pplib_clock_info *) 2771 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 2772 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); 2773 sclk |= clock_info->sumo.ucEngineClockHigh << 16; 2774 adev->pm.dpm.vce_states[i].sclk = sclk; 2775 adev->pm.dpm.vce_states[i].mclk = 0; 2776 } 2777 2778 return 0; 2779 } 2780 2781 static int kv_dpm_init(struct amdgpu_device *adev) 2782 { 2783 struct kv_power_info *pi; 2784 int ret, i; 2785 2786 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); 2787 if (pi == NULL) 2788 return -ENOMEM; 2789 adev->pm.dpm.priv = pi; 2790 2791 ret = amdgpu_get_platform_caps(adev); 2792 if (ret) 2793 return ret; 2794 2795 ret = amdgpu_parse_extended_power_table(adev); 2796 if (ret) 2797 return ret; 2798 2799 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 2800 pi->at[i] = TRINITY_AT_DFLT; 2801 2802 pi->sram_end = SMC_RAM_END; 2803 2804 pi->enable_nb_dpm = true; 2805 2806 pi->caps_power_containment = true; 2807 pi->caps_cac = true; 2808 pi->enable_didt = false; 2809 if (pi->enable_didt) { 2810 pi->caps_sq_ramping = true; 2811 pi->caps_db_ramping = true; 2812 pi->caps_td_ramping = true; 2813 pi->caps_tcp_ramping = true; 2814 } 2815 2816 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) 2817 pi->caps_sclk_ds = true; 2818 else 2819 pi->caps_sclk_ds = false; 2820 2821 pi->enable_auto_thermal_throttling = true; 2822 pi->disable_nb_ps3_in_battery = false; 2823 if (amdgpu_bapm == 0) 2824 pi->bapm_enable = false; 2825 else 2826 pi->bapm_enable = true; 2827 pi->voltage_drop_t = 0; 2828 pi->caps_sclk_throttle_low_notification = false; 2829 pi->caps_fps = false; /* true? */ 2830 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; 2831 pi->caps_uvd_dpm = true; 2832 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; 2833 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; 2834 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; 2835 pi->caps_stable_p_state = false; 2836 2837 ret = kv_parse_sys_info_table(adev); 2838 if (ret) 2839 return ret; 2840 2841 kv_patch_voltage_values(adev); 2842 kv_construct_boot_state(adev); 2843 2844 ret = kv_parse_power_table(adev); 2845 if (ret) 2846 return ret; 2847 2848 pi->enable_dpm = true; 2849 2850 return 0; 2851 } 2852 2853 static void 2854 kv_dpm_debugfs_print_current_performance_level(void *handle, 2855 struct seq_file *m) 2856 { 2857 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2858 struct kv_power_info *pi = kv_get_pi(adev); 2859 u32 current_index = 2860 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 2861 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 2862 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 2863 u32 sclk, tmp; 2864 u16 vddc; 2865 2866 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2867 seq_printf(m, "invalid dpm profile %d\n", current_index); 2868 } else { 2869 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2870 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 2871 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 2872 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 2873 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); 2874 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); 2875 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); 2876 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2877 current_index, sclk, vddc); 2878 } 2879 } 2880 2881 static void 2882 kv_dpm_print_power_state(void *handle, void *request_ps) 2883 { 2884 int i; 2885 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 2886 struct kv_ps *ps = kv_get_ps(rps); 2887 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2888 2889 amdgpu_dpm_print_class_info(rps->class, rps->class2); 2890 amdgpu_dpm_print_cap_info(rps->caps); 2891 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2892 for (i = 0; i < ps->num_levels; i++) { 2893 struct kv_pl *pl = &ps->levels[i]; 2894 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2895 i, pl->sclk, 2896 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); 2897 } 2898 amdgpu_dpm_print_ps_status(adev, rps); 2899 } 2900 2901 static void kv_dpm_fini(struct amdgpu_device *adev) 2902 { 2903 int i; 2904 2905 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2906 kfree(adev->pm.dpm.ps[i].ps_priv); 2907 } 2908 kfree(adev->pm.dpm.ps); 2909 kfree(adev->pm.dpm.priv); 2910 amdgpu_free_extended_power_table(adev); 2911 } 2912 2913 static void kv_dpm_display_configuration_changed(void *handle) 2914 { 2915 2916 } 2917 2918 static u32 kv_dpm_get_sclk(void *handle, bool low) 2919 { 2920 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2921 struct kv_power_info *pi = kv_get_pi(adev); 2922 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2923 2924 if (low) 2925 return requested_state->levels[0].sclk; 2926 else 2927 return requested_state->levels[requested_state->num_levels - 1].sclk; 2928 } 2929 2930 static u32 kv_dpm_get_mclk(void *handle, bool low) 2931 { 2932 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2933 struct kv_power_info *pi = kv_get_pi(adev); 2934 2935 return pi->sys_info.bootup_uma_clk; 2936 } 2937 2938 /* get temperature in millidegrees */ 2939 static int kv_dpm_get_temp(void *handle) 2940 { 2941 u32 temp; 2942 int actual_temp = 0; 2943 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2944 2945 temp = RREG32_SMC(0xC0300E0C); 2946 2947 if (temp) 2948 actual_temp = (temp / 8) - 49; 2949 else 2950 actual_temp = 0; 2951 2952 actual_temp = actual_temp * 1000; 2953 2954 return actual_temp; 2955 } 2956 2957 static int kv_dpm_early_init(void *handle) 2958 { 2959 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2960 2961 adev->powerplay.pp_funcs = &kv_dpm_funcs; 2962 adev->powerplay.pp_handle = adev; 2963 kv_dpm_set_irq_funcs(adev); 2964 2965 return 0; 2966 } 2967 2968 static int kv_dpm_late_init(void *handle) 2969 { 2970 /* powerdown unused blocks for now */ 2971 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2972 2973 if (!adev->pm.dpm_enabled) 2974 return 0; 2975 2976 kv_dpm_powergate_acp(adev, true); 2977 kv_dpm_powergate_samu(adev, true); 2978 2979 return 0; 2980 } 2981 2982 static int kv_dpm_sw_init(void *handle) 2983 { 2984 int ret; 2985 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2986 2987 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, 2988 &adev->pm.dpm.thermal.irq); 2989 if (ret) 2990 return ret; 2991 2992 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, 2993 &adev->pm.dpm.thermal.irq); 2994 if (ret) 2995 return ret; 2996 2997 /* default to balanced state */ 2998 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 2999 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 3000 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; 3001 adev->pm.default_sclk = adev->clock.default_sclk; 3002 adev->pm.default_mclk = adev->clock.default_mclk; 3003 adev->pm.current_sclk = adev->clock.default_sclk; 3004 adev->pm.current_mclk = adev->clock.default_mclk; 3005 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 3006 3007 if (amdgpu_dpm == 0) 3008 return 0; 3009 3010 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 3011 ret = kv_dpm_init(adev); 3012 if (ret) 3013 goto dpm_failed; 3014 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3015 if (amdgpu_dpm == 1) 3016 amdgpu_pm_print_power_states(adev); 3017 DRM_INFO("amdgpu: dpm initialized\n"); 3018 3019 return 0; 3020 3021 dpm_failed: 3022 kv_dpm_fini(adev); 3023 DRM_ERROR("amdgpu: dpm initialization failed\n"); 3024 return ret; 3025 } 3026 3027 static int kv_dpm_sw_fini(void *handle) 3028 { 3029 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3030 3031 flush_work(&adev->pm.dpm.thermal.work); 3032 3033 kv_dpm_fini(adev); 3034 3035 return 0; 3036 } 3037 3038 static int kv_dpm_hw_init(void *handle) 3039 { 3040 int ret; 3041 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3042 3043 if (!amdgpu_dpm) 3044 return 0; 3045 3046 kv_dpm_setup_asic(adev); 3047 ret = kv_dpm_enable(adev); 3048 if (ret) 3049 adev->pm.dpm_enabled = false; 3050 else 3051 adev->pm.dpm_enabled = true; 3052 amdgpu_legacy_dpm_compute_clocks(adev); 3053 return ret; 3054 } 3055 3056 static int kv_dpm_hw_fini(void *handle) 3057 { 3058 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3059 3060 if (adev->pm.dpm_enabled) 3061 kv_dpm_disable(adev); 3062 3063 return 0; 3064 } 3065 3066 static int kv_dpm_suspend(void *handle) 3067 { 3068 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3069 3070 if (adev->pm.dpm_enabled) { 3071 /* disable dpm */ 3072 kv_dpm_disable(adev); 3073 /* reset the power state */ 3074 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 3075 } 3076 return 0; 3077 } 3078 3079 static int kv_dpm_resume(void *handle) 3080 { 3081 int ret; 3082 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3083 3084 if (adev->pm.dpm_enabled) { 3085 /* asic init will reset to the boot state */ 3086 kv_dpm_setup_asic(adev); 3087 ret = kv_dpm_enable(adev); 3088 if (ret) 3089 adev->pm.dpm_enabled = false; 3090 else 3091 adev->pm.dpm_enabled = true; 3092 if (adev->pm.dpm_enabled) 3093 amdgpu_legacy_dpm_compute_clocks(adev); 3094 } 3095 return 0; 3096 } 3097 3098 static bool kv_dpm_is_idle(void *handle) 3099 { 3100 return true; 3101 } 3102 3103 static int kv_dpm_wait_for_idle(void *handle) 3104 { 3105 return 0; 3106 } 3107 3108 3109 static int kv_dpm_soft_reset(void *handle) 3110 { 3111 return 0; 3112 } 3113 3114 static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, 3115 struct amdgpu_irq_src *src, 3116 unsigned type, 3117 enum amdgpu_interrupt_state state) 3118 { 3119 u32 cg_thermal_int; 3120 3121 switch (type) { 3122 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 3123 switch (state) { 3124 case AMDGPU_IRQ_STATE_DISABLE: 3125 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3126 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3127 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3128 break; 3129 case AMDGPU_IRQ_STATE_ENABLE: 3130 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3131 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 3132 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3133 break; 3134 default: 3135 break; 3136 } 3137 break; 3138 3139 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 3140 switch (state) { 3141 case AMDGPU_IRQ_STATE_DISABLE: 3142 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3143 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3144 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3145 break; 3146 case AMDGPU_IRQ_STATE_ENABLE: 3147 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); 3148 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 3149 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); 3150 break; 3151 default: 3152 break; 3153 } 3154 break; 3155 3156 default: 3157 break; 3158 } 3159 return 0; 3160 } 3161 3162 static int kv_dpm_process_interrupt(struct amdgpu_device *adev, 3163 struct amdgpu_irq_src *source, 3164 struct amdgpu_iv_entry *entry) 3165 { 3166 bool queue_thermal = false; 3167 3168 if (entry == NULL) 3169 return -EINVAL; 3170 3171 switch (entry->src_id) { 3172 case 230: /* thermal low to high */ 3173 DRM_DEBUG("IH: thermal low to high\n"); 3174 adev->pm.dpm.thermal.high_to_low = false; 3175 queue_thermal = true; 3176 break; 3177 case 231: /* thermal high to low */ 3178 DRM_DEBUG("IH: thermal high to low\n"); 3179 adev->pm.dpm.thermal.high_to_low = true; 3180 queue_thermal = true; 3181 break; 3182 default: 3183 break; 3184 } 3185 3186 if (queue_thermal) 3187 schedule_work(&adev->pm.dpm.thermal.work); 3188 3189 return 0; 3190 } 3191 3192 static int kv_dpm_set_clockgating_state(void *handle, 3193 enum amd_clockgating_state state) 3194 { 3195 return 0; 3196 } 3197 3198 static int kv_dpm_set_powergating_state(void *handle, 3199 enum amd_powergating_state state) 3200 { 3201 return 0; 3202 } 3203 3204 static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, 3205 const struct kv_pl *kv_cpl2) 3206 { 3207 return ((kv_cpl1->sclk == kv_cpl2->sclk) && 3208 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && 3209 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && 3210 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); 3211 } 3212 3213 static int kv_check_state_equal(void *handle, 3214 void *current_ps, 3215 void *request_ps, 3216 bool *equal) 3217 { 3218 struct kv_ps *kv_cps; 3219 struct kv_ps *kv_rps; 3220 int i; 3221 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; 3222 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; 3223 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3224 3225 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) 3226 return -EINVAL; 3227 3228 kv_cps = kv_get_ps(cps); 3229 kv_rps = kv_get_ps(rps); 3230 3231 if (kv_cps == NULL) { 3232 *equal = false; 3233 return 0; 3234 } 3235 3236 if (kv_cps->num_levels != kv_rps->num_levels) { 3237 *equal = false; 3238 return 0; 3239 } 3240 3241 for (i = 0; i < kv_cps->num_levels; i++) { 3242 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), 3243 &(kv_rps->levels[i]))) { 3244 *equal = false; 3245 return 0; 3246 } 3247 } 3248 3249 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 3250 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); 3251 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); 3252 3253 return 0; 3254 } 3255 3256 static int kv_dpm_read_sensor(void *handle, int idx, 3257 void *value, int *size) 3258 { 3259 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3260 struct kv_power_info *pi = kv_get_pi(adev); 3261 uint32_t sclk; 3262 u32 pl_index = 3263 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 3264 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 3265 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 3266 3267 /* size must be at least 4 bytes for all sensors */ 3268 if (*size < 4) 3269 return -EINVAL; 3270 3271 switch (idx) { 3272 case AMDGPU_PP_SENSOR_GFX_SCLK: 3273 if (pl_index < SMU__NUM_SCLK_DPM_STATE) { 3274 sclk = be32_to_cpu( 3275 pi->graphics_level[pl_index].SclkFrequency); 3276 *((uint32_t *)value) = sclk; 3277 *size = 4; 3278 return 0; 3279 } 3280 return -EINVAL; 3281 case AMDGPU_PP_SENSOR_GPU_TEMP: 3282 *((uint32_t *)value) = kv_dpm_get_temp(adev); 3283 *size = 4; 3284 return 0; 3285 default: 3286 return -EOPNOTSUPP; 3287 } 3288 } 3289 3290 static int kv_set_powergating_by_smu(void *handle, 3291 uint32_t block_type, bool gate) 3292 { 3293 switch (block_type) { 3294 case AMD_IP_BLOCK_TYPE_UVD: 3295 kv_dpm_powergate_uvd(handle, gate); 3296 break; 3297 case AMD_IP_BLOCK_TYPE_VCE: 3298 kv_dpm_powergate_vce(handle, gate); 3299 break; 3300 default: 3301 break; 3302 } 3303 return 0; 3304 } 3305 3306 static const struct amd_ip_funcs kv_dpm_ip_funcs = { 3307 .name = "kv_dpm", 3308 .early_init = kv_dpm_early_init, 3309 .late_init = kv_dpm_late_init, 3310 .sw_init = kv_dpm_sw_init, 3311 .sw_fini = kv_dpm_sw_fini, 3312 .hw_init = kv_dpm_hw_init, 3313 .hw_fini = kv_dpm_hw_fini, 3314 .suspend = kv_dpm_suspend, 3315 .resume = kv_dpm_resume, 3316 .is_idle = kv_dpm_is_idle, 3317 .wait_for_idle = kv_dpm_wait_for_idle, 3318 .soft_reset = kv_dpm_soft_reset, 3319 .set_clockgating_state = kv_dpm_set_clockgating_state, 3320 .set_powergating_state = kv_dpm_set_powergating_state, 3321 .dump_ip_state = NULL, 3322 .print_ip_state = NULL, 3323 }; 3324 3325 const struct amdgpu_ip_block_version kv_smu_ip_block = { 3326 .type = AMD_IP_BLOCK_TYPE_SMC, 3327 .major = 1, 3328 .minor = 0, 3329 .rev = 0, 3330 .funcs = &kv_dpm_ip_funcs, 3331 }; 3332 3333 static const struct amd_pm_funcs kv_dpm_funcs = { 3334 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3335 .set_power_state = &kv_dpm_set_power_state, 3336 .post_set_power_state = &kv_dpm_post_set_power_state, 3337 .display_configuration_changed = &kv_dpm_display_configuration_changed, 3338 .get_sclk = &kv_dpm_get_sclk, 3339 .get_mclk = &kv_dpm_get_mclk, 3340 .print_power_state = &kv_dpm_print_power_state, 3341 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 3342 .force_performance_level = &kv_dpm_force_performance_level, 3343 .set_powergating_by_smu = kv_set_powergating_by_smu, 3344 .enable_bapm = &kv_dpm_enable_bapm, 3345 .get_vce_clock_state = amdgpu_get_vce_clock_state, 3346 .check_state_equal = kv_check_state_equal, 3347 .read_sensor = &kv_dpm_read_sensor, 3348 .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, 3349 }; 3350 3351 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { 3352 .set = kv_dpm_set_interrupt_state, 3353 .process = kv_dpm_process_interrupt, 3354 }; 3355 3356 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) 3357 { 3358 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 3359 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; 3360 } 3361