/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_13_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_13_0_2_offset.h"
#include "asic_reg/thm/thm_13_0_2_sh_mask.h"
#include "asic_reg/mp/mp_13_0_2_offset.h"
#include "asic_reg/mp/mp_13_0_2_sh_mask.h"
#include "asic_reg/smuio/smuio_13_0_2_offset.h"
#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");

#define mmMP1_SMN_C2PMSG_66		0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_82		0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX	0

#define mmMP1_SMN_C2PMSG_90		0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX	0

#define SMU13_VOLTAGE_SCALE 4

#define LINK_WIDTH_MAX	6
#define LINK_SPEED_MAX	3

#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK	0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT	0x4
#define smnPCIE_LC_SPEED_CNTL		0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK	0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT	0x5

#define ENABLE_IMU_ARG_GFXOFF_ENABLE	1

static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};

const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

int smu_v13_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char ucode_prefix[15];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
	err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s.bin", ucode_prefix);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v13_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

int smu_v13_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;
#endif

	return 0;
}

int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)))
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

int smu_v13_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
		break;
	default:
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		break;
	}

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v13_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	adev->pm.fw_version = smu_version;

	/* only for dGPU w/ SMU13 */
	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 *    to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 *    only on the paired driver.
	 * Considering the above, we just leave the user an informational
	 * message instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
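
/*
 * pptable_id == 0 means the VBIOS carries the pptable; a non-zero id selects
 * one of the soft pptables packed into the SMU firmware image (parsed by the
 * v2_0/v2_1 helpers above).
 */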
int smu_v13_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v13_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Aldebaran does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err5_out;
	}

	return 0;

err5_out:
	kfree(smu_table->user_overdrive_table);
err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v13_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_policies);
	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_policies = NULL;
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v13_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_13_0_power_context);

	return 0;
}

int smu_v13_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}
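
/*
 * smu_v13_0_notify_memory_pool_location() passes the 64-bit pool address as
 * an upper/lower 32-bit pair (upper_32_bits()/lower_32_bits()), since the SMU
 * message interface only carries 32-bit arguments.
 */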
int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v13_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);

	return ret;
}

int smu_v13_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v13_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!amdgpu_device_has_dc_support(smu->adev))
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);

	return ret;
}

static int
smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}
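
	/* DISPCLK, PHYCLK and PIXCLK limits are only queried when DCEFCLK DPM is enabled. */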
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v13_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v13_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      power_src << 16,
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v13_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_AllowIHHostInterrupt,
				    NULL);
}

static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v13_0_allow_ih_interrupt(smu);

	return ret;
}

int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->irq_source.num_types)
		return 0;

	ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
	if (ret)
		return ret;

	return smu_v13_0_process_pending_interrupt(smu);
}

int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
	if (!smu->irq_source.num_types)
		return 0;

	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE);
}

int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		  SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

int
smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}
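
/*
 * The helpers below program the fan controller directly: FDO_PWM_MODE_STATIC
 * drives a fixed PWM duty cycle, while FDO_PWM_MODE_STATIC_RPM targets a tach
 * period derived from the requested RPM.
 */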
"Start" : "Stop")); 1146 1147 return ret; 1148 } 1149 1150 static int 1151 smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) 1152 { 1153 struct amdgpu_device *adev = smu->adev; 1154 1155 WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, 1156 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), 1157 CG_FDO_CTRL2, TMIN, 0)); 1158 WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, 1159 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), 1160 CG_FDO_CTRL2, FDO_PWM_MODE, mode)); 1161 1162 return 0; 1163 } 1164 1165 int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu, 1166 uint32_t speed) 1167 { 1168 struct amdgpu_device *adev = smu->adev; 1169 uint32_t duty100, duty; 1170 uint64_t tmp64; 1171 1172 speed = min_t(uint32_t, speed, 255); 1173 1174 if (smu_v13_0_auto_fan_control(smu, 0)) 1175 return -EINVAL; 1176 1177 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1), 1178 CG_FDO_CTRL1, FMAX_DUTY100); 1179 if (!duty100) 1180 return -EINVAL; 1181 1182 tmp64 = (uint64_t)speed * duty100; 1183 do_div(tmp64, 255); 1184 duty = (uint32_t)tmp64; 1185 1186 WREG32_SOC15(THM, 0, regCG_FDO_CTRL0, 1187 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0), 1188 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); 1189 1190 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); 1191 } 1192 1193 int 1194 smu_v13_0_set_fan_control_mode(struct smu_context *smu, 1195 uint32_t mode) 1196 { 1197 int ret = 0; 1198 1199 switch (mode) { 1200 case AMD_FAN_CTRL_NONE: 1201 ret = smu_v13_0_set_fan_speed_pwm(smu, 255); 1202 break; 1203 case AMD_FAN_CTRL_MANUAL: 1204 ret = smu_v13_0_auto_fan_control(smu, 0); 1205 break; 1206 case AMD_FAN_CTRL_AUTO: 1207 ret = smu_v13_0_auto_fan_control(smu, 1); 1208 break; 1209 default: 1210 break; 1211 } 1212 1213 if (ret) { 1214 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__); 1215 return -EINVAL; 1216 } 1217 1218 return ret; 1219 } 1220 1221 int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, 1222 uint32_t speed) 1223 { 1224 struct amdgpu_device *adev = smu->adev; 1225 uint32_t crystal_clock_freq = 2500; 1226 uint32_t tach_period; 1227 int ret; 1228 1229 if (!speed) 1230 return -EINVAL; 1231 1232 ret = smu_v13_0_auto_fan_control(smu, 0); 1233 if (ret) 1234 return ret; 1235 1236 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1237 WREG32_SOC15(THM, 0, regCG_TACH_CTRL, 1238 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL), 1239 CG_TACH_CTRL, TARGET_PERIOD, 1240 tach_period)); 1241 1242 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); 1243 } 1244 1245 int smu_v13_0_set_xgmi_pstate(struct smu_context *smu, 1246 uint32_t pstate) 1247 { 1248 int ret = 0; 1249 ret = smu_cmn_send_smc_msg_with_param(smu, 1250 SMU_MSG_SetXgmiMode, 1251 pstate ? 

static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

void smu_v13_0_interrupt_work(struct smu_context *smu)
{
	smu_cmn_send_smc_msg(smu,
			     SMU_MSG_ReenableAcDcInterrupt,
			     NULL);
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

static int smu_v13_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;
	uint32_t high;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = true;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = false;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
				high = smu->thermal_range.software_shutdown_temp +
					smu->thermal_range.software_shutdown_temp_offset;
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     high);
				dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
					  high,
					  smu->thermal_range.software_shutdown_temp_offset);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
				high = min_t(typeof(high),
					     SMU_THERMAL_MAXIMUM_ALERT_TEMP,
					     smu->thermal_range.software_shutdown_temp);
				dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);

				data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
				data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
						     DIG_THERM_INTH,
						     (high & 0xff));
				data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
				WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
				break;
			default:
				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
					ctxid, client_id);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = {
	.set = smu_v13_0_set_irq_state,
	.process = smu_v13_0_irq_process,
};

int smu_v13_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v13_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				SMU_IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);

	return ret;
}

static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
					     uint64_t event_arg)
{
	int ret = 0;

	dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);

	return ret;
}

int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit);
		if (ret)
			return ret;

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
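
	/* The ASIC-specific clock id is carried in the upper 16 bits of the message parameter. */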
	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max,
					  bool automatic)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0xffff);
		else
			param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		if (automatic)
			param = (uint32_t)((clk_id << 16) | 0);
		else
			param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v13_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_13_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_13_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_13_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_13_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_13_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;
	bool auto_level = false;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		auto_level = true;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Unset those settings for SMU 13.0.2, as soft limit settings
	 * for those clock domains are not supported there.
	 */
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
		vclk_min = vclk_max = 0;
		dclk_min = dclk_max = 0;
		fclk_min = fclk_max = 0;
		auto_level = false;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max,
								    auto_level);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v13_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max,
								    auto_level);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max,
							    auto_level);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

int smu_v13_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
				     enum smu_clk_type clk_type,
				     uint32_t *value)
{
	int ret = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		*value = smu->smu_table.boot_values.uclk;
		break;
	case SMU_FCLK:
		*value = smu->smu_table.boot_values.fclk;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		*value = smu->smu_table.boot_values.gfxclk;
		break;
	case SMU_SOCCLK:
		*value = smu->smu_table.boot_values.socclk;
		break;
	case SMU_VCLK:
		*value = smu->smu_table.boot_values.vclk;
		break;
	case SMU_DCLK:
		*value = smu->smu_table.boot_values.dclk;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type, uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value);

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	*value = *value & 0x7fffffff;

	return ret;
}

static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
	/* SMU v13.0.2 FW returns 0 based max level, increment by one for it */
	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
		++(*value);

	return ret;
}

static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
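
	/* Query the topmost level (0xff); BIT31 of the reply reports fine grained vs. discrete DPM. */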
2000 ret = smu_cmn_send_smc_msg_with_param(smu, 2001 SMU_MSG_GetDpmFreqByIndex, 2002 param, 2003 &value); 2004 if (ret) 2005 return ret; 2006 2007 /* 2008 * BIT31: 1 - Fine grained DPM, 0 - Discrete DPM 2009 * Fine grained DPM is not supported for now. 2010 */ 2011 *is_fine_grained_dpm = value & 0x80000000; 2012 2013 return 0; 2014 } 2015 2016 int smu_v13_0_set_single_dpm_table(struct smu_context *smu, 2017 enum smu_clk_type clk_type, 2018 struct smu_13_0_dpm_table *single_dpm_table) 2019 { 2020 int ret = 0; 2021 uint32_t clk; 2022 int i; 2023 2024 ret = smu_v13_0_get_dpm_level_count(smu, 2025 clk_type, 2026 &single_dpm_table->count); 2027 if (ret) { 2028 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); 2029 return ret; 2030 } 2031 2032 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) { 2033 ret = smu_v13_0_get_fine_grained_status(smu, 2034 clk_type, 2035 &single_dpm_table->is_fine_grained); 2036 if (ret) { 2037 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__); 2038 return ret; 2039 } 2040 } 2041 2042 for (i = 0; i < single_dpm_table->count; i++) { 2043 ret = smu_v13_0_get_dpm_freq_by_index(smu, 2044 clk_type, 2045 i, 2046 &clk); 2047 if (ret) { 2048 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); 2049 return ret; 2050 } 2051 2052 single_dpm_table->dpm_levels[i].value = clk; 2053 single_dpm_table->dpm_levels[i].enabled = true; 2054 2055 if (i == 0) 2056 single_dpm_table->min = clk; 2057 else if (i == single_dpm_table->count - 1) 2058 single_dpm_table->max = clk; 2059 } 2060 2061 return 0; 2062 } 2063 2064 int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu) 2065 { 2066 struct amdgpu_device *adev = smu->adev; 2067 2068 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 2069 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 2070 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 2071 } 2072 2073 int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu) 2074 { 2075 uint32_t width_level; 2076 2077 width_level = smu_v13_0_get_current_pcie_link_width_level(smu); 2078 if (width_level > LINK_WIDTH_MAX) 2079 width_level = 0; 2080 2081 return link_width[width_level]; 2082 } 2083 2084 int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu) 2085 { 2086 struct amdgpu_device *adev = smu->adev; 2087 2088 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 2089 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 2090 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2091 } 2092 2093 int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) 2094 { 2095 uint32_t speed_level; 2096 2097 speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu); 2098 if (speed_level > LINK_SPEED_MAX) 2099 speed_level = 0; 2100 2101 return link_speed[speed_level]; 2102 } 2103 2104 int smu_v13_0_set_vcn_enable(struct smu_context *smu, 2105 bool enable, 2106 int inst) 2107 { 2108 struct amdgpu_device *adev = smu->adev; 2109 int ret = 0; 2110 2111 if (adev->vcn.harvest_config & (1 << inst)) 2112 return ret; 2113 2114 ret = smu_cmn_send_smc_msg_with_param(smu, enable ? 2115 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, 2116 inst << 16U, NULL); 2117 2118 return ret; 2119 } 2120 2121 int smu_v13_0_set_jpeg_enable(struct smu_context *smu, 2122 bool enable) 2123 { 2124 return smu_cmn_send_smc_msg_with_param(smu, enable ?
2125 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg, 2126 0, NULL); 2127 } 2128 2129 int smu_v13_0_run_btc(struct smu_context *smu) 2130 { 2131 int res; 2132 2133 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); 2134 if (res) 2135 dev_err(smu->adev->dev, "RunDcBtc failed!\n"); 2136 2137 return res; 2138 } 2139 2140 int smu_v13_0_gpo_control(struct smu_context *smu, 2141 bool enablement) 2142 { 2143 int res; 2144 2145 res = smu_cmn_send_smc_msg_with_param(smu, 2146 SMU_MSG_AllowGpo, 2147 enablement ? 1 : 0, 2148 NULL); 2149 if (res) 2150 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement); 2151 2152 return res; 2153 } 2154 2155 int smu_v13_0_deep_sleep_control(struct smu_context *smu, 2156 bool enablement) 2157 { 2158 struct amdgpu_device *adev = smu->adev; 2159 int ret = 0; 2160 2161 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) { 2162 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); 2163 if (ret) { 2164 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable"); 2165 return ret; 2166 } 2167 } 2168 2169 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) { 2170 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement); 2171 if (ret) { 2172 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable"); 2173 return ret; 2174 } 2175 } 2176 2177 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) { 2178 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement); 2179 if (ret) { 2180 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable"); 2181 return ret; 2182 } 2183 } 2184 2185 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { 2186 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); 2187 if (ret) { 2188 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); 2189 return ret; 2190 } 2191 } 2192 2193 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { 2194 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); 2195 if (ret) { 2196 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); 2197 return ret; 2198 } 2199 } 2200 2201 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) { 2202 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement); 2203 if (ret) { 2204 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable"); 2205 return ret; 2206 } 2207 } 2208 2209 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) { 2210 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement); 2211 if (ret) { 2212 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable"); 2213 return ret; 2214 } 2215 } 2216 2217 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) { 2218 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement); 2219 if (ret) { 2220 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? 
"enable" : "disable"); 2221 return ret; 2222 } 2223 } 2224 2225 return ret; 2226 } 2227 2228 int smu_v13_0_gfx_ulv_control(struct smu_context *smu, 2229 bool enablement) 2230 { 2231 int ret = 0; 2232 2233 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) 2234 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); 2235 2236 return ret; 2237 } 2238 2239 static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, 2240 enum smu_baco_seq baco_seq) 2241 { 2242 struct smu_baco_context *smu_baco = &smu->smu_baco; 2243 int ret; 2244 2245 ret = smu_cmn_send_smc_msg_with_param(smu, 2246 SMU_MSG_ArmD3, 2247 baco_seq, 2248 NULL); 2249 if (ret) 2250 return ret; 2251 2252 if (baco_seq == BACO_SEQ_BAMACO || 2253 baco_seq == BACO_SEQ_BACO) 2254 smu_baco->state = SMU_BACO_STATE_ENTER; 2255 else 2256 smu_baco->state = SMU_BACO_STATE_EXIT; 2257 2258 return 0; 2259 } 2260 2261 static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) 2262 { 2263 struct smu_baco_context *smu_baco = &smu->smu_baco; 2264 2265 return smu_baco->state; 2266 } 2267 2268 static int smu_v13_0_baco_set_state(struct smu_context *smu, 2269 enum smu_baco_state state) 2270 { 2271 struct smu_baco_context *smu_baco = &smu->smu_baco; 2272 struct amdgpu_device *adev = smu->adev; 2273 int ret = 0; 2274 2275 if (smu_v13_0_baco_get_state(smu) == state) 2276 return 0; 2277 2278 if (state == SMU_BACO_STATE_ENTER) { 2279 ret = smu_cmn_send_smc_msg_with_param(smu, 2280 SMU_MSG_EnterBaco, 2281 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 2282 BACO_SEQ_BAMACO : BACO_SEQ_BACO, 2283 NULL); 2284 } else { 2285 ret = smu_cmn_send_smc_msg(smu, 2286 SMU_MSG_ExitBaco, 2287 NULL); 2288 if (ret) 2289 return ret; 2290 2291 /* clear vbios scratch 6 and 7 for coming asic reinit */ 2292 WREG32(adev->bios_scratch_reg_offset + 6, 0); 2293 WREG32(adev->bios_scratch_reg_offset + 7, 0); 2294 } 2295 2296 if (!ret) 2297 smu_baco->state = state; 2298 2299 return ret; 2300 } 2301 2302 int smu_v13_0_get_bamaco_support(struct smu_context *smu) 2303 { 2304 struct smu_baco_context *smu_baco = &smu->smu_baco; 2305 int bamaco_support = 0; 2306 2307 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) 2308 return 0; 2309 2310 if (smu_baco->maco_support) 2311 bamaco_support |= MACO_SUPPORT; 2312 2313 /* return true if ASIC is in BACO state already */ 2314 if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) 2315 return bamaco_support |= BACO_SUPPORT; 2316 2317 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 2318 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) 2319 return 0; 2320 2321 return (bamaco_support |= BACO_SUPPORT); 2322 } 2323 2324 int smu_v13_0_baco_enter(struct smu_context *smu) 2325 { 2326 struct amdgpu_device *adev = smu->adev; 2327 int ret; 2328 2329 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2330 return smu_v13_0_baco_set_armd3_sequence(smu, 2331 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? 
2332 BACO_SEQ_BAMACO : BACO_SEQ_BACO); 2333 } else { 2334 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); 2335 if (!ret) 2336 usleep_range(10000, 11000); 2337 2338 return ret; 2339 } 2340 } 2341 2342 int smu_v13_0_baco_exit(struct smu_context *smu) 2343 { 2344 struct amdgpu_device *adev = smu->adev; 2345 int ret; 2346 2347 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2348 /* Wait for PMFW handling for the Dstate change */ 2349 usleep_range(10000, 11000); 2350 ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2351 } else { 2352 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); 2353 } 2354 2355 if (!ret) 2356 adev->gfx.is_poweron = false; 2357 2358 return ret; 2359 } 2360 2361 int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu) 2362 { 2363 uint16_t index; 2364 struct amdgpu_device *adev = smu->adev; 2365 2366 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2367 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu, 2368 ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL); 2369 } 2370 2371 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 2372 SMU_MSG_EnableGfxImu); 2373 return smu_cmn_send_msg_without_waiting(smu, index, 2374 ENABLE_IMU_ARG_GFXOFF_ENABLE); 2375 } 2376 2377 int smu_v13_0_od_edit_dpm_table(struct smu_context *smu, 2378 enum PP_OD_DPM_TABLE_COMMAND type, 2379 long input[], uint32_t size) 2380 { 2381 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 2382 int ret = 0; 2383 2384 /* Only allowed in manual mode */ 2385 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 2386 return -EINVAL; 2387 2388 switch (type) { 2389 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2390 if (size != 2) { 2391 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2392 return -EINVAL; 2393 } 2394 2395 if (input[0] == 0) { 2396 if (input[1] < smu->gfx_default_hard_min_freq) { 2397 dev_warn(smu->adev->dev, 2398 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2399 input[1], smu->gfx_default_hard_min_freq); 2400 return -EINVAL; 2401 } 2402 smu->gfx_actual_hard_min_freq = input[1]; 2403 } else if (input[0] == 1) { 2404 if (input[1] > smu->gfx_default_soft_max_freq) { 2405 dev_warn(smu->adev->dev, 2406 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2407 input[1], smu->gfx_default_soft_max_freq); 2408 return -EINVAL; 2409 } 2410 smu->gfx_actual_soft_max_freq = input[1]; 2411 } else { 2412 return -EINVAL; 2413 } 2414 break; 2415 case PP_OD_RESTORE_DEFAULT_TABLE: 2416 if (size != 0) { 2417 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2418 return -EINVAL; 2419 } 2420 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2421 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2422 break; 2423 case PP_OD_COMMIT_DPM_TABLE: 2424 if (size != 0) { 2425 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2426 return -EINVAL; 2427 } 2428 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2429 dev_err(smu->adev->dev, 2430 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 2431 smu->gfx_actual_hard_min_freq, 2432 smu->gfx_actual_soft_max_freq); 2433 return -EINVAL; 2434 } 2435 2436 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2437 smu->gfx_actual_hard_min_freq, 2438 NULL); 2439 if (ret) { 2440 dev_err(smu->adev->dev, "Set hard min sclk failed!"); 2441 return ret; 2442 } 2443 2444 ret = smu_cmn_send_smc_msg_with_param(smu, 
SMU_MSG_SetSoftMaxGfxClk, 2445 smu->gfx_actual_soft_max_freq, 2446 NULL); 2447 if (ret) { 2448 dev_err(smu->adev->dev, "Set soft max sclk failed!"); 2449 return ret; 2450 } 2451 break; 2452 default: 2453 return -ENOSYS; 2454 } 2455 2456 return ret; 2457 } 2458 2459 int smu_v13_0_set_default_dpm_tables(struct smu_context *smu) 2460 { 2461 struct smu_table_context *smu_table = &smu->smu_table; 2462 2463 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 2464 smu_table->clocks_table, false); 2465 } 2466 2467 void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu) 2468 { 2469 struct amdgpu_device *adev = smu->adev; 2470 2471 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 2472 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 2473 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 2474 } 2475 2476 int smu_v13_0_mode1_reset(struct smu_context *smu) 2477 { 2478 int ret = 0; 2479 2480 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); 2481 if (!ret) 2482 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 2483 2484 return ret; 2485 } 2486 2487 int smu_v13_0_update_pcie_parameters(struct smu_context *smu, 2488 uint8_t pcie_gen_cap, 2489 uint8_t pcie_width_cap) 2490 { 2491 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 2492 struct smu_13_0_pcie_table *pcie_table = 2493 &dpm_context->dpm_tables.pcie_table; 2494 int num_of_levels = pcie_table->num_of_link_levels; 2495 uint32_t smu_pcie_arg; 2496 int ret, i; 2497 2498 if (!num_of_levels) 2499 return 0; 2500 2501 if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { 2502 if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) 2503 pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; 2504 2505 if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) 2506 pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; 2507 2508 /* Force all levels to use the same settings */ 2509 for (i = 0; i < num_of_levels; i++) { 2510 pcie_table->pcie_gen[i] = pcie_gen_cap; 2511 pcie_table->pcie_lane[i] = pcie_width_cap; 2512 } 2513 } else { 2514 for (i = 0; i < num_of_levels; i++) { 2515 if (pcie_table->pcie_gen[i] > pcie_gen_cap) 2516 pcie_table->pcie_gen[i] = pcie_gen_cap; 2517 if (pcie_table->pcie_lane[i] > pcie_width_cap) 2518 pcie_table->pcie_lane[i] = pcie_width_cap; 2519 } 2520 } 2521 2522 for (i = 0; i < num_of_levels; i++) { 2523 smu_pcie_arg = i << 16; 2524 smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 2525 smu_pcie_arg |= pcie_table->pcie_lane[i]; 2526 2527 ret = smu_cmn_send_smc_msg_with_param(smu, 2528 SMU_MSG_OverridePcieParameters, 2529 smu_pcie_arg, 2530 NULL); 2531 if (ret) 2532 return ret; 2533 } 2534 2535 return 0; 2536 } 2537 2538 int smu_v13_0_disable_pmfw_state(struct smu_context *smu) 2539 { 2540 int ret; 2541 struct amdgpu_device *adev = smu->adev; 2542 2543 WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0); 2544 2545 ret = RREG32_PCIE(MP1_Public | 2546 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 2547 2548 return ret == 0 ? 
0 : -EINVAL; 2549 } 2550 2551 int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable) 2552 { 2553 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL); 2554 } 2555 2556 int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, 2557 struct freq_band_range *exclusion_ranges) 2558 { 2559 WifiBandEntryTable_t wifi_bands; 2560 int valid_entries = 0; 2561 int ret, i; 2562 2563 memset(&wifi_bands, 0, sizeof(wifi_bands)); 2564 for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) { 2565 if (!exclusion_ranges[i].start && !exclusion_ranges[i].end) 2566 break; 2567 2568 /* PMFW expects the inputs to be in MHz */ 2569 wifi_bands.WifiBandEntry[valid_entries].LowFreq = 2570 DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ); 2571 wifi_bands.WifiBandEntry[valid_entries++].HighFreq = 2572 DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ); 2573 } 2574 wifi_bands.WifiBandEntryNum = valid_entries; 2575 2576 /* 2577 * Per confirmation with the PMFW team, WifiBandEntryNum = 0 2578 * is a valid setting. 2579 * 2580 * Consider the scenarios below: 2581 * - At first the wifi device adds an exclusion range e.g. (2400, 2500) to 2582 * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1 2583 * and pass the WifiBandEntry (2400, 2500) to PMFW. 2584 * 2585 * - Later the wifi device removes the wifiband list added above and 2586 * our driver gets notified again. At this time, driver will set 2587 * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW. 2588 * 2589 * - PMFW may still need to do some uclk shadow update (e.g. switching 2590 * from shadow clock back to primary clock) on receiving this. 2591 */ 2592 ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true); 2593 if (ret) 2594 dev_warn(smu->adev->dev, "Failed to set wifiband!"); 2595 2596 return ret; 2597 } 2598
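/*
 * Illustrative sketch only, excluded from the build with #if 0: one way a
 * caller could hand a couple of exclusion ranges, expressed in Hz, to
 * smu_v13_0_set_wbrf_exclusion_ranges(). The function name and the
 * 2.4 GHz / 5 GHz figures below are made-up example values, and the array
 * sizing assumes MAX_NUM_OF_WBRF_RANGES from <linux/wbrf.h>; trailing
 * zeroed entries terminate the list in the loop above.
 */
#if 0
static int smu_v13_0_example_wbrf_exclusions(struct smu_context *smu)
{
	/* Ranges are passed in Hz; the helper converts them to MHz for PMFW */
	struct freq_band_range ranges[MAX_NUM_OF_WBRF_RANGES] = {
		{ .start = 2400ULL * HZ_PER_MHZ, .end = 2500ULL * HZ_PER_MHZ },
		{ .start = 5150ULL * HZ_PER_MHZ, .end = 5350ULL * HZ_PER_MHZ },
	};

	return smu_v13_0_set_wbrf_exclusion_ranges(smu, ranges);
}
#endif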