/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK	0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...)                        \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                     \
			     (smu)->ppt_funcs->intf(smu, ##args) :        \
			     -ENOTSUPP) :                                 \
			    -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32(smu->param_reg);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU has exported a unified header file containing these
 * macros, which we can then simply include and use directly. At the
 * moment, these error codes are unfortunately defined per ASIC by the
 * SMU, yet we are a single driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes. At the moment those values are defined in various header
 * files, one for each ASIC, yet here we're a single ASIC-agnostic
 * interface. Such a change can be followed-up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32(smu->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);
	u32 msg_idx, prm;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE: {
		msg_idx = RREG32(smu->msg_reg);
		prm     = RREG32(smu->param_reg);
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
				    msg_idx, prm);
	}
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg,
				    u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);

	return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int res;

	if (adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;
	int res;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);

	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    res && (res != -ETIME)) {
		amdgpu_device_halt(smu->adev);
		WARN_ON(1);
	}

	return res;
}

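/*
 * Illustrative sketch (not part of this file): a caller that wants the
 * split form of the transaction can pair the two helpers above, sending
 * first and collecting the SMU's status later. The wrapper below is only
 * an assumption about what a hypothetical caller might do:
 *
 *	int hypothetical_send(struct smu_context *smu, u16 index, u32 param)
 *	{
 *		int ret;
 *
 *		ret = smu_cmn_send_msg_without_waiting(smu, index, param);
 *		if (ret)
 *			return ret;
 *
 *		return smu_cmn_wait_for_response(smu);
 *	}
 *
 * Note that, unlike smu_cmn_send_smc_msg_with_param() below, neither helper
 * takes smu->message_lock; serializing against other senders is the
 * caller's responsibility.
 */
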
/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered sending the
 * message or receiving the reply. If there is a PCI bus recovery or
 * the destination is a virtual GPU which does not allow this message
 * type, the message is simply dropped and success is also returned.
 * See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The return value, @read_arg, is read back regardless, to give back
 * more information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct amdgpu_device *adev = smu->adev;
	int res, index;
	u32 reg;

	if (adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res != 0)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg) {
		smu_cmn_read_arg(smu, read_arg);
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg, *read_arg);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
			smu_get_message_name(smu, msg), index, param, reg);
	}
Out:
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	mutex_unlock(&smu->message_lock);
	return res;
}

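/*
 * Illustrative sketch (not part of this file): reading one 32-bit value
 * back from the SMU, modelled on what smu_cmn_get_enabled_mask() below
 * does for the low word of the feature mask:
 *
 *	uint32_t feat_low;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg_with_param(smu,
 *					      SMU_MSG_GetEnabledSmuFeatures,
 *					      0,
 *					      &feat_low);
 *	if (ret)
 *		return ret;
 *
 * Passing a NULL @read_arg is fine for messages whose result is not needed.
 */
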
int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(smu, msg, param);
}

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also provide no feature_map, the check here
	 * avoids the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}

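/*
 * Worked example (illustrative only; the map values below are made up):
 * with throttler_map[] = { [2] = 31, [5] = 3 } and dep_status = 0x24
 * (bits 2 and 5 set), the loop above yields
 *
 *	indep_status = (1ULL << 31) | (1ULL << 3) = 0x0000000080000008ULL
 *
 * i.e. each ASIC-dependent throttler bit is rehomed to its
 * ASIC-independent position.
 */
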
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, all dpm features
 *        to disable
 *
 * Returns:
 *	0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the HDP cache so that the content seen by the
		 * GPU is consistent with the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}

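/*
 * Illustrative sketch (not part of this file): an ASIC backend typically
 * copies the metrics into its own SmuMetrics_t (the exact type is
 * ASIC-specific and only assumed here) and accepts the ~1 ms cache
 * window implemented above:
 *
 *	SmuMetrics_t metrics;
 *	int ret;
 *
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
 *	if (ret)
 *		return ret;
 *
 * Passing bypass_cache = true forces a fresh transfer from the SMU.
 */
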
int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(1, 4):
		structure_size = sizeof(struct gpu_metrics_v1_4);
		break;
	case METRICS_VERSION(1, 5):
		structure_size = sizeof(struct gpu_metrics_v1_5);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	case METRICS_VERSION(2, 3):
		structure_size = sizeof(struct gpu_metrics_v2_3);
		break;
	case METRICS_VERSION(2, 4):
		structure_size = sizeof(struct gpu_metrics_v2_4);
		break;
	case METRICS_VERSION(3, 0):
		structure_size = sizeof(struct gpu_metrics_v3_0);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}