1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 */ 22 #ifndef __AMDGPU_SMU_H__ 23 #define __AMDGPU_SMU_H__ 24 25 #include <linux/acpi_amd_wbrf.h> 26 #include <linux/units.h> 27 28 #include "amdgpu.h" 29 #include "kgd_pp_interface.h" 30 #include "dm_pp_interface.h" 31 #include "dm_pp_smu.h" 32 #include "smu_types.h" 33 #include "linux/firmware.h" 34 35 #define SMU_THERMAL_MINIMUM_ALERT_TEMP 0 36 #define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255 37 #define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 38 #define SMU_FW_NAME_LEN 0x24 39 40 #define SMU_DPM_USER_PROFILE_RESTORE (1 << 0) 41 #define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1) 42 #define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2) 43 44 // Power Throttlers 45 #define SMU_THROTTLER_PPT0_BIT 0 46 #define SMU_THROTTLER_PPT1_BIT 1 47 #define SMU_THROTTLER_PPT2_BIT 2 48 #define SMU_THROTTLER_PPT3_BIT 3 49 #define SMU_THROTTLER_SPL_BIT 4 50 #define SMU_THROTTLER_FPPT_BIT 5 51 #define SMU_THROTTLER_SPPT_BIT 6 52 #define SMU_THROTTLER_SPPT_APU_BIT 7 53 54 // Current Throttlers 55 #define SMU_THROTTLER_TDC_GFX_BIT 16 56 #define SMU_THROTTLER_TDC_SOC_BIT 17 57 #define SMU_THROTTLER_TDC_MEM_BIT 18 58 #define SMU_THROTTLER_TDC_VDD_BIT 19 59 #define SMU_THROTTLER_TDC_CVIP_BIT 20 60 #define SMU_THROTTLER_EDC_CPU_BIT 21 61 #define SMU_THROTTLER_EDC_GFX_BIT 22 62 #define SMU_THROTTLER_APCC_BIT 23 63 64 // Temperature 65 #define SMU_THROTTLER_TEMP_GPU_BIT 32 66 #define SMU_THROTTLER_TEMP_CORE_BIT 33 67 #define SMU_THROTTLER_TEMP_MEM_BIT 34 68 #define SMU_THROTTLER_TEMP_EDGE_BIT 35 69 #define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36 70 #define SMU_THROTTLER_TEMP_SOC_BIT 37 71 #define SMU_THROTTLER_TEMP_VR_GFX_BIT 38 72 #define SMU_THROTTLER_TEMP_VR_SOC_BIT 39 73 #define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40 74 #define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41 75 #define SMU_THROTTLER_TEMP_LIQUID0_BIT 42 76 #define SMU_THROTTLER_TEMP_LIQUID1_BIT 43 77 #define SMU_THROTTLER_VRHOT0_BIT 44 78 #define SMU_THROTTLER_VRHOT1_BIT 45 79 #define SMU_THROTTLER_PROCHOT_CPU_BIT 46 80 #define SMU_THROTTLER_PROCHOT_GFX_BIT 47 81 82 
// Other 83 #define SMU_THROTTLER_PPM_BIT 56 84 #define SMU_THROTTLER_FIT_BIT 57 85 86 struct smu_hw_power_state { 87 unsigned int magic; 88 }; 89 90 struct smu_power_state; 91 92 enum smu_state_ui_label { 93 SMU_STATE_UI_LABEL_NONE, 94 SMU_STATE_UI_LABEL_BATTERY, 95 SMU_STATE_UI_TABEL_MIDDLE_LOW, 96 SMU_STATE_UI_LABEL_BALLANCED, 97 SMU_STATE_UI_LABEL_MIDDLE_HIGHT, 98 SMU_STATE_UI_LABEL_PERFORMANCE, 99 SMU_STATE_UI_LABEL_BACO, 100 }; 101 102 enum smu_state_classification_flag { 103 SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001, 104 SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002, 105 SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004, 106 SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008, 107 SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010, 108 SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020, 109 SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040, 110 SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080, 111 SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100, 112 SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200, 113 SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400, 114 SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800, 115 SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000, 116 SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000, 117 SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000, 118 SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000, 119 SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000, 120 SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000, 121 SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000, 122 SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000, 123 SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000, 124 }; 125 126 struct smu_state_classification_block { 127 enum smu_state_ui_label ui_label; 128 enum smu_state_classification_flag flags; 129 int bios_index; 130 bool temporary_state; 131 bool to_be_deleted; 132 }; 133 134 struct smu_state_pcie_block { 135 unsigned int lanes; 136 }; 137 138 enum smu_refreshrate_source { 139 
SMU_REFRESHRATE_SOURCE_EDID, 140 SMU_REFRESHRATE_SOURCE_EXPLICIT 141 }; 142 143 struct smu_state_display_block { 144 bool disable_frame_modulation; 145 bool limit_refreshrate; 146 enum smu_refreshrate_source refreshrate_source; 147 int explicit_refreshrate; 148 int edid_refreshrate_index; 149 bool enable_vari_bright; 150 }; 151 152 struct smu_state_memory_block { 153 bool dll_off; 154 uint8_t m3arb; 155 uint8_t unused[3]; 156 }; 157 158 struct smu_state_software_algorithm_block { 159 bool disable_load_balancing; 160 bool enable_sleep_for_timestamps; 161 }; 162 163 struct smu_temperature_range { 164 int min; 165 int max; 166 int edge_emergency_max; 167 int hotspot_min; 168 int hotspot_crit_max; 169 int hotspot_emergency_max; 170 int mem_min; 171 int mem_crit_max; 172 int mem_emergency_max; 173 int software_shutdown_temp; 174 int software_shutdown_temp_offset; 175 }; 176 177 struct smu_state_validation_block { 178 bool single_display_only; 179 bool disallow_on_dc; 180 uint8_t supported_power_levels; 181 }; 182 183 struct smu_uvd_clocks { 184 uint32_t vclk; 185 uint32_t dclk; 186 }; 187 188 /** 189 * Structure to hold a SMU Power State. 
190 */ 191 struct smu_power_state { 192 uint32_t id; 193 struct list_head ordered_list; 194 struct list_head all_states_list; 195 196 struct smu_state_classification_block classification; 197 struct smu_state_validation_block validation; 198 struct smu_state_pcie_block pcie; 199 struct smu_state_display_block display; 200 struct smu_state_memory_block memory; 201 struct smu_state_software_algorithm_block software; 202 struct smu_uvd_clocks uvd_clocks; 203 struct smu_hw_power_state hardware; 204 }; 205 206 enum smu_power_src_type { 207 SMU_POWER_SOURCE_AC, 208 SMU_POWER_SOURCE_DC, 209 SMU_POWER_SOURCE_COUNT, 210 }; 211 212 enum smu_ppt_limit_type { 213 SMU_DEFAULT_PPT_LIMIT = 0, 214 SMU_FAST_PPT_LIMIT, 215 }; 216 217 enum smu_ppt_limit_level { 218 SMU_PPT_LIMIT_MIN = -1, 219 SMU_PPT_LIMIT_CURRENT, 220 SMU_PPT_LIMIT_DEFAULT, 221 SMU_PPT_LIMIT_MAX, 222 }; 223 224 enum smu_memory_pool_size { 225 SMU_MEMORY_POOL_SIZE_ZERO = 0, 226 SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000, 227 SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000, 228 SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000, 229 SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000, 230 }; 231 232 struct smu_user_dpm_profile { 233 uint32_t fan_mode; 234 uint32_t power_limit; 235 uint32_t fan_speed_pwm; 236 uint32_t fan_speed_rpm; 237 uint32_t flags; 238 uint32_t user_od; 239 240 /* user clock state information */ 241 uint32_t clk_mask[SMU_CLK_COUNT]; 242 uint32_t clk_dependency; 243 }; 244 245 #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ 246 do { \ 247 tables[table_id].size = s; \ 248 tables[table_id].align = a; \ 249 tables[table_id].domain = d; \ 250 } while (0) 251 252 struct smu_table_cache { 253 void *buffer; 254 size_t size; 255 /* interval in ms*/ 256 uint32_t interval; 257 unsigned long last_cache_time; 258 }; 259 260 struct smu_table { 261 uint64_t size; 262 uint32_t align; 263 uint8_t domain; 264 uint64_t mc_address; 265 void *cpu_addr; 266 struct amdgpu_bo *bo; 267 uint32_t version; 268 struct smu_table_cache cache; 269 }; 270 271 
enum smu_perf_level_designation { 272 PERF_LEVEL_ACTIVITY, 273 PERF_LEVEL_POWER_CONTAINMENT, 274 }; 275 276 struct smu_performance_level { 277 uint32_t core_clock; 278 uint32_t memory_clock; 279 uint32_t vddc; 280 uint32_t vddci; 281 uint32_t non_local_mem_freq; 282 uint32_t non_local_mem_width; 283 }; 284 285 struct smu_clock_info { 286 uint32_t min_mem_clk; 287 uint32_t max_mem_clk; 288 uint32_t min_eng_clk; 289 uint32_t max_eng_clk; 290 uint32_t min_bus_bandwidth; 291 uint32_t max_bus_bandwidth; 292 }; 293 294 struct smu_bios_boot_up_values { 295 uint32_t revision; 296 uint32_t gfxclk; 297 uint32_t uclk; 298 uint32_t socclk; 299 uint32_t dcefclk; 300 uint32_t eclk; 301 uint32_t vclk; 302 uint32_t dclk; 303 uint16_t vddc; 304 uint16_t vddci; 305 uint16_t mvddc; 306 uint16_t vdd_gfx; 307 uint8_t cooling_id; 308 uint32_t pp_table_id; 309 uint32_t format_revision; 310 uint32_t content_revision; 311 uint32_t fclk; 312 uint32_t lclk; 313 uint32_t firmware_caps; 314 }; 315 316 enum smu_table_id { 317 SMU_TABLE_PPTABLE = 0, 318 SMU_TABLE_WATERMARKS, 319 SMU_TABLE_CUSTOM_DPM, 320 SMU_TABLE_DPMCLOCKS, 321 SMU_TABLE_AVFS, 322 SMU_TABLE_AVFS_PSM_DEBUG, 323 SMU_TABLE_AVFS_FUSE_OVERRIDE, 324 SMU_TABLE_PMSTATUSLOG, 325 SMU_TABLE_SMU_METRICS, 326 SMU_TABLE_DRIVER_SMU_CONFIG, 327 SMU_TABLE_ACTIVITY_MONITOR_COEFF, 328 SMU_TABLE_OVERDRIVE, 329 SMU_TABLE_I2C_COMMANDS, 330 SMU_TABLE_PACE, 331 SMU_TABLE_ECCINFO, 332 SMU_TABLE_COMBO_PPTABLE, 333 SMU_TABLE_WIFIBAND, 334 SMU_TABLE_GPUBOARD_TEMP_METRICS, 335 SMU_TABLE_BASEBOARD_TEMP_METRICS, 336 SMU_TABLE_PMFW_SYSTEM_METRICS, 337 SMU_TABLE_COUNT, 338 }; 339 340 struct smu_table_context { 341 void *power_play_table; 342 uint32_t power_play_table_size; 343 void *hardcode_pptable; 344 unsigned long metrics_time; 345 void *metrics_table; 346 void *clocks_table; 347 void *watermarks_table; 348 349 void *max_sustainable_clocks; 350 struct smu_bios_boot_up_values boot_values; 351 void *driver_pptable; 352 void *combo_pptable; 353 void 
*ecc_table; 354 void *driver_smu_config_table; 355 struct smu_table tables[SMU_TABLE_COUNT]; 356 /* 357 * The driver table is just a staging buffer for 358 * uploading/downloading content from the SMU. 359 * 360 * And the table_id for SMU_MSG_TransferTableSmu2Dram/ 361 * SMU_MSG_TransferTableDram2Smu instructs SMU 362 * which content driver is interested. 363 */ 364 struct smu_table driver_table; 365 struct smu_table memory_pool; 366 struct smu_table dummy_read_1_table; 367 uint8_t thermal_controller_type; 368 369 void *overdrive_table; 370 void *boot_overdrive_table; 371 void *user_overdrive_table; 372 373 uint32_t gpu_metrics_table_size; 374 void *gpu_metrics_table; 375 }; 376 377 struct smu_context; 378 struct smu_dpm_policy; 379 380 struct smu_dpm_policy_desc { 381 const char *name; 382 char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level); 383 }; 384 385 struct smu_dpm_policy { 386 struct smu_dpm_policy_desc *desc; 387 enum pp_pm_policy policy_type; 388 unsigned long level_mask; 389 int current_level; 390 int (*set_policy)(struct smu_context *ctxt, int level); 391 }; 392 393 struct smu_dpm_policy_ctxt { 394 struct smu_dpm_policy policies[PP_PM_POLICY_NUM]; 395 unsigned long policy_mask; 396 }; 397 398 struct smu_dpm_context { 399 uint32_t dpm_context_size; 400 void *dpm_context; 401 void *golden_dpm_context; 402 enum amd_dpm_forced_level dpm_level; 403 enum amd_dpm_forced_level saved_dpm_level; 404 enum amd_dpm_forced_level requested_dpm_level; 405 struct smu_power_state *dpm_request_power_state; 406 struct smu_power_state *dpm_current_power_state; 407 struct mclock_latency_table *mclk_latency_table; 408 struct smu_dpm_policy_ctxt *dpm_policies; 409 }; 410 411 struct smu_temp_context { 412 const struct smu_temp_funcs *temp_funcs; 413 }; 414 415 struct smu_power_gate { 416 bool uvd_gated; 417 bool vce_gated; 418 atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; 419 atomic_t jpeg_gated; 420 atomic_t vpe_gated; 421 atomic_t isp_gated; 422 atomic_t 
umsch_mm_gated; 423 }; 424 425 struct smu_power_context { 426 void *power_context; 427 uint32_t power_context_size; 428 struct smu_power_gate power_gate; 429 }; 430 431 #define SMU_FEATURE_MAX (64) 432 struct smu_feature { 433 uint32_t feature_num; 434 DECLARE_BITMAP(supported, SMU_FEATURE_MAX); 435 DECLARE_BITMAP(allowed, SMU_FEATURE_MAX); 436 }; 437 438 struct smu_clocks { 439 uint32_t engine_clock; 440 uint32_t memory_clock; 441 uint32_t bus_bandwidth; 442 uint32_t engine_clock_in_sr; 443 uint32_t dcef_clock; 444 uint32_t dcef_clock_in_sr; 445 }; 446 447 #define MAX_REGULAR_DPM_NUM 16 448 struct mclk_latency_entries { 449 uint32_t frequency; 450 uint32_t latency; 451 }; 452 struct mclock_latency_table { 453 uint32_t count; 454 struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; 455 }; 456 457 enum smu_reset_mode { 458 SMU_RESET_MODE_0, 459 SMU_RESET_MODE_1, 460 SMU_RESET_MODE_2, 461 SMU_RESET_MODE_3, 462 SMU_RESET_MODE_4, 463 }; 464 465 enum smu_baco_state { 466 SMU_BACO_STATE_ENTER = 0, 467 SMU_BACO_STATE_EXIT, 468 SMU_BACO_STATE_NONE, 469 }; 470 471 struct smu_baco_context { 472 uint32_t state; 473 bool platform_support; 474 bool maco_support; 475 }; 476 477 struct smu_freq_info { 478 uint32_t min; 479 uint32_t max; 480 uint32_t freq_level; 481 }; 482 483 struct pstates_clk_freq { 484 uint32_t min; 485 uint32_t standard; 486 uint32_t peak; 487 struct smu_freq_info custom; 488 struct smu_freq_info curr; 489 }; 490 491 struct smu_umd_pstate_table { 492 struct pstates_clk_freq gfxclk_pstate; 493 struct pstates_clk_freq socclk_pstate; 494 struct pstates_clk_freq uclk_pstate; 495 struct pstates_clk_freq vclk_pstate; 496 struct pstates_clk_freq dclk_pstate; 497 struct pstates_clk_freq fclk_pstate; 498 }; 499 500 struct cmn2asic_msg_mapping { 501 int valid_mapping; 502 int map_to; 503 uint32_t flags; 504 }; 505 506 struct cmn2asic_mapping { 507 int valid_mapping; 508 int map_to; 509 }; 510 511 struct stb_context { 512 uint32_t stb_buf_size; 513 bool enabled; 
514 spinlock_t lock; 515 }; 516 517 enum smu_fw_status { 518 SMU_FW_INIT = 0, 519 SMU_FW_RUNTIME, 520 SMU_FW_HANG, 521 }; 522 523 #define WORKLOAD_POLICY_MAX 7 524 525 /* 526 * Configure wbrf event handling pace as there can be only one 527 * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms. 528 */ 529 #define SMU_WBRF_EVENT_HANDLING_PACE 10 530 531 struct smu_context { 532 struct amdgpu_device *adev; 533 struct amdgpu_irq_src irq_source; 534 535 const struct pptable_funcs *ppt_funcs; 536 const struct cmn2asic_msg_mapping *message_map; 537 const struct cmn2asic_mapping *clock_map; 538 const struct cmn2asic_mapping *feature_map; 539 const struct cmn2asic_mapping *table_map; 540 const struct cmn2asic_mapping *pwr_src_map; 541 const struct cmn2asic_mapping *workload_map; 542 struct mutex message_lock; 543 uint64_t pool_size; 544 545 struct smu_table_context smu_table; 546 struct smu_dpm_context smu_dpm; 547 struct smu_power_context smu_power; 548 struct smu_temp_context smu_temp; 549 struct smu_feature smu_feature; 550 struct amd_pp_display_configuration *display_config; 551 struct smu_baco_context smu_baco; 552 struct smu_temperature_range thermal_range; 553 void *od_settings; 554 555 struct smu_umd_pstate_table pstate_table; 556 uint32_t pstate_sclk; 557 uint32_t pstate_mclk; 558 559 bool od_enabled; 560 uint32_t current_power_limit; 561 uint32_t default_power_limit; 562 uint32_t max_power_limit; 563 uint32_t min_power_limit; 564 565 /* soft pptable */ 566 uint32_t ppt_offset_bytes; 567 uint32_t ppt_size_bytes; 568 uint8_t *ppt_start_addr; 569 570 bool support_power_containment; 571 bool disable_watermark; 572 573 #define WATERMARKS_EXIST (1 << 0) 574 #define WATERMARKS_LOADED (1 << 1) 575 uint32_t watermarks_bitmap; 576 uint32_t hard_min_uclk_req_from_dal; 577 bool disable_uclk_switch; 578 579 /* asic agnostic workload mask */ 580 uint32_t workload_mask; 581 bool pause_workload; 582 /* default/user workload preference */ 583 uint32_t power_profile_mode; 584 
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; 585 /* backend specific custom workload settings */ 586 long *custom_profile_params; 587 bool pm_enabled; 588 bool is_apu; 589 590 uint32_t smc_driver_if_version; 591 uint32_t smc_fw_if_version; 592 uint32_t smc_fw_version; 593 uint32_t smc_fw_caps; 594 uint8_t smc_fw_state; 595 596 bool uploading_custom_pp_table; 597 bool dc_controlled_by_gpio; 598 599 struct work_struct throttling_logging_work; 600 atomic64_t throttle_int_counter; 601 struct work_struct interrupt_work; 602 603 unsigned fan_max_rpm; 604 unsigned manual_fan_speed_pwm; 605 606 uint32_t gfx_default_hard_min_freq; 607 uint32_t gfx_default_soft_max_freq; 608 uint32_t gfx_actual_hard_min_freq; 609 uint32_t gfx_actual_soft_max_freq; 610 611 /* APU only */ 612 uint32_t cpu_default_soft_min_freq; 613 uint32_t cpu_default_soft_max_freq; 614 uint32_t cpu_actual_soft_min_freq; 615 uint32_t cpu_actual_soft_max_freq; 616 uint32_t cpu_core_id_select; 617 uint16_t cpu_core_num; 618 619 struct smu_user_dpm_profile user_dpm_profile; 620 621 struct stb_context stb_context; 622 623 struct firmware pptable_firmware; 624 625 u32 param_reg; 626 u32 msg_reg; 627 u32 resp_reg; 628 629 u32 debug_param_reg; 630 u32 debug_msg_reg; 631 u32 debug_resp_reg; 632 633 struct delayed_work swctf_delayed_work; 634 635 /* data structures for wbrf feature support */ 636 bool wbrf_supported; 637 struct notifier_block wbrf_notifier; 638 struct delayed_work wbrf_delayed_work; 639 }; 640 641 struct i2c_adapter; 642 643 /** 644 * struct smu_temp_funcs - Callbacks used to get temperature data. 645 */ 646 struct smu_temp_funcs { 647 /** 648 * @get_temp_metrics: Calibrate voltage/frequency curve to fit the system's 649 * power delivery and voltage margins. 
Required for adaptive 650 * @type Temperature metrics type(baseboard/gpuboard) 651 * Return: Size of &table 652 */ 653 ssize_t (*get_temp_metrics)(struct smu_context *smu, 654 enum smu_temp_metric_type type, void *table); 655 656 /** 657 * @temp_metrics_is_support: Get if specific temperature metrics is supported 658 * @type Temperature metrics type(baseboard/gpuboard) 659 * Return: true if supported else false 660 */ 661 bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type); 662 663 }; 664 665 /** 666 * struct pptable_funcs - Callbacks used to interact with the SMU. 667 */ 668 struct pptable_funcs { 669 /** 670 * @run_btc: Calibrate voltage/frequency curve to fit the system's 671 * power delivery and voltage margins. Required for adaptive 672 * voltage frequency scaling (AVFS). 673 */ 674 int (*run_btc)(struct smu_context *smu); 675 676 /** 677 * @get_allowed_feature_mask: Get allowed feature mask. 678 * &feature_mask: Array to store feature mask. 679 * &num: Elements in &feature_mask. 680 */ 681 int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); 682 683 /** 684 * @get_current_power_state: Get the current power state. 685 * 686 * Return: Current power state on success, negative errno on failure. 687 */ 688 enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); 689 690 /** 691 * @set_default_dpm_table: Retrieve the default overdrive settings from 692 * the SMU. 693 */ 694 int (*set_default_dpm_table)(struct smu_context *smu); 695 696 int (*set_power_state)(struct smu_context *smu); 697 698 /** 699 * @populate_umd_state_clk: Populate the UMD power state table with 700 * defaults. 701 */ 702 int (*populate_umd_state_clk)(struct smu_context *smu); 703 704 /** 705 * @print_clk_levels: Print DPM clock levels for a clock domain 706 * to buffer. Star current level. 707 * 708 * Used for sysfs interfaces. 
709 * Return: Number of characters written to the buffer 710 */ 711 int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); 712 713 /** 714 * @emit_clk_levels: Print DPM clock levels for a clock domain 715 * to buffer using sysfs_emit_at. Star current level. 716 * 717 * Used for sysfs interfaces. 718 * &buf: sysfs buffer 719 * &offset: offset within buffer to start printing, which is updated by the 720 * function. 721 * 722 * Return: 0 on Success or Negative to indicate an error occurred. 723 */ 724 int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset); 725 726 /** 727 * @force_clk_levels: Set a range of allowed DPM levels for a clock 728 * domain. 729 * &clk_type: Clock domain. 730 * &mask: Range of allowed DPM levels. 731 */ 732 int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask); 733 734 /** 735 * @od_edit_dpm_table: Edit the custom overdrive DPM table. 736 * &type: Type of edit. 737 * &input: Edit parameters. 738 * &size: Size of &input. 739 */ 740 int (*od_edit_dpm_table)(struct smu_context *smu, 741 enum PP_OD_DPM_TABLE_COMMAND type, 742 long *input, uint32_t size); 743 744 /** 745 * @restore_user_od_settings: Restore the user customized 746 * OD settings on S3/S4/Runpm resume. 747 */ 748 int (*restore_user_od_settings)(struct smu_context *smu); 749 750 /** 751 * @get_clock_by_type_with_latency: Get the speed and latency of a clock 752 * domain. 753 */ 754 int (*get_clock_by_type_with_latency)(struct smu_context *smu, 755 enum smu_clk_type clk_type, 756 struct 757 pp_clock_levels_with_latency 758 *clocks); 759 /** 760 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock 761 * domain. 
762 */ 763 int (*get_clock_by_type_with_voltage)(struct smu_context *smu, 764 enum amd_pp_clock_type type, 765 struct 766 pp_clock_levels_with_voltage 767 *clocks); 768 769 /** 770 * @get_power_profile_mode: Print all power profile modes to 771 * buffer. Star current mode. 772 */ 773 int (*get_power_profile_mode)(struct smu_context *smu, char *buf); 774 775 /** 776 * @set_power_profile_mode: Set a power profile mode. Also used to 777 * create/set custom power profile modes. 778 * &input: Power profile mode parameters. 779 * &workload_mask: mask of workloads to enable 780 * &custom_params: custom profile parameters 781 * &custom_params_max_idx: max valid idx into custom_params 782 */ 783 int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask, 784 long *custom_params, u32 custom_params_max_idx); 785 786 /** 787 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power 788 * management. 789 */ 790 int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst); 791 792 /** 793 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power 794 * management. 795 */ 796 int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable); 797 798 /** 799 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU 800 */ 801 int (*set_gfx_power_up_by_imu)(struct smu_context *smu); 802 803 /** 804 * @read_sensor: Read data from a sensor. 805 * &sensor: Sensor to read data from. 806 * &data: Sensor reading. 807 * &size: Size of &data. 
808 */ 809 int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, 810 void *data, uint32_t *size); 811 812 /** 813 * @get_apu_thermal_limit: get apu core limit from smu 814 * &limit: current limit temperature in millidegrees Celsius 815 */ 816 int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit); 817 818 /** 819 * @set_apu_thermal_limit: update all controllers with new limit 820 * &limit: limit temperature to be setted, in millidegrees Celsius 821 */ 822 int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit); 823 824 /** 825 * @pre_display_config_changed: Prepare GPU for a display configuration 826 * change. 827 * 828 * Disable display tracking and pin memory clock speed to maximum. Used 829 * in display component synchronization. 830 */ 831 int (*pre_display_config_changed)(struct smu_context *smu); 832 833 /** 834 * @display_config_changed: Notify the SMU of the current display 835 * configuration. 836 * 837 * Allows SMU to properly track blanking periods for memory clock 838 * adjustment. Used in display component synchronization. 839 */ 840 int (*display_config_changed)(struct smu_context *smu); 841 842 int (*apply_clocks_adjust_rules)(struct smu_context *smu); 843 844 /** 845 * @notify_smc_display_config: Applies display requirements to the 846 * current power state. 847 * 848 * Optimize deep sleep DCEFclk and mclk for the current display 849 * configuration. Used in display component synchronization. 850 */ 851 int (*notify_smc_display_config)(struct smu_context *smu); 852 853 /** 854 * @is_dpm_running: Check if DPM is running. 855 * 856 * Return: True if DPM is running, false otherwise. 857 */ 858 bool (*is_dpm_running)(struct smu_context *smu); 859 860 /** 861 * @get_fan_speed_pwm: Get the current fan speed in PWM. 862 */ 863 int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed); 864 865 /** 866 * @get_fan_speed_rpm: Get the current fan speed in rpm. 
867 */ 868 int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); 869 870 /** 871 * @set_watermarks_table: Configure and upload the watermarks tables to 872 * the SMU. 873 */ 874 int (*set_watermarks_table)(struct smu_context *smu, 875 struct pp_smu_wm_range_sets *clock_ranges); 876 877 /** 878 * @get_thermal_temperature_range: Get safe thermal limits in Celcius. 879 */ 880 int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); 881 882 /** 883 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz. 884 * &clocks_in_khz: Array of DPM levels. 885 * &num_states: Elements in &clocks_in_khz. 886 */ 887 int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); 888 889 /** 890 * @set_default_od_settings: Set the overdrive tables to defaults. 891 */ 892 int (*set_default_od_settings)(struct smu_context *smu); 893 894 /** 895 * @set_performance_level: Set a performance level. 896 */ 897 int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); 898 899 /** 900 * @display_disable_memory_clock_switch: Enable/disable dynamic memory 901 * clock switching. 902 * 903 * Disabling this feature forces memory clock speed to maximum. 904 * Enabling sets the minimum memory clock capable of driving the 905 * current display configuration. 906 */ 907 int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); 908 909 /** 910 * @get_power_limit: Get the device's power limits. 911 */ 912 int (*get_power_limit)(struct smu_context *smu, 913 uint32_t *current_power_limit, 914 uint32_t *default_power_limit, 915 uint32_t *max_power_limit, 916 uint32_t *min_power_limit); 917 918 /** 919 * @get_ppt_limit: Get the device's ppt limits. 
920 */ 921 int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit, 922 enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level); 923 924 /** 925 * @set_df_cstate: Set data fabric cstate. 926 */ 927 int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); 928 929 /** 930 * @update_pcie_parameters: Update and upload the system's PCIe 931 * capabilites to the SMU. 932 * &pcie_gen_cap: Maximum allowed PCIe generation. 933 * &pcie_width_cap: Maximum allowed PCIe width. 934 */ 935 int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap); 936 937 /** 938 * @i2c_init: Initialize i2c. 939 * 940 * The i2c bus is used internally by the SMU voltage regulators and 941 * other devices. The i2c's EEPROM also stores bad page tables on boards 942 * with ECC. 943 */ 944 int (*i2c_init)(struct smu_context *smu); 945 946 /** 947 * @i2c_fini: Tear down i2c. 948 */ 949 void (*i2c_fini)(struct smu_context *smu); 950 951 /** 952 * @get_unique_id: Get the GPU's unique id. Used for asset tracking. 953 */ 954 void (*get_unique_id)(struct smu_context *smu); 955 956 /** 957 * @get_dpm_clock_table: Get a copy of the DPM clock table. 958 * 959 * Used by display component in bandwidth and watermark calculations. 960 */ 961 int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table); 962 963 /** 964 * @init_microcode: Request the SMU's firmware from the kernel. 965 */ 966 int (*init_microcode)(struct smu_context *smu); 967 968 /** 969 * @load_microcode: Load firmware onto the SMU. 970 */ 971 int (*load_microcode)(struct smu_context *smu); 972 973 /** 974 * @fini_microcode: Release the SMU's firmware. 975 */ 976 void (*fini_microcode)(struct smu_context *smu); 977 978 /** 979 * @init_smc_tables: Initialize the SMU tables. 980 */ 981 int (*init_smc_tables)(struct smu_context *smu); 982 983 /** 984 * @fini_smc_tables: Release the SMU tables. 
985 */ 986 int (*fini_smc_tables)(struct smu_context *smu); 987 988 /** 989 * @init_power: Initialize the power gate table context. 990 */ 991 int (*init_power)(struct smu_context *smu); 992 993 /** 994 * @fini_power: Release the power gate table context. 995 */ 996 int (*fini_power)(struct smu_context *smu); 997 998 /** 999 * @check_fw_status: Check the SMU's firmware status. 1000 * 1001 * Return: Zero if check passes, negative errno on failure. 1002 */ 1003 int (*check_fw_status)(struct smu_context *smu); 1004 1005 /** 1006 * @set_mp1_state: put SMU into a correct state for comming 1007 * resume from runpm or gpu reset. 1008 */ 1009 int (*set_mp1_state)(struct smu_context *smu, 1010 enum pp_mp1_state mp1_state); 1011 1012 /** 1013 * @setup_pptable: Initialize the power play table and populate it with 1014 * default values. 1015 */ 1016 int (*setup_pptable)(struct smu_context *smu); 1017 1018 /** 1019 * @get_vbios_bootup_values: Get default boot values from the VBIOS. 1020 */ 1021 int (*get_vbios_bootup_values)(struct smu_context *smu); 1022 1023 /** 1024 * @check_fw_version: Print driver and SMU interface versions to the 1025 * system log. 1026 * 1027 * Interface mismatch is not a critical failure. 1028 */ 1029 int (*check_fw_version)(struct smu_context *smu); 1030 1031 /** 1032 * @powergate_sdma: Power up/down system direct memory access. 1033 */ 1034 int (*powergate_sdma)(struct smu_context *smu, bool gate); 1035 1036 /** 1037 * @set_gfx_cgpg: Enable/disable graphics engine course grain power 1038 * gating. 1039 */ 1040 int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); 1041 1042 /** 1043 * @write_pptable: Write the power play table to the SMU. 1044 */ 1045 int (*write_pptable)(struct smu_context *smu); 1046 1047 /** 1048 * @set_driver_table_location: Send the location of the driver table to 1049 * the SMU. 
1050 */ 1051 int (*set_driver_table_location)(struct smu_context *smu); 1052 1053 /** 1054 * @set_tool_table_location: Send the location of the tool table to the 1055 * SMU. 1056 */ 1057 int (*set_tool_table_location)(struct smu_context *smu); 1058 1059 /** 1060 * @notify_memory_pool_location: Send the location of the memory pool to 1061 * the SMU. 1062 */ 1063 int (*notify_memory_pool_location)(struct smu_context *smu); 1064 1065 /** 1066 * @system_features_control: Enable/disable all SMU features. 1067 */ 1068 int (*system_features_control)(struct smu_context *smu, bool en); 1069 1070 /** 1071 * @send_smc_msg_with_param: Send a message with a parameter to the SMU. 1072 * &msg: Type of message. 1073 * ¶m: Message parameter. 1074 * &read_arg: SMU response (optional). 1075 */ 1076 int (*send_smc_msg_with_param)(struct smu_context *smu, 1077 enum smu_message_type msg, uint32_t param, uint32_t *read_arg); 1078 1079 /** 1080 * @send_smc_msg: Send a message to the SMU. 1081 * &msg: Type of message. 1082 * &read_arg: SMU response (optional). 1083 */ 1084 int (*send_smc_msg)(struct smu_context *smu, 1085 enum smu_message_type msg, 1086 uint32_t *read_arg); 1087 1088 /** 1089 * @init_display_count: Notify the SMU of the number of display 1090 * components in current display configuration. 1091 */ 1092 int (*init_display_count)(struct smu_context *smu, uint32_t count); 1093 1094 /** 1095 * @set_allowed_mask: Notify the SMU of the features currently allowed 1096 * by the driver. 1097 */ 1098 int (*set_allowed_mask)(struct smu_context *smu); 1099 1100 /** 1101 * @get_enabled_mask: Get a mask of features that are currently enabled 1102 * on the SMU. 1103 * &feature_mask: Enabled feature mask. 1104 */ 1105 int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask); 1106 1107 /** 1108 * @feature_is_enabled: Test if a feature is enabled. 1109 * 1110 * Return: One if enabled, zero if disabled. 
1111 */ 1112 int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask); 1113 1114 /** 1115 * @disable_all_features_with_exception: Disable all features with 1116 * exception to those in &mask. 1117 */ 1118 int (*disable_all_features_with_exception)(struct smu_context *smu, 1119 enum smu_feature_mask mask); 1120 1121 /** 1122 * @notify_display_change: General interface call to let SMU know about DC change 1123 */ 1124 int (*notify_display_change)(struct smu_context *smu); 1125 1126 /** 1127 * @set_power_limit: Set power limit in watts. 1128 */ 1129 int (*set_power_limit)(struct smu_context *smu, 1130 enum smu_ppt_limit_type limit_type, 1131 uint32_t limit); 1132 1133 /** 1134 * @init_max_sustainable_clocks: Populate max sustainable clock speed 1135 * table with values from the SMU. 1136 */ 1137 int (*init_max_sustainable_clocks)(struct smu_context *smu); 1138 1139 /** 1140 * @enable_thermal_alert: Enable thermal alert interrupts. 1141 */ 1142 int (*enable_thermal_alert)(struct smu_context *smu); 1143 1144 /** 1145 * @disable_thermal_alert: Disable thermal alert interrupts. 1146 */ 1147 int (*disable_thermal_alert)(struct smu_context *smu); 1148 1149 /** 1150 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep 1151 * clock speed in MHz. 1152 */ 1153 int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk); 1154 1155 /** 1156 * @display_clock_voltage_request: Set a hard minimum frequency 1157 * for a clock domain. 1158 */ 1159 int (*display_clock_voltage_request)(struct smu_context *smu, struct 1160 pp_display_clock_request 1161 *clock_req); 1162 1163 /** 1164 * @get_fan_control_mode: Get the current fan control mode. 1165 */ 1166 uint32_t (*get_fan_control_mode)(struct smu_context *smu); 1167 1168 /** 1169 * @set_fan_control_mode: Set the fan control mode. 1170 */ 1171 int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); 1172 1173 /** 1174 * @set_fan_speed_pwm: Set a static fan speed in PWM. 
	 */
	int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_fan_speed_rpm: Set a static fan speed in rpm.
	 */
	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
	 * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise.
	 */
	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);

	/**
	 * @gfx_off_control: Enable/disable graphics engine poweroff.
	 */
	int (*gfx_off_control)(struct smu_context *smu, bool enable);


	/**
	 * @get_gfx_off_status: Get graphics engine poweroff status.
	 *
	 * Return:
	 * 0 - GFXOFF(default).
	 * 1 - Transition out of GFX State.
	 * 2 - Not in GFXOFF.
	 * 3 - Transition into GFXOFF.
	 */
	uint32_t (*get_gfx_off_status)(struct smu_context *smu);

	/**
	 * @get_gfx_off_entrycount: total GFXOFF entry count at the time of
	 *                          query since system power-up
	 */
	u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);

	/**
	 * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
	 */
	u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);

	/**
	 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
	 */
	u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);

	/**
	 * @register_irq_handler: Register interrupt request handlers.
	 */
	int (*register_irq_handler)(struct smu_context *smu);

	/**
	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
	 */
	int (*set_azalia_d3_pme)(struct smu_context *smu);

	/**
	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
	 *                                    clock speeds table.
	 *
	 * Provides a way for the display component (DC) to get the max
	 * sustainable clocks from the SMU.
	 */
	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);

	/**
	 * @get_bamaco_support: Check if GPU supports BACO/MACO
	 * BACO: Bus Active, Chip Off
	 * MACO: Memory Active, Chip Off
	 */
	int (*get_bamaco_support)(struct smu_context *smu);

	/**
	 * @baco_get_state: Get the current BACO state.
	 *
	 * Return: Current BACO state.
	 */
	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);

	/**
	 * @baco_set_state: Enter/exit BACO.
	 */
	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);

	/**
	 * @baco_enter: Enter BACO.
	 */
	int (*baco_enter)(struct smu_context *smu);

	/**
	 * @baco_exit: Exit BACO.
	 */
	int (*baco_exit)(struct smu_context *smu);

	/**
	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
	 */
	bool (*mode1_reset_is_support)(struct smu_context *smu);

	/**
	 * @link_reset_is_support: Check if GPU supports link reset.
	 */
	bool (*link_reset_is_support)(struct smu_context *smu);

	/**
	 * @mode1_reset: Perform mode1 reset.
	 *
	 * Complete GPU reset.
	 */
	int (*mode1_reset)(struct smu_context *smu);

	/**
	 * @mode2_reset: Perform mode2 reset.
	 *
	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
	 * IPs reset varies by asic.
	 */
	int (*mode2_reset)(struct smu_context *smu);
	/* for gfx feature enablement after mode2 reset */
	int (*enable_gfx_features)(struct smu_context *smu);

	/**
	 * @link_reset: Perform link reset.
1299 * 1300 * The gfx device driver reset 1301 */ 1302 int (*link_reset)(struct smu_context *smu); 1303 1304 /** 1305 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock 1306 * domain in MHz. 1307 */ 1308 int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); 1309 1310 /** 1311 * @set_soft_freq_limited_range: Set the soft frequency range of a clock 1312 * domain in MHz. 1313 */ 1314 int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max, 1315 bool automatic); 1316 1317 /** 1318 * @set_power_source: Notify the SMU of the current power source. 1319 */ 1320 int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src); 1321 1322 /** 1323 * @log_thermal_throttling_event: Print a thermal throttling warning to 1324 * the system's log. 1325 */ 1326 void (*log_thermal_throttling_event)(struct smu_context *smu); 1327 1328 /** 1329 * @get_pp_feature_mask: Print a human readable table of enabled 1330 * features to buffer. 1331 */ 1332 size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf); 1333 1334 /** 1335 * @set_pp_feature_mask: Request the SMU enable/disable features to 1336 * match those enabled in &new_mask. 1337 */ 1338 int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask); 1339 1340 /** 1341 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU. 1342 * 1343 * Return: Size of &table 1344 */ 1345 ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); 1346 1347 /** 1348 * @get_pm_metrics: Get one snapshot of power management metrics from 1349 * PMFW. 1350 * 1351 * Return: Size of the metrics sample 1352 */ 1353 ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics, 1354 size_t size); 1355 1356 /** 1357 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost. 
	 */
	int (*enable_mgpu_fan_boost)(struct smu_context *smu);

	/**
	 * @gfx_ulv_control: Enable/disable ultra low voltage.
	 */
	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);

	/**
	 * @deep_sleep_control: Enable/disable deep sleep.
	 */
	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);

	/**
	 * @get_fan_parameters: Get fan parameters.
	 *
	 * Get maximum fan speed from the power play table.
	 */
	int (*get_fan_parameters)(struct smu_context *smu);

	/**
	 * @post_init: Helper function for asic specific workarounds.
	 */
	int (*post_init)(struct smu_context *smu);

	/**
	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
	 */
	void (*interrupt_work)(struct smu_context *smu);

	/**
	 * @gpo_control: Enable/disable graphics power optimization if supported.
	 */
	int (*gpo_control)(struct smu_context *smu, bool enablement);

	/**
	 * @gfx_state_change_set: Send the current graphics state to the SMU.
	 */
	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);

	/**
	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
	 *                                      parameters to defaults.
	 */
	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);

	/**
	 * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
	 */
	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);

	/**
	 * @wait_for_event: Wait for events from SMU.
	 */
	int (*wait_for_event)(struct smu_context *smu,
			      enum smu_event_type event, uint64_t event_arg);

	/**
	 * @send_hbm_bad_pages_num: message SMU to update bad page number
	 *                          of SMUBUS table.
	 */
	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);

	/**
	 * @send_rma_reason: message rma reason event to SMU.
	 */
	int (*send_rma_reason)(struct smu_context *smu);

	/**
	 * @reset_sdma: message SMU to soft reset sdma instance.
	 */
	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
	/**
	 * @reset_sdma_is_supported: Check if soft reset of the SDMA engine is supported.
	 */
	bool (*reset_sdma_is_supported)(struct smu_context *smu);

	/**
	 * @dpm_reset_vcn: message SMU to soft reset vcn instance.
	 */
	int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
	/**
	 * @reset_vcn_is_supported: Check if soft reset of vcn is supported.
	 */
	bool (*reset_vcn_is_supported)(struct smu_context *smu);

	/**
	 * @get_ecc_info: message SMU to get ECC INFO table.
	 */
	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);


	/**
	 * @stb_collect_info: Collects Smart Trace Buffers data.
	 */
	int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);

	/**
	 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
	 */
	int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @set_config_table: Apply the input DriverSmuConfig table settings.
	 */
	int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @send_hbm_bad_channel_flag: message SMU to update bad channel info
	 *                             of SMUBUS table.
1468 */ 1469 int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size); 1470 1471 /** 1472 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP 1473 */ 1474 int (*init_pptable_microcode)(struct smu_context *smu); 1475 1476 /** 1477 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power 1478 * management. 1479 */ 1480 int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); 1481 1482 /** 1483 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power 1484 * management. 1485 */ 1486 int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); 1487 1488 /** 1489 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power 1490 * management. 1491 */ 1492 int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable); 1493 1494 /** 1495 * @set_mall_enable: Init MALL power gating control. 1496 */ 1497 int (*set_mall_enable)(struct smu_context *smu); 1498 1499 /** 1500 * @notify_rlc_state: Notify RLC power state to SMU. 1501 */ 1502 int (*notify_rlc_state)(struct smu_context *smu, bool en); 1503 1504 /** 1505 * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature 1506 */ 1507 bool (*is_asic_wbrf_supported)(struct smu_context *smu); 1508 1509 /** 1510 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported 1511 */ 1512 int (*enable_uclk_shadow)(struct smu_context *smu, bool enable); 1513 1514 /** 1515 * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied 1516 */ 1517 int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, 1518 struct freq_band_range *exclusion_ranges); 1519 /** 1520 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU. 
	 * Return: Size of table
	 */
	ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
				   void *table);
};

/*
 * Selector for a single value held in the ASIC metrics table:
 * current/average clocks, engine activity, temperatures, throttler
 * status, voltages, unique ID halves, PCIe link info and throttler
 * residency counters.
 */
typedef enum {
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	METRICS_THROTTLER_STATUS,
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;

/* Selects which common-to-ASIC translation table a mapping lookup targets. */
enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,
	CMN2ASIC_MAPPING_CLK,
	CMN2ASIC_MAPPING_FEATURE,
	CMN2ASIC_MAPPING_TABLE,
	CMN2ASIC_MAPPING_PWR,
	CMN2ASIC_MAPPING_WORKLOAD,
};

/* BACO/BAMACO entry sequence variants. */
enum smu_baco_seq {
	BACO_SEQ_BACO = 0,
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,
};

/*
 * Designated-initializer helpers for the common-to-ASIC mapping tables.
 * Each entry is {valid_flag, asic_index[, flags]}; presumably a leading 1
 * marks the mapping as valid and 0 as invalid - see TAB_MAP_INVALID().
 */
#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

/* NOTE(review): expands identically to TAB_MAP(); kept for symmetry with
 * TAB_MAP_INVALID() at existing call sites.
 */
#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}

/**
 * smu_memcpy_trailing - Copy the end of one structure into the middle of another
 *
 * @dst: Pointer to destination struct
 * @first_dst_member: The member name in @dst where the overwrite begins
 * @last_dst_member: The member name in @dst where the overwrite ends after
 * @src: Pointer to the source struct
 * @first_src_member: The member name in @src where the copy begins
 *
 * The BUILD_BUG_ON() enforces at compile time that the trailing region of
 * @src is exactly as large as the targeted region of @dst.
 */
#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	   \
			    src, first_src_member)			   \
({									   \
	size_t __src_offset = offsetof(typeof(*(src)), first_src_member);  \
	size_t __src_size = sizeof(*(src)) - __src_offset;		   \
	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member);  \
	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
			    __dst_offset;				   \
	BUILD_BUG_ON(__src_size != __dst_size);				   \
	__builtin_memcpy((u8 *)(dst) + __dst_offset,			   \
			 (u8 *)(src) + __src_offset,			   \
			 __dst_size);					   \
})

typedef struct {
1652 uint16_t LowFreq; 1653 uint16_t HighFreq; 1654 } WifiOneBand_t; 1655 1656 typedef struct { 1657 uint32_t WifiBandEntryNum; 1658 WifiOneBand_t WifiBandEntry[11]; 1659 uint32_t MmHubPadding[8]; 1660 } WifiBandEntryTable_t; 1661 1662 #define STR_SOC_PSTATE_POLICY "soc_pstate" 1663 #define STR_XGMI_PLPD_POLICY "xgmi_plpd" 1664 1665 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, 1666 enum pp_pm_policy p_type); 1667 1668 static inline enum smu_table_id 1669 smu_metrics_get_temp_table_id(enum smu_temp_metric_type type) 1670 { 1671 switch (type) { 1672 case SMU_TEMP_METRIC_BASEBOARD: 1673 return SMU_TABLE_BASEBOARD_TEMP_METRICS; 1674 case SMU_TEMP_METRIC_GPUBOARD: 1675 return SMU_TABLE_GPUBOARD_TEMP_METRICS; 1676 default: 1677 return SMU_TABLE_COUNT; 1678 } 1679 1680 return SMU_TABLE_COUNT; 1681 } 1682 1683 static inline void smu_table_cache_update_time(struct smu_table *table, 1684 unsigned long time) 1685 { 1686 table->cache.last_cache_time = time; 1687 } 1688 1689 static inline bool smu_table_cache_is_valid(struct smu_table *table) 1690 { 1691 if (!table->cache.buffer || !table->cache.last_cache_time || 1692 !table->cache.interval || !table->cache.size || 1693 time_after(jiffies, 1694 table->cache.last_cache_time + 1695 msecs_to_jiffies(table->cache.interval))) 1696 return false; 1697 1698 return true; 1699 } 1700 1701 static inline int smu_table_cache_init(struct smu_context *smu, 1702 enum smu_table_id table_id, size_t size, 1703 uint32_t cache_interval) 1704 { 1705 struct smu_table_context *smu_table = &smu->smu_table; 1706 struct smu_table *tables = smu_table->tables; 1707 1708 tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL); 1709 if (!tables[table_id].cache.buffer) 1710 return -ENOMEM; 1711 1712 tables[table_id].cache.last_cache_time = 0; 1713 tables[table_id].cache.interval = cache_interval; 1714 tables[table_id].cache.size = size; 1715 1716 return 0; 1717 } 1718 1719 static inline void smu_table_cache_fini(struct smu_context 
*smu, 1720 enum smu_table_id table_id) 1721 { 1722 struct smu_table_context *smu_table = &smu->smu_table; 1723 struct smu_table *tables = smu_table->tables; 1724 1725 if (tables[table_id].cache.buffer) { 1726 kfree(tables[table_id].cache.buffer); 1727 tables[table_id].cache.buffer = NULL; 1728 tables[table_id].cache.last_cache_time = 0; 1729 tables[table_id].cache.interval = 0; 1730 } 1731 } 1732 1733 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) 1734 int smu_get_power_limit(void *handle, 1735 uint32_t *limit, 1736 enum pp_power_limit_level pp_limit_level, 1737 enum pp_power_type pp_power_type); 1738 1739 bool smu_mode1_reset_is_support(struct smu_context *smu); 1740 bool smu_link_reset_is_support(struct smu_context *smu); 1741 int smu_mode1_reset(struct smu_context *smu); 1742 int smu_link_reset(struct smu_context *smu); 1743 1744 extern const struct amd_ip_funcs smu_ip_funcs; 1745 1746 bool is_support_sw_smu(struct amdgpu_device *adev); 1747 bool is_support_cclk_dpm(struct amdgpu_device *adev); 1748 int smu_write_watermarks_table(struct smu_context *smu); 1749 1750 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 1751 uint32_t *min, uint32_t *max); 1752 1753 int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, 1754 uint32_t min, uint32_t max); 1755 1756 int smu_set_gfx_power_up_by_imu(struct smu_context *smu); 1757 1758 int smu_set_ac_dc(struct smu_context *smu); 1759 1760 int smu_set_xgmi_plpd_mode(struct smu_context *smu, 1761 enum pp_xgmi_plpd_mode mode); 1762 1763 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value); 1764 1765 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value); 1766 1767 int smu_set_residency_gfxoff(struct smu_context *smu, bool value); 1768 1769 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value); 1770 1771 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable); 1772 1773 
/* Event waiting, RAS/ECC reporting, STB collection and engine soft-reset
 * entry points exported by the SMU core.
 */
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);

#endif /* !(SWSMU_CODE_LAYER_L2 || SWSMU_CODE_LAYER_L3 || SWSMU_CODE_LAYER_L4) */
#endif /* __AMDGPU_SMU_H__ */