/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __AMDGPU_SMU_H__
#define __AMDGPU_SMU_H__

#include <linux/acpi_amd_wbrf.h>
#include <linux/units.h>

#include "amdgpu.h"
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
#include "dm_pp_smu.h"
#include "smu_types.h"
#include "linux/firmware.h"

#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
#define SMU_FW_NAME_LEN 0x24

#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
#define SMU_CUSTOM_FAN_SPEED_RPM     (1 << 1)
#define SMU_CUSTOM_FAN_SPEED_PWM     (1 << 2)

// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT			0
#define SMU_THROTTLER_PPT1_BIT			1
#define SMU_THROTTLER_PPT2_BIT			2
#define SMU_THROTTLER_PPT3_BIT			3
#define SMU_THROTTLER_SPL_BIT			4
#define SMU_THROTTLER_FPPT_BIT			5
#define SMU_THROTTLER_SPPT_BIT			6
#define SMU_THROTTLER_SPPT_APU_BIT		7

// Current Throttlers
#define SMU_THROTTLER_TDC_GFX_BIT		16
#define SMU_THROTTLER_TDC_SOC_BIT		17
#define SMU_THROTTLER_TDC_MEM_BIT		18
#define SMU_THROTTLER_TDC_VDD_BIT		19
#define SMU_THROTTLER_TDC_CVIP_BIT		20
#define SMU_THROTTLER_EDC_CPU_BIT		21
#define SMU_THROTTLER_EDC_GFX_BIT		22
#define SMU_THROTTLER_APCC_BIT			23

// Temperature
#define SMU_THROTTLER_TEMP_GPU_BIT		32
#define SMU_THROTTLER_TEMP_CORE_BIT		33
#define SMU_THROTTLER_TEMP_MEM_BIT		34
#define SMU_THROTTLER_TEMP_EDGE_BIT		35
#define SMU_THROTTLER_TEMP_HOTSPOT_BIT		36
#define SMU_THROTTLER_TEMP_SOC_BIT		37
#define SMU_THROTTLER_TEMP_VR_GFX_BIT		38
#define SMU_THROTTLER_TEMP_VR_SOC_BIT		39
#define SMU_THROTTLER_TEMP_VR_MEM0_BIT		40
#define SMU_THROTTLER_TEMP_VR_MEM1_BIT		41
#define SMU_THROTTLER_TEMP_LIQUID0_BIT		42
#define SMU_THROTTLER_TEMP_LIQUID1_BIT		43
#define SMU_THROTTLER_VRHOT0_BIT		44
#define SMU_THROTTLER_VRHOT1_BIT		45
#define SMU_THROTTLER_PROCHOT_CPU_BIT		46
#define SMU_THROTTLER_PROCHOT_GFX_BIT		47

// Other
#define SMU_THROTTLER_PPM_BIT			56
#define SMU_THROTTLER_FIT_BIT			57
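
/*
 * The SMU_THROTTLER_*_BIT values above define an ASIC-agnostic 64-bit
 * throttler status layout. As an illustrative sketch (not taken from any
 * particular backend), a per-ASIC driver translates the firmware's own
 * throttler bits into this common layout roughly like so:
 *
 *	u64 indep_status = 0;
 *
 *	if (fw_status & FW_PPT0_BIT)		// hypothetical firmware bit
 *		indep_status |= BIT_ULL(SMU_THROTTLER_PPT0_BIT);
 *	if (fw_status & FW_THM_HOTSPOT_BIT)	// hypothetical firmware bit
 *		indep_status |= BIT_ULL(SMU_THROTTLER_TEMP_HOTSPOT_BIT);
 *
 * Generic consumers (e.g. gpu_metrics) read the resulting mask, so the bit
 * positions here must stay stable.
 */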

struct smu_hw_power_state {
	unsigned int magic;
};

struct smu_power_state;

enum smu_state_ui_label {
	SMU_STATE_UI_LABEL_NONE,
	SMU_STATE_UI_LABEL_BATTERY,
	SMU_STATE_UI_TABEL_MIDDLE_LOW,
	SMU_STATE_UI_LABEL_BALLANCED,
	SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
	SMU_STATE_UI_LABEL_PERFORMANCE,
	SMU_STATE_UI_LABEL_BACO,
};

enum smu_state_classification_flag {
	SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
	SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
	SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
	SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
	SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
	SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
	SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
	SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
	SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
	SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
	SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
	SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
	SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
	SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
};

struct smu_state_classification_block {
	enum smu_state_ui_label ui_label;
	enum smu_state_classification_flag flags;
	int bios_index;
	bool temporary_state;
	bool to_be_deleted;
};

struct smu_state_pcie_block {
	unsigned int lanes;
};

enum smu_refreshrate_source {
	SMU_REFRESHRATE_SOURCE_EDID,
	SMU_REFRESHRATE_SOURCE_EXPLICIT
};

struct smu_state_display_block {
	bool disable_frame_modulation;
	bool limit_refreshrate;
	enum smu_refreshrate_source refreshrate_source;
	int explicit_refreshrate;
	int edid_refreshrate_index;
	bool enable_vari_bright;
};

struct smu_state_memory_block {
	bool dll_off;
	uint8_t m3arb;
	uint8_t unused[3];
};

struct smu_state_software_algorithm_block {
	bool disable_load_balancing;
	bool enable_sleep_for_timestamps;
};

struct smu_temperature_range {
	int min;
	int max;
	int edge_emergency_max;
	int hotspot_min;
	int hotspot_crit_max;
	int hotspot_emergency_max;
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int software_shutdown_temp;
	int software_shutdown_temp_offset;
};

struct smu_state_validation_block {
	bool single_display_only;
	bool disallow_on_dc;
	uint8_t supported_power_levels;
};

struct smu_uvd_clocks {
	uint32_t vclk;
	uint32_t dclk;
};

/**
 * Structure to hold a SMU Power State.
 */
struct smu_power_state {
	uint32_t id;
	struct list_head ordered_list;
	struct list_head all_states_list;

	struct smu_state_classification_block classification;
	struct smu_state_validation_block validation;
	struct smu_state_pcie_block pcie;
	struct smu_state_display_block display;
	struct smu_state_memory_block memory;
	struct smu_state_software_algorithm_block software;
	struct smu_uvd_clocks uvd_clocks;
	struct smu_hw_power_state hardware;
};

enum smu_power_src_type {
	SMU_POWER_SOURCE_AC,
	SMU_POWER_SOURCE_DC,
	SMU_POWER_SOURCE_COUNT,
};

enum smu_ppt_limit_type {
	SMU_DEFAULT_PPT_LIMIT = 0,
	SMU_FAST_PPT_LIMIT,
};

enum smu_ppt_limit_level {
	SMU_PPT_LIMIT_MIN = -1,
	SMU_PPT_LIMIT_CURRENT,
	SMU_PPT_LIMIT_DEFAULT,
	SMU_PPT_LIMIT_MAX,
};

enum smu_memory_pool_size {
	SMU_MEMORY_POOL_SIZE_ZERO = 0,
	SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
	SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
	SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
	SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
};

struct smu_user_dpm_profile {
	uint32_t fan_mode;
	uint32_t power_limit;
	uint32_t fan_speed_pwm;
	uint32_t fan_speed_rpm;
	uint32_t flags;
	uint32_t user_od;

	/* user clock state information */
	uint32_t clk_mask[SMU_CLK_COUNT];
	uint32_t clk_dependency;
};

#define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
	do {						\
		tables[table_id].size = s;		\
		tables[table_id].align = a;		\
		tables[table_id].domain = d;		\
	} while (0)

struct smu_table_cache {
	void *buffer;
	size_t size;
	/* interval in ms */
	uint32_t interval;
	unsigned long last_cache_time;
};

struct smu_table {
	uint64_t size;
	uint32_t align;
	uint8_t domain;
	uint64_t mc_address;
	void *cpu_addr;
	struct amdgpu_bo *bo;
	uint32_t version;
	struct smu_table_cache cache;
};

enum smu_perf_level_designation {
	PERF_LEVEL_ACTIVITY,
	PERF_LEVEL_POWER_CONTAINMENT,
};

struct smu_performance_level {
	uint32_t core_clock;
	uint32_t memory_clock;
	uint32_t vddc;
	uint32_t vddci;
	uint32_t non_local_mem_freq;
	uint32_t non_local_mem_width;
};

struct smu_clock_info {
	uint32_t min_mem_clk;
	uint32_t max_mem_clk;
	uint32_t min_eng_clk;
	uint32_t max_eng_clk;
	uint32_t min_bus_bandwidth;
	uint32_t max_bus_bandwidth;
};

struct smu_bios_boot_up_values {
	uint32_t revision;
	uint32_t gfxclk;
	uint32_t uclk;
	uint32_t socclk;
	uint32_t dcefclk;
	uint32_t eclk;
	uint32_t vclk;
	uint32_t dclk;
	uint16_t vddc;
	uint16_t vddci;
	uint16_t mvddc;
	uint16_t vdd_gfx;
	uint8_t cooling_id;
	uint32_t pp_table_id;
	uint32_t format_revision;
	uint32_t content_revision;
	uint32_t fclk;
	uint32_t lclk;
	uint32_t firmware_caps;
};

enum smu_table_id {
	SMU_TABLE_PPTABLE = 0,
	SMU_TABLE_WATERMARKS,
	SMU_TABLE_CUSTOM_DPM,
	SMU_TABLE_DPMCLOCKS,
	SMU_TABLE_AVFS,
	SMU_TABLE_AVFS_PSM_DEBUG,
	SMU_TABLE_AVFS_FUSE_OVERRIDE,
	SMU_TABLE_PMSTATUSLOG,
	SMU_TABLE_SMU_METRICS,
	SMU_TABLE_DRIVER_SMU_CONFIG,
	SMU_TABLE_ACTIVITY_MONITOR_COEFF,
	SMU_TABLE_OVERDRIVE,
	SMU_TABLE_I2C_COMMANDS,
	SMU_TABLE_PACE,
	SMU_TABLE_ECCINFO,
	SMU_TABLE_COMBO_PPTABLE,
	SMU_TABLE_WIFIBAND,
	SMU_TABLE_GPUBOARD_TEMP_METRICS,
	SMU_TABLE_BASEBOARD_TEMP_METRICS,
	SMU_TABLE_COUNT,
};
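
/*
 * Illustrative use of SMU_TABLE_INIT(): ASIC backends typically size the
 * entries of smu_table_context.tables in their init_smc_tables() callback.
 * The firmware struct names below (PPTable_t, Watermarks_t) are placeholders
 * for whatever the ASIC's SMU interface headers actually define:
 *
 *	struct smu_table *tables = smu->smu_table.tables;
 *
 *	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
 *		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 *	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
 *		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 */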

struct smu_table_context {
	void *power_play_table;
	uint32_t power_play_table_size;
	void *hardcode_pptable;
	unsigned long metrics_time;
	void *metrics_table;
	void *clocks_table;
	void *watermarks_table;

	void *max_sustainable_clocks;
	struct smu_bios_boot_up_values boot_values;
	void *driver_pptable;
	void *combo_pptable;
	void *ecc_table;
	void *driver_smu_config_table;
	struct smu_table tables[SMU_TABLE_COUNT];
	/*
	 * The driver table is just a staging buffer for
	 * uploading/downloading content from the SMU.
	 *
	 * The table_id passed with SMU_MSG_TransferTableSmu2Dram/
	 * SMU_MSG_TransferTableDram2Smu tells the SMU which content
	 * the driver is interested in.
	 */
	struct smu_table driver_table;
	struct smu_table memory_pool;
	struct smu_table dummy_read_1_table;
	uint8_t thermal_controller_type;

	void *overdrive_table;
	void *boot_overdrive_table;
	void *user_overdrive_table;

	uint32_t gpu_metrics_table_size;
	void *gpu_metrics_table;
};
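
/*
 * Sketch of how the driver_table staging buffer above is typically used to
 * read a table back from the SMU (simplified; real code goes through the
 * common helpers and holds smu->message_lock):
 *
 *	// 1) Ask the firmware to copy the table into the driver table in DRAM.
 *	smu->ppt_funcs->send_smc_msg_with_param(smu,
 *			SMU_MSG_TransferTableSmu2Dram, table_id, NULL);
 *	// 2) Copy out of the staging buffer into a driver-owned structure.
 *	memcpy(dest, smu->smu_table.driver_table.cpu_addr, size);
 *
 * Writes go the other way: memcpy into cpu_addr first, then send
 * SMU_MSG_TransferTableDram2Smu.
 */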

struct smu_context;
struct smu_dpm_policy;

struct smu_dpm_policy_desc {
	const char *name;
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};

struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	unsigned long level_mask;
	int current_level;
	int (*set_policy)(struct smu_context *ctxt, int level);
};

struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};

struct smu_dpm_context {
	uint32_t dpm_context_size;
	void *dpm_context;
	void *golden_dpm_context;
	enum amd_dpm_forced_level dpm_level;
	enum amd_dpm_forced_level saved_dpm_level;
	enum amd_dpm_forced_level requested_dpm_level;
	struct smu_power_state *dpm_request_power_state;
	struct smu_power_state *dpm_current_power_state;
	struct mclock_latency_table *mclk_latency_table;
	struct smu_dpm_policy_ctxt *dpm_policies;
};

struct smu_temp_context {
	const struct smu_temp_funcs *temp_funcs;
};

struct smu_power_gate {
	bool uvd_gated;
	bool vce_gated;
	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
	atomic_t jpeg_gated;
	atomic_t vpe_gated;
	atomic_t isp_gated;
	atomic_t umsch_mm_gated;
};

struct smu_power_context {
	void *power_context;
	uint32_t power_context_size;
	struct smu_power_gate power_gate;
};

#define SMU_FEATURE_MAX (64)
struct smu_feature {
	uint32_t feature_num;
	DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
	DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
};

struct smu_clocks {
	uint32_t engine_clock;
	uint32_t memory_clock;
	uint32_t bus_bandwidth;
	uint32_t engine_clock_in_sr;
	uint32_t dcef_clock;
	uint32_t dcef_clock_in_sr;
};

#define MAX_REGULAR_DPM_NUM 16
struct mclk_latency_entries {
	uint32_t frequency;
	uint32_t latency;
};
struct mclock_latency_table {
	uint32_t count;
	struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};

enum smu_reset_mode {
	SMU_RESET_MODE_0,
	SMU_RESET_MODE_1,
	SMU_RESET_MODE_2,
	SMU_RESET_MODE_3,
	SMU_RESET_MODE_4,
};

enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
	SMU_BACO_STATE_NONE,
};

struct smu_baco_context {
	uint32_t state;
	bool platform_support;
	bool maco_support;
};

struct smu_freq_info {
	uint32_t min;
	uint32_t max;
	uint32_t freq_level;
};

struct pstates_clk_freq {
	uint32_t min;
	uint32_t standard;
	uint32_t peak;
	struct smu_freq_info custom;
	struct smu_freq_info curr;
};

struct smu_umd_pstate_table {
	struct pstates_clk_freq gfxclk_pstate;
	struct pstates_clk_freq socclk_pstate;
	struct pstates_clk_freq uclk_pstate;
	struct pstates_clk_freq vclk_pstate;
	struct pstates_clk_freq dclk_pstate;
	struct pstates_clk_freq fclk_pstate;
};

struct cmn2asic_msg_mapping {
	int valid_mapping;
	int map_to;
	uint32_t flags;
};

struct cmn2asic_mapping {
	int valid_mapping;
	int map_to;
};

struct stb_context {
	uint32_t stb_buf_size;
	bool enabled;
	spinlock_t lock;
};

enum smu_fw_status {
	SMU_FW_INIT = 0,
	SMU_FW_RUNTIME,
	SMU_FW_HANG,
};

#define WORKLOAD_POLICY_MAX 7

/*
 * Pace the wbrf event handling: at most one event is processed every
 * SMU_WBRF_EVENT_HANDLING_PACE ms.
 */
#define SMU_WBRF_EVENT_HANDLING_PACE 10

struct smu_context {
	struct amdgpu_device *adev;
	struct amdgpu_irq_src irq_source;

	const struct pptable_funcs *ppt_funcs;
	const struct cmn2asic_msg_mapping *message_map;
	const struct cmn2asic_mapping *clock_map;
	const struct cmn2asic_mapping *feature_map;
	const struct cmn2asic_mapping *table_map;
	const struct cmn2asic_mapping *pwr_src_map;
	const struct cmn2asic_mapping *workload_map;
	struct mutex message_lock;
	uint64_t pool_size;

	struct smu_table_context smu_table;
	struct smu_dpm_context smu_dpm;
	struct smu_power_context smu_power;
	struct smu_temp_context smu_temp;
	struct smu_feature smu_feature;
	struct amd_pp_display_configuration *display_config;
	struct smu_baco_context smu_baco;
	struct smu_temperature_range thermal_range;
	void *od_settings;

	struct smu_umd_pstate_table pstate_table;
	uint32_t pstate_sclk;
	uint32_t pstate_mclk;

	bool od_enabled;
	uint32_t current_power_limit;
	uint32_t default_power_limit;
	uint32_t max_power_limit;
	uint32_t min_power_limit;

	/* soft pptable */
	uint32_t ppt_offset_bytes;
	uint32_t ppt_size_bytes;
	uint8_t *ppt_start_addr;

	bool support_power_containment;
	bool disable_watermark;

#define WATERMARKS_EXIST	(1 << 0)
#define WATERMARKS_LOADED	(1 << 1)
	uint32_t watermarks_bitmap;
	uint32_t hard_min_uclk_req_from_dal;
	bool disable_uclk_switch;

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
	/* backend specific custom workload settings */
	long *custom_profile_params;
	bool pm_enabled;
	bool is_apu;

	uint32_t smc_driver_if_version;
	uint32_t smc_fw_if_version;
	uint32_t smc_fw_version;
	uint32_t smc_fw_caps;
	uint8_t smc_fw_state;

	bool uploading_custom_pp_table;
	bool dc_controlled_by_gpio;

	struct work_struct throttling_logging_work;
	atomic64_t throttle_int_counter;
	struct work_struct interrupt_work;

	unsigned fan_max_rpm;
	unsigned manual_fan_speed_pwm;

	uint32_t gfx_default_hard_min_freq;
	uint32_t gfx_default_soft_max_freq;
	uint32_t gfx_actual_hard_min_freq;
	uint32_t gfx_actual_soft_max_freq;

	/* APU only */
	uint32_t cpu_default_soft_min_freq;
	uint32_t cpu_default_soft_max_freq;
	uint32_t cpu_actual_soft_min_freq;
	uint32_t cpu_actual_soft_max_freq;
	uint32_t cpu_core_id_select;
	uint16_t cpu_core_num;

	struct smu_user_dpm_profile user_dpm_profile;

	struct stb_context stb_context;

	struct firmware pptable_firmware;

	u32 param_reg;
	u32 msg_reg;
	u32 resp_reg;

	u32 debug_param_reg;
	u32 debug_msg_reg;
	u32 debug_resp_reg;

	struct delayed_work swctf_delayed_work;

	/* data structures for wbrf feature support */
	bool wbrf_supported;
	struct notifier_block wbrf_notifier;
	struct delayed_work wbrf_delayed_work;
};

struct i2c_adapter;

/**
 * struct smu_temp_funcs - Callbacks used to get temperature data.
 */
struct smu_temp_funcs {
	/**
	 * @get_temp_metrics: Get the temperature metrics table for the given
	 * board type.
	 * @type: Temperature metrics type (baseboard/gpuboard)
	 * Return: Size of &table
	 */
	ssize_t (*get_temp_metrics)(struct smu_context *smu,
				    enum smu_temp_metric_type type, void *table);

	/**
	 * @temp_metrics_is_supported: Check whether the given temperature
	 * metrics type is supported.
	 * @type: Temperature metrics type (baseboard/gpuboard)
	 * Return: true if supported else false
	 */
	bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);

};

/**
 * struct pptable_funcs - Callbacks used to interact with the SMU.
 */
struct pptable_funcs {
	/**
	 * @run_btc: Calibrate voltage/frequency curve to fit the system's
	 * power delivery and voltage margins. Required for adaptive
	 * voltage frequency scaling (AVFS).
	 */
	int (*run_btc)(struct smu_context *smu);

	/**
	 * @get_allowed_feature_mask: Get allowed feature mask.
	 * &feature_mask: Array to store feature mask.
	 * &num: Elements in &feature_mask.
	 */
	int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);

	/**
	 * @get_current_power_state: Get the current power state.
	 *
	 * Return: Current power state on success, negative errno on failure.
	 */
	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);

	/**
	 * @set_default_dpm_table: Retrieve the default overdrive settings from
	 * the SMU.
	 */
	int (*set_default_dpm_table)(struct smu_context *smu);

	int (*set_power_state)(struct smu_context *smu);

	/**
	 * @populate_umd_state_clk: Populate the UMD power state table with
	 * defaults.
	 */
	int (*populate_umd_state_clk)(struct smu_context *smu);

	/**
	 * @print_clk_levels: Print DPM clock levels for a clock domain
	 * to buffer, marking the current level with an asterisk.
	 *
	 * Used for sysfs interfaces.
	 * Return: Number of characters written to the buffer.
	 */
	int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);

	/**
	 * @emit_clk_levels: Print DPM clock levels for a clock domain
	 * to buffer using sysfs_emit_at, marking the current level with an
	 * asterisk.
	 *
	 * Used for sysfs interfaces.
	 * &buf: sysfs buffer
	 * &offset: offset within buffer to start printing, which is updated by the
	 * function.
	 *
	 * Return: 0 on success or a negative error code on failure.
	 */
	int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
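
	/*
	 * Illustrative emit_clk_levels() skeleton (not taken from a specific
	 * backend), showing how &offset is advanced with sysfs_emit_at() so
	 * several clock domains can share one sysfs buffer. "freq" and
	 * "is_current" stand in for values read from the ASIC's DPM table:
	 *
	 *	static int example_emit_clk_levels(struct smu_context *smu,
	 *					   enum smu_clk_type clk_type,
	 *					   char *buf, int *offset)
	 *	{
	 *		// one line per DPM level, '*' marks the current level
	 *		*offset += sysfs_emit_at(buf, *offset, "0: %uMhz %s\n",
	 *					 freq, is_current ? "*" : "");
	 *		return 0;
	 *	}
	 */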

	/**
	 * @force_clk_levels: Set a range of allowed DPM levels for a clock
	 * domain.
	 * &clk_type: Clock domain.
	 * &mask: Range of allowed DPM levels.
	 */
	int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);

	/**
	 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
	 * &type: Type of edit.
	 * &input: Edit parameters.
	 * &size: Size of &input.
	 */
	int (*od_edit_dpm_table)(struct smu_context *smu,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size);

	/**
	 * @restore_user_od_settings: Restore the user customized
	 * OD settings on S3/S4/Runpm resume.
	 */
	int (*restore_user_od_settings)(struct smu_context *smu);

	/**
	 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
	 * domain.
	 */
	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
					      enum smu_clk_type clk_type,
					      struct pp_clock_levels_with_latency *clocks);
	/**
	 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
	 * domain.
	 */
	int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks);

	/**
	 * @get_power_profile_mode: Print all power profile modes to
	 * buffer, marking the current mode with an asterisk.
	 */
	int (*get_power_profile_mode)(struct smu_context *smu, char *buf);

	/**
	 * @set_power_profile_mode: Set a power profile mode. Also used to
	 * create/set custom power profile modes.
	 * &workload_mask: mask of workloads to enable
	 * &custom_params: custom profile parameters
	 * &custom_params_max_idx: max valid idx into custom_params
	 */
	int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
				      long *custom_params, u32 custom_params_max_idx);

	/**
	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
	 * management.
	 */
	int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);

	/**
	 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
	 * management.
	 */
	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);

	/**
	 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
	 */
	int (*set_gfx_power_up_by_imu)(struct smu_context *smu);

	/**
	 * @read_sensor: Read data from a sensor.
	 * &sensor: Sensor to read data from.
	 * &data: Sensor reading.
	 * &size: Size of &data.
	 */
	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size);

	/**
	 * @get_apu_thermal_limit: Get the APU core limit from the SMU.
	 * &limit: current limit temperature in millidegrees Celsius
	 */
	int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);

	/**
	 * @set_apu_thermal_limit: Update all controllers with a new limit.
	 * &limit: limit temperature to be set, in millidegrees Celsius
	 */
	int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);

	/**
	 * @pre_display_config_changed: Prepare GPU for a display configuration
	 * change.
	 *
	 * Disable display tracking and pin memory clock speed to maximum. Used
	 * in display component synchronization.
	 */
	int (*pre_display_config_changed)(struct smu_context *smu);

	/**
	 * @display_config_changed: Notify the SMU of the current display
	 * configuration.
	 *
	 * Allows SMU to properly track blanking periods for memory clock
	 * adjustment. Used in display component synchronization.
	 */
	int (*display_config_changed)(struct smu_context *smu);

	int (*apply_clocks_adjust_rules)(struct smu_context *smu);

	/**
	 * @notify_smc_display_config: Applies display requirements to the
	 * current power state.
	 *
	 * Optimize deep sleep DCEFclk and mclk for the current display
	 * configuration. Used in display component synchronization.
	 */
	int (*notify_smc_display_config)(struct smu_context *smu);

	/**
	 * @is_dpm_running: Check if DPM is running.
	 *
	 * Return: True if DPM is running, false otherwise.
	 */
	bool (*is_dpm_running)(struct smu_context *smu);

	/**
	 * @get_fan_speed_pwm: Get the current fan speed in PWM.
	 */
	int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);

	/**
	 * @get_fan_speed_rpm: Get the current fan speed in rpm.
	 */
	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);

	/**
	 * @set_watermarks_table: Configure and upload the watermarks tables to
	 * the SMU.
	 */
	int (*set_watermarks_table)(struct smu_context *smu,
				    struct pp_smu_wm_range_sets *clock_ranges);

	/**
	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
	 */
	int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);

	/**
	 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
	 * &clocks_in_khz: Array of DPM levels.
	 * &num_states: Elements in &clocks_in_khz.
	 */
	int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);

	/**
	 * @set_default_od_settings: Set the overdrive tables to defaults.
	 */
	int (*set_default_od_settings)(struct smu_context *smu);

	/**
	 * @set_performance_level: Set a performance level.
	 */
	int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);

	/**
	 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
	 * clock switching.
	 *
	 * Disabling this feature forces memory clock speed to maximum.
	 * Enabling sets the minimum memory clock capable of driving the
	 * current display configuration.
	 */
	int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);

	/**
	 * @get_power_limit: Get the device's power limits.
	 */
	int (*get_power_limit)(struct smu_context *smu,
			       uint32_t *current_power_limit,
			       uint32_t *default_power_limit,
			       uint32_t *max_power_limit,
			       uint32_t *min_power_limit);

	/**
	 * @get_ppt_limit: Get the device's ppt limits.
	 */
	int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
			     enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
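
	/*
	 * Illustrative sketch of querying a power limit through the callback
	 * above (simplified; the real entry points live in the generic smu
	 * code). SMU_DEFAULT_PPT_LIMIT selects the sustained limit, while
	 * SMU_FAST_PPT_LIMIT selects the short-window limit on ASICs that
	 * expose one:
	 *
	 *	uint32_t fast_ppt_max = 0;
	 *
	 *	if (smu->ppt_funcs->get_ppt_limit)
	 *		smu->ppt_funcs->get_ppt_limit(smu, &fast_ppt_max,
	 *					      SMU_FAST_PPT_LIMIT,
	 *					      SMU_PPT_LIMIT_MAX);
	 */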

	/**
	 * @set_df_cstate: Set data fabric cstate.
	 */
	int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);

	/**
	 * @update_pcie_parameters: Update and upload the system's PCIe
	 * capabilities to the SMU.
	 * &pcie_gen_cap: Maximum allowed PCIe generation.
	 * &pcie_width_cap: Maximum allowed PCIe width.
	 */
	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);

	/**
	 * @i2c_init: Initialize i2c.
	 *
	 * The i2c bus is used internally by the SMU voltage regulators and
	 * other devices. The EEPROM on this bus also stores bad page tables on
	 * boards with ECC.
	 */
	int (*i2c_init)(struct smu_context *smu);

	/**
	 * @i2c_fini: Tear down i2c.
	 */
	void (*i2c_fini)(struct smu_context *smu);

	/**
	 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
	 */
	void (*get_unique_id)(struct smu_context *smu);

	/**
	 * @get_dpm_clock_table: Get a copy of the DPM clock table.
	 *
	 * Used by display component in bandwidth and watermark calculations.
	 */
	int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);

	/**
	 * @init_microcode: Request the SMU's firmware from the kernel.
	 */
	int (*init_microcode)(struct smu_context *smu);

	/**
	 * @load_microcode: Load firmware onto the SMU.
	 */
	int (*load_microcode)(struct smu_context *smu);

	/**
	 * @fini_microcode: Release the SMU's firmware.
	 */
	void (*fini_microcode)(struct smu_context *smu);

	/**
	 * @init_smc_tables: Initialize the SMU tables.
	 */
	int (*init_smc_tables)(struct smu_context *smu);

	/**
	 * @fini_smc_tables: Release the SMU tables.
	 */
	int (*fini_smc_tables)(struct smu_context *smu);

	/**
	 * @init_power: Initialize the power gate table context.
	 */
	int (*init_power)(struct smu_context *smu);

	/**
	 * @fini_power: Release the power gate table context.
	 */
	int (*fini_power)(struct smu_context *smu);

	/**
	 * @check_fw_status: Check the SMU's firmware status.
	 *
	 * Return: Zero if check passes, negative errno on failure.
	 */
	int (*check_fw_status)(struct smu_context *smu);

	/**
	 * @set_mp1_state: Put the SMU into the correct state for the coming
	 * resume from runpm or GPU reset.
	 */
	int (*set_mp1_state)(struct smu_context *smu,
			     enum pp_mp1_state mp1_state);

	/**
	 * @setup_pptable: Initialize the power play table and populate it with
	 * default values.
	 */
	int (*setup_pptable)(struct smu_context *smu);

	/**
	 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
	 */
	int (*get_vbios_bootup_values)(struct smu_context *smu);

	/**
	 * @check_fw_version: Print driver and SMU interface versions to the
	 * system log.
	 *
	 * Interface mismatch is not a critical failure.
	 */
	int (*check_fw_version)(struct smu_context *smu);

	/**
	 * @powergate_sdma: Power up/down system direct memory access.
	 */
	int (*powergate_sdma)(struct smu_context *smu, bool gate);

	/**
	 * @set_gfx_cgpg: Enable/disable graphics engine coarse-grain power
	 * gating.
	 */
	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);

	/**
	 * @write_pptable: Write the power play table to the SMU.
	 */
	int (*write_pptable)(struct smu_context *smu);

	/**
	 * @set_driver_table_location: Send the location of the driver table to
	 * the SMU.
	 */
	int (*set_driver_table_location)(struct smu_context *smu);

	/**
	 * @set_tool_table_location: Send the location of the tool table to the
	 * SMU.
	 */
	int (*set_tool_table_location)(struct smu_context *smu);

	/**
	 * @notify_memory_pool_location: Send the location of the memory pool to
	 * the SMU.
	 */
	int (*notify_memory_pool_location)(struct smu_context *smu);

	/**
	 * @system_features_control: Enable/disable all SMU features.
	 */
	int (*system_features_control)(struct smu_context *smu, bool en);

	/**
	 * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
	 * &msg: Type of message.
	 * &param: Message parameter.
	 * &read_arg: SMU response (optional).
	 */
	int (*send_smc_msg_with_param)(struct smu_context *smu,
				       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);

	/**
	 * @send_smc_msg: Send a message to the SMU.
	 * &msg: Type of message.
	 * &read_arg: SMU response (optional).
	 */
	int (*send_smc_msg)(struct smu_context *smu,
			    enum smu_message_type msg,
			    uint32_t *read_arg);
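
	/*
	 * Illustrative message flow behind the two callbacks above
	 * (simplified; real callers go through the common smu_cmn helpers,
	 * which hold smu->message_lock and translate SMU_MSG_* indices via
	 * smu->message_map):
	 *
	 *	uint32_t smu_version;
	 *	int ret;
	 *
	 *	ret = smu->ppt_funcs->send_smc_msg_with_param(smu,
	 *			SMU_MSG_GetSmuVersion, 0, &smu_version);
	 *	if (ret)
	 *		return ret;
	 *
	 * Passing NULL for &read_arg is fine when the response payload is not
	 * needed.
	 */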

	/**
	 * @init_display_count: Notify the SMU of the number of display
	 * components in the current display configuration.
	 */
	int (*init_display_count)(struct smu_context *smu, uint32_t count);

	/**
	 * @set_allowed_mask: Notify the SMU of the features currently allowed
	 * by the driver.
	 */
	int (*set_allowed_mask)(struct smu_context *smu);

	/**
	 * @get_enabled_mask: Get a mask of features that are currently enabled
	 * on the SMU.
	 * &feature_mask: Enabled feature mask.
	 */
	int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask);

	/**
	 * @feature_is_enabled: Test if a feature is enabled.
	 *
	 * Return: One if enabled, zero if disabled.
	 */
	int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);

	/**
	 * @disable_all_features_with_exception: Disable all features with
	 * exception to those in &mask.
	 */
	int (*disable_all_features_with_exception)(struct smu_context *smu,
						   enum smu_feature_mask mask);

	/**
	 * @notify_display_change: General interface call to let SMU know about DC change.
	 */
	int (*notify_display_change)(struct smu_context *smu);

	/**
	 * @set_power_limit: Set power limit in watts.
	 */
	int (*set_power_limit)(struct smu_context *smu,
			       enum smu_ppt_limit_type limit_type,
			       uint32_t limit);

	/**
	 * @init_max_sustainable_clocks: Populate max sustainable clock speed
	 * table with values from the SMU.
	 */
	int (*init_max_sustainable_clocks)(struct smu_context *smu);

	/**
	 * @enable_thermal_alert: Enable thermal alert interrupts.
	 */
	int (*enable_thermal_alert)(struct smu_context *smu);

	/**
	 * @disable_thermal_alert: Disable thermal alert interrupts.
	 */
	int (*disable_thermal_alert)(struct smu_context *smu);

	/**
	 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
	 * clock speed in MHz.
	 */
	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);

	/**
	 * @display_clock_voltage_request: Set a hard minimum frequency
	 * for a clock domain.
	 */
	int (*display_clock_voltage_request)(struct smu_context *smu,
					     struct pp_display_clock_request *clock_req);

	/**
	 * @get_fan_control_mode: Get the current fan control mode.
	 */
	uint32_t (*get_fan_control_mode)(struct smu_context *smu);

	/**
	 * @set_fan_control_mode: Set the fan control mode.
	 */
	int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);

	/**
	 * @set_fan_speed_pwm: Set a static fan speed in PWM.
	 */
	int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_fan_speed_rpm: Set a static fan speed in rpm.
	 */
	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);

	/**
	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
	 * &pstate: Pstate to set. D0 if nonzero, D3 otherwise.
	 */
	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);

	/**
	 * @gfx_off_control: Enable/disable graphics engine poweroff.
	 */
	int (*gfx_off_control)(struct smu_context *smu, bool enable);


	/**
	 * @get_gfx_off_status: Get graphics engine poweroff status.
	 *
	 * Return:
	 * 0 - GFXOFF (default).
	 * 1 - Transition out of GFX State.
	 * 2 - Not in GFXOFF.
	 * 3 - Transition into GFXOFF.
	 */
	uint32_t (*get_gfx_off_status)(struct smu_context *smu);

	/**
	 * @get_gfx_off_entrycount: Total GFXOFF entry count at the time of
	 * query since system power-up.
	 */
	u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);

	/**
	 * @set_gfx_off_residency: Set 1 to start logging, 0 to stop logging.
	 */
	u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);

	/**
	 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval.
	 */
	u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);

	/**
	 * @register_irq_handler: Register interrupt request handlers.
	 */
	int (*register_irq_handler)(struct smu_context *smu);

	/**
	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
	 */
	int (*set_azalia_d3_pme)(struct smu_context *smu);

	/**
	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
	 * clock speeds table.
	 *
	 * Provides a way for the display component (DC) to get the max
	 * sustainable clocks from the SMU.
	 */
	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);

	/**
	 * @get_bamaco_support: Check if GPU supports BACO/MACO
	 * BACO: Bus Active, Chip Off
	 * MACO: Memory Active, Chip Off
	 */
	int (*get_bamaco_support)(struct smu_context *smu);

	/**
	 * @baco_get_state: Get the current BACO state.
	 *
	 * Return: Current BACO state.
	 */
	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);

	/**
	 * @baco_set_state: Enter/exit BACO.
	 */
	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);

	/**
	 * @baco_enter: Enter BACO.
	 */
	int (*baco_enter)(struct smu_context *smu);

	/**
	 * @baco_exit: Exit BACO.
	 */
	int (*baco_exit)(struct smu_context *smu);

	/**
	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
	 */
	bool (*mode1_reset_is_support)(struct smu_context *smu);

	/**
	 * @link_reset_is_support: Check if GPU supports link reset.
	 */
	bool (*link_reset_is_support)(struct smu_context *smu);

	/**
	 * @mode1_reset: Perform mode1 reset.
	 *
	 * Complete GPU reset.
	 */
	int (*mode1_reset)(struct smu_context *smu);

	/**
	 * @mode2_reset: Perform mode2 reset.
	 *
	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
	 * IPs reset varies by asic.
	 */
	int (*mode2_reset)(struct smu_context *smu);
	/* for gfx feature enablement after mode2 reset */
	int (*enable_gfx_features)(struct smu_context *smu);

	/**
	 * @link_reset: Perform link reset.
	 *
	 * The gfx device driver reset
	 */
	int (*link_reset)(struct smu_context *smu);

	/**
	 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
	 * domain in MHz.
	 */
	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);

	/**
	 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
	 * domain in MHz.
	 */
	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
					   bool automatic);

	/**
	 * @set_power_source: Notify the SMU of the current power source.
	 */
	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);

	/**
	 * @log_thermal_throttling_event: Print a thermal throttling warning to
	 * the system's log.
	 */
	void (*log_thermal_throttling_event)(struct smu_context *smu);

	/**
	 * @get_pp_feature_mask: Print a human readable table of enabled
	 * features to buffer.
	 */
	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);

	/**
	 * @set_pp_feature_mask: Request the SMU enable/disable features to
	 * match those enabled in &new_mask.
	 */
	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);

	/**
	 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
	 *
	 * Return: Size of &table
	 */
	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);

	/**
	 * @get_pm_metrics: Get one snapshot of power management metrics from
	 * PMFW.
	 *
	 * Return: Size of the metrics sample
	 */
	ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
				  size_t size);

	/**
	 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
	 */
	int (*enable_mgpu_fan_boost)(struct smu_context *smu);

	/**
	 * @gfx_ulv_control: Enable/disable ultra low voltage.
	 */
	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);

	/**
	 * @deep_sleep_control: Enable/disable deep sleep.
	 */
	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);

	/**
	 * @get_fan_parameters: Get fan parameters.
	 *
	 * Get maximum fan speed from the power play table.
	 */
	int (*get_fan_parameters)(struct smu_context *smu);

	/**
	 * @post_init: Helper function for asic specific workarounds.
	 */
	int (*post_init)(struct smu_context *smu);

	/**
	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
	 */
	void (*interrupt_work)(struct smu_context *smu);

	/**
	 * @gpo_control: Enable/disable graphics power optimization if supported.
	 */
	int (*gpo_control)(struct smu_context *smu, bool enablement);

	/**
	 * @gfx_state_change_set: Send the current graphics state to the SMU.
	 */
	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);

	/**
	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
	 * parameters to defaults.
	 */
	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);

	/**
	 * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
	 */
	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);

	/**
	 * @wait_for_event: Wait for events from SMU.
	 */
	int (*wait_for_event)(struct smu_context *smu,
			      enum smu_event_type event, uint64_t event_arg);

	/**
	 * @send_hbm_bad_pages_num: Message the SMU to update the bad page
	 * number in the SMUBUS table.
	 */
	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);

	/**
	 * @send_rma_reason: Message the RMA reason event to the SMU.
	 */
	int (*send_rma_reason)(struct smu_context *smu);

	/**
	 * @reset_sdma: Message the SMU to soft reset an SDMA instance.
	 */
	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
	/**
	 * @reset_sdma_is_supported: Check whether SDMA soft reset is supported.
	 */
	bool (*reset_sdma_is_supported)(struct smu_context *smu);

	/**
	 * @dpm_reset_vcn: Message the SMU to soft reset a VCN instance.
	 */
	int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);

	/**
	 * @get_ecc_info: Message the SMU to get the ECC INFO table.
	 */
	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);


	/**
	 * @stb_collect_info: Collects Smart Trace Buffer data.
	 */
	int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);

	/**
	 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
	 */
	int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @set_config_table: Apply the input DriverSmuConfig table settings.
	 */
	int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);

	/**
	 * @send_hbm_bad_channel_flag: Message the SMU to update the bad channel
	 * info in the SMUBUS table.
	 */
	int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);

	/**
	 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP.
	 */
	int (*init_pptable_microcode)(struct smu_context *smu);

	/**
	 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
	 * management.
	 */
	int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);

	/**
	 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
	 * management.
	 */
	int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);

	/**
	 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
	 * management.
	 */
	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);

	/**
	 * @set_mall_enable: Init MALL power gating control.
	 */
	int (*set_mall_enable)(struct smu_context *smu);

	/**
	 * @notify_rlc_state: Notify RLC power state to SMU.
	 */
	int (*notify_rlc_state)(struct smu_context *smu, bool en);

	/**
	 * @is_asic_wbrf_supported: Check whether the PMFW supports the wbrf feature.
	 */
	bool (*is_asic_wbrf_supported)(struct smu_context *smu);

	/**
	 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf-supported ASICs.
	 */
	int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);

	/**
	 * @set_wbrf_exclusion_ranges: Notify the SMU of the frequency bands
	 * occupied by WiFi.
	 */
	int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
					 struct freq_band_range *exclusion_ranges);
	/**
	 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
	 * Return: Size of table
	 */
	ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
				   void *table);
};

typedef enum {
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	METRICS_THROTTLER_STATUS,
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;

enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,
	CMN2ASIC_MAPPING_CLK,
	CMN2ASIC_MAPPING_FEATURE,
	CMN2ASIC_MAPPING_TABLE,
	CMN2ASIC_MAPPING_PWR,
	CMN2ASIC_MAPPING_WORKLOAD,
};

enum smu_baco_seq {
	BACO_SEQ_BACO = 0,
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,
};

#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}
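
/*
 * Illustrative use of the mapping macros above: each ASIC backend builds
 * cmn2asic tables that translate generic SMU_* identifiers into the
 * firmware-specific indices, then hooks them up via smu->message_map,
 * smu->table_map, etc. The PPSMC_/TABLE_ index names below are placeholders
 * for the ASIC's firmware headers:
 *
 *	static const struct cmn2asic_msg_mapping example_message_map[SMU_MSG_MAX_COUNT] = {
 *		MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
 *	};
 *
 *	static const struct cmn2asic_mapping example_table_map[SMU_TABLE_COUNT] = {
 *		TAB_MAP(PPTABLE),
 *		TAB_MAP(WATERMARKS),
 *	};
 */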

/**
 * smu_memcpy_trailing - Copy the end of one structure into the middle of another
 *
 * @dst: Pointer to destination struct
 * @first_dst_member: The member name in @dst where the overwrite begins
 * @last_dst_member: The last member in @dst that is overwritten (the copy
 *                   ends after this member)
 * @src: Pointer to the source struct
 * @first_src_member: The member name in @src where the copy begins
 *
 */
#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	   \
			    src, first_src_member)			   \
({									   \
	size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
	size_t __src_size = sizeof(*(src)) - __src_offset;		   \
	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
			    __dst_offset;				   \
	BUILD_BUG_ON(__src_size != __dst_size);				   \
	__builtin_memcpy((u8 *)(dst) + __dst_offset,			   \
			 (u8 *)(src) + __src_offset,			   \
			 __dst_size);					   \
})
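
/*
 * Illustrative use of smu_memcpy_trailing(): copy everything from member
 * "I2cControllers" to the end of a firmware-provided struct into the span of
 * the driver pptable that ends at member "BoardReserved". The struct and
 * member names are placeholders; the only requirement is that the two spans
 * have the same size, which BUILD_BUG_ON() enforces at compile time:
 *
 *	smu_memcpy_trailing(smc_pptable, I2cControllers, BoardReserved,
 *			    smc_dpm_table, I2cControllers);
 */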

typedef struct {
	uint16_t LowFreq;
	uint16_t HighFreq;
} WifiOneBand_t;

typedef struct {
	uint32_t WifiBandEntryNum;
	WifiOneBand_t WifiBandEntry[11];
	uint32_t MmHubPadding[8];
} WifiBandEntryTable_t;

#define STR_SOC_PSTATE_POLICY "soc_pstate"
#define STR_XGMI_PLPD_POLICY "xgmi_plpd"

struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type);

static inline enum smu_table_id
smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
{
	switch (type) {
	case SMU_TEMP_METRIC_BASEBOARD:
		return SMU_TABLE_BASEBOARD_TEMP_METRICS;
	case SMU_TEMP_METRIC_GPUBOARD:
		return SMU_TABLE_GPUBOARD_TEMP_METRICS;
	default:
		return SMU_TABLE_COUNT;
	}
}

static inline void smu_table_cache_update_time(struct smu_table *table,
					       unsigned long time)
{
	table->cache.last_cache_time = time;
}

static inline bool smu_table_cache_is_valid(struct smu_table *table)
{
	if (!table->cache.buffer || !table->cache.last_cache_time ||
	    !table->cache.interval || !table->cache.size ||
	    time_after(jiffies,
		       table->cache.last_cache_time +
		       msecs_to_jiffies(table->cache.interval)))
		return false;

	return true;
}

static inline int smu_table_cache_init(struct smu_context *smu,
				       enum smu_table_id table_id, size_t size,
				       uint32_t cache_interval)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
	if (!tables[table_id].cache.buffer)
		return -ENOMEM;

	tables[table_id].cache.last_cache_time = 0;
	tables[table_id].cache.interval = cache_interval;
	tables[table_id].cache.size = size;

	return 0;
}

static inline void smu_table_cache_fini(struct smu_context *smu,
					enum smu_table_id table_id)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	if (tables[table_id].cache.buffer) {
		kfree(tables[table_id].cache.buffer);
		tables[table_id].cache.buffer = NULL;
		tables[table_id].cache.last_cache_time = 0;
		tables[table_id].cache.interval = 0;
	}
}

#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type);

bool smu_mode1_reset_is_support(struct smu_context *smu);
bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
int smu_link_reset(struct smu_context *smu);

extern const struct amd_ip_funcs smu_ip_funcs;

bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max);

int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
			    uint32_t min, uint32_t max);

int smu_set_gfx_power_up_by_imu(struct smu_context *smu);

int smu_set_ac_dc(struct smu_context *smu);

int smu_set_xgmi_plpd_mode(struct smu_context *smu,
			   enum pp_xgmi_plpd_mode mode);

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);

int smu_set_residency_gfxoff(struct smu_context *smu, bool value);

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);

#endif
#endif