1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #ifndef __AMDGPU_SMU_H__
23 #define __AMDGPU_SMU_H__
24
25 #include <linux/acpi_amd_wbrf.h>
26 #include <linux/units.h>
27
28 #include "amdgpu.h"
29 #include "kgd_pp_interface.h"
30 #include "dm_pp_interface.h"
31 #include "dm_pp_smu.h"
32 #include "smu_types.h"
33 #include "linux/firmware.h"
34
35 #define SMU_THERMAL_MINIMUM_ALERT_TEMP 0
36 #define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255
37 #define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
38 #define SMU_FW_NAME_LEN 0x24
39
40 #define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
41 #define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1)
42 #define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2)
43
44 #define SMU_GPU_METRICS_CACHE_INTERVAL 5
45
46 // Power Throttlers
47 #define SMU_THROTTLER_PPT0_BIT 0
48 #define SMU_THROTTLER_PPT1_BIT 1
49 #define SMU_THROTTLER_PPT2_BIT 2
50 #define SMU_THROTTLER_PPT3_BIT 3
51 #define SMU_THROTTLER_SPL_BIT 4
52 #define SMU_THROTTLER_FPPT_BIT 5
53 #define SMU_THROTTLER_SPPT_BIT 6
54 #define SMU_THROTTLER_SPPT_APU_BIT 7
55
56 // Current Throttlers
57 #define SMU_THROTTLER_TDC_GFX_BIT 16
58 #define SMU_THROTTLER_TDC_SOC_BIT 17
59 #define SMU_THROTTLER_TDC_MEM_BIT 18
60 #define SMU_THROTTLER_TDC_VDD_BIT 19
61 #define SMU_THROTTLER_TDC_CVIP_BIT 20
62 #define SMU_THROTTLER_EDC_CPU_BIT 21
63 #define SMU_THROTTLER_EDC_GFX_BIT 22
64 #define SMU_THROTTLER_APCC_BIT 23
65
66 // Temperature
67 #define SMU_THROTTLER_TEMP_GPU_BIT 32
68 #define SMU_THROTTLER_TEMP_CORE_BIT 33
69 #define SMU_THROTTLER_TEMP_MEM_BIT 34
70 #define SMU_THROTTLER_TEMP_EDGE_BIT 35
71 #define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36
72 #define SMU_THROTTLER_TEMP_SOC_BIT 37
73 #define SMU_THROTTLER_TEMP_VR_GFX_BIT 38
74 #define SMU_THROTTLER_TEMP_VR_SOC_BIT 39
75 #define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40
76 #define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41
77 #define SMU_THROTTLER_TEMP_LIQUID0_BIT 42
78 #define SMU_THROTTLER_TEMP_LIQUID1_BIT 43
79 #define SMU_THROTTLER_VRHOT0_BIT 44
80 #define SMU_THROTTLER_VRHOT1_BIT 45
81 #define SMU_THROTTLER_PROCHOT_CPU_BIT 46
82 #define SMU_THROTTLER_PROCHOT_GFX_BIT 47
83
84 // Other
85 #define SMU_THROTTLER_PPM_BIT 56
86 #define SMU_THROTTLER_FIT_BIT 57
87
/* Opaque per-ASIC power-state payload; magic tags the hardware layout. */
struct smu_hw_power_state {
	unsigned int magic;
};
91
92 struct smu_power_state;
93
/*
 * UI label attached to a power state for display purposes.
 * NOTE(review): several historical misspellings (TABEL, BALLANCED, HIGHT)
 * are kept as-is — the enumerator names are part of the existing interface.
 */
enum smu_state_ui_label {
	SMU_STATE_UI_LABEL_NONE,
	SMU_STATE_UI_LABEL_BATTERY,
	SMU_STATE_UI_TABEL_MIDDLE_LOW,
	SMU_STATE_UI_LABEL_BALLANCED,
	SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
	SMU_STATE_UI_LABEL_PERFORMANCE,
	SMU_STATE_UI_LABEL_BACO,
};
103
/*
 * Bit flags classifying how/why a power state exists (boot, thermal,
 * forced, ACPI, BACO, ...). Values are single bits and may be OR'd.
 * NOTE(review): the CLASSIFICATIN/OVERDIRVER misspellings are historical
 * and kept for interface compatibility.
 */
enum smu_state_classification_flag {
	SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
	SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
	SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
	SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
	SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
	SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
	SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
	SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
	SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
	SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
	SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
	SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
	SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
	SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
};
127
/* Classification metadata carried by each power state. */
struct smu_state_classification_block {
	enum smu_state_ui_label ui_label;
	enum smu_state_classification_flag flags;	/* OR of classification bits */
	int bios_index;					/* index into the VBIOS state table */
	bool temporary_state;
	bool to_be_deleted;
};
135
/* PCIe settings associated with a power state. */
struct smu_state_pcie_block {
	unsigned int lanes;
};
139
/* Where the refresh-rate limit for a display block comes from. */
enum smu_refreshrate_source {
	SMU_REFRESHRATE_SOURCE_EDID,		/* use the EDID-provided rate */
	SMU_REFRESHRATE_SOURCE_EXPLICIT		/* use explicit_refreshrate */
};
144
/* Display-related constraints attached to a power state. */
struct smu_state_display_block {
	bool disable_frame_modulation;
	bool limit_refreshrate;
	enum smu_refreshrate_source refreshrate_source;
	int explicit_refreshrate;	/* used when source is EXPLICIT */
	int edid_refreshrate_index;	/* used when source is EDID */
	bool enable_vari_bright;
};
153
/* Memory controller settings attached to a power state. */
struct smu_state_memory_block {
	bool dll_off;
	uint8_t m3arb;
	uint8_t unused[3];	/* pad to keep the struct 4-byte aligned */
};
159
/* Software/driver-side behavior toggles attached to a power state. */
struct smu_state_software_algorithm_block {
	bool disable_load_balancing;
	bool enable_sleep_for_timestamps;
};
164
/*
 * Thermal limits reported by the ASIC backend.
 * Presumably expressed in 1/1000 degrees C (see
 * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES) — confirm against the backends.
 */
struct smu_temperature_range {
	int min;
	int max;
	int edge_emergency_max;
	int hotspot_min;
	int hotspot_crit_max;
	int hotspot_emergency_max;
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int software_shutdown_temp;
	int software_shutdown_temp_offset;
};
178
/* Restrictions on when a power state may be selected. */
struct smu_state_validation_block {
	bool single_display_only;
	bool disallow_on_dc;		/* state not usable on DC (battery) power */
	uint8_t supported_power_levels;
};
184
/* UVD (video decode) engine clocks for a power state. */
struct smu_uvd_clocks {
	uint32_t vclk;	/* video clock */
	uint32_t dclk;	/* decode clock */
};
189
190 /**
191 * Structure to hold a SMU Power State.
192 */
193 struct smu_power_state {
194 uint32_t id;
195 struct list_head ordered_list;
196 struct list_head all_states_list;
197
198 struct smu_state_classification_block classification;
199 struct smu_state_validation_block validation;
200 struct smu_state_pcie_block pcie;
201 struct smu_state_display_block display;
202 struct smu_state_memory_block memory;
203 struct smu_state_software_algorithm_block software;
204 struct smu_uvd_clocks uvd_clocks;
205 struct smu_hw_power_state hardware;
206 };
207
/* Power source: wall power (AC) vs battery (DC). */
enum smu_power_src_type {
	SMU_POWER_SOURCE_AC,
	SMU_POWER_SOURCE_DC,
	SMU_POWER_SOURCE_COUNT,
};
213
/* Which package-power-tracking (PPT) limit is being addressed. */
enum smu_ppt_limit_type {
	SMU_DEFAULT_PPT_LIMIT = 0,	/* sustained power limit */
	SMU_FAST_PPT_LIMIT,		/* short-window (burst) power limit */
	SMU_LIMIT_TYPE_COUNT,
};
219
/* Which value of a PPT limit to query: min/current/default/max. */
enum smu_ppt_limit_level {
	SMU_PPT_LIMIT_MIN = -1,
	SMU_PPT_LIMIT_CURRENT,
	SMU_PPT_LIMIT_DEFAULT,
	SMU_PPT_LIMIT_MAX,
};
226
/* Size in bytes of the system-memory pool reserved for the SMU. */
enum smu_memory_pool_size {
	SMU_MEMORY_POOL_SIZE_ZERO = 0,
	SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
	SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
	SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
	SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
};
234
/*
 * User-customized DPM settings saved so they can be restored after
 * suspend/resume or reset (see SMU_DPM_USER_PROFILE_RESTORE flag).
 */
struct smu_user_dpm_profile {
	uint32_t fan_mode;
	uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];
	uint32_t fan_speed_pwm;
	uint32_t fan_speed_rpm;
	uint32_t flags;		/* SMU_DPM_USER_PROFILE_* / SMU_CUSTOM_FAN_* bits */
	uint32_t user_od;	/* user overdrive settings active */

	/* user clock state information */
	uint32_t clk_mask[SMU_CLK_COUNT];
	uint32_t clk_dependency;
};
247
/*
 * Initialize one entry of a struct smu_table array with its size,
 * alignment and memory domain. do/while(0) keeps it statement-safe.
 */
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
	do { \
		tables[table_id].size = s; \
		tables[table_id].align = a; \
		tables[table_id].domain = d; \
	} while (0)
254
/* Cached copy of a table's contents, refreshed at most every @interval ms. */
struct smu_table_cache {
	void *buffer;			/* cached table data */
	size_t size;			/* size of @buffer in bytes */
	/* interval in ms*/
	uint32_t interval;
	unsigned long last_cache_time;	/* jiffies-style timestamp of last refresh */
};
262
/* One SMU-shared table: a GPU buffer object mapped for CPU and SMU access. */
struct smu_table {
	uint64_t size;
	uint32_t align;
	uint8_t domain;			/* memory domain (VRAM/GTT) for the BO */
	uint64_t mc_address;		/* GPU (MC) address of the table */
	void *cpu_addr;			/* CPU mapping of the table */
	struct amdgpu_bo *bo;
	uint32_t version;
	struct smu_table_cache cache;
};
273
/* Identifiers for driver-side cached metrics tables. */
enum smu_driver_table_id {
	SMU_DRIVER_TABLE_GPU_METRICS = 0,
	SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_COUNT,
};
280
/* A driver-side table identified by @id with its local cache. */
struct smu_driver_table {
	enum smu_driver_table_id id;
	struct smu_table_cache cache;
};
285
/* What a reported performance level represents. */
enum smu_perf_level_designation {
	PERF_LEVEL_ACTIVITY,
	PERF_LEVEL_POWER_CONTAINMENT,
};
290
/* Snapshot of clocks/voltages describing one performance level. */
struct smu_performance_level {
	uint32_t core_clock;
	uint32_t memory_clock;
	uint32_t vddc;		/* core voltage */
	uint32_t vddci;		/* memory I/O voltage */
	uint32_t non_local_mem_freq;
	uint32_t non_local_mem_width;
};
299
/* Min/max clock and bandwidth capabilities reported to clients. */
struct smu_clock_info {
	uint32_t min_mem_clk;
	uint32_t max_mem_clk;
	uint32_t min_eng_clk;
	uint32_t max_eng_clk;
	uint32_t min_bus_bandwidth;
	uint32_t max_bus_bandwidth;
};
308
309 #define SMU_MAX_DPM_LEVELS 16
310
/* One DPM level: its clock value and whether the level is enabled. */
struct smu_dpm_clk_level {
	bool enabled;
	uint32_t value;
};
315
316 #define SMU_DPM_TABLE_FINE_GRAINED BIT(0)
317
/*
 * DPM level table for one clock domain; levels are stored in ascending
 * order (see SMU_DPM_TABLE_MIN/MAX below). SMU_DPM_TABLE_FINE_GRAINED in
 * @flags indicates a continuous (min/max) rather than discrete table.
 */
struct smu_dpm_table {
	enum smu_clk_type clk_type;
	uint32_t count;		/* number of valid entries in dpm_levels */
	uint32_t flags;
	struct smu_dpm_clk_level dpm_levels[SMU_MAX_DPM_LEVELS];
};
324
/*
 * Lowest/highest clock of a struct smu_dpm_table, or 0 for an empty table.
 * Assumes levels are sorted ascending. @table is evaluated more than once,
 * so pass a plain pointer, not an expression with side effects.
 */
#define SMU_DPM_TABLE_MIN(table) \
	((table)->count > 0 ? (table)->dpm_levels[0].value : 0)

#define SMU_DPM_TABLE_MAX(table) \
	((table)->count > 0 ? (table)->dpm_levels[(table)->count - 1].value : 0)
330
331 #define SMU_MAX_PCIE_LEVELS 3
332
/* Per-level PCIe gen/width and link clock table. */
struct smu_pcie_table {
	uint8_t pcie_gen[SMU_MAX_PCIE_LEVELS];
	uint8_t pcie_lane[SMU_MAX_PCIE_LEVELS];
	uint16_t lclk_freq[SMU_MAX_PCIE_LEVELS];
	uint32_t lclk_levels;	/* number of valid entries above */
};
339
/* Boot-time clock/voltage defaults parsed from the VBIOS. */
struct smu_bios_boot_up_values {
	uint32_t revision;
	uint32_t gfxclk;
	uint32_t uclk;
	uint32_t socclk;
	uint32_t dcefclk;
	uint32_t eclk;
	uint32_t vclk;
	uint32_t dclk;
	uint16_t vddc;
	uint16_t vddci;
	uint16_t mvddc;
	uint16_t vdd_gfx;
	uint8_t cooling_id;
	uint32_t pp_table_id;
	uint32_t format_revision;
	uint32_t content_revision;
	uint32_t fclk;
	uint32_t lclk;
	uint32_t firmware_caps;
};
361
/* Logical identifiers for the SMU-shared tables in smu_table_context. */
enum smu_table_id {
	SMU_TABLE_PPTABLE = 0,
	SMU_TABLE_WATERMARKS,
	SMU_TABLE_CUSTOM_DPM,
	SMU_TABLE_DPMCLOCKS,
	SMU_TABLE_AVFS,
	SMU_TABLE_AVFS_PSM_DEBUG,
	SMU_TABLE_AVFS_FUSE_OVERRIDE,
	SMU_TABLE_PMSTATUSLOG,
	SMU_TABLE_SMU_METRICS,
	SMU_TABLE_DRIVER_SMU_CONFIG,
	SMU_TABLE_ACTIVITY_MONITOR_COEFF,
	SMU_TABLE_OVERDRIVE,
	SMU_TABLE_I2C_COMMANDS,
	SMU_TABLE_PACE,
	SMU_TABLE_ECCINFO,
	SMU_TABLE_COMBO_PPTABLE,
	SMU_TABLE_WIFIBAND,
	SMU_TABLE_PMFW_SYSTEM_METRICS,
	SMU_TABLE_COUNT,
};
383
/*
 * All table state for one SMU instance: the powerplay table and its
 * variants, cached metrics, the per-id table array and the shared
 * driver/memory-pool buffers.
 */
struct smu_table_context {
	void *power_play_table;
	uint32_t power_play_table_size;
	void *hardcode_pptable;		/* user-uploaded pptable override */
	unsigned long metrics_time;	/* timestamp of last metrics fetch */
	void *metrics_table;
	void *clocks_table;
	void *watermarks_table;
	struct mutex metrics_lock;	/* protects metrics_table/metrics_time */

	void *max_sustainable_clocks;
	struct smu_bios_boot_up_values boot_values;
	void *driver_pptable;
	void *combo_pptable;
	void *ecc_table;
	void *driver_smu_config_table;
	struct smu_table tables[SMU_TABLE_COUNT];
	/*
	 * The driver table is just a staging buffer for
	 * uploading/downloading content from the SMU.
	 *
	 * And the table_id for SMU_MSG_TransferTableSmu2Dram/
	 * SMU_MSG_TransferTableDram2Smu instructs SMU
	 * which content driver is interested.
	 */
	struct smu_table driver_table;
	struct smu_table memory_pool;
	struct smu_table dummy_read_1_table;
	uint8_t thermal_controller_type;

	void *overdrive_table;		/* current OD settings */
	void *boot_overdrive_table;	/* factory defaults, for reset */
	void *user_overdrive_table;	/* user-edited OD settings */

	struct smu_driver_table driver_tables[SMU_DRIVER_TABLE_COUNT];
};
420
421 struct smu_context;
422 struct smu_dpm_policy;
423
/* Human-readable description of a DPM policy and its levels. */
struct smu_dpm_policy_desc {
	const char *name;
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};
428
/* One selectable power-management policy and its setter callback. */
struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	unsigned long level_mask;	/* bitmask of levels this policy supports */
	int current_level;
	int (*set_policy)(struct smu_context *ctxt, int level);
};
436
/* Set of supported policies; policy_mask marks which entries are valid. */
struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};
441
/* Per-device DPM state: ASIC-specific context, forced level and policies. */
struct smu_dpm_context {
	uint32_t dpm_context_size;
	void *dpm_context;		/* ASIC-specific DPM tables */
	void *golden_dpm_context;	/* pristine copy for restoring defaults */
	enum amd_dpm_forced_level dpm_level;
	enum amd_dpm_forced_level saved_dpm_level;
	enum amd_dpm_forced_level requested_dpm_level;
	struct smu_power_state *dpm_request_power_state;
	struct smu_power_state *dpm_current_power_state;
	struct mclock_latency_table *mclk_latency_table;
	struct smu_dpm_policy_ctxt *dpm_policies;
};
454
/* Holds the temperature-metrics callback table for this device. */
struct smu_temp_context {
	const struct smu_temp_funcs *temp_funcs;
};
458
/*
 * Power-gating state per media/aux engine. The atomic_t fields act as
 * gate refcount/state shared across contexts.
 */
struct smu_power_gate {
	bool uvd_gated;
	bool vce_gated;
	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
	atomic_t jpeg_gated;
	atomic_t vpe_gated;
	atomic_t isp_gated;
	atomic_t umsch_mm_gated;
};
468
/* ASIC-specific power context plus engine power-gating state. */
struct smu_power_context {
	void *power_context;
	uint32_t power_context_size;
	struct smu_power_gate power_gate;
};
474
475 #define SMU_FEATURE_NUM_DEFAULT (64)
476 #define SMU_FEATURE_MAX (128)
477
/* Bitmap wide enough for SMU_FEATURE_MAX feature bits. */
struct smu_feature_bits {
	DECLARE_BITMAP(bits, SMU_FEATURE_MAX);
};
481
482 /*
483 * Helpers for initializing smu_feature_bits statically.
484 * Use SMU_FEATURE_BIT_INIT() which automatically handles array indexing:
485 * static const struct smu_feature_bits example = {
486 * .bits = {
487 * SMU_FEATURE_BIT_INIT(5),
488 * SMU_FEATURE_BIT_INIT(10),
489 * SMU_FEATURE_BIT_INIT(65),
490 * SMU_FEATURE_BIT_INIT(100)
491 * }
492 * };
493 */
494 #define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG)
495 #define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG)
496 #define SMU_FEATURE_BIT_INIT(bit) \
497 [SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit))
498
/* Which feature bitmap is being addressed: supported vs allowed. */
enum smu_feature_list {
	SMU_FEATURE_LIST_SUPPORTED,
	SMU_FEATURE_LIST_ALLOWED,
	SMU_FEATURE_LIST_MAX,
};
504
/* Feature bookkeeping: count of features plus per-list bitmaps. */
struct smu_feature {
	uint32_t feature_num;
	struct smu_feature_bits bits[SMU_FEATURE_LIST_MAX];
};
509
/* Snapshot of engine/memory/DCEF clocks (including self-refresh values). */
struct smu_clocks {
	uint32_t engine_clock;
	uint32_t memory_clock;
	uint32_t bus_bandwidth;
	uint32_t engine_clock_in_sr;	/* engine clock during self-refresh */
	uint32_t dcef_clock;
	uint32_t dcef_clock_in_sr;	/* DCEF clock during self-refresh */
};
518
519 #define MAX_REGULAR_DPM_NUM 16
/* One memory clock frequency and its switching latency. */
struct mclk_latency_entries {
	uint32_t frequency;
	uint32_t latency;
};
/* Table of memory-clock switch latencies; @count entries are valid. */
struct mclock_latency_table {
	uint32_t count;
	struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
528
/* GPU reset modes understood by the SMU (mode meaning is ASIC-specific). */
enum smu_reset_mode {
	SMU_RESET_MODE_0,
	SMU_RESET_MODE_1,
	SMU_RESET_MODE_2,
	SMU_RESET_MODE_3,
	SMU_RESET_MODE_4,
};
536
/* BACO (Bus Active, Chip Off) transition direction. */
enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
};
541
/* BACO capability/state tracking for this device. */
struct smu_baco_context {
	uint32_t state;		/* current enum smu_baco_state value */
	bool platform_support;
	bool maco_support;	/* MACO (memory-off) variant supported */
};
547
/* A frequency range plus the DPM level it corresponds to. */
struct smu_freq_info {
	uint32_t min;
	uint32_t max;
	uint32_t freq_level;
};
553
/* Profiling clock presets (min/standard/peak) plus custom and current. */
struct pstates_clk_freq {
	uint32_t min;
	uint32_t standard;
	uint32_t peak;
	struct smu_freq_info custom;
	struct smu_freq_info curr;
};
561
/* Per-domain UMD pstate presets used by profiling performance levels. */
struct smu_umd_pstate_table {
	struct pstates_clk_freq gfxclk_pstate;
	struct pstates_clk_freq socclk_pstate;
	struct pstates_clk_freq uclk_pstate;
	struct pstates_clk_freq vclk_pstate;
	struct pstates_clk_freq dclk_pstate;
	struct pstates_clk_freq fclk_pstate;
};
570
/* Maps a common message enum to the ASIC-specific message index. */
struct cmn2asic_msg_mapping {
	int valid_mapping;	/* nonzero if this message exists on the ASIC */
	int map_to;		/* ASIC-specific message index */
	uint32_t flags;
};
576
/* Generic common-enum to ASIC-index mapping (clocks, features, tables...). */
struct cmn2asic_mapping {
	int valid_mapping;
	int map_to;
};
581
582 #define SMU_MSG_MAX_ARGS 4
583
584 /* Message flags for smu_msg_args */
585 #define SMU_MSG_FLAG_ASYNC BIT(0) /* Async send - skip post-poll */
586 #define SMU_MSG_FLAG_LOCK_HELD BIT(1) /* Caller holds ctl->lock */
587 #define SMU_MSG_FLAG_FORCE_READ_ARG BIT(2) /* force read smu arg from pmfw */
588
589 /* smu_msg_ctl flags */
590 #define SMU_MSG_CTL_DEBUG_MAILBOX BIT(0) /* Debug mailbox supported */
591
592 struct smu_msg_ctl;
593 /**
594 * struct smu_msg_config - IP-level register configuration
595 * @msg_reg: Message register offset
596 * @resp_reg: Response register offset
597 * @arg_regs: Argument register offsets (up to SMU_MSG_MAX_ARGS)
598 * @num_arg_regs: Number of argument registers available
599 * @debug_msg_reg: Debug message register offset
600 * @debug_resp_reg: Debug response register offset
601 * @debug_param_reg: Debug parameter register offset
602 */
603 struct smu_msg_config {
604 u32 msg_reg;
605 u32 resp_reg;
606 u32 arg_regs[SMU_MSG_MAX_ARGS];
607 int num_arg_regs;
608 u32 debug_msg_reg;
609 u32 debug_resp_reg;
610 u32 debug_param_reg;
611 };
612
613 /**
614 * struct smu_msg_args - Per-call message arguments
615 * @msg: Common message type (enum smu_message_type)
616 * @args: Input arguments
617 * @num_args: Number of input arguments
618 * @out_args: Output arguments (filled after successful send)
619 * @num_out_args: Number of output arguments to read
620 * @flags: Message flags (SMU_MSG_FLAG_*)
621 * @timeout: Per-message timeout in us (0 = use default)
622 */
623 struct smu_msg_args {
624 enum smu_message_type msg;
625 u32 args[SMU_MSG_MAX_ARGS];
626 int num_args;
627 u32 out_args[SMU_MSG_MAX_ARGS];
628 int num_out_args;
629 u32 flags;
630 u32 timeout;
631 };
632
633 /**
634 * struct smu_msg_ops - IP-level protocol operations
635 * @send_msg: send message protocol
636 * @wait_response: wait for response (for split send/wait cases)
637 * @decode_response: Convert response register value to errno
638 * @send_debug_msg: send debug message
639 */
640 struct smu_msg_ops {
641 int (*send_msg)(struct smu_msg_ctl *ctl, struct smu_msg_args *args);
642 int (*wait_response)(struct smu_msg_ctl *ctl, u32 timeout_us);
643 int (*decode_response)(u32 resp);
644 int (*send_debug_msg)(struct smu_msg_ctl *ctl, u32 msg, u32 param);
645 };
646
647 /**
648 * struct smu_msg_ctl - Per-device message control block
649 * This is a standalone control block that encapsulates everything
650 * needed for SMU messaging. The ops->send_msg implements the complete
651 * protocol including all filtering and error handling.
652 */
653 struct smu_msg_ctl {
654 struct smu_context *smu;
655 struct mutex lock;
656 struct smu_msg_config config;
657 const struct smu_msg_ops *ops;
658 const struct cmn2asic_msg_mapping *message_map;
659 u32 default_timeout;
660 u32 flags;
661 };
662
/* Smart Trace Buffer (STB) state. */
struct stb_context {
	uint32_t stb_buf_size;
	bool enabled;
	spinlock_t lock;	/* protects STB buffer access */
};
668
/* Coarse SMU firmware lifecycle state tracked by the driver. */
enum smu_fw_status {
	SMU_FW_INIT = 0,
	SMU_FW_RUNTIME,
	SMU_FW_HANG,
};
674
675 #define WORKLOAD_POLICY_MAX 7
676
677 /*
678 * Configure wbrf event handling pace as there can be only one
679 * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
680 */
681 #define SMU_WBRF_EVENT_HANDLING_PACE 10
682
/* Driver-side capability bits (reset support per engine/link). */
enum smu_feature_cap_id {
	SMU_FEATURE_CAP_ID__LINK_RESET = 0,
	SMU_FEATURE_CAP_ID__SDMA_RESET,
	SMU_FEATURE_CAP_ID__VCN_RESET,
	SMU_FEATURE_CAP_ID__COUNT,
};
689
/* Bitmap of enum smu_feature_cap_id capabilities. */
struct smu_feature_cap {
	DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
};
693
/*
 * struct smu_context - Top-level per-device SMU driver state.
 *
 * Aggregates the table/DPM/power/thermal sub-contexts, the
 * common-to-ASIC mapping tables, power limits, pstate presets, user
 * DPM profile and the mailbox control block used to message the SMU
 * firmware. One instance exists per amdgpu device.
 */
struct smu_context {
	struct amdgpu_device *adev;
	struct amdgpu_irq_src irq_source;

	const struct pptable_funcs *ppt_funcs;	/* ASIC backend callbacks */
	/* common-enum to ASIC-index translation tables */
	const struct cmn2asic_mapping *clock_map;
	const struct cmn2asic_mapping *feature_map;
	const struct cmn2asic_mapping *table_map;
	const struct cmn2asic_mapping *pwr_src_map;
	const struct cmn2asic_mapping *workload_map;
	uint64_t pool_size;			/* SMU memory pool size in bytes */

	struct smu_table_context smu_table;
	struct smu_dpm_context smu_dpm;
	struct smu_power_context smu_power;
	struct smu_temp_context smu_temp;
	struct smu_feature smu_feature;
	struct amd_pp_display_configuration *display_config;
	struct smu_baco_context smu_baco;
	struct smu_temperature_range thermal_range;
	struct smu_feature_cap fea_cap;
	void *od_settings;			/* overdrive settings blob */

	struct smu_umd_pstate_table pstate_table;
	uint32_t pstate_sclk;
	uint32_t pstate_mclk;

	bool od_enabled;			/* overdrive enabled */
	/* power limits, presumably in watts or milliwatts per backend — confirm */
	uint32_t current_power_limit;
	uint32_t default_power_limit;
	uint32_t max_power_limit;
	uint32_t min_power_limit;

	/* soft pptable */
	uint32_t ppt_offset_bytes;
	uint32_t ppt_size_bytes;
	uint8_t *ppt_start_addr;

	bool support_power_containment;
	bool disable_watermark;

#define WATERMARKS_EXIST	(1 << 0)
#define WATERMARKS_LOADED	(1 << 1)
	uint32_t watermarks_bitmap;		/* WATERMARKS_* state bits */
	uint32_t hard_min_uclk_req_from_dal;
	bool disable_uclk_switch;

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
	/* backend specific custom workload settings */
	long *custom_profile_params;
	bool pm_enabled;
	bool is_apu;

	/* firmware/interface version info reported by the SMC */
	uint32_t smc_driver_if_version;
	uint32_t smc_fw_if_version;
	uint32_t smc_fw_version;
	uint32_t smc_fw_caps;
	uint8_t smc_fw_state;			/* enum smu_fw_status value */

	bool uploading_custom_pp_table;
	bool dc_controlled_by_gpio;

	struct work_struct throttling_logging_work;
	atomic64_t throttle_int_counter;	/* count of throttle interrupts */
	struct work_struct interrupt_work;

	unsigned fan_max_rpm;
	unsigned manual_fan_speed_pwm;

	uint32_t gfx_default_hard_min_freq;
	uint32_t gfx_default_soft_max_freq;
	uint32_t gfx_actual_hard_min_freq;
	uint32_t gfx_actual_soft_max_freq;

	/* APU only */
	uint32_t cpu_default_soft_min_freq;
	uint32_t cpu_default_soft_max_freq;
	uint32_t cpu_actual_soft_min_freq;
	uint32_t cpu_actual_soft_max_freq;
	uint32_t cpu_core_id_select;
	uint16_t cpu_core_num;

	struct smu_user_dpm_profile user_dpm_profile;

	struct stb_context stb_context;

	struct firmware pptable_firmware;

	struct delayed_work swctf_delayed_work;	/* software CTF (thermal) handling */

	/* data structures for wbrf feature support */
	bool wbrf_supported;
	struct notifier_block wbrf_notifier;
	struct delayed_work wbrf_delayed_work;

	/* SMU message control block */
	struct smu_msg_ctl msg_ctl;
};
797
798 struct i2c_adapter;
799
800 /**
801 * struct smu_temp_funcs - Callbacks used to get temperature data.
802 */
803 struct smu_temp_funcs {
804 /**
805 * @get_temp_metrics: Calibrate voltage/frequency curve to fit the system's
806 * power delivery and voltage margins. Required for adaptive
807 * @type Temperature metrics type(baseboard/gpuboard)
808 * Return: Size of &table
809 */
810 ssize_t (*get_temp_metrics)(struct smu_context *smu,
811 enum smu_temp_metric_type type, void *table);
812
813 /**
814 * @temp_metrics_is_support: Get if specific temperature metrics is supported
815 * @type Temperature metrics type(baseboard/gpuboard)
816 * Return: true if supported else false
817 */
818 bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
819
820 };
821
822 /**
823 * struct pptable_funcs - Callbacks used to interact with the SMU.
824 */
825 struct pptable_funcs {
826 /**
827 * @run_btc: Calibrate voltage/frequency curve to fit the system's
828 * power delivery and voltage margins. Required for adaptive
829 * voltage frequency scaling (AVFS).
830 */
831 int (*run_btc)(struct smu_context *smu);
832
833 /**
834 * @init_allowed_features: Initialize allowed features bitmap.
835 * Directly sets allowed features using smu_feature wrapper functions.
836 */
837 int (*init_allowed_features)(struct smu_context *smu);
838
839 /**
840 * @get_current_power_state: Get the current power state.
841 *
842 * Return: Current power state on success, negative errno on failure.
843 */
844 enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
845
846 /**
847 * @set_default_dpm_table: Retrieve the default overdrive settings from
848 * the SMU.
849 */
850 int (*set_default_dpm_table)(struct smu_context *smu);
851
852 int (*set_power_state)(struct smu_context *smu);
853
854 /**
855 * @populate_umd_state_clk: Populate the UMD power state table with
856 * defaults.
857 */
858 int (*populate_umd_state_clk)(struct smu_context *smu);
859
860 /**
861 * @emit_clk_levels: Print DPM clock levels for a clock domain
862 * to buffer using sysfs_emit_at. Star current level.
863 *
864 * Used for sysfs interfaces.
865 * &buf: sysfs buffer
866 * &offset: offset within buffer to start printing, which is updated by the
867 * function.
868 *
869 * Return: 0 on Success or Negative to indicate an error occurred.
870 */
871 int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
872
873 /**
874 * @force_clk_levels: Set a range of allowed DPM levels for a clock
875 * domain.
876 * &clk_type: Clock domain.
877 * &mask: Range of allowed DPM levels.
878 */
879 int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
880
881 /**
882 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
883 * &type: Type of edit.
884 * &input: Edit parameters.
885 * &size: Size of &input.
886 */
887 int (*od_edit_dpm_table)(struct smu_context *smu,
888 enum PP_OD_DPM_TABLE_COMMAND type,
889 long *input, uint32_t size);
890
891 /**
892 * @restore_user_od_settings: Restore the user customized
893 * OD settings on S3/S4/Runpm resume.
894 */
895 int (*restore_user_od_settings)(struct smu_context *smu);
896
897 /**
898 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
899 * domain.
900 */
901 int (*get_clock_by_type_with_latency)(struct smu_context *smu,
902 enum smu_clk_type clk_type,
903 struct
904 pp_clock_levels_with_latency
905 *clocks);
906 /**
907 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
908 * domain.
909 */
910 int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
911 enum amd_pp_clock_type type,
912 struct
913 pp_clock_levels_with_voltage
914 *clocks);
915
916 /**
917 * @get_power_profile_mode: Print all power profile modes to
918 * buffer. Star current mode.
919 */
920 int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
921
922 /**
923 * @set_power_profile_mode: Set a power profile mode. Also used to
924 * create/set custom power profile modes.
925 * &input: Power profile mode parameters.
926 * &workload_mask: mask of workloads to enable
927 * &custom_params: custom profile parameters
928 * &custom_params_max_idx: max valid idx into custom_params
929 */
930 int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
931 long *custom_params, u32 custom_params_max_idx);
932
933 /**
934 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
935 * management.
936 */
937 int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);
938
939 /**
940 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
941 * management.
942 */
943 int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
944
945 /**
946 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
947 */
948 int (*set_gfx_power_up_by_imu)(struct smu_context *smu);
949
950 /**
951 * @read_sensor: Read data from a sensor.
952 * &sensor: Sensor to read data from.
953 * &data: Sensor reading.
954 * &size: Size of &data.
955 */
956 int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
957 void *data, uint32_t *size);
958
959 /**
960 * @get_apu_thermal_limit: get apu core limit from smu
961 * &limit: current limit temperature in millidegrees Celsius
962 */
963 int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
964
965 /**
966 * @set_apu_thermal_limit: update all controllers with new limit
967 * &limit: limit temperature to be setted, in millidegrees Celsius
968 */
969 int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
970
971 /**
972 * @pre_display_config_changed: Prepare GPU for a display configuration
973 * change.
974 *
975 * Disable display tracking and pin memory clock speed to maximum. Used
976 * in display component synchronization.
977 */
978 int (*pre_display_config_changed)(struct smu_context *smu);
979
980 /**
981 * @display_config_changed: Notify the SMU of the current display
982 * configuration.
983 *
984 * Allows SMU to properly track blanking periods for memory clock
985 * adjustment. Used in display component synchronization.
986 */
987 int (*display_config_changed)(struct smu_context *smu);
988
989 int (*apply_clocks_adjust_rules)(struct smu_context *smu);
990
991 /**
992 * @notify_smc_display_config: Applies display requirements to the
993 * current power state.
994 *
995 * Optimize deep sleep DCEFclk and mclk for the current display
996 * configuration. Used in display component synchronization.
997 */
998 int (*notify_smc_display_config)(struct smu_context *smu);
999
1000 /**
1001 * @is_dpm_running: Check if DPM is running.
1002 *
1003 * Return: True if DPM is running, false otherwise.
1004 */
1005 bool (*is_dpm_running)(struct smu_context *smu);
1006
1007 /**
1008 * @get_fan_speed_pwm: Get the current fan speed in PWM.
1009 */
1010 int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);
1011
1012 /**
1013 * @get_fan_speed_rpm: Get the current fan speed in rpm.
1014 */
1015 int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
1016
1017 /**
1018 * @set_watermarks_table: Configure and upload the watermarks tables to
1019 * the SMU.
1020 */
1021 int (*set_watermarks_table)(struct smu_context *smu,
1022 struct pp_smu_wm_range_sets *clock_ranges);
1023
1024 /**
1025 * @get_thermal_temperature_range: Get safe thermal limits in Celcius.
1026 */
1027 int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
1028
1029 /**
1030 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
1031 * &clocks_in_khz: Array of DPM levels.
1032 * &num_states: Elements in &clocks_in_khz.
1033 */
1034 int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
1035
1036 /**
1037 * @set_default_od_settings: Set the overdrive tables to defaults.
1038 */
1039 int (*set_default_od_settings)(struct smu_context *smu);
1040
1041 /**
1042 * @set_performance_level: Set a performance level.
1043 */
1044 int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
1045
1046 /**
1047 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
1048 * clock switching.
1049 *
1050 * Disabling this feature forces memory clock speed to maximum.
1051 * Enabling sets the minimum memory clock capable of driving the
1052 * current display configuration.
1053 */
1054 int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
1055
1056 /**
1057 * @get_power_limit: Get the device's power limits.
1058 */
1059 int (*get_power_limit)(struct smu_context *smu,
1060 uint32_t *current_power_limit,
1061 uint32_t *default_power_limit,
1062 uint32_t *max_power_limit,
1063 uint32_t *min_power_limit);
1064
1065 /**
1066 * @get_ppt_limit: Get the device's ppt limits.
1067 */
1068 int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
1069 enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
1070
1071 /**
1072 * @set_df_cstate: Set data fabric cstate.
1073 */
1074 int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
1075
1076 /**
1077 * @update_pcie_parameters: Update and upload the system's PCIe
	 * capabilities to the SMU.
1079 * &pcie_gen_cap: Maximum allowed PCIe generation.
1080 * &pcie_width_cap: Maximum allowed PCIe width.
1081 */
1082 int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
1083
1084 /**
1085 * @i2c_init: Initialize i2c.
1086 *
1087 * The i2c bus is used internally by the SMU voltage regulators and
1088 * other devices. The i2c's EEPROM also stores bad page tables on boards
1089 * with ECC.
1090 */
1091 int (*i2c_init)(struct smu_context *smu);
1092
1093 /**
1094 * @i2c_fini: Tear down i2c.
1095 */
1096 void (*i2c_fini)(struct smu_context *smu);
1097
1098 /**
1099 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
1100 */
1101 void (*get_unique_id)(struct smu_context *smu);
1102
1103 /**
1104 * @get_dpm_clock_table: Get a copy of the DPM clock table.
1105 *
1106 * Used by display component in bandwidth and watermark calculations.
1107 */
1108 int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
1109
1110 /**
1111 * @init_microcode: Request the SMU's firmware from the kernel.
1112 */
1113 int (*init_microcode)(struct smu_context *smu);
1114
1115 /**
1116 * @load_microcode: Load firmware onto the SMU.
1117 */
1118 int (*load_microcode)(struct smu_context *smu);
1119
1120 /**
1121 * @fini_microcode: Release the SMU's firmware.
1122 */
1123 void (*fini_microcode)(struct smu_context *smu);
1124
1125 /**
1126 * @init_smc_tables: Initialize the SMU tables.
1127 */
1128 int (*init_smc_tables)(struct smu_context *smu);
1129
1130 /**
1131 * @fini_smc_tables: Release the SMU tables.
1132 */
1133 int (*fini_smc_tables)(struct smu_context *smu);
1134
1135 /**
1136 * @init_power: Initialize the power gate table context.
1137 */
1138 int (*init_power)(struct smu_context *smu);
1139
1140 /**
1141 * @fini_power: Release the power gate table context.
1142 */
1143 int (*fini_power)(struct smu_context *smu);
1144
1145 /**
1146 * @check_fw_status: Check the SMU's firmware status.
1147 *
1148 * Return: Zero if check passes, negative errno on failure.
1149 */
1150 int (*check_fw_status)(struct smu_context *smu);
1151
1152 /**
	 * @set_mp1_state: Put the SMU into the correct state for an upcoming
	 * resume from runtime PM or GPU reset.
1155 */
1156 int (*set_mp1_state)(struct smu_context *smu,
1157 enum pp_mp1_state mp1_state);
1158
1159 /**
1160 * @setup_pptable: Initialize the power play table and populate it with
1161 * default values.
1162 */
1163 int (*setup_pptable)(struct smu_context *smu);
1164
1165 /**
1166 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
1167 */
1168 int (*get_vbios_bootup_values)(struct smu_context *smu);
1169
1170 /**
1171 * @check_fw_version: Print driver and SMU interface versions to the
1172 * system log.
1173 *
1174 * Interface mismatch is not a critical failure.
1175 */
1176 int (*check_fw_version)(struct smu_context *smu);
1177
1178 /**
1179 * @powergate_sdma: Power up/down system direct memory access.
1180 */
1181 int (*powergate_sdma)(struct smu_context *smu, bool gate);
1182
1183 /**
	 * @set_gfx_cgpg: Enable/disable graphics engine coarse-grain power
	 * gating.
1186 */
1187 int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
1188
1189 /**
1190 * @write_pptable: Write the power play table to the SMU.
1191 */
1192 int (*write_pptable)(struct smu_context *smu);
1193
1194 /**
1195 * @set_driver_table_location: Send the location of the driver table to
1196 * the SMU.
1197 */
1198 int (*set_driver_table_location)(struct smu_context *smu);
1199
1200 /**
1201 * @set_tool_table_location: Send the location of the tool table to the
1202 * SMU.
1203 */
1204 int (*set_tool_table_location)(struct smu_context *smu);
1205
1206 /**
1207 * @notify_memory_pool_location: Send the location of the memory pool to
1208 * the SMU.
1209 */
1210 int (*notify_memory_pool_location)(struct smu_context *smu);
1211
1212 /**
1213 * @system_features_control: Enable/disable all SMU features.
1214 */
1215 int (*system_features_control)(struct smu_context *smu, bool en);
1216
1217 /**
1218 * @init_display_count: Notify the SMU of the number of display
1219 * components in current display configuration.
1220 */
1221 int (*init_display_count)(struct smu_context *smu, uint32_t count);
1222
1223 /**
1224 * @set_allowed_mask: Notify the SMU of the features currently allowed
1225 * by the driver.
1226 */
1227 int (*set_allowed_mask)(struct smu_context *smu);
1228
1229 /**
1230 * @get_enabled_mask: Get a mask of features that are currently enabled
1231 * on the SMU.
1232 * &feature_mask: Enabled feature mask.
1233 */
1234 int (*get_enabled_mask)(struct smu_context *smu,
1235 struct smu_feature_bits *feature_mask);
1236
1237 /**
1238 * @feature_is_enabled: Test if a feature is enabled.
1239 *
1240 * Return: One if enabled, zero if disabled.
1241 */
1242 int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
1243
1244 /**
1245 * @disable_all_features_with_exception: Disable all features with
1246 * exception to those in &mask.
1247 */
1248 int (*disable_all_features_with_exception)(struct smu_context *smu,
1249 enum smu_feature_mask mask);
1250
1251 /**
1252 * @notify_display_change: General interface call to let SMU know about DC change
1253 */
1254 int (*notify_display_change)(struct smu_context *smu);
1255
1256 /**
1257 * @set_power_limit: Set power limit in watts.
1258 */
1259 int (*set_power_limit)(struct smu_context *smu,
1260 enum smu_ppt_limit_type limit_type,
1261 uint32_t limit);
1262
1263 /**
1264 * @init_max_sustainable_clocks: Populate max sustainable clock speed
1265 * table with values from the SMU.
1266 */
1267 int (*init_max_sustainable_clocks)(struct smu_context *smu);
1268
1269 /**
1270 * @enable_thermal_alert: Enable thermal alert interrupts.
1271 */
1272 int (*enable_thermal_alert)(struct smu_context *smu);
1273
1274 /**
1275 * @disable_thermal_alert: Disable thermal alert interrupts.
1276 */
1277 int (*disable_thermal_alert)(struct smu_context *smu);
1278
1279 /**
1280 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
1281 * clock speed in MHz.
1282 */
1283 int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
1284
1285 /**
1286 * @display_clock_voltage_request: Set a hard minimum frequency
1287 * for a clock domain.
1288 */
1289 int (*display_clock_voltage_request)(struct smu_context *smu, struct
1290 pp_display_clock_request
1291 *clock_req);
1292
1293 /**
1294 * @get_fan_control_mode: Get the current fan control mode.
1295 */
1296 uint32_t (*get_fan_control_mode)(struct smu_context *smu);
1297
1298 /**
1299 * @set_fan_control_mode: Set the fan control mode.
1300 */
1301 int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
1302
1303 /**
1304 * @set_fan_speed_pwm: Set a static fan speed in PWM.
1305 */
1306 int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);
1307
1308 /**
1309 * @set_fan_speed_rpm: Set a static fan speed in rpm.
1310 */
1311 int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
1312
1313 /**
1314 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
1315 * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise.
1316 */
1317 int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
1318
1319 /**
1320 * @gfx_off_control: Enable/disable graphics engine poweroff.
1321 */
1322 int (*gfx_off_control)(struct smu_context *smu, bool enable);
1323
1324
1325 /**
1326 * @get_gfx_off_status: Get graphics engine poweroff status.
1327 *
1328 * Return:
1329 * 0 - GFXOFF(default).
1330 * 1 - Transition out of GFX State.
1331 * 2 - Not in GFXOFF.
1332 * 3 - Transition into GFXOFF.
1333 */
1334 uint32_t (*get_gfx_off_status)(struct smu_context *smu);
1335
1336 /**
1337 * @gfx_off_entrycount: total GFXOFF entry count at the time of
1338 * query since system power-up
1339 */
1340 u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
1341
1342 /**
1343 * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
1344 */
1345 u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
1346
1347 /**
1348 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
1349 */
1350 u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
1351
1352 /**
	 * @register_irq_handler: Register interrupt request handlers.
1354 */
1355 int (*register_irq_handler)(struct smu_context *smu);
1356
1357 /**
1358 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
1359 */
1360 int (*set_azalia_d3_pme)(struct smu_context *smu);
1361
1362 /**
1363 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
1364 * clock speeds table.
1365 *
1366 * Provides a way for the display component (DC) to get the max
1367 * sustainable clocks from the SMU.
1368 */
1369 int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
1370
1371 /**
1372 * @get_bamaco_support: Check if GPU supports BACO/MACO
1373 * BACO: Bus Active, Chip Off
1374 * MACO: Memory Active, Chip Off
1375 */
1376 int (*get_bamaco_support)(struct smu_context *smu);
1377
1378 /**
1379 * @baco_get_state: Get the current BACO state.
1380 *
1381 * Return: Current BACO state.
1382 */
1383 enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
1384
1385 /**
1386 * @baco_set_state: Enter/exit BACO.
1387 */
1388 int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
1389
1390 /**
1391 * @baco_enter: Enter BACO.
1392 */
1393 int (*baco_enter)(struct smu_context *smu);
1394
1395 /**
	 * @baco_exit: Exit BACO.
1397 */
1398 int (*baco_exit)(struct smu_context *smu);
1399
1400 /**
1401 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
1402 */
1403 bool (*mode1_reset_is_support)(struct smu_context *smu);
1404
1405 /**
1406 * @mode1_reset: Perform mode1 reset.
1407 *
1408 * Complete GPU reset.
1409 */
1410 int (*mode1_reset)(struct smu_context *smu);
1411
1412 /**
1413 * @mode2_reset: Perform mode2 reset.
1414 *
1415 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
1416 * IPs reset varies by asic.
1417 */
1418 int (*mode2_reset)(struct smu_context *smu);
1419 /* for gfx feature enablement after mode2 reset */
1420 int (*enable_gfx_features)(struct smu_context *smu);
1421
1422 /**
1423 * @link_reset: Perform link reset.
1424 *
1425 * The gfx device driver reset
1426 */
1427 int (*link_reset)(struct smu_context *smu);
1428
1429 /**
1430 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
1431 * domain in MHz.
1432 */
1433 int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
1434
1435 /**
1436 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
1437 * domain in MHz.
1438 */
1439 int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
1440 bool automatic);
1441
1442 /**
1443 * @set_power_source: Notify the SMU of the current power source.
1444 */
1445 int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
1446
1447 /**
1448 * @log_thermal_throttling_event: Print a thermal throttling warning to
1449 * the system's log.
1450 */
1451 void (*log_thermal_throttling_event)(struct smu_context *smu);
1452
1453 /**
1454 * @get_pp_feature_mask: Print a human readable table of enabled
1455 * features to buffer.
1456 */
1457 size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
1458
1459 /**
1460 * @set_pp_feature_mask: Request the SMU enable/disable features to
1461 * match those enabled in &new_mask.
1462 */
1463 int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
1464
1465 /**
1466 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
1467 *
1468 * Return: Size of &table
1469 */
1470 ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
1471
1472 /**
1473 * @get_pm_metrics: Get one snapshot of power management metrics from
1474 * PMFW.
1475 *
1476 * Return: Size of the metrics sample
1477 */
1478 ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
1479 size_t size);
1480
1481 /**
1482 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
1483 */
1484 int (*enable_mgpu_fan_boost)(struct smu_context *smu);
1485
1486 /**
1487 * @gfx_ulv_control: Enable/disable ultra low voltage.
1488 */
1489 int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
1490
1491 /**
1492 * @deep_sleep_control: Enable/disable deep sleep.
1493 */
1494 int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
1495
1496 /**
1497 * @get_fan_parameters: Get fan parameters.
1498 *
1499 * Get maximum fan speed from the power play table.
1500 */
1501 int (*get_fan_parameters)(struct smu_context *smu);
1502
1503 /**
1504 * @post_init: Helper function for asic specific workarounds.
1505 */
1506 int (*post_init)(struct smu_context *smu);
1507
1508 /**
1509 * @interrupt_work: Work task scheduled from SMU interrupt handler.
1510 */
1511 void (*interrupt_work)(struct smu_context *smu);
1512
1513 /**
1514 * @gpo_control: Enable/disable graphics power optimization if supported.
1515 */
1516 int (*gpo_control)(struct smu_context *smu, bool enablement);
1517
1518 /**
1519 * @gfx_state_change_set: Send the current graphics state to the SMU.
1520 */
1521 int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
1522
1523 /**
1524 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
1525 * parameters to defaults.
1526 */
1527 int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
1528
1529 /**
1530 * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
1531 */
1532 int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
1533
1534 /**
1535 * @wait_for_event: Wait for events from SMU.
1536 */
1537 int (*wait_for_event)(struct smu_context *smu,
1538 enum smu_event_type event, uint64_t event_arg);
1539
1540 /**
	 * @send_hbm_bad_pages_num: Message the SMU to update the bad page
	 * number of the SMUBUS table.
1543 */
1544 int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
1545
1546 /**
1547 * @send_rma_reason: message rma reason event to SMU.
1548 */
1549 int (*send_rma_reason)(struct smu_context *smu);
1550
1551 /**
1552 * @reset_sdma: message SMU to soft reset sdma instance.
1553 */
1554 int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
1555
1556 /**
1557 * @reset_vcn: message SMU to soft reset vcn instance.
1558 */
1559 int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
1560
1561 /**
1562 * @get_ecc_table: message SMU to get ECC INFO table.
1563 */
1564 ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
1565
1566
1567 /**
1568 * @stb_collect_info: Collects Smart Trace Buffers data.
1569 */
1570 int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
1571
1572 /**
1573 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
1574 */
1575 int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
1576
1577 /**
1578 * @set_config_table: Apply the input DriverSmuConfig table settings.
1579 */
1580 int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
1581
1582 /**
	 * @send_hbm_bad_channel_flag: Message the SMU to update the bad channel
	 * info of the SMUBUS table.
1585 */
1586 int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);
1587
1588 /**
1589 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP
1590 */
1591 int (*init_pptable_microcode)(struct smu_context *smu);
1592
1593 /**
1594 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
1595 * management.
1596 */
1597 int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
1598
1599 /**
1600 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
1601 * management.
1602 */
1603 int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
1604
1605 /**
1606 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
1607 * management.
1608 */
1609 int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
1610
1611 /**
1612 * @set_mall_enable: Init MALL power gating control.
1613 */
1614 int (*set_mall_enable)(struct smu_context *smu);
1615
1616 /**
1617 * @notify_rlc_state: Notify RLC power state to SMU.
1618 */
1619 int (*notify_rlc_state)(struct smu_context *smu, bool en);
1620
1621 /**
1622 * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
1623 */
1624 bool (*is_asic_wbrf_supported)(struct smu_context *smu);
1625
1626 /**
1627 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported
1628 */
1629 int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
1630
1631 /**
1632 * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied
1633 */
1634 int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
1635 struct freq_band_range *exclusion_ranges);
1636 /**
1637 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
1638 * Return: Size of table
1639 */
1640 ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
1641 void *table);
1642 /**
1643 * @ras_send_msg: Send a message with a parameter from Ras
1644 * &msg: Type of message.
	 * &param: Message parameter.
1646 * &read_arg: SMU response (optional).
1647 */
1648 int (*ras_send_msg)(struct smu_context *smu,
1649 enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
1650
1651
1652 /**
1653 * @get_ras_smu_drv: Get RAS smu driver interface
1654 * Return: ras_smu_drv *
1655 */
1656 int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
1657 };
1658
/*
 * MetricsMember_t - keys used by the ASIC-specific backends to select a
 * single value out of their metrics table. Groups below cover current and
 * average clocks, activity/power, temperatures, throttler status and
 * residency counters, plus misc identifiers (unique id, PCIe link info).
 */
typedef enum {
	/* instantaneous clock frequencies */
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	/* averaged clock frequencies */
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	/* averaged activity and power */
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	/* temperatures */
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	METRICS_THROTTLER_STATUS,
	/* fan, voltage and smartshift */
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	/* per-throttler residency counters */
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
	METRICS_AVERAGE_NPUCLK,
} MetricsMember_t;
1712
/*
 * Categories of common-to-ASIC index mappings. Each category is populated
 * by the corresponding *_MAP() helper macros defined further down.
 */
enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,		/* SMU message indices */
	CMN2ASIC_MAPPING_CLK,		/* clock domain indices */
	CMN2ASIC_MAPPING_FEATURE,	/* feature bit indices */
	CMN2ASIC_MAPPING_TABLE,		/* SMU table indices */
	CMN2ASIC_MAPPING_PWR,		/* power source indices */
	CMN2ASIC_MAPPING_WORKLOAD,	/* workload profile indices */
};
1721
/*
 * BACO (Bus Active, Chip Off) entry/exit sequence variants.
 * BAMACO additionally keeps memory active (see @get_bamaco_support).
 */
enum smu_baco_seq {
	BACO_SEQ_BACO = 0,
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,	/* sentinel: number of sequences */
};
1729
/*
 * Helpers for building the common-to-ASIC mapping tables (see
 * enum smu_cmn2asic_mapping_type). The first initializer field marks the
 * entry as valid (1) or invalid (0); the second is the ASIC-side index.
 */
#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

/* For features whose ASIC name has DPM as a suffix rather than a prefix. */
#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

/* As above, for features whose common name also carries a CLK suffix. */
#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

/* Entry exists but is flagged invalid (valid_mapping = 0). */
#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}
1759
/**
 * smu_memcpy_trailing - Copy the end of one structure into the middle of another
 *
 * @dst: Pointer to destination struct
 * @first_dst_member: The member name in @dst where the overwrite begins
 * @last_dst_member: The last member name in @dst that is overwritten (the
 *                   copy ends just after this member)
 * @src: Pointer to the source struct
 * @first_src_member: The member name in @src where the copy begins
 *
 * The source span (from @first_src_member to the end of @src) must be
 * exactly the same size as the destination span; this is enforced at
 * compile time by the BUILD_BUG_ON() below.
 */
#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	   \
			    src, first_src_member)			   \
({									   \
	size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \
	size_t __src_size = sizeof(*(src)) - __src_offset;		   \
	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \
	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) -  \
			    __dst_offset;				   \
	BUILD_BUG_ON(__src_size != __dst_size);				   \
	__builtin_memcpy((u8 *)(dst) + __dst_offset,			   \
			 (u8 *)(src) + __src_offset,			   \
			 __dst_size);					   \
})
1783
/* One wifi frequency band (bounds; unit defined by the PMFW interface —
 * presumably MHz, TODO confirm against the firmware headers). */
typedef struct {
	uint16_t LowFreq;
	uint16_t HighFreq;
} WifiOneBand_t;

/* Table of wifi bands handed to the firmware. NOTE(review): appears to back
 * the wbrf exclusion-range feature (see @set_wbrf_exclusion_ranges);
 * confirm against the PMFW interface definition. */
typedef struct {
	uint32_t WifiBandEntryNum;		/* valid entries in WifiBandEntry[] */
	WifiOneBand_t WifiBandEntry[11];
	uint32_t MmHubPadding[8];		/* firmware-mandated padding */
} WifiBandEntryTable_t;
1794
1795 #define STR_SOC_PSTATE_POLICY "soc_pstate"
1796 #define STR_XGMI_PLPD_POLICY "xgmi_plpd"
1797
1798 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
1799 enum pp_pm_policy p_type);
1800
1801 static inline enum smu_driver_table_id
smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)1802 smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
1803 {
1804 switch (type) {
1805 case SMU_TEMP_METRIC_BASEBOARD:
1806 return SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS;
1807 case SMU_TEMP_METRIC_GPUBOARD:
1808 return SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS;
1809 default:
1810 return SMU_DRIVER_TABLE_COUNT;
1811 }
1812
1813 return SMU_DRIVER_TABLE_COUNT;
1814 }
1815
/* Record @time (in jiffies, cf. smu_table_cache_is_valid()) as the moment
 * @table's cached copy was last refreshed. */
static inline void smu_table_cache_update_time(struct smu_table *table,
					       unsigned long time)
{
	table->cache.last_cache_time = time;
}
1821
smu_table_cache_is_valid(struct smu_table * table)1822 static inline bool smu_table_cache_is_valid(struct smu_table *table)
1823 {
1824 if (!table->cache.buffer || !table->cache.last_cache_time ||
1825 !table->cache.interval || !table->cache.size ||
1826 time_after(jiffies,
1827 table->cache.last_cache_time +
1828 msecs_to_jiffies(table->cache.interval)))
1829 return false;
1830
1831 return true;
1832 }
1833
/*
 * Allocate and configure the cache backing @table_id: a zeroed buffer of
 * @size bytes refreshed at most every @cache_interval milliseconds.
 *
 * Return: 0 on success, -ENOMEM if the buffer allocation fails.
 */
static inline int smu_table_cache_init(struct smu_context *smu,
				       enum smu_table_id table_id, size_t size,
				       uint32_t cache_interval)
{
	struct smu_table *table = &smu->smu_table.tables[table_id];

	table->cache.buffer = kzalloc(size, GFP_KERNEL);
	if (!table->cache.buffer)
		return -ENOMEM;

	/* Zero timestamp means "never filled"; see smu_table_cache_is_valid(). */
	table->cache.last_cache_time = 0;
	table->cache.interval = cache_interval;
	table->cache.size = size;

	return 0;
}
1851
smu_table_cache_fini(struct smu_context * smu,enum smu_table_id table_id)1852 static inline void smu_table_cache_fini(struct smu_context *smu,
1853 enum smu_table_id table_id)
1854 {
1855 struct smu_table_context *smu_table = &smu->smu_table;
1856 struct smu_table *tables = smu_table->tables;
1857
1858 if (tables[table_id].cache.buffer) {
1859 kfree(tables[table_id].cache.buffer);
1860 tables[table_id].cache.buffer = NULL;
1861 tables[table_id].cache.last_cache_time = 0;
1862 tables[table_id].cache.interval = 0;
1863 }
1864 }
1865
/*
 * Allocate and configure the driver table cache for @table_id: a zeroed
 * buffer of @size bytes refreshed at most every @cache_interval ms.
 *
 * Return: 0 on success, -EINVAL for an out-of-range id, -ENOMEM if the
 * buffer allocation fails.
 */
static inline int smu_driver_table_init(struct smu_context *smu,
					enum smu_driver_table_id table_id,
					size_t size, uint32_t cache_interval)
{
	struct smu_driver_table *table;

	if (table_id >= SMU_DRIVER_TABLE_COUNT)
		return -EINVAL;

	table = &smu->smu_table.driver_tables[table_id];
	table->id = table_id;
	table->cache.buffer = kzalloc(size, GFP_KERNEL);
	if (!table->cache.buffer)
		return -ENOMEM;

	/* Zero timestamp means "never filled"; see smu_driver_table_is_valid(). */
	table->cache.last_cache_time = 0;
	table->cache.interval = cache_interval;
	table->cache.size = size;

	return 0;
}
1887
smu_driver_table_fini(struct smu_context * smu,enum smu_driver_table_id table_id)1888 static inline void smu_driver_table_fini(struct smu_context *smu,
1889 enum smu_driver_table_id table_id)
1890 {
1891 struct smu_table_context *smu_table = &smu->smu_table;
1892 struct smu_driver_table *driver_tables = smu_table->driver_tables;
1893
1894 if (table_id >= SMU_DRIVER_TABLE_COUNT)
1895 return;
1896
1897 if (driver_tables[table_id].cache.buffer) {
1898 kfree(driver_tables[table_id].cache.buffer);
1899 driver_tables[table_id].cache.buffer = NULL;
1900 driver_tables[table_id].cache.last_cache_time = 0;
1901 driver_tables[table_id].cache.interval = 0;
1902 }
1903 }
1904
smu_driver_table_is_valid(struct smu_driver_table * table)1905 static inline bool smu_driver_table_is_valid(struct smu_driver_table *table)
1906 {
1907 if (!table->cache.buffer || !table->cache.last_cache_time ||
1908 !table->cache.interval || !table->cache.size ||
1909 time_after(jiffies,
1910 table->cache.last_cache_time +
1911 msecs_to_jiffies(table->cache.interval)))
1912 return false;
1913
1914 return true;
1915 }
1916
smu_driver_table_ptr(struct smu_context * smu,enum smu_driver_table_id table_id)1917 static inline void *smu_driver_table_ptr(struct smu_context *smu,
1918 enum smu_driver_table_id table_id)
1919 {
1920 struct smu_table_context *smu_table = &smu->smu_table;
1921 struct smu_driver_table *driver_tables = smu_table->driver_tables;
1922
1923 if (table_id >= SMU_DRIVER_TABLE_COUNT)
1924 return NULL;
1925
1926 return driver_tables[table_id].cache.buffer;
1927 }
1928
1929 static inline void
smu_driver_table_update_cache_time(struct smu_context * smu,enum smu_driver_table_id table_id)1930 smu_driver_table_update_cache_time(struct smu_context *smu,
1931 enum smu_driver_table_id table_id)
1932 {
1933 struct smu_table_context *smu_table = &smu->smu_table;
1934 struct smu_driver_table *driver_tables = smu_table->driver_tables;
1935
1936 if (table_id >= SMU_DRIVER_TABLE_COUNT)
1937 return;
1938
1939 driver_tables[table_id].cache.last_cache_time = jiffies;
1940 }
1941
1942 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
1943 int smu_get_power_limit(void *handle,
1944 uint32_t *limit,
1945 enum pp_power_limit_level pp_limit_level,
1946 enum pp_power_type pp_power_type);
1947
1948 bool smu_mode1_reset_is_support(struct smu_context *smu);
1949 bool smu_link_reset_is_support(struct smu_context *smu);
1950 int smu_mode1_reset(struct smu_context *smu);
1951 int smu_link_reset(struct smu_context *smu);
1952
1953 extern const struct amd_ip_funcs smu_ip_funcs;
1954
1955 bool is_support_sw_smu(struct amdgpu_device *adev);
1956 bool is_support_cclk_dpm(struct amdgpu_device *adev);
1957 int smu_write_watermarks_table(struct smu_context *smu);
1958
1959 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
1960 uint32_t *min, uint32_t *max);
1961
1962 int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
1963 uint32_t min, uint32_t max);
1964
1965 int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
1966
1967 int smu_set_ac_dc(struct smu_context *smu);
1968
1969 int smu_set_xgmi_plpd_mode(struct smu_context *smu,
1970 enum pp_xgmi_plpd_mode mode);
1971
1972 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);
1973
1974 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);
1975
1976 int smu_set_residency_gfxoff(struct smu_context *smu, bool value);
1977
1978 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
1979
1980 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
1981
1982 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
1983 uint64_t event_arg);
1984 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
1985 int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
1986 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
1987 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
1988 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
1989 int smu_send_rma_reason(struct smu_context *smu);
1990 int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
1991 bool smu_reset_sdma_is_supported(struct smu_context *smu);
1992 int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
1993 bool smu_reset_vcn_is_supported(struct smu_context *smu);
1994 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
1995 int level);
1996 ssize_t smu_get_pm_policy_info(struct smu_context *smu,
1997 enum pp_pm_policy p_type, char *sysbuf);
1998 const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);
1999
2000 int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
2001 uint32_t param, uint32_t *readarg);
2002 int amdgpu_smu_ras_feature_is_enabled(struct amdgpu_device *adev,
2003 enum smu_feature_mask mask);
2004 #endif
2005
2006 void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
2007 bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
2008
smu_feature_bits_is_set(const struct smu_feature_bits * bits,unsigned int bit)2009 static inline bool smu_feature_bits_is_set(const struct smu_feature_bits *bits,
2010 unsigned int bit)
2011 {
2012 if (bit >= SMU_FEATURE_MAX)
2013 return false;
2014
2015 return test_bit(bit, bits->bits);
2016 }
2017
smu_feature_bits_set_bit(struct smu_feature_bits * bits,unsigned int bit)2018 static inline void smu_feature_bits_set_bit(struct smu_feature_bits *bits,
2019 unsigned int bit)
2020 {
2021 if (bit < SMU_FEATURE_MAX)
2022 __set_bit(bit, bits->bits);
2023 }
2024
smu_feature_bits_clear_bit(struct smu_feature_bits * bits,unsigned int bit)2025 static inline void smu_feature_bits_clear_bit(struct smu_feature_bits *bits,
2026 unsigned int bit)
2027 {
2028 if (bit < SMU_FEATURE_MAX)
2029 __clear_bit(bit, bits->bits);
2030 }
2031
/* Clear every feature bit in @bits. */
static inline void smu_feature_bits_clearall(struct smu_feature_bits *bits)
{
	bitmap_zero(bits->bits, SMU_FEATURE_MAX);
}
2036
/* Set every feature bit in @bits. */
static inline void smu_feature_bits_fill(struct smu_feature_bits *bits)
{
	bitmap_fill(bits->bits, SMU_FEATURE_MAX);
}
2041
/* Return true if @bits and @mask have at least one set bit in common. */
static inline bool
smu_feature_bits_test_mask(const struct smu_feature_bits *bits,
			   const unsigned long *mask)
{
	return bitmap_intersects(bits->bits, mask, SMU_FEATURE_MAX);
}
2048
/* Populate @bits from the u32 array @arr; only the low @nbits bits are copied. */
static inline void smu_feature_bits_from_arr32(struct smu_feature_bits *bits,
					       const uint32_t *arr,
					       unsigned int nbits)
{
	bitmap_from_arr32(bits->bits, arr, nbits);
}
2055
/* Export the low @nbits bits of @bits into the u32 array @arr. */
static inline void
smu_feature_bits_to_arr32(const struct smu_feature_bits *bits, uint32_t *arr,
			  unsigned int nbits)
{
	bitmap_to_arr32(arr, bits->bits, nbits);
}
2062
/* Return true if none of the low @nbits bits of @bits are set. */
static inline bool smu_feature_bits_empty(const struct smu_feature_bits *bits,
					  unsigned int nbits)
{
	return bitmap_empty(bits->bits, nbits);
}
2068
/* Return true if all of the low @nbits bits of @bits are set. */
static inline bool smu_feature_bits_full(const struct smu_feature_bits *bits,
					 unsigned int nbits)
{
	return bitmap_full(bits->bits, nbits);
}
2074
/* Copy the low @nbits bits of the raw bitmap @src into @dst. */
static inline void smu_feature_bits_copy(struct smu_feature_bits *dst,
					 const unsigned long *src,
					 unsigned int nbits)
{
	bitmap_copy(dst->bits, src, nbits);
}
2081
2082 static inline struct smu_feature_bits *
__smu_feature_get_list(struct smu_context * smu,enum smu_feature_list list)2083 __smu_feature_get_list(struct smu_context *smu, enum smu_feature_list list)
2084 {
2085 if (unlikely(list >= SMU_FEATURE_LIST_MAX)) {
2086 dev_warn(smu->adev->dev, "Invalid feature list: %d\n", list);
2087 return &smu->smu_feature.bits[SMU_FEATURE_LIST_SUPPORTED];
2088 }
2089
2090 return &smu->smu_feature.bits[list];
2091 }
2092
smu_feature_list_is_set(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2093 static inline bool smu_feature_list_is_set(struct smu_context *smu,
2094 enum smu_feature_list list,
2095 unsigned int bit)
2096 {
2097 if (bit >= smu->smu_feature.feature_num)
2098 return false;
2099
2100 return smu_feature_bits_is_set(__smu_feature_get_list(smu, list), bit);
2101 }
2102
smu_feature_list_set_bit(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2103 static inline void smu_feature_list_set_bit(struct smu_context *smu,
2104 enum smu_feature_list list,
2105 unsigned int bit)
2106 {
2107 if (bit >= smu->smu_feature.feature_num)
2108 return;
2109
2110 smu_feature_bits_set_bit(__smu_feature_get_list(smu, list), bit);
2111 }
2112
smu_feature_list_clear_bit(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2113 static inline void smu_feature_list_clear_bit(struct smu_context *smu,
2114 enum smu_feature_list list,
2115 unsigned int bit)
2116 {
2117 if (bit >= smu->smu_feature.feature_num)
2118 return;
2119
2120 smu_feature_bits_clear_bit(__smu_feature_get_list(smu, list), bit);
2121 }
2122
/* Set every bit (full SMU_FEATURE_MAX range, not just feature_num) in @smu's @list bitmap. */
static inline void smu_feature_list_set_all(struct smu_context *smu,
					    enum smu_feature_list list)
{
	smu_feature_bits_fill(__smu_feature_get_list(smu, list));
}
2128
/* Clear every bit (full SMU_FEATURE_MAX range) in @smu's @list bitmap. */
static inline void smu_feature_list_clear_all(struct smu_context *smu,
					      enum smu_feature_list list)
{
	smu_feature_bits_clearall(__smu_feature_get_list(smu, list));
}
2134
/* Return true if no bit below feature_num is set in @smu's @list bitmap. */
static inline bool smu_feature_list_is_empty(struct smu_context *smu,
					     enum smu_feature_list list)
{
	return smu_feature_bits_empty(__smu_feature_get_list(smu, list),
				      smu->smu_feature.feature_num);
}
2141
/*
 * Overwrite the low feature_num bits of @smu's @dst_list bitmap with the
 * corresponding bits from the raw bitmap @src.
 */
static inline void smu_feature_list_set_bits(struct smu_context *smu,
					     enum smu_feature_list dst_list,
					     const unsigned long *src)
{
	smu_feature_bits_copy(__smu_feature_get_list(smu, dst_list), src,
			      smu->smu_feature.feature_num);
}
2149
/* Export the low feature_num bits of @smu's @list bitmap into the u32 array @arr. */
static inline void smu_feature_list_to_arr32(struct smu_context *smu,
					     enum smu_feature_list list,
					     uint32_t *arr)
{
	smu_feature_bits_to_arr32(__smu_feature_get_list(smu, list), arr,
				  smu->smu_feature.feature_num);
}
2157
smu_feature_init(struct smu_context * smu,int feature_num)2158 static inline void smu_feature_init(struct smu_context *smu, int feature_num)
2159 {
2160 if (!feature_num || smu->smu_feature.feature_num != 0)
2161 return;
2162
2163 smu->smu_feature.feature_num = feature_num;
2164 smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_SUPPORTED);
2165 smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
2166 }
2167
2168 /*
2169 * smu_safe_u16_nn - Make u16 safe by filtering negative overflow errors
2170 * @val: Input u16 value, may contain invalid negative overflows
2171 *
2172 * Convert u16 to non-negative value. Cast to s16 to detect negative values
2173 * caused by calculation errors. Return 0 for negative errors, return
2174 * original value if valid.
2175 *
2176 * Return: Valid u16 value or 0
2177 */
smu_safe_u16_nn(u16 val)2178 static inline u16 smu_safe_u16_nn(u16 val)
2179 {
2180 s16 tmp = (s16)val;
2181
2182 return tmp < 0 ? 0 : val;
2183 }
2184
2185 #endif
2186