xref: /linux/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h (revision 49b4b0c6af391f89353e57120272015cdc1cbff5)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #ifndef __AMDGPU_SMU_H__
23 #define __AMDGPU_SMU_H__
24 
25 #include <linux/acpi_amd_wbrf.h>
26 #include <linux/units.h>
27 
28 #include "amdgpu.h"
29 #include "kgd_pp_interface.h"
30 #include "dm_pp_interface.h"
31 #include "dm_pp_smu.h"
32 #include "smu_types.h"
33 #include "linux/firmware.h"
34 
/* Thermal alert temperature limits, in degrees Celsius */
#define SMU_THERMAL_MINIMUM_ALERT_TEMP		0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP		255
/* Degrees -> millidegrees Celsius scale factor (hwmon reports millidegrees) */
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES	1000
/* Buffer length for SMU firmware file names */
#define SMU_FW_NAME_LEN			0x24

/*
 * Flag bits matching smu_user_dpm_profile.flags -- NOTE(review): usage
 * inferred from names; confirm against consumers of this header.
 */
#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0)
#define SMU_CUSTOM_FAN_SPEED_RPM     (1 << 1)
#define SMU_CUSTOM_FAN_SPEED_PWM     (1 << 2)

/* GPU metrics cache validity window -- presumably ms, cf. smu_table_cache.interval */
#define SMU_GPU_METRICS_CACHE_INTERVAL 5
45 
/*
 * ASIC-agnostic throttler status bit positions. Bits are grouped by cause:
 * 0..7 power limits, 16..23 current (TDC/EDC) limits, 32..47 temperature
 * limits, 56+ other. Gaps between groups leave room for future bits.
 */

// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT			0
#define SMU_THROTTLER_PPT1_BIT			1
#define SMU_THROTTLER_PPT2_BIT			2
#define SMU_THROTTLER_PPT3_BIT			3
#define SMU_THROTTLER_SPL_BIT			4
#define SMU_THROTTLER_FPPT_BIT			5
#define SMU_THROTTLER_SPPT_BIT			6
#define SMU_THROTTLER_SPPT_APU_BIT		7

// Current Throttlers
#define SMU_THROTTLER_TDC_GFX_BIT		16
#define SMU_THROTTLER_TDC_SOC_BIT		17
#define SMU_THROTTLER_TDC_MEM_BIT		18
#define SMU_THROTTLER_TDC_VDD_BIT		19
#define SMU_THROTTLER_TDC_CVIP_BIT		20
#define SMU_THROTTLER_EDC_CPU_BIT		21
#define SMU_THROTTLER_EDC_GFX_BIT		22
#define SMU_THROTTLER_APCC_BIT			23

// Temperature
#define SMU_THROTTLER_TEMP_GPU_BIT		32
#define SMU_THROTTLER_TEMP_CORE_BIT		33
#define SMU_THROTTLER_TEMP_MEM_BIT		34
#define SMU_THROTTLER_TEMP_EDGE_BIT		35
#define SMU_THROTTLER_TEMP_HOTSPOT_BIT		36
#define SMU_THROTTLER_TEMP_SOC_BIT		37
#define SMU_THROTTLER_TEMP_VR_GFX_BIT		38
#define SMU_THROTTLER_TEMP_VR_SOC_BIT		39
#define SMU_THROTTLER_TEMP_VR_MEM0_BIT		40
#define SMU_THROTTLER_TEMP_VR_MEM1_BIT		41
#define SMU_THROTTLER_TEMP_LIQUID0_BIT		42
#define SMU_THROTTLER_TEMP_LIQUID1_BIT		43
#define SMU_THROTTLER_VRHOT0_BIT		44
#define SMU_THROTTLER_VRHOT1_BIT		45
#define SMU_THROTTLER_PROCHOT_CPU_BIT		46
#define SMU_THROTTLER_PROCHOT_GFX_BIT		47

// Other
#define SMU_THROTTLER_PPM_BIT			56
#define SMU_THROTTLER_FIT_BIT			57
87 
/* Hardware-level power state; only an identification/validity tag is stored. */
struct smu_hw_power_state {
	unsigned int magic;
};
91 
92 struct smu_power_state;
93 
/*
 * UI-facing label classifying a power state.
 * NOTE: the misspelled enumerators (TABEL/BALLANCED/HIGHT) are part of the
 * interface and are kept as-is; renaming would break users of this header.
 */
enum smu_state_ui_label {
	SMU_STATE_UI_LABEL_NONE,
	SMU_STATE_UI_LABEL_BATTERY,
	SMU_STATE_UI_TABEL_MIDDLE_LOW,
	SMU_STATE_UI_LABEL_BALLANCED,
	SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
	SMU_STATE_UI_LABEL_PERFORMANCE,
	SMU_STATE_UI_LABEL_BACO,
};
103 
/*
 * Bit flags classifying a power state (usable as a mask).
 * NOTE: the misspelled enumerators (CLASSIFICATIN/OVERDIRVER) are part of
 * the interface and are kept as-is.
 */
enum smu_state_classification_flag {
	SMU_STATE_CLASSIFICATION_FLAG_BOOT                     = 0x0001,
	SMU_STATE_CLASSIFICATION_FLAG_THERMAL                  = 0x0002,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE      = 0x0004,
	SMU_STATE_CLASSIFICATION_FLAG_RESET                    = 0x0008,
	SMU_STATE_CLASSIFICATION_FLAG_FORCED                   = 0x0010,
	SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE      = 0x0020,
	SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE      = 0x0040,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE           = 0x0080,
	SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE   = 0x0100,
	SMU_STATE_CLASSIFICATION_FLAG_UVD                      = 0x0200,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW       = 0x0400,
	SMU_STATE_CLASSIFICATION_FLAG_ACPI                     = 0x0800,
	SMU_STATE_CLASSIFICATION_FLAG_HD2                      = 0x1000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_HD                   = 0x2000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_SD                   = 0x4000,
	SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE      = 0x8000,
	SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE   = 0x10000,
	SMU_STATE_CLASSIFICATION_FLAG_BACO                     = 0x20000,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2      = 0x40000,
	SMU_STATE_CLASSIFICATION_FLAG_ULV                      = 0x80000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC                  = 0x100000,
};
127 
/* Classification metadata attached to a power state. */
struct smu_state_classification_block {
	enum smu_state_ui_label         ui_label;
	enum smu_state_classification_flag  flags;	/* mask of classification flags */
	int                          bios_index;	/* index of this state in the BIOS table */
	bool                      temporary_state;
	bool                      to_be_deleted;
};
135 
/* PCIe configuration associated with a power state. */
struct smu_state_pcie_block {
	unsigned int lanes;	/* number of PCIe lanes */
};

/* Where a display refresh rate limit originates from. */
enum smu_refreshrate_source {
	SMU_REFRESHRATE_SOURCE_EDID,		/* taken from the display's EDID */
	SMU_REFRESHRATE_SOURCE_EXPLICIT		/* explicitly specified value */
};
144 
/* Display-related constraints of a power state. */
struct smu_state_display_block {
	bool              disable_frame_modulation;
	bool              limit_refreshrate;
	enum smu_refreshrate_source refreshrate_source;
	int                  explicit_refreshrate;	/* used when source is EXPLICIT */
	int                  edid_refreshrate_index;	/* used when source is EDID */
	bool              enable_vari_bright;
};
153 
/* Memory controller settings of a power state. */
struct smu_state_memory_block {
	bool              dll_off;	/* whether the memory DLL is powered off */
	uint8_t                 m3arb;
	uint8_t                 unused[3];	/* pad to 4-byte multiple */
};

/* Software/driver policy toggles of a power state. */
struct smu_state_software_algorithm_block {
	bool disable_load_balancing;
	bool enable_sleep_for_timestamps;
};
164 
/*
 * Thermal limits for the various on-board sensors.
 * NOTE(review): units are presumably millidegrees Celsius, matching the
 * hwmon convention and SMU_TEMPERATURE_UNITS_PER_CENTIGRADES -- confirm.
 */
struct smu_temperature_range {
	int min;				/* overall minimum */
	int max;				/* overall maximum */
	int edge_emergency_max;
	int hotspot_min;
	int hotspot_crit_max;
	int hotspot_emergency_max;
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int software_shutdown_temp;		/* SW-enforced shutdown threshold */
	int software_shutdown_temp_offset;
};
178 
/* Conditions under which a power state may be selected. */
struct smu_state_validation_block {
	bool single_display_only;	/* valid only with a single active display */
	bool disallow_on_dc;		/* not allowed on DC (battery) power */
	uint8_t supported_power_levels;
};

/* UVD engine clocks, presumably in the SMU's native clock units. */
struct smu_uvd_clocks {
	uint32_t vclk;	/* video clock */
	uint32_t dclk;	/* decode clock */
};
189 
190 /**
191 * Structure to hold a SMU Power State.
192 */
193 struct smu_power_state {
194 	uint32_t                                      id;
195 	struct list_head                              ordered_list;
196 	struct list_head                              all_states_list;
197 
198 	struct smu_state_classification_block         classification;
199 	struct smu_state_validation_block             validation;
200 	struct smu_state_pcie_block                   pcie;
201 	struct smu_state_display_block                display;
202 	struct smu_state_memory_block                 memory;
203 	struct smu_state_software_algorithm_block     software;
204 	struct smu_uvd_clocks                         uvd_clocks;
205 	struct smu_hw_power_state                     hardware;
206 };
207 
/* Power source the board is currently running from. */
enum smu_power_src_type {
	SMU_POWER_SOURCE_AC,		/* wall power */
	SMU_POWER_SOURCE_DC,		/* battery power */
	SMU_POWER_SOURCE_COUNT,
};

/* Which power limit (PPT) is being queried/set. */
enum smu_ppt_limit_type {
	SMU_DEFAULT_PPT_LIMIT = 0,	/* default/sustained power limit */
	SMU_FAST_PPT_LIMIT,		/* fast/short-window power limit */
	SMU_LIMIT_TYPE_COUNT,
};

/* Which value of a power limit is being queried. */
enum smu_ppt_limit_level {
	SMU_PPT_LIMIT_MIN = -1,		/* minimum allowed limit */
	SMU_PPT_LIMIT_CURRENT,		/* currently programmed limit */
	SMU_PPT_LIMIT_DEFAULT,		/* firmware default limit */
	SMU_PPT_LIMIT_MAX,		/* maximum allowed limit */
};
226 
/*
 * Size of the system-memory pool reserved for SMU use.
 * Enumerator values are the pool size in bytes.
 *
 * Re-indented with tabs for consistency with the rest of this file
 * (kernel coding style); values are unchanged.
 */
enum smu_memory_pool_size {
	SMU_MEMORY_POOL_SIZE_ZERO   = 0,
	SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
	SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
	SMU_MEMORY_POOL_SIZE_1_GB   = 0x40000000,
	SMU_MEMORY_POOL_SIZE_2_GB   = 0x80000000,
};
234 
/*
 * User-set DPM preferences that must survive reset/resume.
 * 'flags' carries SMU_DPM_USER_PROFILE_RESTORE / SMU_CUSTOM_FAN_SPEED_*.
 */
struct smu_user_dpm_profile {
	uint32_t fan_mode;
	uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];	/* per smu_ppt_limit_type */
	uint32_t fan_speed_pwm;
	uint32_t fan_speed_rpm;
	uint32_t flags;
	uint32_t user_od;	/* user overdrive settings active */

	/* user clock state information */
	uint32_t clk_mask[SMU_CLK_COUNT];	/* forced DPM level mask per clock domain */
	uint32_t clk_dependency;
};
247 
/*
 * Initialize the size/alignment/memory-domain of tables[table_id].
 * do-while(0) makes the macro behave as a single statement; note that
 * 'tables' and 'table_id' are evaluated multiple times.
 */
#define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
	do {						\
		tables[table_id].size = s;		\
		tables[table_id].align = a;		\
		tables[table_id].domain = d;		\
	} while (0)
254 
/* Cached copy of a table, with a validity window to rate-limit refreshes. */
struct smu_table_cache {
	void *buffer;			/* cached table contents */
	size_t size;			/* size of 'buffer' in bytes */
	/* cache validity interval, in ms */
	uint32_t interval;
	unsigned long last_cache_time;	/* jiffies-style timestamp of last refresh */
};

/* A table shared between driver and SMU firmware. */
struct smu_table {
	uint64_t size;			/* table size in bytes */
	uint32_t align;			/* required buffer alignment */
	uint8_t domain;			/* memory domain to allocate 'bo' from */
	uint64_t mc_address;		/* GPU (MC) address of the backing buffer */
	void *cpu_addr;			/* CPU mapping of the backing buffer */
	struct amdgpu_bo *bo;		/* backing buffer object */
	uint32_t version;
	struct smu_table_cache cache;
};
273 
/* Identifiers for driver-side metrics tables that carry their own cache. */
enum smu_driver_table_id {
	SMU_DRIVER_TABLE_GPU_METRICS = 0,
	SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_COUNT,
};

/* Driver-side table instance: its id plus a cached copy of the contents. */
struct smu_driver_table {
	enum smu_driver_table_id id;
	struct smu_table_cache cache;
};
285 
/* How a performance level is designated/selected. */
enum smu_perf_level_designation {
	PERF_LEVEL_ACTIVITY,
	PERF_LEVEL_POWER_CONTAINMENT,
};

/* Clocks and voltages describing one performance level. */
struct smu_performance_level {
	uint32_t core_clock;
	uint32_t memory_clock;
	uint32_t vddc;			/* core voltage */
	uint32_t vddci;			/* memory I/O voltage */
	uint32_t non_local_mem_freq;
	uint32_t non_local_mem_width;
};

/* Min/max clock and bandwidth capabilities reported to clients. */
struct smu_clock_info {
	uint32_t min_mem_clk;
	uint32_t max_mem_clk;
	uint32_t min_eng_clk;
	uint32_t max_eng_clk;
	uint32_t min_bus_bandwidth;
	uint32_t max_bus_bandwidth;
};
308 
#define SMU_MAX_DPM_LEVELS 16

/* One DPM level: its frequency value and whether it is currently enabled. */
struct smu_dpm_clk_level {
	bool		enabled;
	uint32_t	value;
};

/* smu_dpm_table.flags: table has fine-grained (continuous) levels */
#define SMU_DPM_TABLE_FINE_GRAINED	BIT(0)

/* DPM level table for one clock domain, levels ordered ascending. */
struct smu_dpm_table {
	enum smu_clk_type	clk_type;
	uint32_t		count;		/* number of valid dpm_levels entries */
	uint32_t		flags;		/* SMU_DPM_TABLE_* */
	struct smu_dpm_clk_level dpm_levels[SMU_MAX_DPM_LEVELS];
};

/*
 * Lowest/highest level value of a DPM table, or 0 when the table is empty.
 * Note: 'table' is evaluated more than once -- pass a side-effect-free
 * expression. Assumes dpm_levels[] is sorted ascending.
 */
#define SMU_DPM_TABLE_MIN(table) \
	((table)->count > 0 ? (table)->dpm_levels[0].value : 0)

#define SMU_DPM_TABLE_MAX(table) \
	((table)->count > 0 ? (table)->dpm_levels[(table)->count - 1].value : 0)
330 
#define SMU_MAX_PCIE_LEVELS 3

/* PCIe DPM table: per-level link generation, lane count, and LCLK frequency. */
struct smu_pcie_table {
	uint8_t pcie_gen[SMU_MAX_PCIE_LEVELS];
	uint8_t pcie_lane[SMU_MAX_PCIE_LEVELS];
	uint16_t lclk_freq[SMU_MAX_PCIE_LEVELS];
	uint32_t lclk_levels;	/* number of valid entries above */
};
339 
/*
 * Boot-up clock/voltage values and firmware metadata read from the VBIOS.
 * NOTE(review): clock units not visible here -- presumably the VBIOS
 * native units (10 kHz); confirm against the ATOM firmware-info parser.
 */
struct smu_bios_boot_up_values {
	uint32_t			revision;
	uint32_t			gfxclk;
	uint32_t			uclk;
	uint32_t			socclk;
	uint32_t			dcefclk;
	uint32_t			eclk;
	uint32_t			vclk;
	uint32_t			dclk;
	uint16_t			vddc;
	uint16_t			vddci;
	uint16_t			mvddc;
	uint16_t			vdd_gfx;
	uint8_t				cooling_id;
	uint32_t			pp_table_id;
	uint32_t			format_revision;
	uint32_t			content_revision;
	uint32_t			fclk;
	uint32_t			lclk;
	uint32_t			firmware_caps;
};
361 
/* Common identifiers for SMU tables, mapped per-ASIC via table_map. */
enum smu_table_id {
	SMU_TABLE_PPTABLE = 0,
	SMU_TABLE_WATERMARKS,
	SMU_TABLE_CUSTOM_DPM,
	SMU_TABLE_DPMCLOCKS,
	SMU_TABLE_AVFS,
	SMU_TABLE_AVFS_PSM_DEBUG,
	SMU_TABLE_AVFS_FUSE_OVERRIDE,
	SMU_TABLE_PMSTATUSLOG,
	SMU_TABLE_SMU_METRICS,
	SMU_TABLE_DRIVER_SMU_CONFIG,
	SMU_TABLE_ACTIVITY_MONITOR_COEFF,
	SMU_TABLE_OVERDRIVE,
	SMU_TABLE_I2C_COMMANDS,
	SMU_TABLE_PACE,
	SMU_TABLE_ECCINFO,
	SMU_TABLE_COMBO_PPTABLE,
	SMU_TABLE_WIFIBAND,
	SMU_TABLE_PMFW_SYSTEM_METRICS,
	SMU_TABLE_COUNT,
};
383 
/* All tables shared between driver and SMU, plus local scratch copies. */
struct smu_table_context {
	void				*power_play_table;	/* pptable from VBIOS/file */
	uint32_t			power_play_table_size;
	void				*hardcode_pptable;	/* user-uploaded pptable */
	unsigned long			metrics_time;		/* last metrics fetch time */
	void				*metrics_table;
	void				*clocks_table;
	void				*watermarks_table;
	struct mutex			metrics_lock;		/* protects metrics_table/metrics_time */

	void				*max_sustainable_clocks;
	struct smu_bios_boot_up_values	boot_values;
	void				*driver_pptable;
	void				*combo_pptable;
	void                            *ecc_table;
	void				*driver_smu_config_table;
	struct smu_table		tables[SMU_TABLE_COUNT];
	/*
	 * The driver table is just a staging buffer for
	 * uploading/downloading content from the SMU.
	 *
	 * And the table_id for SMU_MSG_TransferTableSmu2Dram/
	 * SMU_MSG_TransferTableDram2Smu instructs SMU
	 * which content driver is interested.
	 */
	struct smu_table		driver_table;
	struct smu_table		memory_pool;
	struct smu_table		dummy_read_1_table;
	uint8_t                         thermal_controller_type;

	/* overdrive: current, boot-time default, and user-customized copies */
	void				*overdrive_table;
	void                            *boot_overdrive_table;
	void				*user_overdrive_table;

	struct smu_driver_table driver_tables[SMU_DRIVER_TABLE_COUNT];
};
420 
421 struct smu_context;
422 struct smu_dpm_policy;
423 
/* Human-readable description of a DPM policy and its levels. */
struct smu_dpm_policy_desc {
	const char *name;
	/* return a description string for @level of @dpm_policy */
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};

/* One selectable DPM policy and its currently active level. */
struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	unsigned long level_mask;	/* bitmask of levels this policy supports */
	int current_level;
	int (*set_policy)(struct smu_context *ctxt, int level);
};

/* All DPM policies for a device; policy_mask marks which are valid. */
struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};
441 
/* Per-device DPM state: ASIC-specific context blob plus level bookkeeping. */
struct smu_dpm_context {
	uint32_t dpm_context_size;		/* size of dpm_context/golden_dpm_context */
	void *dpm_context;			/* ASIC-specific DPM tables */
	void *golden_dpm_context;		/* pristine copy for restores */
	enum amd_dpm_forced_level dpm_level;		/* current forced level */
	enum amd_dpm_forced_level saved_dpm_level;
	enum amd_dpm_forced_level requested_dpm_level;
	struct smu_power_state *dpm_request_power_state;
	struct smu_power_state *dpm_current_power_state;
	struct mclock_latency_table *mclk_latency_table;
	struct smu_dpm_policy_ctxt *dpm_policies;	/* NULL if policies unsupported */
};
454 
/* Temperature subsystem context: just the callback table. */
struct smu_temp_context {
	const struct smu_temp_funcs      *temp_funcs;
};

/*
 * Power-gating state of the multimedia/auxiliary engines.
 * The atomic_t members are refcount-style gate states; uvd/vce use plain
 * bools (legacy engines).
 */
struct smu_power_gate {
	bool uvd_gated;
	bool vce_gated;
	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];	/* per-VCN-instance gate */
	atomic_t jpeg_gated;
	atomic_t vpe_gated;
	atomic_t isp_gated;
	atomic_t umsch_mm_gated;
};

/* Power subsystem context: ASIC-specific blob plus engine gating state. */
struct smu_power_context {
	void *power_context;
	uint32_t power_context_size;
	struct smu_power_gate power_gate;
};
474 
/* Default and maximum number of SMU feature bits */
#define SMU_FEATURE_NUM_DEFAULT (64)
#define SMU_FEATURE_MAX (128)

/* Bitmap wrapper sized for the maximum feature count. */
struct smu_feature_bits {
	DECLARE_BITMAP(bits, SMU_FEATURE_MAX);
};
481 
482 /*
483  * Helpers for initializing smu_feature_bits statically.
484  * Use SMU_FEATURE_BIT_INIT() which automatically handles array indexing:
485  *   static const struct smu_feature_bits example = {
486  *       .bits = {
487  *           SMU_FEATURE_BIT_INIT(5),
488  *           SMU_FEATURE_BIT_INIT(10),
489  *           SMU_FEATURE_BIT_INIT(65),
490  *           SMU_FEATURE_BIT_INIT(100)
491  *       }
492  *   };
493  */
/* Index of the unsigned long element holding @bit */
#define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG)
/* Position of @bit within its element */
#define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG)
/* Designated-initializer fragment setting @bit (see example above) */
#define SMU_FEATURE_BIT_INIT(bit) \
	[SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit))

/* Which feature list a bitmap in smu_feature.bits[] represents. */
enum smu_feature_list {
	SMU_FEATURE_LIST_SUPPORTED,	/* features the firmware supports */
	SMU_FEATURE_LIST_ALLOWED,	/* features the driver allows */
	SMU_FEATURE_LIST_MAX,
};

/* Feature bookkeeping: count plus supported/allowed bitmaps. */
struct smu_feature {
	uint32_t feature_num;
	struct smu_feature_bits bits[SMU_FEATURE_LIST_MAX];
};
509 
/* A set of clock values, including self-refresh (SR) variants. */
struct smu_clocks {
	uint32_t engine_clock;
	uint32_t memory_clock;
	uint32_t bus_bandwidth;
	uint32_t engine_clock_in_sr;	/* engine clock while in self-refresh */
	uint32_t dcef_clock;
	uint32_t dcef_clock_in_sr;	/* DCEF clock while in self-refresh */
};

#define MAX_REGULAR_DPM_NUM 16
/* One memory-clock frequency and its switch latency. */
struct mclk_latency_entries {
	uint32_t  frequency;
	uint32_t  latency;
};
/* Memory-clock latency table; 'count' valid entries. */
struct mclock_latency_table {
	uint32_t  count;
	struct mclk_latency_entries  entries[MAX_REGULAR_DPM_NUM];
};
528 
/* GPU reset modes supported by the SMU. */
enum smu_reset_mode {
	SMU_RESET_MODE_0,
	SMU_RESET_MODE_1,
	SMU_RESET_MODE_2,
	SMU_RESET_MODE_3,
	SMU_RESET_MODE_4,
};

/* BACO (Bus Active, Chip Off) transition direction. */
enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
};

/* BACO capability and current state for a device. */
struct smu_baco_context {
	uint32_t state;			/* current smu_baco_state */
	bool platform_support;		/* platform supports BACO */
	bool maco_support;		/* MACO variant supported */
};
547 
/* A frequency range with the currently selected DPM level. */
struct smu_freq_info {
	uint32_t min;
	uint32_t max;
	uint32_t freq_level;
};

/* Preset frequencies for one clock domain used by UMD pstate profiling. */
struct pstates_clk_freq {
	uint32_t			min;
	uint32_t			standard;
	uint32_t			peak;
	struct smu_freq_info		custom;	/* user-defined range */
	struct smu_freq_info		curr;	/* current range */
};

/* UMD pstate presets for every profiled clock domain. */
struct smu_umd_pstate_table {
	struct pstates_clk_freq		gfxclk_pstate;
	struct pstates_clk_freq		socclk_pstate;
	struct pstates_clk_freq		uclk_pstate;
	struct pstates_clk_freq		vclk_pstate;
	struct pstates_clk_freq		dclk_pstate;
	struct pstates_clk_freq		fclk_pstate;
};
570 
/* Maps a common message index to an ASIC-specific one, with per-message flags. */
struct cmn2asic_msg_mapping {
	int	valid_mapping;	/* non-zero if this entry is valid */
	int	map_to;		/* ASIC-specific index */
	uint32_t flags;
};

/* Maps a common clock/feature/table/etc. index to an ASIC-specific one. */
struct cmn2asic_mapping {
	int	valid_mapping;	/* non-zero if this entry is valid */
	int	map_to;		/* ASIC-specific index */
};
581 
/* Maximum number of message argument registers */
#define SMU_MSG_MAX_ARGS 4

/* Message flags for smu_msg_args */
#define SMU_MSG_FLAG_ASYNC	BIT(0) /* Async send - skip post-poll */
#define SMU_MSG_FLAG_LOCK_HELD	BIT(1) /* Caller holds ctl->lock */

/* smu_msg_ctl flags */
#define SMU_MSG_CTL_DEBUG_MAILBOX	BIT(0) /* Debug mailbox supported */

struct smu_msg_ctl;
/**
 * struct smu_msg_config - IP-level register configuration
 * @msg_reg: Message register offset
 * @resp_reg: Response register offset
 * @arg_regs: Argument register offsets (up to SMU_MSG_MAX_ARGS)
 * @num_arg_regs: Number of argument registers available
 * @debug_msg_reg: Debug message register offset
 * @debug_resp_reg: Debug response register offset
 * @debug_param_reg: Debug parameter register offset
 */
struct smu_msg_config {
	u32 msg_reg;
	u32 resp_reg;
	u32 arg_regs[SMU_MSG_MAX_ARGS];
	int num_arg_regs;
	u32 debug_msg_reg;
	u32 debug_resp_reg;
	u32 debug_param_reg;
};
611 
612 /**
613  * struct smu_msg_args - Per-call message arguments
614  * @msg: Common message type (enum smu_message_type)
615  * @args: Input arguments
616  * @num_args: Number of input arguments
617  * @out_args: Output arguments (filled after successful send)
618  * @num_out_args: Number of output arguments to read
619  * @flags: Message flags (SMU_MSG_FLAG_*)
620  * @timeout: Per-message timeout in us (0 = use default)
621  */
622 struct smu_msg_args {
623 	enum smu_message_type msg;
624 	u32 args[SMU_MSG_MAX_ARGS];
625 	int num_args;
626 	u32 out_args[SMU_MSG_MAX_ARGS];
627 	int num_out_args;
628 	u32 flags;
629 	u32 timeout;
630 };
631 
632 /**
633  * struct smu_msg_ops - IP-level protocol operations
634  * @send_msg: send message protocol
635  * @wait_response: wait for response (for split send/wait cases)
636  * @decode_response: Convert response register value to errno
637  * @send_debug_msg: send debug message
638  */
639 struct smu_msg_ops {
640 	int (*send_msg)(struct smu_msg_ctl *ctl, struct smu_msg_args *args);
641 	int (*wait_response)(struct smu_msg_ctl *ctl, u32 timeout_us);
642 	int (*decode_response)(u32 resp);
643 	int (*send_debug_msg)(struct smu_msg_ctl *ctl, u32 msg, u32 param);
644 };
645 
646 /**
647  * struct smu_msg_ctl - Per-device message control block
648  * This is a standalone control block that encapsulates everything
649  * needed for SMU messaging. The ops->send_msg implements the complete
650  * protocol including all filtering and error handling.
651  */
652 struct smu_msg_ctl {
653 	struct smu_context *smu;
654 	struct mutex lock;
655 	struct smu_msg_config config;
656 	const struct smu_msg_ops *ops;
657 	const struct cmn2asic_msg_mapping *message_map;
658 	u32 default_timeout;
659 	u32 flags;
660 };
661 
/* Smart Trace Buffer (STB) state. */
struct stb_context {
	uint32_t stb_buf_size;	/* STB buffer size in bytes */
	bool enabled;
	spinlock_t lock;	/* serializes STB access */
};

/* Lifecycle state of the SMU firmware as tracked by the driver. */
enum smu_fw_status {
	SMU_FW_INIT = 0,	/* not yet up */
	SMU_FW_RUNTIME,		/* up and running */
	SMU_FW_HANG,		/* detected as hung */
};
673 
/* Maximum number of workload policies */
#define WORKLOAD_POLICY_MAX 7

/*
 * Configure wbrf event handling pace as there can be only one
 * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
 */
#define SMU_WBRF_EVENT_HANDLING_PACE	10

/* Optional per-ASIC capabilities tracked as a bitmap in smu_feature_cap. */
enum smu_feature_cap_id {
	SMU_FEATURE_CAP_ID__LINK_RESET = 0,
	SMU_FEATURE_CAP_ID__SDMA_RESET,
	SMU_FEATURE_CAP_ID__VCN_RESET,
	SMU_FEATURE_CAP_ID__COUNT,
};

/* Bitmap of smu_feature_cap_id capabilities supported by this device. */
struct smu_feature_cap {
	DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
};
692 
/*
 * Top-level per-device SMU driver context. Aggregates the callback tables,
 * common-to-ASIC index mappings, all sub-contexts (tables, DPM, power,
 * thermal, features), cached limits, and the message control block.
 */
struct smu_context {
	struct amdgpu_device            *adev;
	struct amdgpu_irq_src		irq_source;

	const struct pptable_funcs	*ppt_funcs;	/* ASIC-specific callbacks */
	/* common-to-ASIC index mappings (see struct cmn2asic_mapping) */
	const struct cmn2asic_mapping	*clock_map;
	const struct cmn2asic_mapping	*feature_map;
	const struct cmn2asic_mapping	*table_map;
	const struct cmn2asic_mapping	*pwr_src_map;
	const struct cmn2asic_mapping	*workload_map;
	uint64_t pool_size;		/* SMU memory pool size, cf. enum smu_memory_pool_size */

	struct smu_table_context	smu_table;
	struct smu_dpm_context		smu_dpm;
	struct smu_power_context	smu_power;
	struct smu_temp_context		smu_temp;
	struct smu_feature		smu_feature;
	struct amd_pp_display_configuration  *display_config;
	struct smu_baco_context		smu_baco;
	struct smu_temperature_range	thermal_range;
	struct smu_feature_cap		fea_cap;
	void *od_settings;		/* overdrive settings blob */

	struct smu_umd_pstate_table	pstate_table;
	uint32_t pstate_sclk;
	uint32_t pstate_mclk;

	bool od_enabled;		/* overdrive enabled */
	/* power limits -- NOTE(review): presumably in watts; confirm per-ASIC */
	uint32_t current_power_limit;
	uint32_t default_power_limit;
	uint32_t max_power_limit;
	uint32_t min_power_limit;

	/* soft pptable */
	uint32_t ppt_offset_bytes;
	uint32_t ppt_size_bytes;
	uint8_t  *ppt_start_addr;

	bool support_power_containment;
	bool disable_watermark;

/* watermarks_bitmap flags */
#define WATERMARKS_EXIST	(1 << 0)
#define WATERMARKS_LOADED	(1 << 1)
	uint32_t watermarks_bitmap;
	uint32_t hard_min_uclk_req_from_dal;	/* minimum uclk requested by display */
	bool disable_uclk_switch;

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
	/* backend specific custom workload settings */
	long *custom_profile_params;
	bool pm_enabled;
	bool is_apu;

	/* firmware interface/version info reported by the SMC */
	uint32_t smc_driver_if_version;
	uint32_t smc_fw_if_version;
	uint32_t smc_fw_version;
	uint32_t smc_fw_caps;
	uint8_t smc_fw_state;		/* enum smu_fw_status */

	bool uploading_custom_pp_table;
	bool dc_controlled_by_gpio;

	/* deferred work for throttling logging and interrupt handling */
	struct work_struct throttling_logging_work;
	atomic64_t throttle_int_counter;
	struct work_struct interrupt_work;

	unsigned fan_max_rpm;
	unsigned manual_fan_speed_pwm;

	uint32_t gfx_default_hard_min_freq;
	uint32_t gfx_default_soft_max_freq;
	uint32_t gfx_actual_hard_min_freq;
	uint32_t gfx_actual_soft_max_freq;

	/* APU only */
	uint32_t cpu_default_soft_min_freq;
	uint32_t cpu_default_soft_max_freq;
	uint32_t cpu_actual_soft_min_freq;
	uint32_t cpu_actual_soft_max_freq;
	uint32_t cpu_core_id_select;
	uint16_t cpu_core_num;

	struct smu_user_dpm_profile user_dpm_profile;

	struct stb_context stb_context;

	struct firmware pptable_firmware;

	struct delayed_work		swctf_delayed_work;

	/* data structures for wbrf feature support */
	bool				wbrf_supported;
	struct notifier_block		wbrf_notifier;
	struct delayed_work		wbrf_delayed_work;

	/* SMU message control block */
	struct smu_msg_ctl msg_ctl;
};
796 
797 struct i2c_adapter;
798 
799 /**
800  * struct smu_temp_funcs - Callbacks used to get temperature data.
801  */
/**
 * struct smu_temp_funcs - Callbacks used to get temperature data.
 */
struct smu_temp_funcs {
	/**
	 * @get_temp_metrics: Fill @table with the temperature metrics of the
	 *                    requested kind.
	 * @type Temperature metrics type(baseboard/gpuboard)
	 * Return: Size of &table
	 */
	ssize_t (*get_temp_metrics)(struct smu_context *smu,
				    enum smu_temp_metric_type type, void *table);

	/**
	 * @temp_metrics_is_supported: Get if specific temperature metrics is supported
	 * @type Temperature metrics type(baseboard/gpuboard)
	 * Return: true if supported else false
	 */
	bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);

};
820 
821 /**
822  * struct pptable_funcs - Callbacks used to interact with the SMU.
823  */
824 struct pptable_funcs {
825 	/**
826 	 * @run_btc: Calibrate voltage/frequency curve to fit the system's
827 	 *           power delivery and voltage margins. Required for adaptive
828 	 *           voltage frequency scaling (AVFS).
829 	 */
830 	int (*run_btc)(struct smu_context *smu);
831 
832 	/**
833 	 * @init_allowed_features: Initialize allowed features bitmap.
834 	 * Directly sets allowed features using smu_feature wrapper functions.
835 	 */
836 	int (*init_allowed_features)(struct smu_context *smu);
837 
838 	/**
839 	 * @get_current_power_state: Get the current power state.
840 	 *
841 	 * Return: Current power state on success, negative errno on failure.
842 	 */
843 	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
844 
845 	/**
846 	 * @set_default_dpm_table: Retrieve the default overdrive settings from
847 	 *                         the SMU.
848 	 */
849 	int (*set_default_dpm_table)(struct smu_context *smu);
850 
851 	int (*set_power_state)(struct smu_context *smu);
852 
853 	/**
854 	 * @populate_umd_state_clk: Populate the UMD power state table with
855 	 *                          defaults.
856 	 */
857 	int (*populate_umd_state_clk)(struct smu_context *smu);
858 
859 	/**
860 	 * @emit_clk_levels: Print DPM clock levels for a clock domain
861 	 *                    to buffer using sysfs_emit_at. Star current level.
862 	 *
863 	 * Used for sysfs interfaces.
864 	 * &buf: sysfs buffer
865 	 * &offset: offset within buffer to start printing, which is updated by the
866 	 * function.
867 	 *
868 	 * Return: 0 on Success or Negative to indicate an error occurred.
869 	 */
870 	int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
871 
872 	/**
873 	 * @force_clk_levels: Set a range of allowed DPM levels for a clock
874 	 *                    domain.
875 	 * &clk_type: Clock domain.
876 	 * &mask: Range of allowed DPM levels.
877 	 */
878 	int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
879 
880 	/**
881 	 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
882 	 * &type: Type of edit.
883 	 * &input: Edit parameters.
884 	 * &size: Size of &input.
885 	 */
886 	int (*od_edit_dpm_table)(struct smu_context *smu,
887 				 enum PP_OD_DPM_TABLE_COMMAND type,
888 				 long *input, uint32_t size);
889 
890 	/**
891 	 * @restore_user_od_settings: Restore the user customized
892 	 *                            OD settings on S3/S4/Runpm resume.
893 	 */
894 	int (*restore_user_od_settings)(struct smu_context *smu);
895 
896 	/**
897 	 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
898 	 *                                  domain.
899 	 */
900 	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
901 					      enum smu_clk_type clk_type,
902 					      struct
903 					      pp_clock_levels_with_latency
904 					      *clocks);
905 	/**
906 	 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
907 	 *                                  domain.
908 	 */
909 	int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
910 					      enum amd_pp_clock_type type,
911 					      struct
912 					      pp_clock_levels_with_voltage
913 					      *clocks);
914 
915 	/**
916 	 * @get_power_profile_mode: Print all power profile modes to
917 	 *                          buffer. Star current mode.
918 	 */
919 	int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
920 
921 	/**
922 	 * @set_power_profile_mode: Set a power profile mode. Also used to
923 	 *                          create/set custom power profile modes.
924 	 * &input: Power profile mode parameters.
925 	 * &workload_mask: mask of workloads to enable
926 	 * &custom_params: custom profile parameters
927 	 * &custom_params_max_idx: max valid idx into custom_params
928 	 */
929 	int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
930 				      long *custom_params, u32 custom_params_max_idx);
931 
932 	/**
933 	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
934 	 *                      management.
935 	 */
936 	int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);
937 
938 	/**
939 	 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
940 	 *                       management.
941 	 */
942 	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
943 
944 	/**
945 	 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
946 	 */
947 	int (*set_gfx_power_up_by_imu)(struct smu_context *smu);
948 
949 	/**
950 	 * @read_sensor: Read data from a sensor.
951 	 * &sensor: Sensor to read data from.
952 	 * &data: Sensor reading.
953 	 * &size: Size of &data.
954 	 */
955 	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
956 			   void *data, uint32_t *size);
957 
958 	/**
959 	 * @get_apu_thermal_limit: get apu core limit from smu
960 	 * &limit: current limit temperature in millidegrees Celsius
961 	 */
962 	int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
963 
964 	/**
965 	 * @set_apu_thermal_limit: update all controllers with new limit
	 * &limit: limit temperature to be set, in millidegrees Celsius
967 	 */
968 	int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
969 
970 	/**
971 	 * @pre_display_config_changed: Prepare GPU for a display configuration
972 	 *                              change.
973 	 *
974 	 * Disable display tracking and pin memory clock speed to maximum. Used
975 	 * in display component synchronization.
976 	 */
977 	int (*pre_display_config_changed)(struct smu_context *smu);
978 
979 	/**
980 	 * @display_config_changed: Notify the SMU of the current display
981 	 *                          configuration.
982 	 *
983 	 * Allows SMU to properly track blanking periods for memory clock
984 	 * adjustment. Used in display component synchronization.
985 	 */
986 	int (*display_config_changed)(struct smu_context *smu);
987 
988 	int (*apply_clocks_adjust_rules)(struct smu_context *smu);
989 
990 	/**
991 	 * @notify_smc_display_config: Applies display requirements to the
992 	 *                             current power state.
993 	 *
994 	 * Optimize deep sleep DCEFclk and mclk for the current display
995 	 * configuration. Used in display component synchronization.
996 	 */
997 	int (*notify_smc_display_config)(struct smu_context *smu);
998 
999 	/**
1000 	 * @is_dpm_running: Check if DPM is running.
1001 	 *
1002 	 * Return: True if DPM is running, false otherwise.
1003 	 */
1004 	bool (*is_dpm_running)(struct smu_context *smu);
1005 
1006 	/**
1007 	 * @get_fan_speed_pwm: Get the current fan speed in PWM.
1008 	 */
1009 	int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);
1010 
1011 	/**
1012 	 * @get_fan_speed_rpm: Get the current fan speed in rpm.
1013 	 */
1014 	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
1015 
1016 	/**
1017 	 * @set_watermarks_table: Configure and upload the watermarks tables to
1018 	 *                        the SMU.
1019 	 */
1020 	int (*set_watermarks_table)(struct smu_context *smu,
1021 				    struct pp_smu_wm_range_sets *clock_ranges);
1022 
1023 	/**
	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
1025 	 */
1026 	int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
1027 
1028 	/**
1029 	 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
1030 	 * &clocks_in_khz: Array of DPM levels.
1031 	 * &num_states: Elements in &clocks_in_khz.
1032 	 */
1033 	int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
1034 
1035 	/**
1036 	 * @set_default_od_settings: Set the overdrive tables to defaults.
1037 	 */
1038 	int (*set_default_od_settings)(struct smu_context *smu);
1039 
1040 	/**
1041 	 * @set_performance_level: Set a performance level.
1042 	 */
1043 	int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
1044 
1045 	/**
1046 	 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
1047 	 *                                       clock switching.
1048 	 *
1049 	 * Disabling this feature forces memory clock speed to maximum.
1050 	 * Enabling sets the minimum memory clock capable of driving the
1051 	 * current display configuration.
1052 	 */
1053 	int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
1054 
1055 	/**
1056 	 * @get_power_limit: Get the device's power limits.
1057 	 */
1058 	int (*get_power_limit)(struct smu_context *smu,
1059 					uint32_t *current_power_limit,
1060 					uint32_t *default_power_limit,
1061 					uint32_t *max_power_limit,
1062 					uint32_t *min_power_limit);
1063 
1064 	/**
1065 	 * @get_ppt_limit: Get the device's ppt limits.
1066 	 */
1067 	int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
1068 			enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
1069 
1070 	/**
1071 	 * @set_df_cstate: Set data fabric cstate.
1072 	 */
1073 	int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
1074 
1075 	/**
1076 	 * @update_pcie_parameters: Update and upload the system's PCIe
	 *                          capabilities to the SMU.
1078 	 * &pcie_gen_cap: Maximum allowed PCIe generation.
1079 	 * &pcie_width_cap: Maximum allowed PCIe width.
1080 	 */
1081 	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
1082 
1083 	/**
1084 	 * @i2c_init: Initialize i2c.
1085 	 *
1086 	 * The i2c bus is used internally by the SMU voltage regulators and
1087 	 * other devices. The i2c's EEPROM also stores bad page tables on boards
1088 	 * with ECC.
1089 	 */
1090 	int (*i2c_init)(struct smu_context *smu);
1091 
1092 	/**
1093 	 * @i2c_fini: Tear down i2c.
1094 	 */
1095 	void (*i2c_fini)(struct smu_context *smu);
1096 
1097 	/**
1098 	 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
1099 	 */
1100 	void (*get_unique_id)(struct smu_context *smu);
1101 
1102 	/**
1103 	 * @get_dpm_clock_table: Get a copy of the DPM clock table.
1104 	 *
1105 	 * Used by display component in bandwidth and watermark calculations.
1106 	 */
1107 	int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
1108 
1109 	/**
1110 	 * @init_microcode: Request the SMU's firmware from the kernel.
1111 	 */
1112 	int (*init_microcode)(struct smu_context *smu);
1113 
1114 	/**
1115 	 * @load_microcode: Load firmware onto the SMU.
1116 	 */
1117 	int (*load_microcode)(struct smu_context *smu);
1118 
1119 	/**
1120 	 * @fini_microcode: Release the SMU's firmware.
1121 	 */
1122 	void (*fini_microcode)(struct smu_context *smu);
1123 
1124 	/**
1125 	 * @init_smc_tables: Initialize the SMU tables.
1126 	 */
1127 	int (*init_smc_tables)(struct smu_context *smu);
1128 
1129 	/**
1130 	 * @fini_smc_tables: Release the SMU tables.
1131 	 */
1132 	int (*fini_smc_tables)(struct smu_context *smu);
1133 
1134 	/**
1135 	 * @init_power: Initialize the power gate table context.
1136 	 */
1137 	int (*init_power)(struct smu_context *smu);
1138 
1139 	/**
1140 	 * @fini_power: Release the power gate table context.
1141 	 */
1142 	int (*fini_power)(struct smu_context *smu);
1143 
1144 	/**
1145 	 * @check_fw_status: Check the SMU's firmware status.
1146 	 *
1147 	 * Return: Zero if check passes, negative errno on failure.
1148 	 */
1149 	int (*check_fw_status)(struct smu_context *smu);
1150 
1151 	/**
	 * @set_mp1_state: put SMU into a correct state for coming
1153 	 *                 resume from runpm or gpu reset.
1154 	 */
1155 	int (*set_mp1_state)(struct smu_context *smu,
1156 			     enum pp_mp1_state mp1_state);
1157 
1158 	/**
1159 	 * @setup_pptable: Initialize the power play table and populate it with
1160 	 *                 default values.
1161 	 */
1162 	int (*setup_pptable)(struct smu_context *smu);
1163 
1164 	/**
1165 	 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
1166 	 */
1167 	int (*get_vbios_bootup_values)(struct smu_context *smu);
1168 
1169 	/**
1170 	 * @check_fw_version: Print driver and SMU interface versions to the
1171 	 *                    system log.
1172 	 *
1173 	 * Interface mismatch is not a critical failure.
1174 	 */
1175 	int (*check_fw_version)(struct smu_context *smu);
1176 
1177 	/**
1178 	 * @powergate_sdma: Power up/down system direct memory access.
1179 	 */
1180 	int (*powergate_sdma)(struct smu_context *smu, bool gate);
1181 
1182 	/**
	 * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
1184 	 *                gating.
1185 	 */
1186 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
1187 
1188 	/**
1189 	 * @write_pptable: Write the power play table to the SMU.
1190 	 */
1191 	int (*write_pptable)(struct smu_context *smu);
1192 
1193 	/**
1194 	 * @set_driver_table_location: Send the location of the driver table to
1195 	 *                             the SMU.
1196 	 */
1197 	int (*set_driver_table_location)(struct smu_context *smu);
1198 
1199 	/**
1200 	 * @set_tool_table_location: Send the location of the tool table to the
1201 	 *                           SMU.
1202 	 */
1203 	int (*set_tool_table_location)(struct smu_context *smu);
1204 
1205 	/**
1206 	 * @notify_memory_pool_location: Send the location of the memory pool to
1207 	 *                               the SMU.
1208 	 */
1209 	int (*notify_memory_pool_location)(struct smu_context *smu);
1210 
1211 	/**
1212 	 * @system_features_control: Enable/disable all SMU features.
1213 	 */
1214 	int (*system_features_control)(struct smu_context *smu, bool en);
1215 
1216 	/**
1217 	 * @init_display_count: Notify the SMU of the number of display
1218 	 *                      components in current display configuration.
1219 	 */
1220 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
1221 
1222 	/**
1223 	 * @set_allowed_mask: Notify the SMU of the features currently allowed
1224 	 *                    by the driver.
1225 	 */
1226 	int (*set_allowed_mask)(struct smu_context *smu);
1227 
1228 	/**
1229 	 * @get_enabled_mask: Get a mask of features that are currently enabled
1230 	 *                    on the SMU.
1231 	 * &feature_mask: Enabled feature mask.
1232 	 */
1233 	int (*get_enabled_mask)(struct smu_context *smu,
1234 				struct smu_feature_bits *feature_mask);
1235 
1236 	/**
1237 	 * @feature_is_enabled: Test if a feature is enabled.
1238 	 *
1239 	 * Return: One if enabled, zero if disabled.
1240 	 */
1241 	int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
1242 
1243 	/**
1244 	 * @disable_all_features_with_exception: Disable all features with
1245 	 *                                       exception to those in &mask.
1246 	 */
1247 	int (*disable_all_features_with_exception)(struct smu_context *smu,
1248 						   enum smu_feature_mask mask);
1249 
1250 	/**
1251 	 * @notify_display_change: General interface call to let SMU know about DC change
1252 	 */
1253 	int (*notify_display_change)(struct smu_context *smu);
1254 
1255 	/**
1256 	 * @set_power_limit: Set power limit in watts.
1257 	 */
1258 	int (*set_power_limit)(struct smu_context *smu,
1259 			       enum smu_ppt_limit_type limit_type,
1260 			       uint32_t limit);
1261 
1262 	/**
1263 	 * @init_max_sustainable_clocks: Populate max sustainable clock speed
1264 	 *                               table with values from the SMU.
1265 	 */
1266 	int (*init_max_sustainable_clocks)(struct smu_context *smu);
1267 
1268 	/**
1269 	 * @enable_thermal_alert: Enable thermal alert interrupts.
1270 	 */
1271 	int (*enable_thermal_alert)(struct smu_context *smu);
1272 
1273 	/**
1274 	 * @disable_thermal_alert: Disable thermal alert interrupts.
1275 	 */
1276 	int (*disable_thermal_alert)(struct smu_context *smu);
1277 
1278 	/**
1279 	 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
1280 	 *                           clock speed in MHz.
1281 	 */
1282 	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
1283 
1284 	/**
1285 	 * @display_clock_voltage_request: Set a hard minimum frequency
1286 	 * for a clock domain.
1287 	 */
1288 	int (*display_clock_voltage_request)(struct smu_context *smu, struct
1289 					     pp_display_clock_request
1290 					     *clock_req);
1291 
1292 	/**
1293 	 * @get_fan_control_mode: Get the current fan control mode.
1294 	 */
1295 	uint32_t (*get_fan_control_mode)(struct smu_context *smu);
1296 
1297 	/**
1298 	 * @set_fan_control_mode: Set the fan control mode.
1299 	 */
1300 	int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
1301 
1302 	/**
1303 	 * @set_fan_speed_pwm: Set a static fan speed in PWM.
1304 	 */
1305 	int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);
1306 
1307 	/**
1308 	 * @set_fan_speed_rpm: Set a static fan speed in rpm.
1309 	 */
1310 	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
1311 
1312 	/**
1313 	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
1314 	 * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise.
1315 	 */
1316 	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
1317 
1318 	/**
1319 	 * @gfx_off_control: Enable/disable graphics engine poweroff.
1320 	 */
1321 	int (*gfx_off_control)(struct smu_context *smu, bool enable);
1322 
1323 
1324 	/**
1325 	 * @get_gfx_off_status: Get graphics engine poweroff status.
1326 	 *
1327 	 * Return:
1328 	 * 0 - GFXOFF(default).
1329 	 * 1 - Transition out of GFX State.
1330 	 * 2 - Not in GFXOFF.
1331 	 * 3 - Transition into GFXOFF.
1332 	 */
1333 	uint32_t (*get_gfx_off_status)(struct smu_context *smu);
1334 
1335 	/**
	 * @get_gfx_off_entrycount: total GFXOFF entry count at the time of
1337 	 * query since system power-up
1338 	 */
1339 	u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
1340 
1341 	/**
1342 	 * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
1343 	 */
1344 	u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
1345 
1346 	/**
1347 	 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
1348 	 */
1349 	u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
1350 
1351 	/**
	 * @register_irq_handler: Register interrupt request handlers.
1353 	 */
1354 	int (*register_irq_handler)(struct smu_context *smu);
1355 
1356 	/**
1357 	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
1358 	 */
1359 	int (*set_azalia_d3_pme)(struct smu_context *smu);
1360 
1361 	/**
1362 	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
1363 	 *                                    clock speeds table.
1364 	 *
1365 	 * Provides a way for the display component (DC) to get the max
1366 	 * sustainable clocks from the SMU.
1367 	 */
1368 	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
1369 
1370 	/**
1371 	 * @get_bamaco_support: Check if GPU supports BACO/MACO
1372 	 * BACO: Bus Active, Chip Off
1373 	 * MACO: Memory Active, Chip Off
1374 	 */
1375 	int (*get_bamaco_support)(struct smu_context *smu);
1376 
1377 	/**
1378 	 * @baco_get_state: Get the current BACO state.
1379 	 *
1380 	 * Return: Current BACO state.
1381 	 */
1382 	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
1383 
1384 	/**
1385 	 * @baco_set_state: Enter/exit BACO.
1386 	 */
1387 	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
1388 
1389 	/**
1390 	 * @baco_enter: Enter BACO.
1391 	 */
1392 	int (*baco_enter)(struct smu_context *smu);
1393 
1394 	/**
	 * @baco_exit: Exit BACO.
1396 	 */
1397 	int (*baco_exit)(struct smu_context *smu);
1398 
1399 	/**
1400 	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
1401 	 */
1402 	bool (*mode1_reset_is_support)(struct smu_context *smu);
1403 
1404 	/**
1405 	 * @mode1_reset: Perform mode1 reset.
1406 	 *
1407 	 * Complete GPU reset.
1408 	 */
1409 	int (*mode1_reset)(struct smu_context *smu);
1410 
1411 	/**
1412 	 * @mode2_reset: Perform mode2 reset.
1413 	 *
1414 	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
1415 	 * IPs reset varies by asic.
1416 	 */
1417 	int (*mode2_reset)(struct smu_context *smu);
1418 	/* for gfx feature enablement after mode2 reset */
1419 	int (*enable_gfx_features)(struct smu_context *smu);
1420 
1421 	/**
1422 	 * @link_reset: Perform link reset.
1423 	 *
1424 	 * The gfx device driver reset
1425 	 */
1426 	int (*link_reset)(struct smu_context *smu);
1427 
1428 	/**
1429 	 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
1430 	 *                         domain in MHz.
1431 	 */
1432 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
1433 
1434 	/**
1435 	 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
1436 	 *                               domain in MHz.
1437 	 */
1438 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
1439 					   bool automatic);
1440 
1441 	/**
1442 	 * @set_power_source: Notify the SMU of the current power source.
1443 	 */
1444 	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
1445 
1446 	/**
1447 	 * @log_thermal_throttling_event: Print a thermal throttling warning to
1448 	 *                                the system's log.
1449 	 */
1450 	void (*log_thermal_throttling_event)(struct smu_context *smu);
1451 
1452 	/**
1453 	 * @get_pp_feature_mask: Print a human readable table of enabled
1454 	 *                       features to buffer.
1455 	 */
1456 	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
1457 
1458 	/**
1459 	 * @set_pp_feature_mask: Request the SMU enable/disable features to
1460 	 *                       match those enabled in &new_mask.
1461 	 */
1462 	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
1463 
1464 	/**
1465 	 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
1466 	 *
1467 	 * Return: Size of &table
1468 	 */
1469 	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
1470 
1471 	/**
1472 	 * @get_pm_metrics: Get one snapshot of power management metrics from
1473 	 * PMFW.
1474 	 *
1475 	 * Return: Size of the metrics sample
1476 	 */
1477 	ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
1478 				  size_t size);
1479 
1480 	/**
1481 	 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
1482 	 */
1483 	int (*enable_mgpu_fan_boost)(struct smu_context *smu);
1484 
1485 	/**
1486 	 * @gfx_ulv_control: Enable/disable ultra low voltage.
1487 	 */
1488 	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
1489 
1490 	/**
1491 	 * @deep_sleep_control: Enable/disable deep sleep.
1492 	 */
1493 	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
1494 
1495 	/**
1496 	 * @get_fan_parameters: Get fan parameters.
1497 	 *
1498 	 * Get maximum fan speed from the power play table.
1499 	 */
1500 	int (*get_fan_parameters)(struct smu_context *smu);
1501 
1502 	/**
1503 	 * @post_init: Helper function for asic specific workarounds.
1504 	 */
1505 	int (*post_init)(struct smu_context *smu);
1506 
1507 	/**
1508 	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
1509 	 */
1510 	void (*interrupt_work)(struct smu_context *smu);
1511 
1512 	/**
1513 	 * @gpo_control: Enable/disable graphics power optimization if supported.
1514 	 */
1515 	int (*gpo_control)(struct smu_context *smu, bool enablement);
1516 
1517 	/**
1518 	 * @gfx_state_change_set: Send the current graphics state to the SMU.
1519 	 */
1520 	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
1521 
1522 	/**
1523 	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
1524 	 *                                      parameters to defaults.
1525 	 */
1526 	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
1527 
1528 	/**
1529 	 * @smu_handle_passthrough_sbr:  Send message to SMU about special handling for SBR.
1530 	 */
1531 	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
1532 
1533 	/**
1534 	 * @wait_for_event:  Wait for events from SMU.
1535 	 */
1536 	int (*wait_for_event)(struct smu_context *smu,
1537 			      enum smu_event_type event, uint64_t event_arg);
1538 
1539 	/**
	 * @send_hbm_bad_pages_num:  message SMU to update bad page number
1541 	 *										of SMUBUS table.
1542 	 */
1543 	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
1544 
1545 	/**
1546 	 * @send_rma_reason: message rma reason event to SMU.
1547 	 */
1548 	int (*send_rma_reason)(struct smu_context *smu);
1549 
1550 	/**
1551 	 * @reset_sdma: message SMU to soft reset sdma instance.
1552 	 */
1553 	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
1554 
1555 	/**
	 * @dpm_reset_vcn: message SMU to soft reset vcn instance.
1557 	 */
1558 	int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
1559 
1560 	/**
	 * @get_ecc_info:  message SMU to get ECC INFO table.
1562 	 */
1563 	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
1564 
1565 
1566 	/**
1567 	 * @stb_collect_info: Collects Smart Trace Buffers data.
1568 	 */
1569 	int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
1570 
1571 	/**
1572 	 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
1573 	 */
1574 	int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
1575 
1576 	/**
1577 	 * @set_config_table: Apply the input DriverSmuConfig table settings.
1578 	 */
1579 	int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
1580 
1581 	/**
	 * @send_hbm_bad_channel_flag:  message SMU to update bad channel info
1583 	 *										of SMUBUS table.
1584 	 */
1585 	int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);
1586 
1587 	/**
1588 	 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP
1589 	 */
1590 	int (*init_pptable_microcode)(struct smu_context *smu);
1591 
1592 	/**
1593 	 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
1594 	 *                       management.
1595 	 */
1596 	int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
1597 
1598 	/**
1599 	 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
1600 	 *                       management.
1601 	 */
1602 	int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
1603 
1604 	/**
1605 	 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
1606 	 *                       management.
1607 	 */
1608 	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
1609 
1610 	/**
1611 	 * @set_mall_enable: Init MALL power gating control.
1612 	 */
1613 	int (*set_mall_enable)(struct smu_context *smu);
1614 
1615 	/**
1616 	 * @notify_rlc_state: Notify RLC power state to SMU.
1617 	 */
1618 	int (*notify_rlc_state)(struct smu_context *smu, bool en);
1619 
1620 	/**
1621 	 * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
1622 	 */
1623 	bool (*is_asic_wbrf_supported)(struct smu_context *smu);
1624 
1625 	/**
1626 	 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported
1627 	 */
1628 	int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
1629 
1630 	/**
1631 	 * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied
1632 	 */
1633 	int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
1634 					struct freq_band_range *exclusion_ranges);
1635 	/**
1636 	 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
1637 	 * Return: Size of table
1638 	 */
1639 	ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
1640 				   void *table);
1641 	/**
1642 	 * @ras_send_msg: Send a message with a parameter from Ras
1643 	 * &msg: Type of message.
1644 	 * &param: Message parameter.
1645 	 * &read_arg: SMU response (optional).
1646 	 */
1647 	int (*ras_send_msg)(struct smu_context *smu,
1648 			    enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
1649 
1650 
1651 	/**
1652 	 * @get_ras_smu_drv: Get RAS smu driver interface
1653 	 * Return: ras_smu_drv *
1654 	 */
1655 	int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
1656 };
1657 
/*
 * MetricsMember_t - keys identifying individual values within an ASIC's
 * SMU metrics table.
 *
 * NOTE(review): units and exact semantics are defined by each ASIC-specific
 * backend — confirm against the per-ASIC get_smu_metrics_data handlers.
 */
typedef enum {
	/* instantaneous ("current") clock readings */
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	/* averaged clock readings */
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	/* averaged activity/busy percentages and power */
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	/* temperatures */
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	/* throttling, fan, voltage and link status */
	METRICS_THROTTLER_STATUS,
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	/* per-throttler residency counters */
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
	METRICS_AVERAGE_NPUCLK,
} MetricsMember_t;
1711 
/*
 * smu_cmn2asic_mapping_type - categories of common(driver)-to-ASIC index
 * mappings: messages, clocks, features, tables, power sources and
 * workload profiles.
 */
enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,
	CMN2ASIC_MAPPING_CLK,
	CMN2ASIC_MAPPING_FEATURE,
	CMN2ASIC_MAPPING_TABLE,
	CMN2ASIC_MAPPING_PWR,
	CMN2ASIC_MAPPING_WORKLOAD,
};
1720 
/*
 * smu_baco_seq - BACO-related sequence selectors.
 * BACO_SEQ_COUNT is the number of valid entries, not a sequence itself.
 */
enum smu_baco_seq {
	BACO_SEQ_BACO = 0,
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,
};
1728 
/*
 * Initializer helpers for the common-to-ASIC mapping tables (see
 * enum smu_cmn2asic_mapping_type).  Each expands to a designated
 * initializer [common_index] = {valid, asic_index} (messages also carry
 * flags).  The leading 1/0 marks the entry valid/invalid, as made explicit
 * by TAB_MAP_VALID/TAB_MAP_INVALID.
 */
#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

/* For ASICs whose FW spells the feature as <fea>_DPM instead of DPM_<fea>. */
#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

/* As above, but the common name also carries a CLK suffix. */
#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}
1758 
1759 /**
1760  * smu_memcpy_trailing - Copy the end of one structure into the middle of another
1761  *
1762  * @dst: Pointer to destination struct
1763  * @first_dst_member: The member name in @dst where the overwrite begins
1764  * @last_dst_member: The member name in @dst where the overwrite ends after
1765  * @src: Pointer to the source struct
1766  * @first_src_member: The member name in @src where the copy begins
1767  *
1768  */
1769 #define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	   \
1770 			    src, first_src_member)			   \
1771 ({									   \
1772 	size_t __src_offset = offsetof(typeof(*(src)), first_src_member);  \
1773 	size_t __src_size = sizeof(*(src)) - __src_offset;		   \
1774 	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member);  \
1775 	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
1776 			    __dst_offset;				   \
1777 	BUILD_BUG_ON(__src_size != __dst_size);				   \
1778 	__builtin_memcpy((u8 *)(dst) + __dst_offset,			   \
1779 			 (u8 *)(src) + __src_offset,			   \
1780 			 __dst_size);					   \
1781 })
1782 
/*
 * WifiOneBand_t - one occupied wifi frequency band (low/high edge).
 * NOTE(review): units are presumably MHz per the wbrf interface — confirm
 * against the PMFW WifiBandEntryTable definition.
 */
typedef struct {
	uint16_t     LowFreq;
	uint16_t     HighFreq;
} WifiOneBand_t;
1787 
/*
 * WifiBandEntryTable_t - table of occupied wifi bands handed to the SMU
 * for wbrf exclusion-range handling (see set_wbrf_exclusion_ranges).
 * WifiBandEntryNum is the count of valid elements in WifiBandEntry (max 11).
 * MmHubPadding: presumably padding required by the PMFW table ABI — confirm.
 */
typedef struct {
	uint32_t		WifiBandEntryNum;
	WifiOneBand_t	WifiBandEntry[11];
	uint32_t		MmHubPadding[8];
} WifiBandEntryTable_t;
1793 
/*
 * String identifiers for the pm policy types — presumably the names exposed
 * to userspace for pp_pm_policy selection; confirm against the sysfs code.
 */
#define STR_SOC_PSTATE_POLICY "soc_pstate"
#define STR_XGMI_PLPD_POLICY "xgmi_plpd"
1796 
1797 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
1798 					 enum pp_pm_policy p_type);
1799 
1800 static inline enum smu_driver_table_id
1801 smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
1802 {
1803 	switch (type) {
1804 	case SMU_TEMP_METRIC_BASEBOARD:
1805 		return SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS;
1806 	case SMU_TEMP_METRIC_GPUBOARD:
1807 		return SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS;
1808 	default:
1809 		return SMU_DRIVER_TABLE_COUNT;
1810 	}
1811 
1812 	return SMU_DRIVER_TABLE_COUNT;
1813 }
1814 
/*
 * smu_table_cache_update_time - record when @table's cached copy was
 * last refreshed.  @time is in jiffies (smu_table_cache_is_valid()
 * compares it against the current jiffies value).
 */
static inline void smu_table_cache_update_time(struct smu_table *table,
					       unsigned long time)
{
	table->cache.last_cache_time = time;
}
1820 
1821 static inline bool smu_table_cache_is_valid(struct smu_table *table)
1822 {
1823 	if (!table->cache.buffer || !table->cache.last_cache_time ||
1824 	    !table->cache.interval || !table->cache.size ||
1825 	    time_after(jiffies,
1826 		       table->cache.last_cache_time +
1827 			       msecs_to_jiffies(table->cache.interval)))
1828 		return false;
1829 
1830 	return true;
1831 }
1832 
1833 static inline int smu_table_cache_init(struct smu_context *smu,
1834 				       enum smu_table_id table_id, size_t size,
1835 				       uint32_t cache_interval)
1836 {
1837 	struct smu_table_context *smu_table = &smu->smu_table;
1838 	struct smu_table *tables = smu_table->tables;
1839 
1840 	tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
1841 	if (!tables[table_id].cache.buffer)
1842 		return -ENOMEM;
1843 
1844 	tables[table_id].cache.last_cache_time = 0;
1845 	tables[table_id].cache.interval = cache_interval;
1846 	tables[table_id].cache.size = size;
1847 
1848 	return 0;
1849 }
1850 
1851 static inline void smu_table_cache_fini(struct smu_context *smu,
1852 					enum smu_table_id table_id)
1853 {
1854 	struct smu_table_context *smu_table = &smu->smu_table;
1855 	struct smu_table *tables = smu_table->tables;
1856 
1857 	if (tables[table_id].cache.buffer) {
1858 		kfree(tables[table_id].cache.buffer);
1859 		tables[table_id].cache.buffer = NULL;
1860 		tables[table_id].cache.last_cache_time = 0;
1861 		tables[table_id].cache.interval = 0;
1862 	}
1863 }
1864 
1865 static inline int smu_driver_table_init(struct smu_context *smu,
1866 					enum smu_driver_table_id table_id,
1867 					size_t size, uint32_t cache_interval)
1868 {
1869 	struct smu_table_context *smu_table = &smu->smu_table;
1870 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1871 
1872 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1873 		return -EINVAL;
1874 
1875 	driver_tables[table_id].id = table_id;
1876 	driver_tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
1877 	if (!driver_tables[table_id].cache.buffer)
1878 		return -ENOMEM;
1879 
1880 	driver_tables[table_id].cache.last_cache_time = 0;
1881 	driver_tables[table_id].cache.interval = cache_interval;
1882 	driver_tables[table_id].cache.size = size;
1883 
1884 	return 0;
1885 }
1886 
1887 static inline void smu_driver_table_fini(struct smu_context *smu,
1888 					 enum smu_driver_table_id table_id)
1889 {
1890 	struct smu_table_context *smu_table = &smu->smu_table;
1891 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1892 
1893 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1894 		return;
1895 
1896 	if (driver_tables[table_id].cache.buffer) {
1897 		kfree(driver_tables[table_id].cache.buffer);
1898 		driver_tables[table_id].cache.buffer = NULL;
1899 		driver_tables[table_id].cache.last_cache_time = 0;
1900 		driver_tables[table_id].cache.interval = 0;
1901 	}
1902 }
1903 
1904 static inline bool smu_driver_table_is_valid(struct smu_driver_table *table)
1905 {
1906 	if (!table->cache.buffer || !table->cache.last_cache_time ||
1907 	    !table->cache.interval || !table->cache.size ||
1908 	    time_after(jiffies,
1909 		       table->cache.last_cache_time +
1910 			       msecs_to_jiffies(table->cache.interval)))
1911 		return false;
1912 
1913 	return true;
1914 }
1915 
1916 static inline void *smu_driver_table_ptr(struct smu_context *smu,
1917 					 enum smu_driver_table_id table_id)
1918 {
1919 	struct smu_table_context *smu_table = &smu->smu_table;
1920 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1921 
1922 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1923 		return NULL;
1924 
1925 	return driver_tables[table_id].cache.buffer;
1926 }
1927 
1928 static inline void
1929 smu_driver_table_update_cache_time(struct smu_context *smu,
1930 				   enum smu_driver_table_id table_id)
1931 {
1932 	struct smu_table_context *smu_table = &smu->smu_table;
1933 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1934 
1935 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1936 		return;
1937 
1938 	driver_tables[table_id].cache.last_cache_time = jiffies;
1939 }
1940 
/*
 * Public swsmu entry points, visible only to the top code layer (L1);
 * the L2/L3/L4 implementation layers must not call these directly.
 */
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type);

/* GPU reset support queries and triggers (mode1 / link reset). */
bool smu_mode1_reset_is_support(struct smu_context *smu);
bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
int smu_link_reset(struct smu_context *smu);

extern const struct amd_ip_funcs smu_ip_funcs;

bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);

/* DPM clock range query/control, in MHz per the pp interfaces. */
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max);

int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
			    uint32_t min, uint32_t max);

int smu_set_gfx_power_up_by_imu(struct smu_context *smu);

int smu_set_ac_dc(struct smu_context *smu);

int smu_set_xgmi_plpd_mode(struct smu_context *smu,
			   enum pp_xgmi_plpd_mode mode);

/* GFXOFF entry-count/residency/status accessors. */
int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);

int smu_set_residency_gfxoff(struct smu_context *smu, bool value);

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
/* RAS bad-page/bad-channel reporting to the SMU firmware. */
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
/* Per-instance engine resets; inst_mask selects SDMA/VCN instances. */
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);
const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);

int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
			    uint32_t param, uint32_t *readarg);
int amdgpu_smu_ras_feature_is_enabled(struct amdgpu_device *adev,
						enum smu_feature_mask mask);
#endif

/* Feature-capability flag set/test, available to all code layers. */
void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
2007 
2008 static inline bool smu_feature_bits_is_set(const struct smu_feature_bits *bits,
2009 					   unsigned int bit)
2010 {
2011 	if (bit >= SMU_FEATURE_MAX)
2012 		return false;
2013 
2014 	return test_bit(bit, bits->bits);
2015 }
2016 
2017 static inline void smu_feature_bits_set_bit(struct smu_feature_bits *bits,
2018 					    unsigned int bit)
2019 {
2020 	if (bit < SMU_FEATURE_MAX)
2021 		__set_bit(bit, bits->bits);
2022 }
2023 
2024 static inline void smu_feature_bits_clear_bit(struct smu_feature_bits *bits,
2025 					      unsigned int bit)
2026 {
2027 	if (bit < SMU_FEATURE_MAX)
2028 		__clear_bit(bit, bits->bits);
2029 }
2030 
/* Clear every bit in @bits over the full SMU_FEATURE_MAX width. */
static inline void smu_feature_bits_clearall(struct smu_feature_bits *bits)
{
	bitmap_zero(bits->bits, SMU_FEATURE_MAX);
}
2035 
/* Set every bit in @bits over the full SMU_FEATURE_MAX width. */
static inline void smu_feature_bits_fill(struct smu_feature_bits *bits)
{
	bitmap_fill(bits->bits, SMU_FEATURE_MAX);
}
2040 
/* Return true if @bits and @mask share at least one set bit. */
static inline bool
smu_feature_bits_test_mask(const struct smu_feature_bits *bits,
			   const unsigned long *mask)
{
	return bitmap_intersects(bits->bits, mask, SMU_FEATURE_MAX);
}
2047 
/*
 * Populate @bits from an array of 32-bit words (endian-safe), e.g. a
 * feature mask as exposed by firmware.  @nbits is the bit count to copy.
 */
static inline void smu_feature_bits_from_arr32(struct smu_feature_bits *bits,
					       const uint32_t *arr,
					       unsigned int nbits)
{
	bitmap_from_arr32(bits->bits, arr, nbits);
}
2054 
/* Export the first @nbits of @bits into an array of 32-bit words. */
static inline void
smu_feature_bits_to_arr32(const struct smu_feature_bits *bits, uint32_t *arr,
			  unsigned int nbits)
{
	bitmap_to_arr32(arr, bits->bits, nbits);
}
2061 
/* Return true if none of the first @nbits bits of @bits are set. */
static inline bool smu_feature_bits_empty(const struct smu_feature_bits *bits,
					  unsigned int nbits)
{
	return bitmap_empty(bits->bits, nbits);
}
2067 
/* Return true if all of the first @nbits bits of @bits are set. */
static inline bool smu_feature_bits_full(const struct smu_feature_bits *bits,
					 unsigned int nbits)
{
	return bitmap_full(bits->bits, nbits);
}
2073 
/* Copy the first @nbits bits from a raw bitmap @src into @dst. */
static inline void smu_feature_bits_copy(struct smu_feature_bits *dst,
					 const unsigned long *src,
					 unsigned int nbits)
{
	bitmap_copy(dst->bits, src, nbits);
}
2080 
2081 static inline struct smu_feature_bits *
2082 __smu_feature_get_list(struct smu_context *smu, enum smu_feature_list list)
2083 {
2084 	if (unlikely(list >= SMU_FEATURE_LIST_MAX)) {
2085 		dev_warn(smu->adev->dev, "Invalid feature list: %d\n", list);
2086 		return &smu->smu_feature.bits[SMU_FEATURE_LIST_SUPPORTED];
2087 	}
2088 
2089 	return &smu->smu_feature.bits[list];
2090 }
2091 
2092 static inline bool smu_feature_list_is_set(struct smu_context *smu,
2093 					   enum smu_feature_list list,
2094 					   unsigned int bit)
2095 {
2096 	if (bit >= smu->smu_feature.feature_num)
2097 		return false;
2098 
2099 	return smu_feature_bits_is_set(__smu_feature_get_list(smu, list), bit);
2100 }
2101 
2102 static inline void smu_feature_list_set_bit(struct smu_context *smu,
2103 					    enum smu_feature_list list,
2104 					    unsigned int bit)
2105 {
2106 	if (bit >= smu->smu_feature.feature_num)
2107 		return;
2108 
2109 	smu_feature_bits_set_bit(__smu_feature_get_list(smu, list), bit);
2110 }
2111 
2112 static inline void smu_feature_list_clear_bit(struct smu_context *smu,
2113 					      enum smu_feature_list list,
2114 					      unsigned int bit)
2115 {
2116 	if (bit >= smu->smu_feature.feature_num)
2117 		return;
2118 
2119 	smu_feature_bits_clear_bit(__smu_feature_get_list(smu, list), bit);
2120 }
2121 
/* Set every bit (full SMU_FEATURE_MAX width) in the given feature list. */
static inline void smu_feature_list_set_all(struct smu_context *smu,
					    enum smu_feature_list list)
{
	smu_feature_bits_fill(__smu_feature_get_list(smu, list));
}
2127 
/* Clear every bit (full SMU_FEATURE_MAX width) in the given feature list. */
static inline void smu_feature_list_clear_all(struct smu_context *smu,
					      enum smu_feature_list list)
{
	smu_feature_bits_clearall(__smu_feature_get_list(smu, list));
}
2133 
/*
 * Return true if no bit within the ASIC's advertised feature count is
 * set in the given feature list.
 */
static inline bool smu_feature_list_is_empty(struct smu_context *smu,
					     enum smu_feature_list list)
{
	return smu_feature_bits_empty(__smu_feature_get_list(smu, list),
				      smu->smu_feature.feature_num);
}
2140 
/*
 * Overwrite the given feature list with the raw bitmap @src, copying
 * only the ASIC's advertised feature count worth of bits.
 */
static inline void smu_feature_list_set_bits(struct smu_context *smu,
					     enum smu_feature_list dst_list,
					     const unsigned long *src)
{
	smu_feature_bits_copy(__smu_feature_get_list(smu, dst_list), src,
			      smu->smu_feature.feature_num);
}
2148 
/*
 * Export the given feature list into an array of 32-bit words, up to
 * the ASIC's advertised feature count.  @arr must be large enough to
 * hold feature_num bits.
 */
static inline void smu_feature_list_to_arr32(struct smu_context *smu,
					     enum smu_feature_list list,
					     uint32_t *arr)
{
	smu_feature_bits_to_arr32(__smu_feature_get_list(smu, list), arr,
				  smu->smu_feature.feature_num);
}
2156 
2157 static inline void smu_feature_init(struct smu_context *smu, int feature_num)
2158 {
2159 	if (!feature_num || smu->smu_feature.feature_num != 0)
2160 		return;
2161 
2162 	smu->smu_feature.feature_num = feature_num;
2163 	smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_SUPPORTED);
2164 	smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
2165 }
2166 
2167 #endif
2168