xref: /linux/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h (revision d4a292c5f8e65d2784b703c67179f4f7d0c7846c)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #ifndef __AMDGPU_SMU_H__
23 #define __AMDGPU_SMU_H__
24 
25 #include <linux/acpi_amd_wbrf.h>
26 #include <linux/units.h>
27 
28 #include "amdgpu.h"
29 #include "kgd_pp_interface.h"
30 #include "dm_pp_interface.h"
31 #include "dm_pp_smu.h"
32 #include "smu_types.h"
33 #include "linux/firmware.h"
34 
/* Thermal alert temperature limits accepted by the SMU, in degrees Celsius */
#define SMU_THERMAL_MINIMUM_ALERT_TEMP		0
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP		255
/* Scale factor from degrees Celsius to the millidegree units used by hwmon */
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES	1000
/* Maximum length of an SMU firmware file name */
#define SMU_FW_NAME_LEN			0x24

/* Flag bits stored in smu_user_dpm_profile.flags */
#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0) /* restore saved user DPM settings */
#define SMU_CUSTOM_FAN_SPEED_RPM     (1 << 1) /* user requested a fan speed in RPM */
#define SMU_CUSTOM_FAN_SPEED_PWM     (1 << 2) /* user requested a fan speed in PWM */

/* Validity interval for cached GPU metrics (see struct smu_table_cache, ms) */
#define SMU_GPU_METRICS_CACHE_INTERVAL 5
45 
/*
 * ASIC-agnostic throttler status bit positions. Per-ASIC code translates
 * its native throttler bits into this common layout; positions above 31
 * imply the status word is at least 64 bits wide.
 */
// Power Throttlers
#define SMU_THROTTLER_PPT0_BIT			0
#define SMU_THROTTLER_PPT1_BIT			1
#define SMU_THROTTLER_PPT2_BIT			2
#define SMU_THROTTLER_PPT3_BIT			3
#define SMU_THROTTLER_SPL_BIT			4
#define SMU_THROTTLER_FPPT_BIT			5
#define SMU_THROTTLER_SPPT_BIT			6
#define SMU_THROTTLER_SPPT_APU_BIT		7

// Current Throttlers
#define SMU_THROTTLER_TDC_GFX_BIT		16
#define SMU_THROTTLER_TDC_SOC_BIT		17
#define SMU_THROTTLER_TDC_MEM_BIT		18
#define SMU_THROTTLER_TDC_VDD_BIT		19
#define SMU_THROTTLER_TDC_CVIP_BIT		20
#define SMU_THROTTLER_EDC_CPU_BIT		21
#define SMU_THROTTLER_EDC_GFX_BIT		22
#define SMU_THROTTLER_APCC_BIT			23

// Temperature
#define SMU_THROTTLER_TEMP_GPU_BIT		32
#define SMU_THROTTLER_TEMP_CORE_BIT		33
#define SMU_THROTTLER_TEMP_MEM_BIT		34
#define SMU_THROTTLER_TEMP_EDGE_BIT		35
#define SMU_THROTTLER_TEMP_HOTSPOT_BIT		36
#define SMU_THROTTLER_TEMP_SOC_BIT		37
#define SMU_THROTTLER_TEMP_VR_GFX_BIT		38
#define SMU_THROTTLER_TEMP_VR_SOC_BIT		39
#define SMU_THROTTLER_TEMP_VR_MEM0_BIT		40
#define SMU_THROTTLER_TEMP_VR_MEM1_BIT		41
#define SMU_THROTTLER_TEMP_LIQUID0_BIT		42
#define SMU_THROTTLER_TEMP_LIQUID1_BIT		43
#define SMU_THROTTLER_VRHOT0_BIT		44
#define SMU_THROTTLER_VRHOT1_BIT		45
#define SMU_THROTTLER_PROCHOT_CPU_BIT		46
#define SMU_THROTTLER_PROCHOT_GFX_BIT		47

// Other
#define SMU_THROTTLER_PPM_BIT			56
#define SMU_THROTTLER_FIT_BIT			57
87 
/* Opaque ASIC-specific hardware power state handle */
struct smu_hw_power_state {
	unsigned int magic;
};

struct smu_power_state;
93 
/*
 * UI label a power state is reported under.
 * NOTE(review): the misspelled names (TABEL, BALLANCED, HIGHT) are kept
 * as-is because they are part of the existing identifier surface.
 */
enum smu_state_ui_label {
	SMU_STATE_UI_LABEL_NONE,
	SMU_STATE_UI_LABEL_BATTERY,
	SMU_STATE_UI_TABEL_MIDDLE_LOW,
	SMU_STATE_UI_LABEL_BALLANCED,
	SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
	SMU_STATE_UI_LABEL_PERFORMANCE,
	SMU_STATE_UI_LABEL_BACO,
};
103 
/*
 * Bit flags describing how/why a power state is classified.
 * NOTE(review): the misspelled names (CLASSIFICATIN, OVERDIRVER) are kept
 * as-is because they are part of the existing identifier surface.
 */
enum smu_state_classification_flag {
	SMU_STATE_CLASSIFICATION_FLAG_BOOT                     = 0x0001,
	SMU_STATE_CLASSIFICATION_FLAG_THERMAL                  = 0x0002,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE      = 0x0004,
	SMU_STATE_CLASSIFICATION_FLAG_RESET                    = 0x0008,
	SMU_STATE_CLASSIFICATION_FLAG_FORCED                   = 0x0010,
	SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE      = 0x0020,
	SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE      = 0x0040,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE           = 0x0080,
	SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE   = 0x0100,
	SMU_STATE_CLASSIFICATION_FLAG_UVD                      = 0x0200,
	SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW       = 0x0400,
	SMU_STATE_CLASSIFICATION_FLAG_ACPI                     = 0x0800,
	SMU_STATE_CLASSIFICATION_FLAG_HD2                      = 0x1000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_HD                   = 0x2000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_SD                   = 0x4000,
	SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE      = 0x8000,
	SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE   = 0x10000,
	SMU_STATE_CLASSIFICATION_FLAG_BACO                     = 0x20000,
	SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2      = 0x40000,
	SMU_STATE_CLASSIFICATION_FLAG_ULV                      = 0x80000,
	SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC                  = 0x100000,
};
127 
/* Classification metadata attached to a power state */
struct smu_state_classification_block {
	enum smu_state_ui_label         ui_label;
	enum smu_state_classification_flag  flags;
	int                          bios_index;
	bool                      temporary_state;
	bool                      to_be_deleted;
};

/* PCIe settings carried by a power state */
struct smu_state_pcie_block {
	unsigned int lanes;
};

/* Where a refresh rate limit comes from */
enum smu_refreshrate_source {
	SMU_REFRESHRATE_SOURCE_EDID,
	SMU_REFRESHRATE_SOURCE_EXPLICIT
};
144 
/* Display-related constraints carried by a power state */
struct smu_state_display_block {
	bool              disable_frame_modulation;
	bool              limit_refreshrate;
	enum smu_refreshrate_source refreshrate_source;
	int                  explicit_refreshrate;
	int                  edid_refreshrate_index;
	bool              enable_vari_bright;
};

/* Memory controller settings carried by a power state */
struct smu_state_memory_block {
	bool              dll_off;
	uint8_t                 m3arb;
	uint8_t                 unused[3];	/* explicit padding */
};

/* Software algorithm tweaks carried by a power state */
struct smu_state_software_algorithm_block {
	bool disable_load_balancing;
	bool enable_sleep_for_timestamps;
};
164 
/*
 * Safe operating temperature limits for the various thermal sensors.
 * NOTE(review): values appear to be in millidegrees Celsius as exposed
 * via hwmon — confirm against the per-ASIC implementations.
 */
struct smu_temperature_range {
	int min;
	int max;
	int edge_emergency_max;
	int hotspot_min;
	int hotspot_crit_max;
	int hotspot_emergency_max;
	int mem_min;
	int mem_crit_max;
	int mem_emergency_max;
	int software_shutdown_temp;
	int software_shutdown_temp_offset;
};
178 
/* Constraints on when a power state may be selected */
struct smu_state_validation_block {
	bool single_display_only;
	bool disallow_on_dc;	/* state not allowed on DC (battery) power */
	uint8_t supported_power_levels;
};

/* UVD video (vclk) and display (dclk) clock pair */
struct smu_uvd_clocks {
	uint32_t vclk;
	uint32_t dclk;
};
189 
/*
 * Structure to hold a SMU Power State: classification, validation, PCIe,
 * display, memory and software sub-blocks plus the ASIC-specific handle.
 */
struct smu_power_state {
	uint32_t                                      id;
	struct list_head                              ordered_list;
	struct list_head                              all_states_list;

	struct smu_state_classification_block         classification;
	struct smu_state_validation_block             validation;
	struct smu_state_pcie_block                   pcie;
	struct smu_state_display_block                display;
	struct smu_state_memory_block                 memory;
	struct smu_state_software_algorithm_block     software;
	struct smu_uvd_clocks                         uvd_clocks;
	struct smu_hw_power_state                     hardware;
};
207 
/* Power source the board is currently running from */
enum smu_power_src_type {
	SMU_POWER_SOURCE_AC,
	SMU_POWER_SOURCE_DC,
	SMU_POWER_SOURCE_COUNT,
};

/* Which power limit a get/set operation targets */
enum smu_ppt_limit_type {
	SMU_DEFAULT_PPT_LIMIT = 0,
	SMU_FAST_PPT_LIMIT,
	SMU_LIMIT_TYPE_COUNT,
};

/* Which bound of a power limit is being queried */
enum smu_ppt_limit_level {
	SMU_PPT_LIMIT_MIN = -1,
	SMU_PPT_LIMIT_CURRENT,
	SMU_PPT_LIMIT_DEFAULT,
	SMU_PPT_LIMIT_MAX,
};

/* Supported SMU memory pool sizes; enumerator values are the size in bytes */
enum smu_memory_pool_size {
    SMU_MEMORY_POOL_SIZE_ZERO   = 0,
    SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
    SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
    SMU_MEMORY_POOL_SIZE_1_GB   = 0x40000000,
    SMU_MEMORY_POOL_SIZE_2_GB   = 0x80000000,
};
234 
/*
 * DPM settings customized by the user; SMU_DPM_USER_PROFILE_RESTORE and
 * the SMU_CUSTOM_FAN_SPEED_* bits in @flags describe what to restore.
 */
struct smu_user_dpm_profile {
	uint32_t fan_mode;
	uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];	/* indexed by enum smu_ppt_limit_type */
	uint32_t fan_speed_pwm;
	uint32_t fan_speed_rpm;
	uint32_t flags;		/* SMU_DPM_USER_PROFILE_* / SMU_CUSTOM_FAN_SPEED_* */
	uint32_t user_od;	/* user overdrive settings present */

	/* user clock state information */
	uint32_t clk_mask[SMU_CLK_COUNT];
	uint32_t clk_dependency;
};
247 
/*
 * Initialize size/alignment/memory-domain of tables[table_id].
 * NOTE(review): @tables and @table_id are expanded multiple times — do not
 * pass arguments with side effects.
 */
#define SMU_TABLE_INIT(tables, table_id, s, a, d)	\
	do {						\
		tables[table_id].size = s;		\
		tables[table_id].align = a;		\
		tables[table_id].domain = d;		\
	} while (0)

/* CPU-side cache of a table's contents with a validity interval */
struct smu_table_cache {
	void *buffer;			/* cached table contents */
	size_t size;
	/* interval in ms*/
	uint32_t interval;
	unsigned long last_cache_time;	/* time of last refresh */
};
262 
/* A table shared between the driver and the SMU firmware */
struct smu_table {
	uint64_t size;
	uint32_t align;
	uint8_t domain;		/* memory domain for the backing BO; see SMU_TABLE_INIT */
	uint64_t mc_address;	/* GPU (MC) address of the table */
	void *cpu_addr;		/* CPU mapping of the table */
	struct amdgpu_bo *bo;
	uint32_t version;
	struct smu_table_cache cache;
};

/* Identifiers for driver-side metrics tables that carry their own cache */
enum smu_driver_table_id {
	SMU_DRIVER_TABLE_GPU_METRICS = 0,
	SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS,
	SMU_DRIVER_TABLE_COUNT,
};

struct smu_driver_table {
	enum smu_driver_table_id id;
	struct smu_table_cache cache;
};
285 
/* What a performance level is designated for */
enum smu_perf_level_designation {
	PERF_LEVEL_ACTIVITY,
	PERF_LEVEL_POWER_CONTAINMENT,
};

/* Clock/voltage description of one performance level */
struct smu_performance_level {
	uint32_t core_clock;
	uint32_t memory_clock;
	uint32_t vddc;
	uint32_t vddci;
	uint32_t non_local_mem_freq;
	uint32_t non_local_mem_width;
};

/* Min/max clock and bus-bandwidth capabilities */
struct smu_clock_info {
	uint32_t min_mem_clk;
	uint32_t max_mem_clk;
	uint32_t min_eng_clk;
	uint32_t max_eng_clk;
	uint32_t min_bus_bandwidth;
	uint32_t max_bus_bandwidth;
};
308 
#define SMU_MAX_DPM_LEVELS 16

/* One DPM level: a clock value and whether the level is usable */
struct smu_dpm_clk_level {
	bool		enabled;
	uint32_t	value;
};

/* Flag: table describes a fine-grained min/max range rather than discrete levels */
#define SMU_DPM_TABLE_FINE_GRAINED	BIT(0)

/*
 * Per-clock-domain DPM level table; SMU_DPM_TABLE_MAX below assumes
 * dpm_levels[] is sorted ascending by value.
 */
struct smu_dpm_table {
	enum smu_clk_type	clk_type;
	uint32_t		count;	/* valid entries in dpm_levels[] */
	uint32_t		flags;	/* SMU_DPM_TABLE_* */
	struct smu_dpm_clk_level dpm_levels[SMU_MAX_DPM_LEVELS];
};

/*
 * Lowest/highest DPM level value, or 0 for an empty table.
 * NOTE(review): both macros evaluate @table more than once — do not pass
 * expressions with side effects.
 */
#define SMU_DPM_TABLE_MIN(table) \
	((table)->count > 0 ? (table)->dpm_levels[0].value : 0)

#define SMU_DPM_TABLE_MAX(table) \
	((table)->count > 0 ? (table)->dpm_levels[(table)->count - 1].value : 0)
330 
#define SMU_MAX_PCIE_LEVELS 3

/* PCIe DPM capabilities: generation, lane width and LCLK per level */
struct smu_pcie_table {
	uint8_t pcie_gen[SMU_MAX_PCIE_LEVELS];
	uint8_t pcie_lane[SMU_MAX_PCIE_LEVELS];
	uint16_t lclk_freq[SMU_MAX_PCIE_LEVELS];
	uint32_t lclk_levels;	/* valid entries in lclk_freq[] */
};
339 
/*
 * Boot-up clock and voltage values reported by the VBIOS.
 * NOTE(review): units presumably follow the ATOM firmware-info table —
 * confirm against the VBIOS parsing code.
 */
struct smu_bios_boot_up_values {
	uint32_t			revision;
	uint32_t			gfxclk;
	uint32_t			uclk;
	uint32_t			socclk;
	uint32_t			dcefclk;
	uint32_t			eclk;
	uint32_t			vclk;
	uint32_t			dclk;
	uint16_t			vddc;
	uint16_t			vddci;
	uint16_t			mvddc;
	uint16_t			vdd_gfx;
	uint8_t				cooling_id;
	uint32_t			pp_table_id;
	uint32_t			format_revision;
	uint32_t			content_revision;
	uint32_t			fclk;
	uint32_t			lclk;
	uint32_t			firmware_caps;
};
361 
/* Indices into smu_table_context.tables[] */
enum smu_table_id {
	SMU_TABLE_PPTABLE = 0,
	SMU_TABLE_WATERMARKS,
	SMU_TABLE_CUSTOM_DPM,
	SMU_TABLE_DPMCLOCKS,
	SMU_TABLE_AVFS,
	SMU_TABLE_AVFS_PSM_DEBUG,
	SMU_TABLE_AVFS_FUSE_OVERRIDE,
	SMU_TABLE_PMSTATUSLOG,
	SMU_TABLE_SMU_METRICS,
	SMU_TABLE_DRIVER_SMU_CONFIG,
	SMU_TABLE_ACTIVITY_MONITOR_COEFF,
	SMU_TABLE_OVERDRIVE,
	SMU_TABLE_I2C_COMMANDS,
	SMU_TABLE_PACE,
	SMU_TABLE_ECCINFO,
	SMU_TABLE_COMBO_PPTABLE,
	SMU_TABLE_WIFIBAND,
	SMU_TABLE_PMFW_SYSTEM_METRICS,
	SMU_TABLE_COUNT,
};
383 
/* All tables and pptable-derived data shared between driver and SMU */
struct smu_table_context {
	void				*power_play_table;
	uint32_t			power_play_table_size;
	void				*hardcode_pptable;	/* pptable override — presumably user-uploaded; confirm */
	unsigned long			metrics_time;		/* time of last metrics fetch */
	void				*metrics_table;
	void				*clocks_table;
	void				*watermarks_table;

	void				*max_sustainable_clocks;
	struct smu_bios_boot_up_values	boot_values;
	void				*driver_pptable;
	void				*combo_pptable;
	void                            *ecc_table;
	void				*driver_smu_config_table;
	struct smu_table		tables[SMU_TABLE_COUNT];	/* indexed by enum smu_table_id */
	/*
	 * The driver table is just a staging buffer for
	 * uploading/downloading content from the SMU.
	 *
	 * And the table_id for SMU_MSG_TransferTableSmu2Dram/
	 * SMU_MSG_TransferTableDram2Smu instructs SMU
	 * which content driver is interested.
	 */
	struct smu_table		driver_table;
	struct smu_table		memory_pool;
	struct smu_table		dummy_read_1_table;
	uint8_t                         thermal_controller_type;

	void				*overdrive_table;	/* active OD settings */
	void                            *boot_overdrive_table;	/* OD defaults captured at boot */
	void				*user_overdrive_table;	/* user-customized OD settings */

	struct smu_driver_table driver_tables[SMU_DRIVER_TABLE_COUNT];
};
419 
struct smu_context;
struct smu_dpm_policy;

/* Name plus per-level description hook for a DPM policy */
struct smu_dpm_policy_desc {
	const char *name;
	char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level);
};

/* One power-management policy: its supported levels and the setter */
struct smu_dpm_policy {
	struct smu_dpm_policy_desc *desc;
	enum pp_pm_policy policy_type;
	unsigned long level_mask;	/* bitmask of supported levels */
	int current_level;
	int (*set_policy)(struct smu_context *ctxt, int level);
};

/* All policies for a device plus a mask of which entries are valid */
struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;
};
440 
/* Dynamic power management state for a device */
struct smu_dpm_context {
	uint32_t dpm_context_size;
	void *dpm_context;		/* ASIC-specific DPM tables */
	void *golden_dpm_context;	/* default snapshot of @dpm_context */
	enum amd_dpm_forced_level dpm_level;
	enum amd_dpm_forced_level saved_dpm_level;
	enum amd_dpm_forced_level requested_dpm_level;
	struct smu_power_state *dpm_request_power_state;
	struct smu_power_state *dpm_current_power_state;
	struct mclock_latency_table *mclk_latency_table;
	struct smu_dpm_policy_ctxt *dpm_policies;
};
453 
/* Temperature-metrics callbacks for this device */
struct smu_temp_context {
	const struct smu_temp_funcs      *temp_funcs;
};

/* Power-gating state of the media/compute engines */
struct smu_power_gate {
	bool uvd_gated;
	bool vce_gated;
	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];	/* per-VCN-instance gating */
	atomic_t jpeg_gated;
	atomic_t vpe_gated;
	atomic_t isp_gated;
	atomic_t umsch_mm_gated;
};

/* ASIC-specific power state blob plus engine gating state */
struct smu_power_context {
	void *power_context;
	uint32_t power_context_size;
	struct smu_power_gate power_gate;
};
473 
/* Default and maximum number of SMU feature bits */
#define SMU_FEATURE_NUM_DEFAULT (64)
#define SMU_FEATURE_MAX (128)

struct smu_feature_bits {
	DECLARE_BITMAP(bits, SMU_FEATURE_MAX);
};

/*
 * Helpers for initializing smu_feature_bits statically.
 * Use SMU_FEATURE_BIT_INIT() which automatically handles array indexing:
 *   static const struct smu_feature_bits example = {
 *       .bits = {
 *           SMU_FEATURE_BIT_INIT(5),
 *           SMU_FEATURE_BIT_INIT(10),
 *           SMU_FEATURE_BIT_INIT(65),
 *           SMU_FEATURE_BIT_INIT(100)
 *       }
 *   };
 *
 * NOTE(review): two bits that fall in the same BITS_PER_LONG word expand
 * to designated initializers for the same array element, so the later one
 * overrides the earlier (e.g. bits 5 and 10 above on 64-bit). Such bits
 * must instead be OR'd into a single initializer — confirm all users.
 */
#define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG)
#define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG)
#define SMU_FEATURE_BIT_INIT(bit) \
	[SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit))

/* Which feature list a query refers to */
enum smu_feature_list {
	SMU_FEATURE_LIST_SUPPORTED,
	SMU_FEATURE_LIST_ALLOWED,
	SMU_FEATURE_LIST_MAX,
};

/* Feature bitmaps (indexed by enum smu_feature_list) plus the feature count */
struct smu_feature {
	uint32_t feature_num;
	struct smu_feature_bits bits[SMU_FEATURE_LIST_MAX];
};
508 
/*
 * Snapshot of clock speeds and bandwidth.
 * NOTE(review): the *_in_sr fields presumably mean "during self refresh" —
 * confirm against the consumers.
 */
struct smu_clocks {
	uint32_t engine_clock;
	uint32_t memory_clock;
	uint32_t bus_bandwidth;
	uint32_t engine_clock_in_sr;
	uint32_t dcef_clock;
	uint32_t dcef_clock_in_sr;
};

#define MAX_REGULAR_DPM_NUM 16
/* One memory-clock frequency with its switch latency */
struct mclk_latency_entries {
	uint32_t  frequency;
	uint32_t  latency;
};
struct mclock_latency_table {
	uint32_t  count;	/* valid entries in @entries */
	struct mclk_latency_entries  entries[MAX_REGULAR_DPM_NUM];
};
527 
/* ASIC reset modes (semantics are ASIC-specific) */
enum smu_reset_mode {
	SMU_RESET_MODE_0,
	SMU_RESET_MODE_1,
	SMU_RESET_MODE_2,
	SMU_RESET_MODE_3,
	SMU_RESET_MODE_4,
};

/* Direction of a BACO transition */
enum smu_baco_state {
	SMU_BACO_STATE_ENTER = 0,
	SMU_BACO_STATE_EXIT,
};

/* BACO (bus-active, chip-off) support and current state */
struct smu_baco_context {
	uint32_t state;		/* enum smu_baco_state */
	bool platform_support;
	bool maco_support;
};
546 
/* A min/max clock range plus the DPM level it maps to */
struct smu_freq_info {
	uint32_t min;
	uint32_t max;
	uint32_t freq_level;
};

/* Preset clock values for one clock domain */
struct pstates_clk_freq {
	uint32_t			min;
	uint32_t			standard;
	uint32_t			peak;
	struct smu_freq_info		custom;
	struct smu_freq_info		curr;
};

/* Per-domain clock presets used by the UMD pstate (profiling) levels */
struct smu_umd_pstate_table {
	struct pstates_clk_freq		gfxclk_pstate;
	struct pstates_clk_freq		socclk_pstate;
	struct pstates_clk_freq		uclk_pstate;
	struct pstates_clk_freq		vclk_pstate;
	struct pstates_clk_freq		dclk_pstate;
	struct pstates_clk_freq		fclk_pstate;
};
569 
/* Mapping from a common message enum to the ASIC-specific message index */
struct cmn2asic_msg_mapping {
	int	valid_mapping;	/* non-zero when map_to is usable */
	int	map_to;
	uint32_t flags;
};

/* Mapping from a common enum (clock/feature/table/...) to the ASIC value */
struct cmn2asic_mapping {
	int	valid_mapping;	/* non-zero when map_to is usable */
	int	map_to;
};

#define SMU_MSG_MAX_ARGS 4

/* Message flags for smu_msg_args */
#define SMU_MSG_FLAG_ASYNC	BIT(0) /* Async send - skip post-poll */
#define SMU_MSG_FLAG_LOCK_HELD	BIT(1) /* Caller holds ctl->lock */

/* smu_msg_ctl flags */
#define SMU_MSG_CTL_DEBUG_MAILBOX	BIT(0) /* Debug mailbox supported */

struct smu_msg_ctl;
/**
 * struct smu_msg_config - IP-level register configuration
 * @msg_reg: Message register offset
 * @resp_reg: Response register offset
 * @arg_regs: Argument register offsets (up to SMU_MSG_MAX_ARGS)
 * @num_arg_regs: Number of argument registers available
 * @debug_msg_reg: Debug message register offset
 * @debug_resp_reg: Debug response register offset
 * @debug_param_reg: Debug parameter register offset
 *
 * NOTE(review): offsets presumably refer to the SMU IP's MMIO register
 * space — confirm against the per-ASIC setup code.
 */
struct smu_msg_config {
	u32 msg_reg;
	u32 resp_reg;
	u32 arg_regs[SMU_MSG_MAX_ARGS];
	int num_arg_regs;
	u32 debug_msg_reg;
	u32 debug_resp_reg;
	u32 debug_param_reg;
};
610 
/**
 * struct smu_msg_args - Per-call message arguments
 * @msg: Common message type (enum smu_message_type)
 * @args: Input arguments
 * @num_args: Number of input arguments
 * @out_args: Output arguments (filled after successful send)
 * @num_out_args: Number of output arguments to read
 * @flags: Message flags (SMU_MSG_FLAG_*)
 * @timeout: Per-message timeout in us (0 = use default)
 */
struct smu_msg_args {
	enum smu_message_type msg;
	u32 args[SMU_MSG_MAX_ARGS];	/* entries beyond num_args are unused */
	int num_args;
	u32 out_args[SMU_MSG_MAX_ARGS];
	int num_out_args;
	u32 flags;
	u32 timeout;
};
630 
/**
 * struct smu_msg_ops - IP-level protocol operations
 * @send_msg: send message protocol
 * @wait_response: wait for response (for split send/wait cases)
 * @decode_response: Convert response register value to errno
 * @send_debug_msg: send debug message
 */
struct smu_msg_ops {
	int (*send_msg)(struct smu_msg_ctl *ctl, struct smu_msg_args *args);
	int (*wait_response)(struct smu_msg_ctl *ctl, u32 timeout_us);
	/* returns 0 on success, negative errno for a failure response */
	int (*decode_response)(u32 resp);
	int (*send_debug_msg)(struct smu_msg_ctl *ctl, u32 msg, u32 param);
};
644 
/**
 * struct smu_msg_ctl - Per-device message control block
 * @smu: Owning SMU context
 * @lock: Serializes message transactions (see SMU_MSG_FLAG_LOCK_HELD)
 * @config: IP-level register configuration
 * @ops: Protocol operations
 * @message_map: Common-to-ASIC message index mapping
 * @default_timeout: Default response timeout in us
 * @flags: SMU_MSG_CTL_* flags
 *
 * This is a standalone control block that encapsulates everything
 * needed for SMU messaging. The ops->send_msg implements the complete
 * protocol including all filtering and error handling.
 */
struct smu_msg_ctl {
	struct smu_context *smu;
	struct mutex lock;
	struct smu_msg_config config;
	const struct smu_msg_ops *ops;
	const struct cmn2asic_msg_mapping *message_map;
	u32 default_timeout;
	u32 flags;
};
660 
/* STB (smart trace buffer) logging state */
struct stb_context {
	uint32_t stb_buf_size;
	bool enabled;
	spinlock_t lock;
};

/* Coarse SMU firmware lifecycle state */
enum smu_fw_status {
	SMU_FW_INIT = 0,
	SMU_FW_RUNTIME,
	SMU_FW_HANG,
};

#define WORKLOAD_POLICY_MAX 7

/*
 * Configure wbrf event handling pace as there can be only one
 * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
 */
#define SMU_WBRF_EVENT_HANDLING_PACE	10

/* Optional per-ASIC capabilities tracked in smu_feature_cap */
enum smu_feature_cap_id {
	SMU_FEATURE_CAP_ID__LINK_RESET = 0,
	SMU_FEATURE_CAP_ID__SDMA_RESET,
	SMU_FEATURE_CAP_ID__VCN_RESET,
	SMU_FEATURE_CAP_ID__COUNT,
};

struct smu_feature_cap {
	DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
};
691 
/*
 * Top-level SMU driver state, one instance per device. Aggregates the
 * table/DPM/power/thermal sub-contexts, the common-to-ASIC enum mappings,
 * cached limits and user settings, and the message control block.
 */
struct smu_context {
	struct amdgpu_device            *adev;
	struct amdgpu_irq_src		irq_source;

	/* per-ASIC callback table and common <-> ASIC enum mappings */
	const struct pptable_funcs	*ppt_funcs;
	const struct cmn2asic_mapping	*clock_map;
	const struct cmn2asic_mapping	*feature_map;
	const struct cmn2asic_mapping	*table_map;
	const struct cmn2asic_mapping	*pwr_src_map;
	const struct cmn2asic_mapping	*workload_map;
	uint64_t pool_size;	/* SMU memory pool size; see enum smu_memory_pool_size */

	struct smu_table_context	smu_table;
	struct smu_dpm_context		smu_dpm;
	struct smu_power_context	smu_power;
	struct smu_temp_context		smu_temp;
	struct smu_feature		smu_feature;
	struct amd_pp_display_configuration  *display_config;
	struct smu_baco_context		smu_baco;
	struct smu_temperature_range	thermal_range;
	struct smu_feature_cap		fea_cap;
	void *od_settings;	/* ASIC-specific overdrive settings */

	struct smu_umd_pstate_table	pstate_table;
	uint32_t pstate_sclk;
	uint32_t pstate_mclk;

	/* overdrive and power limit state */
	bool od_enabled;
	uint32_t current_power_limit;
	uint32_t default_power_limit;
	uint32_t max_power_limit;
	uint32_t min_power_limit;

	/* soft pptable */
	uint32_t ppt_offset_bytes;
	uint32_t ppt_size_bytes;
	uint8_t  *ppt_start_addr;

	bool support_power_containment;
	bool disable_watermark;

#define WATERMARKS_EXIST	(1 << 0)
#define WATERMARKS_LOADED	(1 << 1)
	uint32_t watermarks_bitmap;	/* WATERMARKS_* flags above */
	uint32_t hard_min_uclk_req_from_dal;
	bool disable_uclk_switch;

	/* asic agnostic workload mask */
	uint32_t workload_mask;
	bool pause_workload;
	/* default/user workload preference */
	uint32_t power_profile_mode;
	uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
	/* backend specific custom workload settings */
	long *custom_profile_params;
	bool pm_enabled;
	bool is_apu;

	/* firmware interface versions and state */
	uint32_t smc_driver_if_version;
	uint32_t smc_fw_if_version;
	uint32_t smc_fw_version;
	uint32_t smc_fw_caps;
	uint8_t smc_fw_state;	/* presumably enum smu_fw_status — confirm */

	bool uploading_custom_pp_table;
	bool dc_controlled_by_gpio;

	/* deferred work for throttling log and interrupt handling */
	struct work_struct throttling_logging_work;
	atomic64_t throttle_int_counter;
	struct work_struct interrupt_work;

	unsigned fan_max_rpm;
	unsigned manual_fan_speed_pwm;

	uint32_t gfx_default_hard_min_freq;
	uint32_t gfx_default_soft_max_freq;
	uint32_t gfx_actual_hard_min_freq;
	uint32_t gfx_actual_soft_max_freq;

	/* APU only */
	uint32_t cpu_default_soft_min_freq;
	uint32_t cpu_default_soft_max_freq;
	uint32_t cpu_actual_soft_min_freq;
	uint32_t cpu_actual_soft_max_freq;
	uint32_t cpu_core_id_select;
	uint16_t cpu_core_num;

	struct smu_user_dpm_profile user_dpm_profile;

	struct stb_context stb_context;

	struct firmware pptable_firmware;

	struct delayed_work		swctf_delayed_work;

	/* data structures for wbrf feature support */
	bool				wbrf_supported;
	struct notifier_block		wbrf_notifier;
	struct delayed_work		wbrf_delayed_work;

	/* SMU message control block */
	struct smu_msg_ctl msg_ctl;
};
795 
struct i2c_adapter;

/**
 * struct smu_temp_funcs - Callbacks used to get temperature data.
 */
struct smu_temp_funcs {
	/**
	 * @get_temp_metrics: Copy the temperature metrics of the requested
	 *           type into @table.
	 * @type Temperature metrics type(baseboard/gpuboard)
	 * Return: Size of &table
	 */
	ssize_t (*get_temp_metrics)(struct smu_context *smu,
				    enum smu_temp_metric_type type, void *table);

	/**
	 * @temp_metrics_is_supported: Get if specific temperature metrics is supported
	 * @type Temperature metrics type(baseboard/gpuboard)
	 * Return: true if supported else false
	 */
	bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);

};
819 
820 /**
821  * struct pptable_funcs - Callbacks used to interact with the SMU.
822  */
823 struct pptable_funcs {
824 	/**
825 	 * @run_btc: Calibrate voltage/frequency curve to fit the system's
826 	 *           power delivery and voltage margins. Required for adaptive
827 	 *           voltage frequency scaling (AVFS).
828 	 */
829 	int (*run_btc)(struct smu_context *smu);
830 
831 	/**
832 	 * @init_allowed_features: Initialize allowed features bitmap.
833 	 * Directly sets allowed features using smu_feature wrapper functions.
834 	 */
835 	int (*init_allowed_features)(struct smu_context *smu);
836 
837 	/**
838 	 * @get_current_power_state: Get the current power state.
839 	 *
840 	 * Return: Current power state on success, negative errno on failure.
841 	 */
842 	enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
843 
844 	/**
845 	 * @set_default_dpm_table: Retrieve the default overdrive settings from
846 	 *                         the SMU.
847 	 */
848 	int (*set_default_dpm_table)(struct smu_context *smu);
849 
850 	int (*set_power_state)(struct smu_context *smu);
851 
852 	/**
853 	 * @populate_umd_state_clk: Populate the UMD power state table with
854 	 *                          defaults.
855 	 */
856 	int (*populate_umd_state_clk)(struct smu_context *smu);
857 
858 	/**
859 	 * @emit_clk_levels: Print DPM clock levels for a clock domain
860 	 *                    to buffer using sysfs_emit_at. Star current level.
861 	 *
862 	 * Used for sysfs interfaces.
863 	 * &buf: sysfs buffer
864 	 * &offset: offset within buffer to start printing, which is updated by the
865 	 * function.
866 	 *
867 	 * Return: 0 on Success or Negative to indicate an error occurred.
868 	 */
869 	int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
870 
871 	/**
872 	 * @force_clk_levels: Set a range of allowed DPM levels for a clock
873 	 *                    domain.
874 	 * &clk_type: Clock domain.
875 	 * &mask: Range of allowed DPM levels.
876 	 */
877 	int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask);
878 
879 	/**
880 	 * @od_edit_dpm_table: Edit the custom overdrive DPM table.
881 	 * &type: Type of edit.
882 	 * &input: Edit parameters.
883 	 * &size: Size of &input.
884 	 */
885 	int (*od_edit_dpm_table)(struct smu_context *smu,
886 				 enum PP_OD_DPM_TABLE_COMMAND type,
887 				 long *input, uint32_t size);
888 
889 	/**
890 	 * @restore_user_od_settings: Restore the user customized
891 	 *                            OD settings on S3/S4/Runpm resume.
892 	 */
893 	int (*restore_user_od_settings)(struct smu_context *smu);
894 
895 	/**
896 	 * @get_clock_by_type_with_latency: Get the speed and latency of a clock
897 	 *                                  domain.
898 	 */
899 	int (*get_clock_by_type_with_latency)(struct smu_context *smu,
900 					      enum smu_clk_type clk_type,
901 					      struct
902 					      pp_clock_levels_with_latency
903 					      *clocks);
904 	/**
905 	 * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock
906 	 *                                  domain.
907 	 */
908 	int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
909 					      enum amd_pp_clock_type type,
910 					      struct
911 					      pp_clock_levels_with_voltage
912 					      *clocks);
913 
914 	/**
915 	 * @get_power_profile_mode: Print all power profile modes to
916 	 *                          buffer. Star current mode.
917 	 */
918 	int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
919 
920 	/**
921 	 * @set_power_profile_mode: Set a power profile mode. Also used to
922 	 *                          create/set custom power profile modes.
923 	 * &input: Power profile mode parameters.
924 	 * &workload_mask: mask of workloads to enable
925 	 * &custom_params: custom profile parameters
926 	 * &custom_params_max_idx: max valid idx into custom_params
927 	 */
928 	int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
929 				      long *custom_params, u32 custom_params_max_idx);
930 
931 	/**
932 	 * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
933 	 *                      management.
934 	 */
935 	int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);
936 
937 	/**
938 	 * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
939 	 *                       management.
940 	 */
941 	int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
942 
943 	/**
944 	 * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
945 	 */
946 	int (*set_gfx_power_up_by_imu)(struct smu_context *smu);
947 
948 	/**
949 	 * @read_sensor: Read data from a sensor.
950 	 * &sensor: Sensor to read data from.
951 	 * &data: Sensor reading.
952 	 * &size: Size of &data.
953 	 */
954 	int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
955 			   void *data, uint32_t *size);
956 
957 	/**
958 	 * @get_apu_thermal_limit: get apu core limit from smu
959 	 * &limit: current limit temperature in millidegrees Celsius
960 	 */
961 	int (*get_apu_thermal_limit)(struct smu_context *smu, uint32_t *limit);
962 
963 	/**
964 	 * @set_apu_thermal_limit: update all controllers with new limit
965 	 * &limit: limit temperature to be setted, in millidegrees Celsius
966 	 */
967 	int (*set_apu_thermal_limit)(struct smu_context *smu, uint32_t limit);
968 
969 	/**
970 	 * @pre_display_config_changed: Prepare GPU for a display configuration
971 	 *                              change.
972 	 *
973 	 * Disable display tracking and pin memory clock speed to maximum. Used
974 	 * in display component synchronization.
975 	 */
976 	int (*pre_display_config_changed)(struct smu_context *smu);
977 
978 	/**
979 	 * @display_config_changed: Notify the SMU of the current display
980 	 *                          configuration.
981 	 *
982 	 * Allows SMU to properly track blanking periods for memory clock
983 	 * adjustment. Used in display component synchronization.
984 	 */
985 	int (*display_config_changed)(struct smu_context *smu);
986 
987 	int (*apply_clocks_adjust_rules)(struct smu_context *smu);
988 
989 	/**
990 	 * @notify_smc_display_config: Applies display requirements to the
991 	 *                             current power state.
992 	 *
993 	 * Optimize deep sleep DCEFclk and mclk for the current display
994 	 * configuration. Used in display component synchronization.
995 	 */
996 	int (*notify_smc_display_config)(struct smu_context *smu);
997 
998 	/**
999 	 * @is_dpm_running: Check if DPM is running.
1000 	 *
1001 	 * Return: True if DPM is running, false otherwise.
1002 	 */
1003 	bool (*is_dpm_running)(struct smu_context *smu);
1004 
1005 	/**
1006 	 * @get_fan_speed_pwm: Get the current fan speed in PWM.
1007 	 */
1008 	int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed);
1009 
1010 	/**
1011 	 * @get_fan_speed_rpm: Get the current fan speed in rpm.
1012 	 */
1013 	int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
1014 
1015 	/**
1016 	 * @set_watermarks_table: Configure and upload the watermarks tables to
1017 	 *                        the SMU.
1018 	 */
1019 	int (*set_watermarks_table)(struct smu_context *smu,
1020 				    struct pp_smu_wm_range_sets *clock_ranges);
1021 
1022 	/**
	 * @get_thermal_temperature_range: Get safe thermal limits in Celsius.
1024 	 */
1025 	int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
1026 
1027 	/**
1028 	 * @get_uclk_dpm_states: Get memory clock DPM levels in kHz.
1029 	 * &clocks_in_khz: Array of DPM levels.
1030 	 * &num_states: Elements in &clocks_in_khz.
1031 	 */
1032 	int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
1033 
1034 	/**
1035 	 * @set_default_od_settings: Set the overdrive tables to defaults.
1036 	 */
1037 	int (*set_default_od_settings)(struct smu_context *smu);
1038 
1039 	/**
1040 	 * @set_performance_level: Set a performance level.
1041 	 */
1042 	int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
1043 
1044 	/**
1045 	 * @display_disable_memory_clock_switch: Enable/disable dynamic memory
1046 	 *                                       clock switching.
1047 	 *
1048 	 * Disabling this feature forces memory clock speed to maximum.
1049 	 * Enabling sets the minimum memory clock capable of driving the
1050 	 * current display configuration.
1051 	 */
1052 	int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
1053 
1054 	/**
1055 	 * @get_power_limit: Get the device's power limits.
1056 	 */
1057 	int (*get_power_limit)(struct smu_context *smu,
1058 					uint32_t *current_power_limit,
1059 					uint32_t *default_power_limit,
1060 					uint32_t *max_power_limit,
1061 					uint32_t *min_power_limit);
1062 
1063 	/**
1064 	 * @get_ppt_limit: Get the device's ppt limits.
1065 	 */
1066 	int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit,
1067 			enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level);
1068 
1069 	/**
1070 	 * @set_df_cstate: Set data fabric cstate.
1071 	 */
1072 	int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
1073 
1074 	/**
1075 	 * @update_pcie_parameters: Update and upload the system's PCIe
	 *                          capabilities to the SMU.
1077 	 * &pcie_gen_cap: Maximum allowed PCIe generation.
1078 	 * &pcie_width_cap: Maximum allowed PCIe width.
1079 	 */
1080 	int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
1081 
1082 	/**
1083 	 * @i2c_init: Initialize i2c.
1084 	 *
1085 	 * The i2c bus is used internally by the SMU voltage regulators and
1086 	 * other devices. The i2c's EEPROM also stores bad page tables on boards
1087 	 * with ECC.
1088 	 */
1089 	int (*i2c_init)(struct smu_context *smu);
1090 
1091 	/**
1092 	 * @i2c_fini: Tear down i2c.
1093 	 */
1094 	void (*i2c_fini)(struct smu_context *smu);
1095 
1096 	/**
1097 	 * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
1098 	 */
1099 	void (*get_unique_id)(struct smu_context *smu);
1100 
1101 	/**
1102 	 * @get_dpm_clock_table: Get a copy of the DPM clock table.
1103 	 *
1104 	 * Used by display component in bandwidth and watermark calculations.
1105 	 */
1106 	int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
1107 
1108 	/**
1109 	 * @init_microcode: Request the SMU's firmware from the kernel.
1110 	 */
1111 	int (*init_microcode)(struct smu_context *smu);
1112 
1113 	/**
1114 	 * @load_microcode: Load firmware onto the SMU.
1115 	 */
1116 	int (*load_microcode)(struct smu_context *smu);
1117 
1118 	/**
1119 	 * @fini_microcode: Release the SMU's firmware.
1120 	 */
1121 	void (*fini_microcode)(struct smu_context *smu);
1122 
1123 	/**
1124 	 * @init_smc_tables: Initialize the SMU tables.
1125 	 */
1126 	int (*init_smc_tables)(struct smu_context *smu);
1127 
1128 	/**
1129 	 * @fini_smc_tables: Release the SMU tables.
1130 	 */
1131 	int (*fini_smc_tables)(struct smu_context *smu);
1132 
1133 	/**
1134 	 * @init_power: Initialize the power gate table context.
1135 	 */
1136 	int (*init_power)(struct smu_context *smu);
1137 
1138 	/**
1139 	 * @fini_power: Release the power gate table context.
1140 	 */
1141 	int (*fini_power)(struct smu_context *smu);
1142 
1143 	/**
1144 	 * @check_fw_status: Check the SMU's firmware status.
1145 	 *
1146 	 * Return: Zero if check passes, negative errno on failure.
1147 	 */
1148 	int (*check_fw_status)(struct smu_context *smu);
1149 
1150 	/**
	 * @set_mp1_state: put SMU into a correct state for coming
1152 	 *                 resume from runpm or gpu reset.
1153 	 */
1154 	int (*set_mp1_state)(struct smu_context *smu,
1155 			     enum pp_mp1_state mp1_state);
1156 
1157 	/**
1158 	 * @setup_pptable: Initialize the power play table and populate it with
1159 	 *                 default values.
1160 	 */
1161 	int (*setup_pptable)(struct smu_context *smu);
1162 
1163 	/**
1164 	 * @get_vbios_bootup_values: Get default boot values from the VBIOS.
1165 	 */
1166 	int (*get_vbios_bootup_values)(struct smu_context *smu);
1167 
1168 	/**
1169 	 * @check_fw_version: Print driver and SMU interface versions to the
1170 	 *                    system log.
1171 	 *
1172 	 * Interface mismatch is not a critical failure.
1173 	 */
1174 	int (*check_fw_version)(struct smu_context *smu);
1175 
1176 	/**
1177 	 * @powergate_sdma: Power up/down system direct memory access.
1178 	 */
1179 	int (*powergate_sdma)(struct smu_context *smu, bool gate);
1180 
1181 	/**
	 * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
1183 	 *                gating.
1184 	 */
1185 	int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
1186 
1187 	/**
1188 	 * @write_pptable: Write the power play table to the SMU.
1189 	 */
1190 	int (*write_pptable)(struct smu_context *smu);
1191 
1192 	/**
1193 	 * @set_driver_table_location: Send the location of the driver table to
1194 	 *                             the SMU.
1195 	 */
1196 	int (*set_driver_table_location)(struct smu_context *smu);
1197 
1198 	/**
1199 	 * @set_tool_table_location: Send the location of the tool table to the
1200 	 *                           SMU.
1201 	 */
1202 	int (*set_tool_table_location)(struct smu_context *smu);
1203 
1204 	/**
1205 	 * @notify_memory_pool_location: Send the location of the memory pool to
1206 	 *                               the SMU.
1207 	 */
1208 	int (*notify_memory_pool_location)(struct smu_context *smu);
1209 
1210 	/**
1211 	 * @system_features_control: Enable/disable all SMU features.
1212 	 */
1213 	int (*system_features_control)(struct smu_context *smu, bool en);
1214 
1215 	/**
1216 	 * @init_display_count: Notify the SMU of the number of display
1217 	 *                      components in current display configuration.
1218 	 */
1219 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
1220 
1221 	/**
1222 	 * @set_allowed_mask: Notify the SMU of the features currently allowed
1223 	 *                    by the driver.
1224 	 */
1225 	int (*set_allowed_mask)(struct smu_context *smu);
1226 
1227 	/**
1228 	 * @get_enabled_mask: Get a mask of features that are currently enabled
1229 	 *                    on the SMU.
1230 	 * &feature_mask: Enabled feature mask.
1231 	 */
1232 	int (*get_enabled_mask)(struct smu_context *smu,
1233 				struct smu_feature_bits *feature_mask);
1234 
1235 	/**
1236 	 * @feature_is_enabled: Test if a feature is enabled.
1237 	 *
1238 	 * Return: One if enabled, zero if disabled.
1239 	 */
1240 	int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask);
1241 
1242 	/**
1243 	 * @disable_all_features_with_exception: Disable all features with
1244 	 *                                       exception to those in &mask.
1245 	 */
1246 	int (*disable_all_features_with_exception)(struct smu_context *smu,
1247 						   enum smu_feature_mask mask);
1248 
1249 	/**
1250 	 * @notify_display_change: General interface call to let SMU know about DC change
1251 	 */
1252 	int (*notify_display_change)(struct smu_context *smu);
1253 
1254 	/**
1255 	 * @set_power_limit: Set power limit in watts.
1256 	 */
1257 	int (*set_power_limit)(struct smu_context *smu,
1258 			       enum smu_ppt_limit_type limit_type,
1259 			       uint32_t limit);
1260 
1261 	/**
1262 	 * @init_max_sustainable_clocks: Populate max sustainable clock speed
1263 	 *                               table with values from the SMU.
1264 	 */
1265 	int (*init_max_sustainable_clocks)(struct smu_context *smu);
1266 
1267 	/**
1268 	 * @enable_thermal_alert: Enable thermal alert interrupts.
1269 	 */
1270 	int (*enable_thermal_alert)(struct smu_context *smu);
1271 
1272 	/**
1273 	 * @disable_thermal_alert: Disable thermal alert interrupts.
1274 	 */
1275 	int (*disable_thermal_alert)(struct smu_context *smu);
1276 
1277 	/**
1278 	 * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep
1279 	 *                           clock speed in MHz.
1280 	 */
1281 	int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk);
1282 
1283 	/**
1284 	 * @display_clock_voltage_request: Set a hard minimum frequency
1285 	 * for a clock domain.
1286 	 */
1287 	int (*display_clock_voltage_request)(struct smu_context *smu, struct
1288 					     pp_display_clock_request
1289 					     *clock_req);
1290 
1291 	/**
1292 	 * @get_fan_control_mode: Get the current fan control mode.
1293 	 */
1294 	uint32_t (*get_fan_control_mode)(struct smu_context *smu);
1295 
1296 	/**
1297 	 * @set_fan_control_mode: Set the fan control mode.
1298 	 */
1299 	int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
1300 
1301 	/**
1302 	 * @set_fan_speed_pwm: Set a static fan speed in PWM.
1303 	 */
1304 	int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed);
1305 
1306 	/**
1307 	 * @set_fan_speed_rpm: Set a static fan speed in rpm.
1308 	 */
1309 	int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
1310 
1311 	/**
1312 	 * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate.
1313 	 * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise.
1314 	 */
1315 	int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
1316 
1317 	/**
1318 	 * @gfx_off_control: Enable/disable graphics engine poweroff.
1319 	 */
1320 	int (*gfx_off_control)(struct smu_context *smu, bool enable);
1321 
1322 
1323 	/**
1324 	 * @get_gfx_off_status: Get graphics engine poweroff status.
1325 	 *
1326 	 * Return:
1327 	 * 0 - GFXOFF(default).
1328 	 * 1 - Transition out of GFX State.
1329 	 * 2 - Not in GFXOFF.
1330 	 * 3 - Transition into GFXOFF.
1331 	 */
1332 	uint32_t (*get_gfx_off_status)(struct smu_context *smu);
1333 
1334 	/**
	 * @get_gfx_off_entrycount: total GFXOFF entry count at the time of
1336 	 * query since system power-up
1337 	 */
1338 	u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
1339 
1340 	/**
1341 	 * @set_gfx_off_residency: set 1 to start logging, 0 to stop logging
1342 	 */
1343 	u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
1344 
1345 	/**
1346 	 * @get_gfx_off_residency: Average GFXOFF residency % during the logging interval
1347 	 */
1348 	u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
1349 
1350 	/**
	 * @register_irq_handler: Register interrupt request handlers.
1352 	 */
1353 	int (*register_irq_handler)(struct smu_context *smu);
1354 
1355 	/**
1356 	 * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep.
1357 	 */
1358 	int (*set_azalia_d3_pme)(struct smu_context *smu);
1359 
1360 	/**
1361 	 * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable
1362 	 *                                    clock speeds table.
1363 	 *
1364 	 * Provides a way for the display component (DC) to get the max
1365 	 * sustainable clocks from the SMU.
1366 	 */
1367 	int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks);
1368 
1369 	/**
1370 	 * @get_bamaco_support: Check if GPU supports BACO/MACO
1371 	 * BACO: Bus Active, Chip Off
1372 	 * MACO: Memory Active, Chip Off
1373 	 */
1374 	int (*get_bamaco_support)(struct smu_context *smu);
1375 
1376 	/**
1377 	 * @baco_get_state: Get the current BACO state.
1378 	 *
1379 	 * Return: Current BACO state.
1380 	 */
1381 	enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
1382 
1383 	/**
1384 	 * @baco_set_state: Enter/exit BACO.
1385 	 */
1386 	int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
1387 
1388 	/**
1389 	 * @baco_enter: Enter BACO.
1390 	 */
1391 	int (*baco_enter)(struct smu_context *smu);
1392 
1393 	/**
1394 	 * @baco_exit: Exit Baco.
1395 	 */
1396 	int (*baco_exit)(struct smu_context *smu);
1397 
1398 	/**
1399 	 * @mode1_reset_is_support: Check if GPU supports mode1 reset.
1400 	 */
1401 	bool (*mode1_reset_is_support)(struct smu_context *smu);
1402 
1403 	/**
1404 	 * @mode1_reset: Perform mode1 reset.
1405 	 *
1406 	 * Complete GPU reset.
1407 	 */
1408 	int (*mode1_reset)(struct smu_context *smu);
1409 
1410 	/**
1411 	 * @mode2_reset: Perform mode2 reset.
1412 	 *
1413 	 * Mode2 reset generally does not reset as many IPs as mode1 reset. The
1414 	 * IPs reset varies by asic.
1415 	 */
1416 	int (*mode2_reset)(struct smu_context *smu);
1417 	/* for gfx feature enablement after mode2 reset */
1418 	int (*enable_gfx_features)(struct smu_context *smu);
1419 
1420 	/**
1421 	 * @link_reset: Perform link reset.
1422 	 *
1423 	 * The gfx device driver reset
1424 	 */
1425 	int (*link_reset)(struct smu_context *smu);
1426 
1427 	/**
1428 	 * @get_dpm_ultimate_freq: Get the hard frequency range of a clock
1429 	 *                         domain in MHz.
1430 	 */
1431 	int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
1432 
1433 	/**
1434 	 * @set_soft_freq_limited_range: Set the soft frequency range of a clock
1435 	 *                               domain in MHz.
1436 	 */
1437 	int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
1438 					   bool automatic);
1439 
1440 	/**
1441 	 * @set_power_source: Notify the SMU of the current power source.
1442 	 */
1443 	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
1444 
1445 	/**
1446 	 * @log_thermal_throttling_event: Print a thermal throttling warning to
1447 	 *                                the system's log.
1448 	 */
1449 	void (*log_thermal_throttling_event)(struct smu_context *smu);
1450 
1451 	/**
1452 	 * @get_pp_feature_mask: Print a human readable table of enabled
1453 	 *                       features to buffer.
1454 	 */
1455 	size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
1456 
1457 	/**
1458 	 * @set_pp_feature_mask: Request the SMU enable/disable features to
1459 	 *                       match those enabled in &new_mask.
1460 	 */
1461 	int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
1462 
1463 	/**
1464 	 * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU.
1465 	 *
1466 	 * Return: Size of &table
1467 	 */
1468 	ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
1469 
1470 	/**
1471 	 * @get_pm_metrics: Get one snapshot of power management metrics from
1472 	 * PMFW.
1473 	 *
1474 	 * Return: Size of the metrics sample
1475 	 */
1476 	ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
1477 				  size_t size);
1478 
1479 	/**
1480 	 * @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
1481 	 */
1482 	int (*enable_mgpu_fan_boost)(struct smu_context *smu);
1483 
1484 	/**
1485 	 * @gfx_ulv_control: Enable/disable ultra low voltage.
1486 	 */
1487 	int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
1488 
1489 	/**
1490 	 * @deep_sleep_control: Enable/disable deep sleep.
1491 	 */
1492 	int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
1493 
1494 	/**
1495 	 * @get_fan_parameters: Get fan parameters.
1496 	 *
1497 	 * Get maximum fan speed from the power play table.
1498 	 */
1499 	int (*get_fan_parameters)(struct smu_context *smu);
1500 
1501 	/**
1502 	 * @post_init: Helper function for asic specific workarounds.
1503 	 */
1504 	int (*post_init)(struct smu_context *smu);
1505 
1506 	/**
1507 	 * @interrupt_work: Work task scheduled from SMU interrupt handler.
1508 	 */
1509 	void (*interrupt_work)(struct smu_context *smu);
1510 
1511 	/**
1512 	 * @gpo_control: Enable/disable graphics power optimization if supported.
1513 	 */
1514 	int (*gpo_control)(struct smu_context *smu, bool enablement);
1515 
1516 	/**
1517 	 * @gfx_state_change_set: Send the current graphics state to the SMU.
1518 	 */
1519 	int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
1520 
1521 	/**
1522 	 * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
1523 	 *                                      parameters to defaults.
1524 	 */
1525 	int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
1526 
1527 	/**
1528 	 * @smu_handle_passthrough_sbr:  Send message to SMU about special handling for SBR.
1529 	 */
1530 	int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
1531 
1532 	/**
1533 	 * @wait_for_event:  Wait for events from SMU.
1534 	 */
1535 	int (*wait_for_event)(struct smu_context *smu,
1536 			      enum smu_event_type event, uint64_t event_arg);
1537 
1538 	/**
	 * @send_hbm_bad_pages_num:  message SMU to update bad page number
1540 	 *										of SMUBUS table.
1541 	 */
1542 	int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
1543 
1544 	/**
1545 	 * @send_rma_reason: message rma reason event to SMU.
1546 	 */
1547 	int (*send_rma_reason)(struct smu_context *smu);
1548 
1549 	/**
1550 	 * @reset_sdma: message SMU to soft reset sdma instance.
1551 	 */
1552 	int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
1553 
1554 	/**
	 * @dpm_reset_vcn: message SMU to soft reset vcn instance.
1556 	 */
1557 	int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask);
1558 
1559 	/**
	 * @get_ecc_info:  message SMU to get ECC INFO table.
1561 	 */
1562 	ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
1563 
1564 
1565 	/**
1566 	 * @stb_collect_info: Collects Smart Trace Buffers data.
1567 	 */
1568 	int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
1569 
1570 	/**
1571 	 * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
1572 	 */
1573 	int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
1574 
1575 	/**
1576 	 * @set_config_table: Apply the input DriverSmuConfig table settings.
1577 	 */
1578 	int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
1579 
1580 	/**
	 * @send_hbm_bad_channel_flag:  message SMU to update bad channel info
1582 	 *										of SMUBUS table.
1583 	 */
1584 	int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);
1585 
1586 	/**
1587 	 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP
1588 	 */
1589 	int (*init_pptable_microcode)(struct smu_context *smu);
1590 
1591 	/**
1592 	 * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power
1593 	 *                       management.
1594 	 */
1595 	int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable);
1596 
1597 	/**
1598 	 * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power
1599 	 *                       management.
1600 	 */
1601 	int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable);
1602 
1603 	/**
1604 	 * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power
1605 	 *                       management.
1606 	 */
1607 	int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
1608 
1609 	/**
1610 	 * @set_mall_enable: Init MALL power gating control.
1611 	 */
1612 	int (*set_mall_enable)(struct smu_context *smu);
1613 
1614 	/**
1615 	 * @notify_rlc_state: Notify RLC power state to SMU.
1616 	 */
1617 	int (*notify_rlc_state)(struct smu_context *smu, bool en);
1618 
1619 	/**
1620 	 * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
1621 	 */
1622 	bool (*is_asic_wbrf_supported)(struct smu_context *smu);
1623 
1624 	/**
1625 	 * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported
1626 	 */
1627 	int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
1628 
1629 	/**
1630 	 * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied
1631 	 */
1632 	int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
1633 					struct freq_band_range *exclusion_ranges);
1634 	/**
1635 	 * @get_xcp_metrics: Get a copy of the partition metrics table from SMU.
1636 	 * Return: Size of table
1637 	 */
1638 	ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
1639 				   void *table);
1640 	/**
1641 	 * @ras_send_msg: Send a message with a parameter from Ras
1642 	 * &msg: Type of message.
1643 	 * &param: Message parameter.
1644 	 * &read_arg: SMU response (optional).
1645 	 */
1646 	int (*ras_send_msg)(struct smu_context *smu,
1647 			    enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
1648 
1649 
1650 	/**
1651 	 * @get_ras_smu_drv: Get RAS smu driver interface
1652 	 * Return: ras_smu_drv *
1653 	 */
1654 	int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
1655 };
1656 
/*
 * Identifiers for individual SMU metrics values. ASIC backends map these
 * common keys onto fields of their own (ASIC-private) metrics tables.
 * NOTE(review): values are positional — do not reorder existing entries.
 */
typedef enum {
	/* instantaneous clock readings */
	METRICS_CURR_GFXCLK,
	METRICS_CURR_SOCCLK,
	METRICS_CURR_UCLK,
	METRICS_CURR_VCLK,
	METRICS_CURR_VCLK1,
	METRICS_CURR_DCLK,
	METRICS_CURR_DCLK1,
	METRICS_CURR_FCLK,
	METRICS_CURR_DCEFCLK,
	/* averaged clock and activity readings */
	METRICS_AVERAGE_CPUCLK,
	METRICS_AVERAGE_GFXCLK,
	METRICS_AVERAGE_SOCCLK,
	METRICS_AVERAGE_FCLK,
	METRICS_AVERAGE_UCLK,
	METRICS_AVERAGE_VCLK,
	METRICS_AVERAGE_DCLK,
	METRICS_AVERAGE_VCLK1,
	METRICS_AVERAGE_DCLK1,
	METRICS_AVERAGE_GFXACTIVITY,
	METRICS_AVERAGE_MEMACTIVITY,
	METRICS_AVERAGE_VCNACTIVITY,
	METRICS_AVERAGE_SOCKETPOWER,
	/* temperature sensors */
	METRICS_TEMPERATURE_EDGE,
	METRICS_TEMPERATURE_HOTSPOT,
	METRICS_TEMPERATURE_MEM,
	METRICS_TEMPERATURE_VRGFX,
	METRICS_TEMPERATURE_VRSOC,
	METRICS_TEMPERATURE_VRMEM,
	METRICS_THROTTLER_STATUS,
	/* fan, voltage, power sharing, identity and link state */
	METRICS_CURR_FANSPEED,
	METRICS_VOLTAGE_VDDSOC,
	METRICS_VOLTAGE_VDDGFX,
	METRICS_SS_APU_SHARE,
	METRICS_SS_DGPU_SHARE,
	METRICS_UNIQUE_ID_UPPER32,
	METRICS_UNIQUE_ID_LOWER32,
	METRICS_PCIE_RATE,
	METRICS_PCIE_WIDTH,
	METRICS_CURR_FANPWM,
	METRICS_CURR_SOCKETPOWER,
	METRICS_AVERAGE_VPECLK,
	METRICS_AVERAGE_IPUCLK,
	METRICS_AVERAGE_MPIPUCLK,
	/* per-throttler residency counters */
	METRICS_THROTTLER_RESIDENCY_PROCHOT,
	METRICS_THROTTLER_RESIDENCY_SPL,
	METRICS_THROTTLER_RESIDENCY_FPPT,
	METRICS_THROTTLER_RESIDENCY_SPPT,
	METRICS_THROTTLER_RESIDENCY_THM_CORE,
	METRICS_THROTTLER_RESIDENCY_THM_GFX,
	METRICS_THROTTLER_RESIDENCY_THM_SOC,
	METRICS_AVERAGE_NPUCLK,
} MetricsMember_t;
1710 
/*
 * Categories of common-to-ASIC index mappings; selects which mapping table
 * (message, clock, feature, table, power source or workload) a lookup uses.
 */
enum smu_cmn2asic_mapping_type {
	CMN2ASIC_MAPPING_MSG,		/* SMU message ids */
	CMN2ASIC_MAPPING_CLK,		/* clock domain ids */
	CMN2ASIC_MAPPING_FEATURE,	/* feature bits */
	CMN2ASIC_MAPPING_TABLE,		/* SMU table ids */
	CMN2ASIC_MAPPING_PWR,		/* power source ids */
	CMN2ASIC_MAPPING_WORKLOAD,	/* workload profiles */
};
1719 
/* Power-down sequences that can be requested around a BACO-style off state. */
enum smu_baco_seq {
	BACO_SEQ_BACO = 0,	/* Bus Active, Chip Off */
	BACO_SEQ_MSR,
	BACO_SEQ_BAMACO,	/* BACO with memory kept active (MACO) */
	BACO_SEQ_ULPS,
	BACO_SEQ_COUNT,
};
1727 
/*
 * Designated-initializer helpers for the common-to-ASIC mapping tables
 * consumed via enum smu_cmn2asic_mapping_type. The leading 1/0 appears to
 * be a "mapping valid" flag (see TAB_MAP_INVALID) — confirm against the
 * mapping struct definition in smu_cmn.
 */

/* Map a common SMU message id to the ASIC-specific message index. */
#define MSG_MAP(msg, index, flags) \
	[SMU_MSG_##msg] = {1, (index), (flags)}

/* Map a common clock id to the ASIC-specific clock index. */
#define CLK_MAP(clk, index) \
	[SMU_##clk] = {1, (index)}

/* Map a common feature bit to the identically named ASIC feature bit. */
#define FEA_MAP(fea) \
	[SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT}

/* Common name is DPM_<fea>, ASIC name is <fea>_DPM. */
#define FEA_MAP_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT}

/* Like FEA_MAP_REVERSE, but the common name carries a CLK suffix. */
#define FEA_MAP_HALF_REVERSE(fea) \
	[SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT}

/* Map a common table id to the ASIC table id. */
#define TAB_MAP(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

/* Identical to TAB_MAP; spells out that the entry is valid. */
#define TAB_MAP_VALID(tab) \
	[SMU_TABLE_##tab] = {1, TABLE_##tab}

/* Table mapping entry whose leading flag is 0 (entry present but invalid). */
#define TAB_MAP_INVALID(tab) \
	[SMU_TABLE_##tab] = {0, TABLE_##tab}

/* Map a common power-source id to the ASIC power-source id. */
#define PWR_MAP(tab) \
	[SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab}

/* Map a powerplay workload profile to the ASIC workload value. */
#define WORKLOAD_MAP(profile, workload) \
	[profile] = {1, (workload)}
1757 
1758 /**
1759  * smu_memcpy_trailing - Copy the end of one structure into the middle of another
1760  *
1761  * @dst: Pointer to destination struct
1762  * @first_dst_member: The member name in @dst where the overwrite begins
1763  * @last_dst_member: The member name in @dst where the overwrite ends after
1764  * @src: Pointer to the source struct
1765  * @first_src_member: The member name in @src where the copy begins
1766  *
1767  */
1768 #define smu_memcpy_trailing(dst, first_dst_member, last_dst_member,	   \
1769 			    src, first_src_member)			   \
1770 ({									   \
1771 	size_t __src_offset = offsetof(typeof(*(src)), first_src_member);  \
1772 	size_t __src_size = sizeof(*(src)) - __src_offset;		   \
1773 	size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member);  \
1774 	size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \
1775 			    __dst_offset;				   \
1776 	BUILD_BUG_ON(__src_size != __dst_size);				   \
1777 	__builtin_memcpy((u8 *)(dst) + __dst_offset,			   \
1778 			 (u8 *)(src) + __src_offset,			   \
1779 			 __dst_size);					   \
1780 })
1781 
/* One Wi-Fi frequency band entry: lower/upper bound of the band
 * (units defined by the PMFW interface — confirm against PMFW headers). */
typedef struct {
	uint16_t     LowFreq;
	uint16_t     HighFreq;
} WifiOneBand_t;
1786 
/* Table of occupied Wi-Fi bands passed to the SMU for WBRF exclusion. */
typedef struct {
	uint32_t		WifiBandEntryNum;	/* valid entries in WifiBandEntry[] */
	WifiOneBand_t	WifiBandEntry[11];
	uint32_t		MmHubPadding[8];	/* padding required by the FW table layout */
} WifiBandEntryTable_t;
1792 
1793 #define STR_SOC_PSTATE_POLICY "soc_pstate"
1794 #define STR_XGMI_PLPD_POLICY "xgmi_plpd"
1795 
1796 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
1797 					 enum pp_pm_policy p_type);
1798 
1799 static inline enum smu_driver_table_id
smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)1800 smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
1801 {
1802 	switch (type) {
1803 	case SMU_TEMP_METRIC_BASEBOARD:
1804 		return SMU_DRIVER_TABLE_BASEBOARD_TEMP_METRICS;
1805 	case SMU_TEMP_METRIC_GPUBOARD:
1806 		return SMU_DRIVER_TABLE_GPUBOARD_TEMP_METRICS;
1807 	default:
1808 		return SMU_DRIVER_TABLE_COUNT;
1809 	}
1810 
1811 	return SMU_DRIVER_TABLE_COUNT;
1812 }
1813 
smu_table_cache_update_time(struct smu_table * table,unsigned long time)1814 static inline void smu_table_cache_update_time(struct smu_table *table,
1815 					       unsigned long time)
1816 {
1817 	table->cache.last_cache_time = time;
1818 }
1819 
smu_table_cache_is_valid(struct smu_table * table)1820 static inline bool smu_table_cache_is_valid(struct smu_table *table)
1821 {
1822 	if (!table->cache.buffer || !table->cache.last_cache_time ||
1823 	    !table->cache.interval || !table->cache.size ||
1824 	    time_after(jiffies,
1825 		       table->cache.last_cache_time +
1826 			       msecs_to_jiffies(table->cache.interval)))
1827 		return false;
1828 
1829 	return true;
1830 }
1831 
smu_table_cache_init(struct smu_context * smu,enum smu_table_id table_id,size_t size,uint32_t cache_interval)1832 static inline int smu_table_cache_init(struct smu_context *smu,
1833 				       enum smu_table_id table_id, size_t size,
1834 				       uint32_t cache_interval)
1835 {
1836 	struct smu_table_context *smu_table = &smu->smu_table;
1837 	struct smu_table *tables = smu_table->tables;
1838 
1839 	tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
1840 	if (!tables[table_id].cache.buffer)
1841 		return -ENOMEM;
1842 
1843 	tables[table_id].cache.last_cache_time = 0;
1844 	tables[table_id].cache.interval = cache_interval;
1845 	tables[table_id].cache.size = size;
1846 
1847 	return 0;
1848 }
1849 
smu_table_cache_fini(struct smu_context * smu,enum smu_table_id table_id)1850 static inline void smu_table_cache_fini(struct smu_context *smu,
1851 					enum smu_table_id table_id)
1852 {
1853 	struct smu_table_context *smu_table = &smu->smu_table;
1854 	struct smu_table *tables = smu_table->tables;
1855 
1856 	if (tables[table_id].cache.buffer) {
1857 		kfree(tables[table_id].cache.buffer);
1858 		tables[table_id].cache.buffer = NULL;
1859 		tables[table_id].cache.last_cache_time = 0;
1860 		tables[table_id].cache.interval = 0;
1861 	}
1862 }
1863 
smu_driver_table_init(struct smu_context * smu,enum smu_driver_table_id table_id,size_t size,uint32_t cache_interval)1864 static inline int smu_driver_table_init(struct smu_context *smu,
1865 					enum smu_driver_table_id table_id,
1866 					size_t size, uint32_t cache_interval)
1867 {
1868 	struct smu_table_context *smu_table = &smu->smu_table;
1869 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1870 
1871 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1872 		return -EINVAL;
1873 
1874 	driver_tables[table_id].id = table_id;
1875 	driver_tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
1876 	if (!driver_tables[table_id].cache.buffer)
1877 		return -ENOMEM;
1878 
1879 	driver_tables[table_id].cache.last_cache_time = 0;
1880 	driver_tables[table_id].cache.interval = cache_interval;
1881 	driver_tables[table_id].cache.size = size;
1882 
1883 	return 0;
1884 }
1885 
smu_driver_table_fini(struct smu_context * smu,enum smu_driver_table_id table_id)1886 static inline void smu_driver_table_fini(struct smu_context *smu,
1887 					 enum smu_driver_table_id table_id)
1888 {
1889 	struct smu_table_context *smu_table = &smu->smu_table;
1890 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1891 
1892 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1893 		return;
1894 
1895 	if (driver_tables[table_id].cache.buffer) {
1896 		kfree(driver_tables[table_id].cache.buffer);
1897 		driver_tables[table_id].cache.buffer = NULL;
1898 		driver_tables[table_id].cache.last_cache_time = 0;
1899 		driver_tables[table_id].cache.interval = 0;
1900 	}
1901 }
1902 
smu_driver_table_is_valid(struct smu_driver_table * table)1903 static inline bool smu_driver_table_is_valid(struct smu_driver_table *table)
1904 {
1905 	if (!table->cache.buffer || !table->cache.last_cache_time ||
1906 	    !table->cache.interval || !table->cache.size ||
1907 	    time_after(jiffies,
1908 		       table->cache.last_cache_time +
1909 			       msecs_to_jiffies(table->cache.interval)))
1910 		return false;
1911 
1912 	return true;
1913 }
1914 
smu_driver_table_ptr(struct smu_context * smu,enum smu_driver_table_id table_id)1915 static inline void *smu_driver_table_ptr(struct smu_context *smu,
1916 					 enum smu_driver_table_id table_id)
1917 {
1918 	struct smu_table_context *smu_table = &smu->smu_table;
1919 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1920 
1921 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1922 		return NULL;
1923 
1924 	return driver_tables[table_id].cache.buffer;
1925 }
1926 
1927 static inline void
smu_driver_table_update_cache_time(struct smu_context * smu,enum smu_driver_table_id table_id)1928 smu_driver_table_update_cache_time(struct smu_context *smu,
1929 				   enum smu_driver_table_id table_id)
1930 {
1931 	struct smu_table_context *smu_table = &smu->smu_table;
1932 	struct smu_driver_table *driver_tables = smu_table->driver_tables;
1933 
1934 	if (table_id >= SMU_DRIVER_TABLE_COUNT)
1935 		return;
1936 
1937 	driver_tables[table_id].cache.last_cache_time = jiffies;
1938 }
1939 
/*
 * Exported swsmu entry points.  These are only visible to the top-level
 * (L1) code layer; the lower SMU IP layers define SWSMU_CODE_LAYER_L2/3/4
 * and must not call back up through these.
 */
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type);

/* GPU reset support queries and triggers. */
bool smu_mode1_reset_is_support(struct smu_context *smu);
bool smu_link_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
int smu_link_reset(struct smu_context *smu);

extern const struct amd_ip_funcs smu_ip_funcs;

bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);

/* DPM clock range query/override. */
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max);

int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type,
			    uint32_t min, uint32_t max);

int smu_set_gfx_power_up_by_imu(struct smu_context *smu);

int smu_set_ac_dc(struct smu_context *smu);

int smu_set_xgmi_plpd_mode(struct smu_context *smu,
			   enum pp_xgmi_plpd_mode mode);

/* GFXOFF entry-count/residency/status accessors. */
int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);

int smu_set_residency_gfxoff(struct smu_context *smu, bool value);

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg);
/* RAS / telemetry helpers. */
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
/* Per-instance engine resets (inst_mask selects instances). */
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf);
const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);

int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
			    uint32_t param, uint32_t *readarg);
#endif

/* Feature-capability flags, visible to all code layers. */
void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
2004 
smu_feature_bits_is_set(const struct smu_feature_bits * bits,unsigned int bit)2005 static inline bool smu_feature_bits_is_set(const struct smu_feature_bits *bits,
2006 					   unsigned int bit)
2007 {
2008 	if (bit >= SMU_FEATURE_MAX)
2009 		return false;
2010 
2011 	return test_bit(bit, bits->bits);
2012 }
2013 
smu_feature_bits_set_bit(struct smu_feature_bits * bits,unsigned int bit)2014 static inline void smu_feature_bits_set_bit(struct smu_feature_bits *bits,
2015 					    unsigned int bit)
2016 {
2017 	if (bit < SMU_FEATURE_MAX)
2018 		__set_bit(bit, bits->bits);
2019 }
2020 
smu_feature_bits_clear_bit(struct smu_feature_bits * bits,unsigned int bit)2021 static inline void smu_feature_bits_clear_bit(struct smu_feature_bits *bits,
2022 					      unsigned int bit)
2023 {
2024 	if (bit < SMU_FEATURE_MAX)
2025 		__clear_bit(bit, bits->bits);
2026 }
2027 
/* Clear the full SMU_FEATURE_MAX-bit width of @bits. */
static inline void smu_feature_bits_clearall(struct smu_feature_bits *bits)
{
	bitmap_zero(bits->bits, SMU_FEATURE_MAX);
}
2032 
/* Set the full SMU_FEATURE_MAX-bit width of @bits. */
static inline void smu_feature_bits_fill(struct smu_feature_bits *bits)
{
	bitmap_fill(bits->bits, SMU_FEATURE_MAX);
}
2037 
/*
 * Return true when @bits and @mask share at least one set bit.
 * @mask must be a raw bitmap of at least SMU_FEATURE_MAX bits.
 */
static inline bool
smu_feature_bits_test_mask(const struct smu_feature_bits *bits,
			   const unsigned long *mask)
{
	return bitmap_intersects(bits->bits, mask, SMU_FEATURE_MAX);
}
2044 
/*
 * Load the low @nbits bits of @bits from an array of u32 words
 * (the layout used by SMU firmware feature masks).
 */
static inline void smu_feature_bits_from_arr32(struct smu_feature_bits *bits,
					       const uint32_t *arr,
					       unsigned int nbits)
{
	bitmap_from_arr32(bits->bits, arr, nbits);
}
2051 
/*
 * Export the low @nbits bits of @bits into an array of u32 words.
 * @arr must be large enough to hold @nbits bits.
 */
static inline void
smu_feature_bits_to_arr32(const struct smu_feature_bits *bits, uint32_t *arr,
			  unsigned int nbits)
{
	bitmap_to_arr32(arr, bits->bits, nbits);
}
2058 
/* Return true if none of the low @nbits bits of @bits are set. */
static inline bool smu_feature_bits_empty(const struct smu_feature_bits *bits,
					  unsigned int nbits)
{
	return bitmap_empty(bits->bits, nbits);
}
2064 
/* Return true if all of the low @nbits bits of @bits are set. */
static inline bool smu_feature_bits_full(const struct smu_feature_bits *bits,
					 unsigned int nbits)
{
	return bitmap_full(bits->bits, nbits);
}
2070 
/*
 * Copy the low @nbits bits from raw bitmap @src into @dst.
 * @src must span at least @nbits bits.
 */
static inline void smu_feature_bits_copy(struct smu_feature_bits *dst,
					 const unsigned long *src,
					 unsigned int nbits)
{
	bitmap_copy(dst->bits, src, nbits);
}
2077 
2078 static inline struct smu_feature_bits *
__smu_feature_get_list(struct smu_context * smu,enum smu_feature_list list)2079 __smu_feature_get_list(struct smu_context *smu, enum smu_feature_list list)
2080 {
2081 	if (unlikely(list >= SMU_FEATURE_LIST_MAX)) {
2082 		dev_warn(smu->adev->dev, "Invalid feature list: %d\n", list);
2083 		return &smu->smu_feature.bits[SMU_FEATURE_LIST_SUPPORTED];
2084 	}
2085 
2086 	return &smu->smu_feature.bits[list];
2087 }
2088 
smu_feature_list_is_set(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2089 static inline bool smu_feature_list_is_set(struct smu_context *smu,
2090 					   enum smu_feature_list list,
2091 					   unsigned int bit)
2092 {
2093 	if (bit >= smu->smu_feature.feature_num)
2094 		return false;
2095 
2096 	return smu_feature_bits_is_set(__smu_feature_get_list(smu, list), bit);
2097 }
2098 
smu_feature_list_set_bit(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2099 static inline void smu_feature_list_set_bit(struct smu_context *smu,
2100 					    enum smu_feature_list list,
2101 					    unsigned int bit)
2102 {
2103 	if (bit >= smu->smu_feature.feature_num)
2104 		return;
2105 
2106 	smu_feature_bits_set_bit(__smu_feature_get_list(smu, list), bit);
2107 }
2108 
smu_feature_list_clear_bit(struct smu_context * smu,enum smu_feature_list list,unsigned int bit)2109 static inline void smu_feature_list_clear_bit(struct smu_context *smu,
2110 					      enum smu_feature_list list,
2111 					      unsigned int bit)
2112 {
2113 	if (bit >= smu->smu_feature.feature_num)
2114 		return;
2115 
2116 	smu_feature_bits_clear_bit(__smu_feature_get_list(smu, list), bit);
2117 }
2118 
/*
 * Set every bit in the given feature list.
 * NOTE(review): this fills the full SMU_FEATURE_MAX width, whereas the
 * other list helpers bound their operations by feature_num — confirm
 * that setting bits beyond feature_num is intended here.
 */
static inline void smu_feature_list_set_all(struct smu_context *smu,
					    enum smu_feature_list list)
{
	smu_feature_bits_fill(__smu_feature_get_list(smu, list));
}
2124 
/* Clear every bit (full SMU_FEATURE_MAX width) in the given feature list. */
static inline void smu_feature_list_clear_all(struct smu_context *smu,
					      enum smu_feature_list list)
{
	smu_feature_bits_clearall(__smu_feature_get_list(smu, list));
}
2130 
smu_feature_list_is_empty(struct smu_context * smu,enum smu_feature_list list)2131 static inline bool smu_feature_list_is_empty(struct smu_context *smu,
2132 					     enum smu_feature_list list)
2133 {
2134 	return smu_feature_bits_empty(__smu_feature_get_list(smu, list),
2135 				      smu->smu_feature.feature_num);
2136 }
2137 
smu_feature_list_set_bits(struct smu_context * smu,enum smu_feature_list dst_list,const unsigned long * src)2138 static inline void smu_feature_list_set_bits(struct smu_context *smu,
2139 					     enum smu_feature_list dst_list,
2140 					     const unsigned long *src)
2141 {
2142 	smu_feature_bits_copy(__smu_feature_get_list(smu, dst_list), src,
2143 			      smu->smu_feature.feature_num);
2144 }
2145 
/*
 * Export this ASIC's feature_num bits of the given feature list into an
 * array of u32 words; @arr must be large enough to hold them.
 */
static inline void smu_feature_list_to_arr32(struct smu_context *smu,
					     enum smu_feature_list list,
					     uint32_t *arr)
{
	const struct smu_feature_bits *target = __smu_feature_get_list(smu, list);

	smu_feature_bits_to_arr32(target, arr, smu->smu_feature.feature_num);
}
2153 
smu_feature_init(struct smu_context * smu,int feature_num)2154 static inline void smu_feature_init(struct smu_context *smu, int feature_num)
2155 {
2156 	if (!feature_num || smu->smu_feature.feature_num != 0)
2157 		return;
2158 
2159 	smu->smu_feature.feature_num = feature_num;
2160 	smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_SUPPORTED);
2161 	smu_feature_list_clear_all(smu, SMU_FEATURE_LIST_ALLOWED);
2162 }
2163 
2164 #endif
2165