/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__

#include "amdgpu_smu.h"

#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)

#define FDO_PWM_MODE_STATIC  1
#define FDO_PWM_MODE_STATIC_RPM 5

#define SMU_IH_INTERRUPT_ID_TO_DRIVER                   0xFE
#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO                0x2
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC                  0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC                  0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0            0x5
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3            0x6
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING  0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9
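
/*
 * Illustrative sketch, not part of the upstream header: the context IDs
 * above arrive with an SMU_IH_INTERRUPT_ID_TO_DRIVER interrupt and are
 * typically dispatched on in the ASIC's IRQ handler.  The handler body
 * below is a hypothetical example; the fields used (entry->src_id,
 * entry->src_data[0]) follow the usual amdgpu_iv_entry layout.
 *
 *	if (entry->src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
 *		switch (entry->src_data[0]) {
 *		case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
 *			dev_dbg(adev->dev, "switched to AC power\n");
 *			break;
 *		case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
 *			schedule_work(&smu->throttling_logging_work);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */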

#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev)                   \
	do {                                                             \
		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
		struct gpu_metrics_v##frev##_##crev *tmp = (ptr);        \
		struct metrics_table_header *header =                    \
			(struct metrics_table_header *)tmp;              \
		memset(header, 0xFF, sizeof(*tmp));                      \
		header->format_revision = frev;                          \
		header->content_revision = crev;                         \
		header->structure_size = sizeof(*tmp);                   \
	} while (0)
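
/*
 * Usage sketch (the v1_3 revision and the smu_table pointer are
 * illustrative only): the macro fills the buffer with 0xFF ("not
 * populated") and stamps the header with the format/content revision
 * and structure size before the caller fills in individual fields.
 *
 *	struct gpu_metrics_v1_3 *gpu_metrics =
 *		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 *	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 */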

#define smu_cmn_init_partition_metrics(ptr, fr, cr)                        \
	do {                                                               \
		typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *,  \
			  (ptr));                                          \
		struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header =                      \
			(struct metrics_table_header *)tmp;                \
		memset(header, 0xFF, sizeof(*tmp));                        \
		header->format_revision = fr;                              \
		header->content_revision = cr;                             \
		header->structure_size = sizeof(*tmp);                     \
	} while (0)
67 
68 extern const int link_speed[];
69 
70 /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
pcie_gen_to_speed(uint32_t gen)71 static inline int pcie_gen_to_speed(uint32_t gen)
72 {
73 	return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);
74 }
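
/*
 * Example (illustrative; assumes the companion smu_cmn.c table maps
 * Gen1..Gen6 to {25, 50, 80, 160, 320, 640}): pcie_gen_to_speed(3)
 * yields 80, i.e. 8.0 GT/s.  A gen value of 0 is clamped to the Gen1
 * entry instead of indexing out of bounds.
 *
 *	uint32_t rate = pcie_gen_to_speed(pcie_table->pcie_gen[i]);
 */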

int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg);

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg);
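
/*
 * Usage sketch (the message chosen below is illustrative): send a
 * message and read back the response argument; passing a NULL read_arg
 * skips the readback.  smu_cmn_send_smc_msg_with_param() additionally
 * carries a 32-bit argument to the firmware.
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
 *	if (ret)
 *		return ret;
 */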

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg);

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param);

int smu_cmn_wait_for_response(struct smu_context *smu);

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index);

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask);

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask);

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type);

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask);
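
/*
 * Usage sketch (illustrative only): read the enabled-feature bitmap in
 * one call, or query a single feature directly via the generic
 * smu_feature_mask enum.
 *
 *	uint64_t feature_mask;
 *
 *	if (!smu_cmn_get_enabled_mask(smu, &feature_mask))
 *		dev_dbg(smu->adev->dev, "features: 0x%llx\n", feature_mask);
 *
 *	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
 *		dev_dbg(smu->adev->dev, "GFXCLK DPM enabled\n");
 */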

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map);

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled);

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable);

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf);

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask);

int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask);

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version);

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu);

int smu_cmn_write_watermarks_table(struct smu_context *smu);

int smu_cmn_write_pptable(struct smu_context *smu);

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache);
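
/*
 * Usage sketch (SmuMetrics_t and the field read below stand in for the
 * ASIC-specific metrics structure and are illustrative only): fetch the
 * SMU metrics table into a local copy, letting the helper reuse its
 * cached copy unless bypass_cache is set.
 *
 *	SmuMetrics_t metrics;
 *	int ret;
 *
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
 *	if (ret)
 *		return ret;
 *
 *	*value = metrics.AverageSocketPower;
 */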

int smu_cmn_get_combo_pptable(struct smu_context *smu);

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state);

/*
 * Helper function to make sysfs_emit_at() happy. Align buf to
 * the current page boundary and record the offset.
 */
static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
{
	if (!*buf || !offset)
		return;

	*offset = offset_in_page(*buf);
	*buf -= *offset;
}
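
/*
 * Usage sketch (buf, i and freq are illustrative locals): sysfs_emit_at()
 * requires a page-aligned buffer, so align first and then emit at the
 * recorded offset, as the print_clk_levels style callbacks typically do.
 *
 *	int size = 0;
 *
 *	smu_cmn_get_sysfs_buf(&buf, &size);
 *	size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, freq);
 */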

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask);

#endif
#endif
179