/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__

#include "amdgpu_smu.h"

#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)

#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5

#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
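
/*
 * Illustrative sketch, not part of this interface: an ASIC interrupt
 * handler is expected to match IH ring entries carrying
 * SMU_IH_INTERRUPT_ID_TO_DRIVER as the source ID and then dispatch on
 * the context ID in the payload. Field and work-item names below are
 * assumptions for the example only.
 *
 *	if (entry->src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
 *		switch (entry->src_data[0]) {
 *		case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
 *			dev_dbg(adev->dev, "switched to AC power\n");
 *			break;
 *		case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
 *			dev_dbg(adev->dev, "switched to DC power\n");
 *			break;
 *		case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
 *			schedule_work(&smu->throttling_logging_work);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */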

#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF

#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
	do { \
		typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
		struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = frev; \
		header->content_revision = crev; \
		header->structure_size = sizeof(*tmp); \
	} while (0)
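
/*
 * Usage sketch, assuming a v1.3 gpu_metrics producer (hypothetical
 * backend code, not part of this interface): the macro wipes the whole
 * table to 0xFF, so fields the backend never fills read back as
 * "not available", then stamps the header revision and size.
 *
 *	struct gpu_metrics_v1_3 *gpu_metrics =
 *		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 *	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
 */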

#define smu_cmn_init_partition_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)

#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)

#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
	do { \
		typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
			  (ptr)); \
		struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
		struct metrics_table_header *header = \
			(struct metrics_table_header *)tmp; \
		memset(header, 0xFF, sizeof(*tmp)); \
		header->format_revision = fr; \
		header->content_revision = cr; \
		header->structure_size = sizeof(*tmp); \
	} while (0)

extern const int link_speed[];

/* Helper to convert PCIe Gen 1/2/3/4/5/6 to link speed in units of 0.1 GT/s */
static inline int pcie_gen_to_speed(uint32_t gen)
{
	return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);
}
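
/*
 * Worked example, assuming the table in smu_cmn.c lists speeds in
 * 0.1 GT/s units (e.g. a Gen3 entry of 80 for 8.0 GT/s):
 * pcie_gen_to_speed(3) returns link_speed[2], and gen 0 is clamped to
 * the Gen1 entry rather than indexing out of bounds.
 */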

int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg);

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg);

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg);

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param);

int smu_cmn_wait_for_response(struct smu_context *smu);

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index);

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask);

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask);

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type);

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask);

uint64_t smu_cmn_get_indep_throttler_status(const unsigned long dep_status,
					    const uint8_t *throttler_map);

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled);

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable);

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf);

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask);

int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask);

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version);

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu);

int smu_cmn_write_watermarks_table(struct smu_context *smu);

int smu_cmn_write_pptable(struct smu_context *smu);

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache);

int smu_cmn_get_combo_pptable(struct smu_context *smu);

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state);

/*
 * Helper to keep sysfs_emit_at() happy: rewind buf to the start of the
 * page it points into and record the original offset so output can
 * resume from there.
 */
static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
{
	if (!*buf || !offset)
		return;

	*offset = offset_in_page(*buf);
	*buf -= *offset;
}
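
/*
 * Usage sketch, e.g. from a print_clk_levels()-style handler
 * (hypothetical caller, not part of this interface): the buf handed in
 * may already point partway into the sysfs page, so rewind it to the
 * page start and keep emitting at the recorded offset.
 *
 *	int size = 0;
 *
 *	smu_cmn_get_sysfs_buf(&buf, &size);
 *	size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i, freq);
 *	return size;
 */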

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask);

#endif
#endif