/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__

#include "amdgpu_smu.h"

#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)

#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5

#define SMU_IH_INTERRUPT_ID_TO_DRIVER 0xFE
#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO 0x2
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC 0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC 0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9

#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev)			\
	do {								\
		typecheck(struct gpu_metrics_v##frev##_##crev,		\
			  typeof(*(ptr)));				\
		struct metrics_table_header *header =			\
			(struct metrics_table_header *)(ptr);		\
		memset(header, 0xFF, sizeof(*(ptr)));			\
		header->format_revision = frev;				\
		header->content_revision = crev;			\
		header->structure_size = sizeof(*(ptr));		\
	} while (0)

#define smu_cmn_init_partition_metrics(ptr, frev, crev)			\
	do {								\
		typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \
			  typeof(*(ptr)));				\
		struct metrics_table_header *header =			\
			(struct metrics_table_header *)(ptr);		\
		memset(header, 0xFF, sizeof(*(ptr)));			\
		header->format_revision = frev;				\
		header->content_revision = crev;			\
		header->structure_size = sizeof(*(ptr));		\
	} while (0)
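
/*
 * Illustrative only: a minimal sketch of how an ASIC backend might use the
 * macros above when building its gpu_metrics table.  The metrics revision
 * (v1_3 here) and the surrounding table handling are assumptions; each ASIC
 * initializes whichever gpu_metrics_vX_Y revision it actually reports.
 *
 *	struct gpu_metrics_v1_3 *gpu_metrics =
 *		(struct gpu_metrics_v1_3 *)smu->smu_table.gpu_metrics_table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 *	gpu_metrics->temperature_edge = ...;
 *	...
 *	*table = (void *)gpu_metrics;
 *	return sizeof(struct gpu_metrics_v1_3);
 */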

extern const int link_speed[];

/* Helper to convert from PCIe Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
static inline int pcie_gen_to_speed(uint32_t gen)
{
	return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);
}
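
/*
 * Illustrative only: link_speed[] (defined in smu_cmn.c) holds the PCIe data
 * rates in 0.1 GT/s units, so a backend reporting the current link speed
 * might do something like the following.  pcie_gen is assumed to be the PCIe
 * generation read back from the ASIC.
 *
 *	uint32_t pcie_gen = ...;
 *
 *	gpu_metrics->pcie_link_speed = pcie_gen_to_speed(pcie_gen);
 *	// e.g. Gen 3 maps to 80, i.e. 8.0 GT/s
 */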

int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param);
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg);

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg);
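
/*
 * Illustrative only: a minimal sketch of driving the message interface.
 * SMU_MSG_GetDriverIfVersion and SMU_MSG_SetSoftMinByFreq are real message
 * enums, but whether a given ASIC maps them, and the exact argument encoding
 * used here, are assumptions.
 *
 *	uint32_t if_version;
 *	int ret;
 *
 *	// message without an argument, response read back into if_version
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, &if_version);
 *	if (ret)
 *		return ret;
 *
 *	// message with a 32-bit argument, response discarded
 *	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
 *					      (clk_id << 16) | min_freq, NULL);
 */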

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg);

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param);

int smu_cmn_wait_for_response(struct smu_context *smu);

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index);

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask);

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask);

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type);

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask);
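
/*
 * Illustrative only: querying which SMU features are currently enabled.
 * SMU_FEATURE_DPM_GFXCLK_BIT is a real smu_feature_mask value; the debug
 * logging is just a placeholder.
 *
 *	uint64_t feature_mask;
 *
 *	if (!smu_cmn_get_enabled_mask(smu, &feature_mask))
 *		dev_dbg(smu->adev->dev, "enabled features: 0x%llx\n",
 *			feature_mask);
 *
 *	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
 *		// GFXCLK DPM is active
 */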

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map);

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled);

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable);

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf);

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask);

int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask);

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version);

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu);

int smu_cmn_write_watermarks_table(struct smu_context *smu);

int smu_cmn_write_pptable(struct smu_context *smu);

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache);
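
/*
 * Illustrative only: the usual pattern for reading the ASIC metrics table.
 * SmuMetrics_t stands in for the ASIC-specific structure from the firmware
 * headers; passing bypass_cache = false allows a still-fresh cached copy to
 * be reused instead of re-fetching from the SMU.
 *
 *	SmuMetrics_t metrics;
 *	int ret;
 *
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
 *	if (ret)
 *		return ret;
 *	// read fields out of metrics here
 */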

int smu_cmn_get_combo_pptable(struct smu_context *smu);

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state);

/*
 * Helper function to make sysfs_emit_at() happy. Align buf to
 * the current page boundary and record the offset.
 */
static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
{
	if (!*buf || !offset)
		return;

	*offset = offset_in_page(*buf);
	*buf -= *offset;
}
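
/*
 * Illustrative only: a typical print_clk_levels-style callback pairs the
 * helper above with sysfs_emit_at(), which expects a page-aligned buffer.
 * The frequency value printed here is a placeholder.
 *
 *	int size = 0;
 *
 *	smu_cmn_get_sysfs_buf(&buf, &size);
 *	size += sysfs_emit_at(buf, size, "0: %uMhz\n", freq);
 *	...
 *	return size;
 */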

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask);

#endif
#endif