xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c (revision 16280ded45fba1216d1d4c6acfc20c2d5b45ef50)
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

#include "amdgpu_reset.h"

#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK   0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS    0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK      0x12200218

#define XGMI_STATE_DISABLE                      0xD1
#define XGMI_STATE_LS0                          0x81
#define XGMI_LINK_ACTIVE			1
#define XGMI_LINK_INACTIVE			0
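/*
 * Low byte of the PCS state history register: 0xD1 marks a disabled
 * link, 0x81 is LS0, i.e. a trained and active link. This interpretation
 * is inferred from how the values are used in
 * amdgpu_get_xgmi_link_status() below.
 */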

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static LIST_HEAD(xgmi_hive_list);

static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
};

static const int wafl_pcs_err_status_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};

static const int wafl_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const int xgmi3x16_pcs_err_status_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const u64 xgmi_v6_4_0_mca_base_array[] = {
	0x11a09200,
	0x11b09200,
};

static const char *xgmi_v6_4_0_ras_error_code_ext[32] = {
	[0x00] = "XGMI PCS DataLossErr",
	[0x01] = "XGMI PCS TrainingErr",
	[0x02] = "XGMI PCS FlowCtrlAckErr",
	[0x03] = "XGMI PCS RxFifoUnderflowErr",
	[0x04] = "XGMI PCS RxFifoOverflowErr",
	[0x05] = "XGMI PCS CRCErr",
	[0x06] = "XGMI PCS BERExceededErr",
	[0x07] = "XGMI PCS TxMetaDataErr",
	[0x08] = "XGMI PCS ReplayBufParityErr",
	[0x09] = "XGMI PCS DataParityErr",
	[0x0a] = "XGMI PCS ReplayFifoOverflowErr",
	[0x0b] = "XGMI PCS ReplayFifoUnderflowErr",
	[0x0c] = "XGMI PCS ElasticFifoOverflowErr",
	[0x0d] = "XGMI PCS DeskewErr",
	[0x0e] = "XGMI PCS FlowCtrlCRCErr",
	[0x0f] = "XGMI PCS DataStartupLimitErr",
	[0x10] = "XGMI PCS FCInitTimeoutErr",
	[0x11] = "XGMI PCS RecoveryTimeoutErr",
	[0x12] = "XGMI PCS ReadySerialTimeoutErr",
	[0x13] = "XGMI PCS ReadySerialAttemptErr",
	[0x14] = "XGMI PCS RecoveryAttemptErr",
	[0x15] = "XGMI PCS RecoveryRelockAttemptErr",
	[0x16] = "XGMI PCS ReplayAttemptErr",
	[0x17] = "XGMI PCS SyncHdrErr",
	[0x18] = "XGMI PCS TxReplayTimeoutErr",
	[0x19] = "XGMI PCS RxReplayTimeoutErr",
	[0x1a] = "XGMI PCS LinkSubTxTimeoutErr",
	[0x1b] = "XGMI PCS LinkSubRxTimeoutErr",
	[0x1c] = "XGMI PCS RxCMDPktErr",
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
	{"XGMI3X16 PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI3X16 PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI3X16 PCS FlowCtrlAckErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
	{"XGMI3X16 PCS RxFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
	{"XGMI3X16 PCS RxFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
	{"XGMI3X16 PCS CRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI3X16 PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI3X16 PCS TxVcidDataErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
	{"XGMI3X16 PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI3X16 PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI3X16 PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI3X16 PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI3X16 PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI3X16 PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI3X16 PCS FlowCtrlCRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
	{"XGMI3X16 PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI3X16 PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI3X16 PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI3X16 PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI3X16 PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
	{"XGMI3X16 PCS ReplayAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
	{"XGMI3X16 PCS SyncHdrErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
	{"XGMI3X16 PCS TxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
	{"XGMI3X16 PCS RxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
	{"XGMI3X16 PCS LinkSubTxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
	{"XGMI3X16 PCS LinkSubRxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
	{"XGMI3X16 PCS RxCMDPktErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};

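/*
 * Read back the PCS state history for one global link. The address math
 * below appears to assume two xGMI3x16 link instances per addressable
 * instance: bit 20 of the SMN address selects the link within an
 * instance, while encode_ext_smn_addressing() selects the owning
 * instance (the 0x11a00070 register is the PCS state history register,
 * following the smnPCS_XGMI3X16_* convention above).
 */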
static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
{
	const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
	const int xgmi_inst = 2;
	u32 link_inst;
	u64 addr;

	link_inst = global_link_num % xgmi_inst;

	addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
		adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);

	return RREG32_PCIE_EXT(addr);
}

int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
	u32 xgmi_state_reg_val;

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_DISABLE)
		return -ENOLINK;

	if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0)
		return XGMI_LINK_ACTIVE;

	return XGMI_LINK_INACTIVE;
}

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirror
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously concatenating the
 * power-of-two padded VRAM aperture of each node.
 */
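
/*
 * Example (illustrative only, with made-up values): reading the IDs of
 * a hive from user space
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_device_id
 *   721
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   42
 *
 * Every card in the same hive reports the same xgmi_hive_id.
 */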

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = 0444
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};
ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	amdgpu_reset_put_reset_domain(hive->reset_domain);
	hive->reset_domain = NULL;

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

static const struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_groups = amdgpu_xgmi_hive_groups,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}

static ssize_t amdgpu_xgmi_show_physical_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id);
}

static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i, size = 0;

	for (i = 0; i < top->num_nodes; i++)
		size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_hops);
	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i, size = 0;

	for (i = 0; i < top->num_nodes; i++)
		size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_links);
	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i, j, size = 0;
	int current_node;
	/*
	 * Get the sysfs node id of the current socket and show it in the
	 * port num info output for easy reading. Note that this is NOT the
	 * node id retrieved from the XGMI TA.
	 */
	for (i = 0; i < top->num_nodes; i++) {
		if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) {
			current_node = i;
			break;
		}
	}

	if (i == top->num_nodes)
		return -EINVAL;

	for (i = 0; i < top->num_nodes; i++) {
		for (j = 0; j < top->nodes[i].num_links; j++)
			/* node id in sysfs starts from 1 rather than 0, so +1 here */
			size += sysfs_emit_at(buf, size, "%02x:%02x ->  %02x:%02x\n", current_node + 1,
					      top->nodes[i].port_num[j].src_xgmi_port_num, i + 1,
					      top->nodes[i].port_num[j].dst_xgmi_port_num);
	}

	return size;
}

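/*
 * FICAA/FICAD are the DF indirect config access address/data registers;
 * the 0x456801 constant ORed in below sets the access-enable and
 * register/instance select bits (the exact field layout is an
 * assumption here, see the DF documentation).
 */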
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	if ((!adev->df.funcs) ||
	    (!adev->df.funcs->get_fica) ||
	    (!adev->df.funcs->set_fica))
		return -EINVAL;

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}

static DEVICE_ATTR(xgmi_device_id, 0444, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
static DEVICE_ATTR(xgmi_error, 0444, amdgpu_xgmi_show_error, NULL);
static DEVICE_ATTR(xgmi_num_hops, 0444, amdgpu_xgmi_show_num_hops, NULL);
static DEVICE_ATTR(xgmi_num_links, 0444, amdgpu_xgmi_show_num_links, NULL);
static DEVICE_ATTR(xgmi_port_num, 0444, amdgpu_xgmi_show_connected_port_num, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_xgmi_physical_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_physical_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create xgmi num hops file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
	if (ret)
		pr_err("failed to create xgmi_num_hops\n");

	/* Create xgmi num links file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
	if (ret)
		pr_err("failed to create xgmi_num_links\n");

	/* Create xgmi port num file if supported */
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
		ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num);
		if (ret)
			dev_err(adev->dev, "failed to create xgmi_port_num\n");
	}

	/* Create a sysfs link to the hive info folder on every device except
	 * the one that hosts the hive kobject */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create a sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
		device_remove_file(adev->dev, &dev_attr_xgmi_port_num);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10] = { 0 };

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
	if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
		device_remove_file(adev->dev, &dev_attr_xgmi_port_num);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);
}

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node) {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		ret = -ENOMEM;
		hive = NULL;
		goto pro_end;
	}

	/* initialize a new hive if it doesn't exist yet */
	ret = kobject_init_and_add(&hive->kobj,
			&amdgpu_xgmi_hive_type,
			&adev->dev->kobj,
			"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kobject_put(&hive->kobj);
		hive = NULL;
		goto pro_end;
	}

	/*
	 * Only init hive->reset_domain for non-SRIOV configurations. For SRIOV,
	 * the host driver decides how to reset the GPU, either through FLR or
	 * chain reset. The guest side will get individual notifications from
	 * the host for the FLR if necessary.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Avoid recreating the reset domain when the hive is reconstructed
		 * for the case where the devices in the XGMI hive are reset during
		 * probe for a passthrough GPU.
		 * See https://www.spinics.net/lists/amd-gfx/msg58836.html
		 */
		if (adev->reset_domain->type != XGMI_HIVE) {
			hive->reset_domain =
				amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
			if (!hive->reset_domain) {
				dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
				ret = -ENOMEM;
				kobject_put(&hive->kobj);
				hive = NULL;
				goto pro_end;
			}
		} else {
			amdgpu_reset_get_reset_domain(adev->reset_domain);
			hive->reset_domain = adev->reset_domain;
		}
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);

	/*
	 * The hive pstate on boot is high in vega20, so we have to go to
	 * low pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/*
	 * Pstate switching is temporarily disabled because of a fw bug;
	 * the code below is intentionally left unreachable until it can
	 * be re-enabled.
	 */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the hive
	 * to go high, but all peers must request pstate low for the hive to go low.
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Each PSP needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

/*
 * NOTE: psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
 */
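/*
 * Illustrative decode (not from the spec): num_hops = 0x41 (0b01000001)
 * would mean an xGMI3 link (bits [7:6] = 1) that is one hop away
 * (bits [2:0] = 1); amdgpu_xgmi_get_hops_count() below masks off
 * everything except [2:0].
 */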
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
			       struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	if (!adev->gmc.xgmi.supported)
		return 0;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;

	dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
		peer_adev->gmc.xgmi.physical_node_id);

	return 0;
}

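/*
 * Report the XGMI bandwidth to a peer (or per link). Worked example from
 * the fixed numbers below: 25 GT/s x 16 lanes / 8 bits per byte gives
 * 50 GB/s per link (50000 MB/s with AMDGPU_XGMI_BW_UNIT_MBYTES), so a
 * peer reachable over three links reports min_bw = 50000 and
 * max_bw = 150000 in MB/s mode.
 */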
int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
			      enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
			      uint32_t *min_bw, uint32_t *max_bw)
{
	bool peer_mode = bw_mode == AMDGPU_XGMI_BW_MODE_PER_PEER;
	int unit_scale = bw_unit == AMDGPU_XGMI_BW_UNIT_MBYTES ? 1000 : 1;
	int speed = 25, num_lanes = 16, num_links = !peer_mode ? 1 : -1;

	if (!(min_bw && max_bw))
		return -EINVAL;

	*min_bw = 0;
	*max_bw = 0;

	if (!adev->gmc.xgmi.supported)
		return -ENODATA;

	if (peer_mode && !peer_adev)
		return -EINVAL;

	if (peer_mode) {
		struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
		int i;

		for (i = 0; i < top->num_nodes; ++i) {
			if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
				continue;

			num_links = top->nodes[i].num_links;
			break;
		}
	}

	if (num_links == -1) {
		dev_err(adev->dev, "Failed to get number of xgmi links for peer %d.\n",
			peer_adev->gmc.xgmi.physical_node_id);
	} else if (num_links) {
		int per_link_bw = (speed * num_lanes * unit_scale)/BITS_PER_BYTE;

		*min_bw = per_link_bw;
		*max_bw = num_links * per_link_bw;
	}

	return 0;
}

bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
					struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	/* Sharing should always be enabled for non-SRIOV. */
	if (!amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return !!top->nodes[i].is_sharing_enabled;

	return false;
}

/*
 * Devices that support extended data require the entire hive to initialize with
 * the shared memory buffer flag set.
 *
 * Hive locks and conditions apply - see amdgpu_xgmi_add_device
 */
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
							bool set_extended_data)
{
	struct amdgpu_device *tmp_adev;
	int ret;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Failed to initialize xgmi session for data partition %i\n",
				set_extended_data);
			return ret;
		}
	}

	return 0;
}

static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
	struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
	struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;

	for (int i = 0; i < peer_info->num_nodes; i++) {
		if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
			for (int j = 0; j < top_info->num_nodes; j++) {
				if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
					peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
					peer_info->nodes[i].is_sharing_enabled =
							top_info->nodes[j].is_sharing_enabled;
					peer_info->nodes[i].num_links =
							top_info->nodes[j].num_links;
					return;
				}
			}
		}
	}
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp, false, true);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update the node list for the other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		if (amdgpu_sriov_vf(adev) &&
			adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
			/* only get the topology for the VF being initialized if it supports full duplex */
			ret = psp_xgmi_get_topology_info(&adev->psp, count,
						&adev->psp.xgmi_context.top_info, false);
			if (ret) {
				dev_err(adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					adev->gmc.xgmi.node_id,
					adev->gmc.xgmi.hive_id, ret);
				/* TODO: continue with some nodes failed, or disable the whole hive */
				goto exit_unlock;
			}

			/* fill the topology info for peers instead of getting it from PSP */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
			}
		} else {
			/* get the latest topology info for each device from psp */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info, false);
				if (ret) {
					dev_err(tmp_adev->dev,
						"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
						tmp_adev->gmc.xgmi.node_id,
						tmp_adev->gmc.xgmi.hive_id, ret);
					/* TODO: continue with some nodes failed, or disable the whole hive */
					goto exit_unlock;
				}
			}
		}

		/* get the topology again for hives that support extended data */
		if (adev->psp.xgmi_context.supports_extended_data) {

			/* initialize the hive to get extended data. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
			if (ret)
				goto exit_unlock;

			/* get the extended data. */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
						&tmp_adev->psp.xgmi_context.top_info, true);
				if (ret) {
					dev_err(tmp_adev->dev,
						"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
						tmp_adev->gmc.xgmi.node_id,
						tmp_adev->gmc.xgmi.hive_id, ret);
					goto exit_unlock;
				}
			}

			/* initialize the hive to get non-extended data for the next round. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
			if (ret)
				goto exit_unlock;
		}
	}

	if (!ret)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from the global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return 0;
}

static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				       enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	const char *error_str;
	u64 status, count;
	int ret, ext_error_code;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);

	error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
		xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
	if (error_str)
		dev_info(adev->dev, "%s detected\n", error_str);

	count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);

	switch (type) {
	case ACA_SMU_TYPE_UE:
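		/*
		 * Per xgmi_v6_4_0_ras_error_code_ext above, only
		 * DataLossErr (0x00) and DataParityErr (0x09) are counted
		 * as uncorrectable errors.
		 */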
		if (ext_error_code != 0 && ext_error_code != 9)
			count = 0ULL;

		bank->aca_err_type = ACA_ERROR_TYPE_UE;
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
		break;
	case ACA_SMU_TYPE_CE:
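		/* only BERExceededErr (0x06) contributes to the CE count */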
		count = ext_error_code == 6 ? count : 0ULL;
		bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank);
		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
	.aca_bank_parser = xgmi_v6_4_0_aca_bank_parser,
};

static const struct aca_info xgmi_v6_4_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_PCS_XGMI,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &xgmi_v6_4_0_aca_bank_ops,
};

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
					&xgmi_v6_4_0_aca_info, NULL);
		if (r)
			goto late_fini;
		break;
	default:
		break;
	}

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

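/*
 * Translate a device-local physical address into the hive-global address
 * space. Illustrative example: with a node_segment_size of 32 GB, an
 * address on physical node 2 is offset by 64 GB.
 */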
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	case CHIP_ALDEBARAN:
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 wafl_pcs_err_status_reg_aldebaran[i]);
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
			pcs_clear_status(adev,
					xgmi3x16_pcs_err_status_reg_v6_4[i]);
		break;
	default:
		break;
	}
}

static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
	WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]);
}

static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_reset_error_count(adev, i);
}

static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		xgmi_v6_4_0_reset_ras_error_count(adev);
		break;
	default:
		amdgpu_xgmi_legacy_reset_ras_error_count(adev);
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t mask_value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs,
					      bool check_mask)
{
	int i;
	int ue_cnt = 0;
	const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
	uint32_t field_array_size = 0;

	if (is_xgmi_pcs) {
		if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
		    IP_VERSION(6, 1, 0) ||
		    amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
		    IP_VERSION(6, 4, 0) ||
		    amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
		    IP_VERSION(6, 4, 1)) {
			pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
		} else {
			pcs_ras_fields = &xgmi_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
		}
	} else {
		pcs_ras_fields = &wafl_pcs_ras_fields[0];
		field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
	}

	if (check_mask)
		value = value & ~mask_value;

	/* query xgmi/wafl pcs error status,
	 * only ue is supported */
	for (i = 0; value && i < field_array_size; i++) {
		ue_cnt = (value &
				pcs_ras_fields[i].pcs_err_mask) >>
				pcs_ras_fields[i].pcs_err_shift;
		if (ue_cnt) {
			dev_info(adev->dev, "%s detected\n",
				 pcs_ras_fields[i].err_name);
			*ue_count += ue_cnt;
		}

		/* reset the bit value once the bit has been checked */
		value &= ~(pcs_ras_fields[i].pcs_err_mask);
	}

	return 0;
}

static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
						     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i, supported = 1;
	uint32_t data, mask_data = 0;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_VEGA20:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_ALDEBARAN:
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(wafl_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, true);
		}
		break;
	default:
		supported = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		break;
	default:
		if (!supported)
			dev_warn(adev->dev, "XGMI RAS error query not supported");
		break;
	}

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;
}

static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
{
	const char *error_str;
	int ext_error_code;

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);

	error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
		xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
	if (error_str)
		dev_info(adev->dev, "%s detected\n", error_str);

	switch (ext_error_code) {
	case 0:
		return ACA_ERROR_TYPE_UE;
	case 6:
		return ACA_ERROR_TYPE_CE;
	default:
		return -EINVAL;
	}
}

static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info,
					    u64 mca_base, struct ras_err_data *err_data)
{
	int xgmi_inst = mcm_info->die_id;
	u64 status = 0;

	status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
	if (!ACA_REG__STATUS__VAL(status))
		return;

	switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
	case ACA_ERROR_TYPE_UE:
		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
		break;
	case ACA_ERROR_TYPE_CE:
		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
		break;
	default:
		break;
	}

	WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xgmi_inst,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data);
}

static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_query_error_count(adev, i, err_data);
}

static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_error_status)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
	case IP_VERSION(6, 4, 1):
		xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
		break;
	default:
		amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status);
		break;
	}
}

/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
			void *inject_if, uint32_t instance_mask)
{
	int ret1, ret2;
	struct ta_ras_trigger_error_input *block_info =
				(struct ta_ras_trigger_error_input *)inject_if;

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);

	if (amdgpu_ras_intr_triggered())
		return ret2;

	ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret2;
}

struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
	.ras_error_inject = amdgpu_ras_error_inject_xgmi,
};

struct amdgpu_xgmi_ras xgmi_ras = {
	.ras_block = {
		.hw_ops = &xgmi_ras_hw_ops,
		.ras_late_init = amdgpu_xgmi_ras_late_init,
	},
};

int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_xgmi_ras *ras;

	if (!adev->gmc.xgmi.ras)
		return 0;

	ras = adev->gmc.xgmi.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
{
	struct amdgpu_hive_info *hive =
		container_of(work, struct amdgpu_hive_info, reset_on_init_work);
	struct amdgpu_reset_context reset_context;
	struct amdgpu_device *tmp_adev;
	struct list_head device_list;
	int r;

	mutex_lock(&hive->hive_lock);

	INIT_LIST_HEAD(&device_list);
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		list_add_tail(&tmp_adev->reset_list, &device_list);

	tmp_adev = list_first_entry(&device_list, struct amdgpu_device,
				    reset_list);
	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);

	reset_context.method = AMD_RESET_METHOD_ON_INIT;
	reset_context.reset_req_dev = tmp_adev;
	reset_context.hive = hive;
	reset_context.reset_device_list = &device_list;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

	amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
	mutex_unlock(&hive->hive_lock);
	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		r = amdgpu_ras_init_badpage_info(tmp_adev);
		if (r && r != -EHWPOISON)
			dev_err(tmp_adev->dev,
				"error during bad page data initialization");
	}
}

static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
{
	INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
	amdgpu_reset_domain_schedule(hive->reset_domain,
				     &hive->reset_on_init_work);
}

int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive;
	bool reset_scheduled;
	int num_devs;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	num_devs = atomic_read(&hive->number_devices);
	reset_scheduled = false;
	if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
		amdgpu_xgmi_schedule_reset_on_init(hive);
		reset_scheduled = true;
	}

	mutex_unlock(&hive->hive_lock);
	amdgpu_put_xgmi_hive(hive);

	if (reset_scheduled)
		flush_work(&hive->reset_on_init_work);

	return 0;
}

int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
				   struct amdgpu_hive_info *hive,
				   int req_nps_mode)
{
	struct amdgpu_device *tmp_adev;
	int cur_nps_mode, r;

	/* This is expected to be called only during driver unload. The
	 * request needs to be placed only once for all devices in the hive.
	 * If one of them fails, revert the request for the previously
	 * successful devices. After placing the request, set the hive mode
	 * to UNKNOWN so that other devices don't request anymore.
	 */
	mutex_lock(&hive->hive_lock);
	if (atomic_read(&hive->requested_nps_mode) ==
	    UNKNOWN_MEMORY_PARTITION_MODE) {
		dev_dbg(adev->dev, "Unexpected entry for hive NPS change");
		mutex_unlock(&hive->hive_lock);
		return 0;
	}
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		r = adev->gmc.gmc_funcs->request_mem_partition_mode(
			tmp_adev, req_nps_mode);
		if (r)
			break;
	}
	if (r) {
		/* Request back the current mode if one of the requests failed */
		cur_nps_mode =
			adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
		list_for_each_entry_continue_reverse(
			tmp_adev, &hive->device_list, gmc.xgmi.head)
			adev->gmc.gmc_funcs->request_mem_partition_mode(
				tmp_adev, cur_nps_mode);
	}
	/* Set to UNKNOWN so that other devices don't request anymore */
	atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
	mutex_unlock(&hive->hive_lock);

	return r;
}

bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
			   struct amdgpu_device *bo_adev)
{
	return (amdgpu_use_xgmi_p2p && adev != bo_adev &&
		adev->gmc.xgmi.hive_id &&
		adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}

void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.supported)
		return;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
		adev->gmc.xgmi.max_width = 16;
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
		adev->gmc.xgmi.max_width = 16;
		break;
	default:
		break;
	}
}