xref: /linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 220994d61cebfc04f071d69049127657c7e8191b)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
26 
27 #include <drm/drm_cache.h>
28 
29 #include "amdgpu.h"
30 #include "gmc_v9_0.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
33 
34 #include "gc/gc_9_0_sh_mask.h"
35 #include "dce/dce_12_0_offset.h"
36 #include "dce/dce_12_0_sh_mask.h"
37 #include "vega10_enum.h"
38 #include "mmhub/mmhub_1_0_offset.h"
39 #include "athub/athub_1_0_sh_mask.h"
40 #include "athub/athub_1_0_offset.h"
41 #include "oss/osssys_4_0_offset.h"
42 
43 #include "soc15.h"
44 #include "soc15d.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
47 
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "gfxhub_v1_2.h"
53 #include "mmhub_v9_4.h"
54 #include "mmhub_v1_7.h"
55 #include "mmhub_v1_8.h"
56 #include "umc_v6_1.h"
57 #include "umc_v6_0.h"
58 #include "umc_v6_7.h"
59 #include "umc_v12_0.h"
60 #include "hdp_v4_0.h"
61 #include "mca_v3_0.h"
62 
63 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
64 
65 #include "amdgpu_ras.h"
66 #include "amdgpu_xgmi.h"
67 
68 /* add these here since we already include dce12 headers and these are for DCN */
69 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
70 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
71 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
74 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
75 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
76 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
77 
78 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2                                                          0x05ea
79 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2
80 
81 static const char * const gfxhub_client_ids[] = {
82 	"CB",
83 	"DB",
84 	"IA",
85 	"WD",
86 	"CPF",
87 	"CPC",
88 	"CPG",
89 	"RLC",
90 	"TCP",
91 	"SQC (inst)",
92 	"SQC (data)",
93 	"SQG",
94 	"PA",
95 };
96 
97 static const char *mmhub_client_ids_raven[][2] = {
98 	[0][0] = "MP1",
99 	[1][0] = "MP0",
100 	[2][0] = "VCN",
101 	[3][0] = "VCNU",
102 	[4][0] = "HDP",
103 	[5][0] = "DCE",
104 	[13][0] = "UTCL2",
105 	[19][0] = "TLS",
106 	[26][0] = "OSS",
107 	[27][0] = "SDMA0",
108 	[0][1] = "MP1",
109 	[1][1] = "MP0",
110 	[2][1] = "VCN",
111 	[3][1] = "VCNU",
112 	[4][1] = "HDP",
113 	[5][1] = "XDP",
114 	[6][1] = "DBGU0",
115 	[7][1] = "DCE",
116 	[8][1] = "DCEDWB0",
117 	[9][1] = "DCEDWB1",
118 	[26][1] = "OSS",
119 	[27][1] = "SDMA0",
120 };
121 
122 static const char *mmhub_client_ids_renoir[][2] = {
123 	[0][0] = "MP1",
124 	[1][0] = "MP0",
125 	[2][0] = "HDP",
126 	[4][0] = "DCEDMC",
127 	[5][0] = "DCEVGA",
128 	[13][0] = "UTCL2",
129 	[19][0] = "TLS",
130 	[26][0] = "OSS",
131 	[27][0] = "SDMA0",
132 	[28][0] = "VCN",
133 	[29][0] = "VCNU",
134 	[30][0] = "JPEG",
135 	[0][1] = "MP1",
136 	[1][1] = "MP0",
137 	[2][1] = "HDP",
138 	[3][1] = "XDP",
139 	[6][1] = "DBGU0",
140 	[7][1] = "DCEDMC",
141 	[8][1] = "DCEVGA",
142 	[9][1] = "DCEDWB",
143 	[26][1] = "OSS",
144 	[27][1] = "SDMA0",
145 	[28][1] = "VCN",
146 	[29][1] = "VCNU",
147 	[30][1] = "JPEG",
148 };
149 
150 static const char *mmhub_client_ids_vega10[][2] = {
151 	[0][0] = "MP0",
152 	[1][0] = "UVD",
153 	[2][0] = "UVDU",
154 	[3][0] = "HDP",
155 	[13][0] = "UTCL2",
156 	[14][0] = "OSS",
157 	[15][0] = "SDMA1",
158 	[32+0][0] = "VCE0",
159 	[32+1][0] = "VCE0U",
160 	[32+2][0] = "XDMA",
161 	[32+3][0] = "DCE",
162 	[32+4][0] = "MP1",
163 	[32+14][0] = "SDMA0",
164 	[0][1] = "MP0",
165 	[1][1] = "UVD",
166 	[2][1] = "UVDU",
167 	[3][1] = "DBGU0",
168 	[4][1] = "HDP",
169 	[5][1] = "XDP",
170 	[14][1] = "OSS",
171 	[15][1] = "SDMA0",
172 	[32+0][1] = "VCE0",
173 	[32+1][1] = "VCE0U",
174 	[32+2][1] = "XDMA",
175 	[32+3][1] = "DCE",
176 	[32+4][1] = "DCEDWB",
177 	[32+5][1] = "MP1",
178 	[32+6][1] = "DBGU1",
179 	[32+14][1] = "SDMA1",
180 };
181 
182 static const char *mmhub_client_ids_vega12[][2] = {
183 	[0][0] = "MP0",
184 	[1][0] = "VCE0",
185 	[2][0] = "VCE0U",
186 	[3][0] = "HDP",
187 	[13][0] = "UTCL2",
188 	[14][0] = "OSS",
189 	[15][0] = "SDMA1",
190 	[32+0][0] = "DCE",
191 	[32+1][0] = "XDMA",
192 	[32+2][0] = "UVD",
193 	[32+3][0] = "UVDU",
194 	[32+4][0] = "MP1",
195 	[32+15][0] = "SDMA0",
196 	[0][1] = "MP0",
197 	[1][1] = "VCE0",
198 	[2][1] = "VCE0U",
199 	[3][1] = "DBGU0",
200 	[4][1] = "HDP",
201 	[5][1] = "XDP",
202 	[14][1] = "OSS",
203 	[15][1] = "SDMA0",
204 	[32+0][1] = "DCE",
205 	[32+1][1] = "DCEDWB",
206 	[32+2][1] = "XDMA",
207 	[32+3][1] = "UVD",
208 	[32+4][1] = "UVDU",
209 	[32+5][1] = "MP1",
210 	[32+6][1] = "DBGU1",
211 	[32+15][1] = "SDMA1",
212 };
213 
214 static const char *mmhub_client_ids_vega20[][2] = {
215 	[0][0] = "XDMA",
216 	[1][0] = "DCE",
217 	[2][0] = "VCE0",
218 	[3][0] = "VCE0U",
219 	[4][0] = "UVD",
220 	[5][0] = "UVD1U",
221 	[13][0] = "OSS",
222 	[14][0] = "HDP",
223 	[15][0] = "SDMA0",
224 	[32+0][0] = "UVD",
225 	[32+1][0] = "UVDU",
226 	[32+2][0] = "MP1",
227 	[32+3][0] = "MP0",
228 	[32+12][0] = "UTCL2",
229 	[32+14][0] = "SDMA1",
230 	[0][1] = "XDMA",
231 	[1][1] = "DCE",
232 	[2][1] = "DCEDWB",
233 	[3][1] = "VCE0",
234 	[4][1] = "VCE0U",
235 	[5][1] = "UVD1",
236 	[6][1] = "UVD1U",
237 	[7][1] = "DBGU0",
238 	[8][1] = "XDP",
239 	[13][1] = "OSS",
240 	[14][1] = "HDP",
241 	[15][1] = "SDMA0",
242 	[32+0][1] = "UVD",
243 	[32+1][1] = "UVDU",
244 	[32+2][1] = "DBGU1",
245 	[32+3][1] = "MP1",
246 	[32+4][1] = "MP0",
247 	[32+14][1] = "SDMA1",
248 };
249 
250 static const char *mmhub_client_ids_arcturus[][2] = {
251 	[0][0] = "DBGU1",
252 	[1][0] = "XDP",
253 	[2][0] = "MP1",
254 	[14][0] = "HDP",
255 	[171][0] = "JPEG",
256 	[172][0] = "VCN",
257 	[173][0] = "VCNU",
258 	[203][0] = "JPEG1",
259 	[204][0] = "VCN1",
260 	[205][0] = "VCN1U",
261 	[256][0] = "SDMA0",
262 	[257][0] = "SDMA1",
263 	[258][0] = "SDMA2",
264 	[259][0] = "SDMA3",
265 	[260][0] = "SDMA4",
266 	[261][0] = "SDMA5",
267 	[262][0] = "SDMA6",
268 	[263][0] = "SDMA7",
269 	[384][0] = "OSS",
270 	[0][1] = "DBGU1",
271 	[1][1] = "XDP",
272 	[2][1] = "MP1",
273 	[14][1] = "HDP",
274 	[171][1] = "JPEG",
275 	[172][1] = "VCN",
276 	[173][1] = "VCNU",
277 	[203][1] = "JPEG1",
278 	[204][1] = "VCN1",
279 	[205][1] = "VCN1U",
280 	[256][1] = "SDMA0",
281 	[257][1] = "SDMA1",
282 	[258][1] = "SDMA2",
283 	[259][1] = "SDMA3",
284 	[260][1] = "SDMA4",
285 	[261][1] = "SDMA5",
286 	[262][1] = "SDMA6",
287 	[263][1] = "SDMA7",
288 	[384][1] = "OSS",
289 };
290 
291 static const char *mmhub_client_ids_aldebaran[][2] = {
292 	[2][0] = "MP1",
293 	[3][0] = "MP0",
294 	[32+1][0] = "DBGU_IO0",
295 	[32+2][0] = "DBGU_IO2",
296 	[32+4][0] = "MPIO",
297 	[96+11][0] = "JPEG0",
298 	[96+12][0] = "VCN0",
299 	[96+13][0] = "VCNU0",
300 	[128+11][0] = "JPEG1",
301 	[128+12][0] = "VCN1",
302 	[128+13][0] = "VCNU1",
303 	[160+1][0] = "XDP",
304 	[160+14][0] = "HDP",
305 	[256+0][0] = "SDMA0",
306 	[256+1][0] = "SDMA1",
307 	[256+2][0] = "SDMA2",
308 	[256+3][0] = "SDMA3",
309 	[256+4][0] = "SDMA4",
310 	[384+0][0] = "OSS",
311 	[2][1] = "MP1",
312 	[3][1] = "MP0",
313 	[32+1][1] = "DBGU_IO0",
314 	[32+2][1] = "DBGU_IO2",
315 	[32+4][1] = "MPIO",
316 	[96+11][1] = "JPEG0",
317 	[96+12][1] = "VCN0",
318 	[96+13][1] = "VCNU0",
319 	[128+11][1] = "JPEG1",
320 	[128+12][1] = "VCN1",
321 	[128+13][1] = "VCNU1",
322 	[160+1][1] = "XDP",
323 	[160+14][1] = "HDP",
324 	[256+0][1] = "SDMA0",
325 	[256+1][1] = "SDMA1",
326 	[256+2][1] = "SDMA2",
327 	[256+3][1] = "SDMA3",
328 	[256+4][1] = "SDMA4",
329 	[384+0][1] = "OSS",
330 };
331 
332 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
333 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
334 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
335 };
336 
337 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
338 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
339 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
340 };
341 
342 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
343 	(0x000143c0 + 0x00000000),
344 	(0x000143c0 + 0x00000800),
345 	(0x000143c0 + 0x00001000),
346 	(0x000143c0 + 0x00001800),
347 	(0x000543c0 + 0x00000000),
348 	(0x000543c0 + 0x00000800),
349 	(0x000543c0 + 0x00001000),
350 	(0x000543c0 + 0x00001800),
351 	(0x000943c0 + 0x00000000),
352 	(0x000943c0 + 0x00000800),
353 	(0x000943c0 + 0x00001000),
354 	(0x000943c0 + 0x00001800),
355 	(0x000d43c0 + 0x00000000),
356 	(0x000d43c0 + 0x00000800),
357 	(0x000d43c0 + 0x00001000),
358 	(0x000d43c0 + 0x00001800),
359 	(0x001143c0 + 0x00000000),
360 	(0x001143c0 + 0x00000800),
361 	(0x001143c0 + 0x00001000),
362 	(0x001143c0 + 0x00001800),
363 	(0x001543c0 + 0x00000000),
364 	(0x001543c0 + 0x00000800),
365 	(0x001543c0 + 0x00001000),
366 	(0x001543c0 + 0x00001800),
367 	(0x001943c0 + 0x00000000),
368 	(0x001943c0 + 0x00000800),
369 	(0x001943c0 + 0x00001000),
370 	(0x001943c0 + 0x00001800),
371 	(0x001d43c0 + 0x00000000),
372 	(0x001d43c0 + 0x00000800),
373 	(0x001d43c0 + 0x00001000),
374 	(0x001d43c0 + 0x00001800),
375 };
376 
377 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
378 	(0x000143e0 + 0x00000000),
379 	(0x000143e0 + 0x00000800),
380 	(0x000143e0 + 0x00001000),
381 	(0x000143e0 + 0x00001800),
382 	(0x000543e0 + 0x00000000),
383 	(0x000543e0 + 0x00000800),
384 	(0x000543e0 + 0x00001000),
385 	(0x000543e0 + 0x00001800),
386 	(0x000943e0 + 0x00000000),
387 	(0x000943e0 + 0x00000800),
388 	(0x000943e0 + 0x00001000),
389 	(0x000943e0 + 0x00001800),
390 	(0x000d43e0 + 0x00000000),
391 	(0x000d43e0 + 0x00000800),
392 	(0x000d43e0 + 0x00001000),
393 	(0x000d43e0 + 0x00001800),
394 	(0x001143e0 + 0x00000000),
395 	(0x001143e0 + 0x00000800),
396 	(0x001143e0 + 0x00001000),
397 	(0x001143e0 + 0x00001800),
398 	(0x001543e0 + 0x00000000),
399 	(0x001543e0 + 0x00000800),
400 	(0x001543e0 + 0x00001000),
401 	(0x001543e0 + 0x00001800),
402 	(0x001943e0 + 0x00000000),
403 	(0x001943e0 + 0x00000800),
404 	(0x001943e0 + 0x00001000),
405 	(0x001943e0 + 0x00001800),
406 	(0x001d43e0 + 0x00000000),
407 	(0x001d43e0 + 0x00000800),
408 	(0x001d43e0 + 0x00001000),
409 	(0x001d43e0 + 0x00001800),
410 };
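/*
 * Editor's sketch, not part of the driver: the two tables above follow a
 * regular pattern -- eight UMC instances with a 0x40000 stride, four
 * channels per instance with a 0x800 stride, and the mask register at a
 * fixed +0x20 from the control register. The same addresses could be
 * derived arithmetically:
 */
static inline uint32_t ecc_umc_mcumc_ctrl_addr(unsigned int umc_inst,
					       unsigned int channel)
{
	return 0x000143c0 + umc_inst * 0x00040000 + channel * 0x00000800;
}

static inline uint32_t ecc_umc_mcumc_ctrl_mask_addr(unsigned int umc_inst,
						    unsigned int channel)
{
	return ecc_umc_mcumc_ctrl_addr(umc_inst, channel) + 0x20;
}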
411 
412 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
413 		struct amdgpu_irq_src *src,
414 		unsigned int type,
415 		enum amdgpu_interrupt_state state)
416 {
417 	u32 bits, i, tmp, reg;
418 
419 	/* Devices newer than VEGA10/12 shall have these programming
420 	 * sequences performed by PSP BL
421 	 */
422 	if (adev->asic_type >= CHIP_VEGA20)
423 		return 0;
424 
425 	bits = 0x7f;
426 
427 	switch (state) {
428 	case AMDGPU_IRQ_STATE_DISABLE:
429 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
430 			reg = ecc_umc_mcumc_ctrl_addrs[i];
431 			tmp = RREG32(reg);
432 			tmp &= ~bits;
433 			WREG32(reg, tmp);
434 		}
435 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
436 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
437 			tmp = RREG32(reg);
438 			tmp &= ~bits;
439 			WREG32(reg, tmp);
440 		}
441 		break;
442 	case AMDGPU_IRQ_STATE_ENABLE:
443 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
444 			reg = ecc_umc_mcumc_ctrl_addrs[i];
445 			tmp = RREG32(reg);
446 			tmp |= bits;
447 			WREG32(reg, tmp);
448 		}
449 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
450 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
451 			tmp = RREG32(reg);
452 			tmp |= bits;
453 			WREG32(reg, tmp);
454 		}
455 		break;
456 	default:
457 		break;
458 	}
459 
460 	return 0;
461 }
462 
463 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
464 					struct amdgpu_irq_src *src,
465 					unsigned int type,
466 					enum amdgpu_interrupt_state state)
467 {
468 	struct amdgpu_vmhub *hub;
469 	u32 tmp, reg, bits, i, j;
470 
471 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
472 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
473 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
474 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
475 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
477 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
478 
479 	switch (state) {
480 	case AMDGPU_IRQ_STATE_DISABLE:
481 		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
482 			hub = &adev->vmhub[j];
483 			for (i = 0; i < 16; i++) {
484 				reg = hub->vm_context0_cntl + i;
485 
486 				/* This works because this interrupt is only
487 				 * enabled at init/resume and disabled in
488 				 * fini/suspend, so the overall state doesn't
489 				 * change over the course of suspend/resume.
490 				 */
491 				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
492 					continue;
493 
494 				if (j >= AMDGPU_MMHUB0(0))
495 					tmp = RREG32_SOC15_IP(MMHUB, reg);
496 				else
497 					tmp = RREG32_XCC(reg, j);
498 
499 				tmp &= ~bits;
500 
501 				if (j >= AMDGPU_MMHUB0(0))
502 					WREG32_SOC15_IP(MMHUB, reg, tmp);
503 				else
504 					WREG32_XCC(reg, tmp, j);
505 			}
506 		}
507 		break;
508 	case AMDGPU_IRQ_STATE_ENABLE:
509 		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
510 			hub = &adev->vmhub[j];
511 			for (i = 0; i < 16; i++) {
512 				reg = hub->vm_context0_cntl + i;
513 
514 				/* This works because this interrupt is only
515 				 * enabled at init/resume and disabled in
516 				 * fini/suspend, so the overall state doesn't
517 				 * change over the course of suspend/resume.
518 				 */
519 				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
520 					continue;
521 
522 				if (j >= AMDGPU_MMHUB0(0))
523 					tmp = RREG32_SOC15_IP(MMHUB, reg);
524 				else
525 					tmp = RREG32_XCC(reg, j);
526 
527 				tmp |= bits;
528 
529 				if (j >= AMDGPU_MMHUB0(0))
530 					WREG32_SOC15_IP(MMHUB, reg, tmp);
531 				else
532 					WREG32_XCC(reg, tmp, j);
533 			}
534 		}
535 		break;
536 	default:
537 		break;
538 	}
539 
540 	return 0;
541 }
542 
543 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
544 				      struct amdgpu_irq_src *source,
545 				      struct amdgpu_iv_entry *entry)
546 {
547 	bool retry_fault = !!(entry->src_data[1] & 0x80);
548 	bool write_fault = !!(entry->src_data[1] & 0x20);
549 	uint32_t status = 0, cid = 0, rw = 0, fed = 0;
550 	struct amdgpu_task_info *task_info;
551 	struct amdgpu_vmhub *hub;
552 	const char *mmhub_cid;
553 	const char *hub_name;
554 	unsigned int vmhub;
555 	u64 addr;
556 	uint32_t cam_index = 0;
557 	int ret, xcc_id = 0;
558 	uint32_t node_id;
559 
560 	node_id = entry->node_id;
561 
562 	addr = (u64)entry->src_data[0] << 12;
563 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
564 
565 	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
566 		hub_name = "mmhub0";
567 		vmhub = AMDGPU_MMHUB0(node_id / 4);
568 	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
569 		hub_name = "mmhub1";
570 		vmhub = AMDGPU_MMHUB1(0);
571 	} else {
572 		hub_name = "gfxhub0";
573 		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
574 			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
575 				node_id);
576 			if (xcc_id < 0)
577 				xcc_id = 0;
578 		}
579 		vmhub = xcc_id;
580 	}
581 	hub = &adev->vmhub[vmhub];
582 
583 	if (retry_fault) {
584 		if (adev->irq.retry_cam_enabled) {
585 			/* Delegate it to a different ring if the hardware hasn't
586 			 * already done it.
587 			 */
588 			if (entry->ih == &adev->irq.ih) {
589 				amdgpu_irq_delegate(adev, entry, 8);
590 				return 1;
591 			}
592 
593 			cam_index = entry->src_data[2] & 0x3ff;
594 
595 			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
596 						     addr, entry->timestamp, write_fault);
597 			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
598 			if (ret)
599 				return 1;
600 		} else {
601 			/* Process it only if it's the first fault for this address */
602 			if (entry->ih != &adev->irq.ih_soft &&
603 			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
604 					     entry->timestamp))
605 				return 1;
606 
607 			/* Delegate it to a different ring if the hardware hasn't
608 			 * already done it.
609 			 */
610 			if (entry->ih == &adev->irq.ih) {
611 				amdgpu_irq_delegate(adev, entry, 8);
612 				return 1;
613 			}
614 
615 			/* Try to handle the recoverable page faults by filling page
616 			 * tables
617 			 */
618 			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
619 						   addr, entry->timestamp, write_fault))
620 				return 1;
621 		}
622 	}
623 
624 	if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
625 		return 1;
626 
627 	if (!printk_ratelimit())
628 		return 0;
629 
630 	dev_err(adev->dev,
631 		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
632 		retry_fault ? "retry" : "no-retry",
633 		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
634 
635 	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
636 	if (task_info) {
637 		amdgpu_vm_print_task_info(adev, task_info);
638 		amdgpu_vm_put_task_info(task_info);
639 	}
640 
641 	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
642 		addr, entry->client_id,
643 		soc15_ih_clientid_name[entry->client_id]);
644 
645 	if (amdgpu_is_multi_aid(adev))
646 		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
647 			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
648 			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
649 
650 	if (amdgpu_sriov_vf(adev))
651 		return 0;
652 
653 	/*
654 	 * Issue a dummy read to wait for the status register to
655 	 * be updated to avoid reading an incorrect value due to
656 	 * the new fast GRBM interface.
657 	 */
658 	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
659 	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
660 		RREG32(hub->vm_l2_pro_fault_status);
661 
662 	status = RREG32(hub->vm_l2_pro_fault_status);
663 	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
664 	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
665 	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
666 
667 	/* For a FED error, KFD will handle it; return directly */
668 	if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
669 	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
670 		return 0;
671 
672 	/* Only print L2 fault status if the status register could be read and
673 	 * contains useful information
674 	 */
675 	if (!status)
676 		return 0;
677 
678 	if (!amdgpu_sriov_vf(adev))
679 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
680 
681 	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
682 
683 	dev_err(adev->dev,
684 		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
685 		status);
686 	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
687 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
688 			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
689 			gfxhub_client_ids[cid],
690 			cid);
691 	} else {
692 		switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
693 		case IP_VERSION(9, 0, 0):
694 			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
695 			break;
696 		case IP_VERSION(9, 3, 0):
697 			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
698 			break;
699 		case IP_VERSION(9, 4, 0):
700 			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
701 			break;
702 		case IP_VERSION(9, 4, 1):
703 			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
704 			break;
705 		case IP_VERSION(9, 1, 0):
706 		case IP_VERSION(9, 2, 0):
707 			mmhub_cid = mmhub_client_ids_raven[cid][rw];
708 			break;
709 		case IP_VERSION(1, 5, 0):
710 		case IP_VERSION(2, 4, 0):
711 			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
712 			break;
713 		case IP_VERSION(1, 8, 0):
714 		case IP_VERSION(9, 4, 2):
715 			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
716 			break;
717 		default:
718 			mmhub_cid = NULL;
719 			break;
720 		}
721 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
722 			mmhub_cid ? mmhub_cid : "unknown", cid);
723 	}
724 	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
725 		REG_GET_FIELD(status,
726 		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
727 	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
728 		REG_GET_FIELD(status,
729 		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
730 	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
731 		REG_GET_FIELD(status,
732 		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
733 	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
734 		REG_GET_FIELD(status,
735 		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
736 	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
737 	return 0;
738 }
739 
740 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
741 	.set = gmc_v9_0_vm_fault_interrupt_state,
742 	.process = gmc_v9_0_process_interrupt,
743 };
744 
745 
746 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
747 	.set = gmc_v9_0_ecc_interrupt_state,
748 	.process = amdgpu_umc_process_ecc_irq,
749 };
750 
751 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
752 {
753 	adev->gmc.vm_fault.num_types = 1;
754 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
755 
756 	if (!amdgpu_sriov_vf(adev) &&
757 	    !adev->gmc.xgmi.connected_to_cpu &&
758 	    !adev->gmc.is_app_apu) {
759 		adev->gmc.ecc_irq.num_types = 1;
760 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
761 	}
762 }
763 
764 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
765 					uint32_t flush_type)
766 {
767 	u32 req = 0;
768 
769 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
770 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
771 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
772 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
773 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
774 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
775 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
776 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
777 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
778 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
779 
780 	return req;
781 }
782 
783 /**
784  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
785  *
786  * @adev: amdgpu_device pointer
787  * @vmhub: vmhub type
788  *
789  */
790 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
791 				       uint32_t vmhub)
792 {
793 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
794 	    amdgpu_is_multi_aid(adev))
795 		return false;
796 
797 	return ((vmhub == AMDGPU_MMHUB0(0) ||
798 		 vmhub == AMDGPU_MMHUB1(0)) &&
799 		(!amdgpu_sriov_vf(adev)) &&
800 		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
801 		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
802 }
803 
804 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
805 					uint8_t vmid, uint16_t *p_pasid)
806 {
807 	uint32_t value;
808 
809 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
810 		     + vmid);
811 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
812 
813 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
814 }
815 
816 /*
817  * GART
818  * VMID 0 is the physical GPU addresses as used by the kernel.
819  * VMIDs 1-15 are used for userspace clients and are handled
820  * by the amdgpu vm/hsa code.
821  */
822 
823 /**
824  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
825  *
826  * @adev: amdgpu_device pointer
827  * @vmid: vm instance to flush
828  * @vmhub: which hub to flush
829  * @flush_type: the flush type
830  *
831  * Flush the TLB for the requested page table using the given flush type.
832  */
833 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
834 					uint32_t vmhub, uint32_t flush_type)
835 {
836 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
837 	u32 j, inv_req, tmp, sem, req, ack, inst;
838 	const unsigned int eng = 17;
839 	struct amdgpu_vmhub *hub;
840 
841 	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
842 
843 	hub = &adev->vmhub[vmhub];
844 	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
845 	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
846 	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
847 	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
848 
849 	if (vmhub >= AMDGPU_MMHUB0(0))
850 		inst = 0;
851 	else
852 		inst = vmhub;
853 
854 	/* This is necessary for SRIOV as well as for GFXOFF to function
855 	 * properly under bare metal
856 	 */
857 	if (adev->gfx.kiq[inst].ring.sched.ready &&
858 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
859 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
860 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
861 
862 		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
863 						 1 << vmid, inst);
864 		return;
865 	}
866 
867 	/* This path is needed before KIQ/MES/GFXOFF are set up */
868 	spin_lock(&adev->gmc.invalidate_lock);
869 
870 	/*
871 	 * The GPU may lose the gpuvm invalidate acknowledge state across a
872 	 * power-gating cycle, so acquire a semaphore before the invalidation
873 	 * and release it afterwards to avoid entering a power-gated state
874 	 * while the invalidation is in flight (workaround for this issue).
875 	 */
876 
877 	/* TODO: The semaphore path for GFXHUB still needs further debugging. */
878 	if (use_semaphore) {
879 		for (j = 0; j < adev->usec_timeout; j++) {
880 			/* a read return value of 1 means the semaphore was acquired */
881 			if (vmhub >= AMDGPU_MMHUB0(0))
882 				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst));
883 			else
884 				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst));
885 			if (tmp & 0x1)
886 				break;
887 			udelay(1);
888 		}
889 
890 		if (j >= adev->usec_timeout)
891 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
892 	}
893 
894 	if (vmhub >= AMDGPU_MMHUB0(0))
895 		WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst));
896 	else
897 		WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst));
898 
899 	/*
900 	 * Issue a dummy read to wait for the ACK register to
901 	 * be cleared to avoid a false ACK due to the new fast
902 	 * GRBM interface.
903 	 */
904 	if ((vmhub == AMDGPU_GFXHUB(0)) &&
905 	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
906 		RREG32_NO_KIQ(req);
907 
908 	for (j = 0; j < adev->usec_timeout; j++) {
909 		if (vmhub >= AMDGPU_MMHUB0(0))
910 			tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst));
911 		else
912 			tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst));
913 		if (tmp & (1 << vmid))
914 			break;
915 		udelay(1);
916 	}
917 
918 	/* TODO: The semaphore path for GFXHUB still needs further debugging. */
919 	if (use_semaphore) {
920 		/*
921 		 * Release the semaphore after the invalidation:
922 		 * a write of 0 releases it.
923 		 */
924 		if (vmhub >= AMDGPU_MMHUB0(0))
925 			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst));
926 		else
927 			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst));
928 	}
929 
930 	spin_unlock(&adev->gmc.invalidate_lock);
931 
932 	if (j < adev->usec_timeout)
933 		return;
934 
935 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
936 }
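/*
 * Editor's sketch, not part of the driver: stripped of the semaphore
 * workaround and the per-hub register plumbing, the invalidation above is
 * a plain "write request, poll acknowledge bit with timeout" idiom.
 * read_reg/write_reg are placeholders standing in for the RREG32/WREG32
 * variants used above:
 */
static int flush_req_ack_sketch(uint32_t req_reg, uint32_t inv_req,
				uint32_t ack_reg, uint32_t vmid,
				unsigned int timeout_us,
				uint32_t (*read_reg)(uint32_t),
				void (*write_reg)(uint32_t, uint32_t))
{
	unsigned int i;

	write_reg(req_reg, inv_req);
	for (i = 0; i < timeout_us; i++) {
		if (read_reg(ack_reg) & (1 << vmid))
			return 0;	/* per-VMID ack bit observed */
		udelay(1);
	}
	return -ETIMEDOUT;	/* mirrors the DRM_ERROR path above */
}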
937 
938 /**
939  * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
940  *
941  * @adev: amdgpu_device pointer
942  * @pasid: pasid to be flushed
943  * @flush_type: the flush type
944  * @all_hub: flush all hubs
945  * @inst: is used to select which instance of KIQ to use for the invalidation
946  *
947  * Flush the TLB for the requested pasid.
948  */
949 static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
950 					 uint16_t pasid, uint32_t flush_type,
951 					 bool all_hub, uint32_t inst)
952 {
953 	uint16_t queried;
954 	int i, vmid;
955 
956 	for (vmid = 1; vmid < 16; vmid++) {
957 		bool valid;
958 
959 		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
960 								 &queried);
961 		if (!valid || queried != pasid)
962 			continue;
963 
964 		if (all_hub) {
965 			for_each_set_bit(i, adev->vmhubs_mask,
966 					 AMDGPU_MAX_VMHUBS)
967 				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
968 						       flush_type);
969 		} else {
970 			gmc_v9_0_flush_gpu_tlb(adev, vmid,
971 					       AMDGPU_GFXHUB(0),
972 					       flush_type);
973 		}
974 	}
975 }
976 
977 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
978 					    unsigned int vmid, uint64_t pd_addr)
979 {
980 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
981 	struct amdgpu_device *adev = ring->adev;
982 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
983 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
984 	unsigned int eng = ring->vm_inv_eng;
985 
986 	/*
987 	 * The GPU may lose the gpuvm invalidate acknowledge state across a
988 	 * power-gating cycle, so acquire a semaphore before the invalidation
989 	 * and release it afterwards to avoid entering a power-gated state
990 	 * while the invalidation is in flight (workaround for this issue).
991 	 */
992 
993 	/* TODO: The semaphore path for GFXHUB still needs further debugging. */
994 	if (use_semaphore)
995 		/* a read return value of 1 means the semaphore was acquired */
996 		amdgpu_ring_emit_reg_wait(ring,
997 					  hub->vm_inv_eng0_sem +
998 					  hub->eng_distance * eng, 0x1, 0x1);
999 
1000 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
1001 			      (hub->ctx_addr_distance * vmid),
1002 			      lower_32_bits(pd_addr));
1003 
1004 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
1005 			      (hub->ctx_addr_distance * vmid),
1006 			      upper_32_bits(pd_addr));
1007 
1008 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
1009 					    hub->eng_distance * eng,
1010 					    hub->vm_inv_eng0_ack +
1011 					    hub->eng_distance * eng,
1012 					    req, 1 << vmid);
1013 
1014 	/* TODO: The semaphore path for GFXHUB still needs further debugging. */
1015 	if (use_semaphore)
1016 		/*
1017 		 * Release the semaphore after the invalidation:
1018 		 * a write of 0 releases it.
1019 		 */
1020 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1021 				      hub->eng_distance * eng, 0);
1022 
1023 	return pd_addr;
1024 }
1025 
1026 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
1027 					unsigned int pasid)
1028 {
1029 	struct amdgpu_device *adev = ring->adev;
1030 	uint32_t reg;
1031 
1032 	/* Do nothing because there's no lut register for mmhub1. */
1033 	if (ring->vm_hub == AMDGPU_MMHUB1(0))
1034 		return;
1035 
1036 	if (ring->vm_hub == AMDGPU_GFXHUB(0))
1037 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1038 	else
1039 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1040 
1041 	amdgpu_ring_emit_wreg(ring, reg, pasid);
1042 }
1043 
1044 /*
1045  * PTE format on VEGA 10:
1046  * 63:59 reserved
1047  * 58:57 mtype
1048  * 56 F
1049  * 55 L
1050  * 54 P
1051  * 53 SW
1052  * 52 T
1053  * 50:48 reserved
1054  * 47:12 4k physical page base address
1055  * 11:7 fragment
1056  * 6 write
1057  * 5 read
1058  * 4 exe
1059  * 3 Z
1060  * 2 snooped
1061  * 1 system
1062  * 0 valid
1063  *
1064  * PDE format on VEGA 10:
1065  * 63:59 block fragment size
1066  * 58:55 reserved
1067  * 54 P
1068  * 53:48 reserved
1069  * 47:6 physical base address of PD or PTE
1070  * 5:3 reserved
1071  * 2 C
1072  * 1 system
1073  * 0 valid
1074  */
1075 
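/*
 * Editor's worked example, not part of the driver: encoding a minimal
 * valid, snooped, readable/writable system-memory PTE for a 4K page at
 * physical address 0x1234000, using the VEGA10 bit layout documented
 * above. The real code builds these through the AMDGPU_PTE_* helpers.
 */
static inline uint64_t vega10_pte_example(void)
{
	uint64_t pte = 0;

	pte |= 1ULL << 0;			/* valid */
	pte |= 1ULL << 1;			/* system */
	pte |= 1ULL << 2;			/* snooped */
	pte |= 1ULL << 5;			/* read */
	pte |= 1ULL << 6;			/* write */
	pte |= 0x1234000ULL & 0x0000FFFFFFFFF000ULL; /* 47:12 page base */

	return pte;
}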
1076 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1077 
1078 {
1079 	switch (flags) {
1080 	case AMDGPU_VM_MTYPE_DEFAULT:
1081 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1082 	case AMDGPU_VM_MTYPE_NC:
1083 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1084 	case AMDGPU_VM_MTYPE_WC:
1085 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
1086 	case AMDGPU_VM_MTYPE_RW:
1087 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
1088 	case AMDGPU_VM_MTYPE_CC:
1089 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
1090 	case AMDGPU_VM_MTYPE_UC:
1091 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
1092 	default:
1093 		return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1094 	}
1095 }
1096 
1097 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
1098 				uint64_t *addr, uint64_t *flags)
1099 {
1100 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1101 		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
1102 	BUG_ON(*addr & 0xFFFF00000000003FULL);
1103 
1104 	if (!adev->gmc.translate_further)
1105 		return;
1106 
1107 	if (level == AMDGPU_VM_PDB1) {
1108 		/* Set the block fragment size */
1109 		if (!(*flags & AMDGPU_PDE_PTE))
1110 			*flags |= AMDGPU_PDE_BFS(0x9);
1111 
1112 	} else if (level == AMDGPU_VM_PDB0) {
1113 		if (*flags & AMDGPU_PDE_PTE) {
1114 			*flags &= ~AMDGPU_PDE_PTE;
1115 			if (!(*flags & AMDGPU_PTE_VALID))
1116 				*addr |= 1 << PAGE_SHIFT;
1117 		} else {
1118 			*flags |= AMDGPU_PTE_TF;
1119 		}
1120 	}
1121 }
1122 
1123 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
1124 					 struct amdgpu_vm *vm,
1125 					 struct amdgpu_bo *bo,
1126 					 uint64_t *flags)
1127 {
1128 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1129 	bool is_vram = bo->tbo.resource &&
1130 		bo->tbo.resource->mem_type == TTM_PL_VRAM;
1131 	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
1132 				     AMDGPU_GEM_CREATE_EXT_COHERENT);
1133 	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
1134 	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
1135 	unsigned int mtype_local, mtype;
1136 	uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
1137 	bool snoop = false;
1138 	bool is_local;
1139 
1140 	dma_resv_assert_held(bo->tbo.base.resv);
1141 
1142 	switch (gc_ip_version) {
1143 	case IP_VERSION(9, 4, 1):
1144 	case IP_VERSION(9, 4, 2):
1145 		if (is_vram) {
1146 			if (bo_adev == adev) {
1147 				if (uncached)
1148 					mtype = MTYPE_UC;
1149 				else if (coherent)
1150 					mtype = MTYPE_CC;
1151 				else
1152 					mtype = MTYPE_RW;
1153 				/* FIXME: is this still needed? Or does
1154 				 * amdgpu_ttm_tt_pde_flags already handle this?
1155 				 */
1156 				if (gc_ip_version == IP_VERSION(9, 4, 2) &&
1157 				    adev->gmc.xgmi.connected_to_cpu)
1158 					snoop = true;
1159 			} else {
1160 				if (uncached || coherent)
1161 					mtype = MTYPE_UC;
1162 				else
1163 					mtype = MTYPE_NC;
1164 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
1165 					snoop = true;
1166 			}
1167 		} else {
1168 			if (uncached || coherent)
1169 				mtype = MTYPE_UC;
1170 			else
1171 				mtype = MTYPE_NC;
1172 			/* FIXME: is this still needed? Or does
1173 			 * amdgpu_ttm_tt_pde_flags already handle this?
1174 			 */
1175 			snoop = true;
1176 		}
1177 		break;
1178 	case IP_VERSION(9, 4, 3):
1179 	case IP_VERSION(9, 4, 4):
1180 	case IP_VERSION(9, 5, 0):
1181 		/* Only local VRAM BOs or system memory on non-NUMA APUs
1182 		 * can be assumed to be local in their entirety. Choose
1183 		 * MTYPE_NC as safe fallback for all system memory BOs on
1184 		 * NUMA systems. Their MTYPE can be overridden per-page in
1185 		 * gmc_v9_0_override_vm_pte_flags.
1186 		 */
1187 		mtype_local = MTYPE_RW;
1188 		if (amdgpu_mtype_local == 1) {
1189 			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
1190 			mtype_local = MTYPE_NC;
1191 		} else if (amdgpu_mtype_local == 2) {
1192 			DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
1193 			mtype_local = MTYPE_CC;
1194 		} else {
1195 			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
1196 		}
1197 		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
1198 			    num_possible_nodes() <= 1) ||
1199 			   (is_vram && adev == bo_adev &&
1200 			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
1201 		snoop = true;
1202 		if (uncached) {
1203 			mtype = MTYPE_UC;
1204 		} else if (ext_coherent) {
1205 			mtype = is_local ? MTYPE_CC : MTYPE_UC;
1206 		} else if (adev->flags & AMD_IS_APU) {
1207 			mtype = is_local ? mtype_local : MTYPE_NC;
1208 		} else {
1209 			/* dGPU */
1210 			if (is_local)
1211 				mtype = mtype_local;
1212 			else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram)
1213 				mtype = MTYPE_UC;
1214 			else
1215 				mtype = MTYPE_NC;
1216 		}
1217 
1218 		break;
1219 	default:
1220 		if (uncached || coherent)
1221 			mtype = MTYPE_UC;
1222 		else
1223 			mtype = MTYPE_NC;
1224 
1225 		/* FIXME: is this still needed? Or does
1226 		 * amdgpu_ttm_tt_pde_flags already handle this?
1227 		 */
1228 		if (!is_vram)
1229 			snoop = true;
1230 	}
1231 
1232 	if (mtype != MTYPE_NC)
1233 		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype);
1234 
1235 	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1236 }
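/*
 * Editor's summary, not part of the driver: the 9.4.1/9.4.2 branch of the
 * function above reduces to the following decision table (derived directly
 * from the code):
 *
 *   placement       uncached   coherent   default   snoop
 *   local VRAM      UC         CC         RW        only 9.4.2 + cpu xgmi
 *   remote VRAM     UC         UC         NC        only same xgmi hive
 *   system memory   UC         UC         NC        always
 */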
1237 
1238 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1239 				struct amdgpu_bo_va_mapping *mapping,
1240 				uint64_t *flags)
1241 {
1242 	struct amdgpu_bo *bo = mapping->bo_va->base.bo;
1243 
1244 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
1245 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1246 
1247 	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1248 	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1249 
1250 	if (mapping->flags & AMDGPU_PTE_PRT) {
1251 		*flags |= AMDGPU_PTE_PRT;
1252 		*flags &= ~AMDGPU_PTE_VALID;
1253 	}
1254 
1255 	if ((*flags & AMDGPU_PTE_VALID) && bo)
1256 		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
1257 					     flags);
1258 }
1259 
1260 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
1261 					   struct amdgpu_vm *vm,
1262 					   uint64_t addr, uint64_t *flags)
1263 {
1264 	int local_node, nid;
1265 
1266 	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
1267 	 * memory can use more efficient MTYPEs.
1268 	 */
1269 	if (!(adev->flags & AMD_IS_APU) ||
1270 	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
1271 		return;
1272 
1273 	/* Only direct-mapped memory allows us to determine the NUMA node from
1274 	 * the DMA address.
1275 	 */
1276 	if (!adev->ram_is_direct_mapped) {
1277 		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
1278 		return;
1279 	}
1280 
1281 	/* MTYPE_NC is the default and can be overridden.
1282 	 * MTYPE_UC will be present if the memory is extended-coherent
1283 	 * and can also be overridden.
1284 	 */
1285 	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1286 	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) &&
1287 	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1288 	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) {
1289 		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
1290 		return;
1291 	}
1292 
1293 	/* FIXME: Only supported on native mode for now. For carve-out, the
1294 	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
1295 	 * memory partitions are not associated with different NUMA nodes.
1296 	 */
1297 	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
1298 		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
1299 	} else {
1300 		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
1301 		return;
1302 	}
1303 
1304 	/* Only handle real RAM. Mappings of PCIe resources don't have struct
1305 	 * page or NUMA nodes.
1306 	 */
1307 	if (!page_is_ram(addr >> PAGE_SHIFT)) {
1308 		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
1309 		return;
1310 	}
1311 	nid = pfn_to_nid(addr >> PAGE_SHIFT);
1312 	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
1313 			    vm->mem_id, local_node, nid);
1314 	if (nid == local_node) {
1315 		uint64_t old_flags = *flags;
1316 		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
1317 			AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) {
1318 			unsigned int mtype_local = MTYPE_RW;
1319 
1320 			if (amdgpu_mtype_local == 1)
1321 				mtype_local = MTYPE_NC;
1322 			else if (amdgpu_mtype_local == 2)
1323 				mtype_local = MTYPE_CC;
1324 
1325 			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
1326 		} else {
1327 			/* MTYPE_UC case */
1328 			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
1329 		}
1330 
1331 		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
1332 				    old_flags, *flags);
1333 	}
1334 }
1335 
1336 static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1337 {
1338 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1339 	unsigned int size;
1340 
1341 	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1342 
1343 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1344 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
1345 	} else {
1346 		u32 viewport;
1347 
1348 		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1349 		case IP_VERSION(1, 0, 0):
1350 		case IP_VERSION(1, 0, 1):
1351 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1352 			size = (REG_GET_FIELD(viewport,
1353 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1354 				REG_GET_FIELD(viewport,
1355 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1356 				4);
1357 			break;
1358 		case IP_VERSION(2, 1, 0):
1359 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1360 			size = (REG_GET_FIELD(viewport,
1361 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1362 				REG_GET_FIELD(viewport,
1363 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1364 				4);
1365 			break;
1366 		default:
1367 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1368 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1369 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1370 				4);
1371 			break;
1372 		}
1373 	}
1374 
1375 	return size;
1376 }
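/*
 * Editor's note, not part of the driver: each viewport-based size above is
 * height * width * 4 bytes (32 bpp). For example, a 1920x1080 pre-OS
 * framebuffer reserves 1920 * 1080 * 4 = 8294400 bytes (~7.9 MiB).
 */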
1377 
1378 static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
1379 {
1380 	if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
1381 	    adev->nbio.funcs->is_nps_switch_requested(adev)) {
1382 		adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
1383 		return true;
1384 	}
1385 
1386 	return false;
1387 }
1388 
1389 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1390 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1391 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1392 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1393 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1394 	.map_mtype = gmc_v9_0_map_mtype,
1395 	.get_vm_pde = gmc_v9_0_get_vm_pde,
1396 	.get_vm_pte = gmc_v9_0_get_vm_pte,
1397 	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
1398 	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1399 	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
1400 	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
1401 	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
1402 };
1403 
1404 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1405 {
1406 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1407 }
1408 
1409 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1410 {
1411 	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1412 	case IP_VERSION(6, 0, 0):
1413 		adev->umc.funcs = &umc_v6_0_funcs;
1414 		break;
1415 	case IP_VERSION(6, 1, 1):
1416 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1417 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1418 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1419 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1420 		adev->umc.retire_unit = 1;
1421 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1422 		adev->umc.ras = &umc_v6_1_ras;
1423 		break;
1424 	case IP_VERSION(6, 1, 2):
1425 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1426 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1427 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1428 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1429 		adev->umc.retire_unit = 1;
1430 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1431 		adev->umc.ras = &umc_v6_1_ras;
1432 		break;
1433 	case IP_VERSION(6, 7, 0):
1434 		adev->umc.max_ras_err_cnt_per_query =
1435 			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1436 		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1437 		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1438 		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1439 		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
1440 		if (!adev->gmc.xgmi.connected_to_cpu)
1441 			adev->umc.ras = &umc_v6_7_ras;
1442 		if (1 & adev->smuio.funcs->get_die_id(adev))
1443 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1444 		else
1445 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1446 		break;
1447 	case IP_VERSION(12, 0, 0):
1448 	case IP_VERSION(12, 5, 0):
1449 		adev->umc.max_ras_err_cnt_per_query =
1450 			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
1451 		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
1452 		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
1453 		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
1454 		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
1455 		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1456 			adev->umc.ras = &umc_v12_0_ras;
1457 		break;
1458 	default:
1459 		break;
1460 	}
1461 }
1462 
1463 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1464 {
1465 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1466 	case IP_VERSION(9, 4, 1):
1467 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
1468 		break;
1469 	case IP_VERSION(9, 4, 2):
1470 		adev->mmhub.funcs = &mmhub_v1_7_funcs;
1471 		break;
1472 	case IP_VERSION(1, 8, 0):
1473 	case IP_VERSION(1, 8, 1):
1474 		adev->mmhub.funcs = &mmhub_v1_8_funcs;
1475 		break;
1476 	default:
1477 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
1478 		break;
1479 	}
1480 }
1481 
1482 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1483 {
1484 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1485 	case IP_VERSION(9, 4, 0):
1486 		adev->mmhub.ras = &mmhub_v1_0_ras;
1487 		break;
1488 	case IP_VERSION(9, 4, 1):
1489 		adev->mmhub.ras = &mmhub_v9_4_ras;
1490 		break;
1491 	case IP_VERSION(9, 4, 2):
1492 		adev->mmhub.ras = &mmhub_v1_7_ras;
1493 		break;
1494 	case IP_VERSION(1, 8, 0):
1495 	case IP_VERSION(1, 8, 1):
1496 		adev->mmhub.ras = &mmhub_v1_8_ras;
1497 		break;
1498 	default:
1499 		/* mmhub ras is not available */
1500 		break;
1501 	}
1502 }
1503 
1504 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1505 {
1506 	if (amdgpu_is_multi_aid(adev))
1507 		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
1508 	else
1509 		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1510 }
1511 
1512 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
1513 {
1514 	adev->hdp.ras = &hdp_v4_0_ras;
1515 }
1516 
1517 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
1518 {
1519 	struct amdgpu_mca *mca = &adev->mca;
1520 
1521 	/* is UMC the right IP to check for MCA?  Maybe DF? */
1522 	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1523 	case IP_VERSION(6, 7, 0):
1524 		if (!adev->gmc.xgmi.connected_to_cpu) {
1525 			mca->mp0.ras = &mca_v3_0_mp0_ras;
1526 			mca->mp1.ras = &mca_v3_0_mp1_ras;
1527 			mca->mpio.ras = &mca_v3_0_mpio_ras;
1528 		}
1529 		break;
1530 	default:
1531 		break;
1532 	}
1533 }
1534 
1535 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
1536 {
1537 	if (!adev->gmc.xgmi.connected_to_cpu)
1538 		adev->gmc.xgmi.ras = &xgmi_ras;
1539 }
1540 
1541 static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
1542 {
1543 	enum amdgpu_memory_partition mode;
1544 	uint32_t supp_modes;
1545 	int i;
1546 
1547 	adev->gmc.supported_nps_modes = 0;
1548 
1549 	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
1550 		return;
1551 
1552 	mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
1553 
1554 	/* Mode detected by hardware and supported modes available */
1555 	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
1556 		while ((i = ffs(supp_modes))) {
1557 			if (AMDGPU_ALL_NPS_MASK & BIT(i))
1558 				adev->gmc.supported_nps_modes |= BIT(i);
1559 			supp_modes &= supp_modes - 1;
1560 		}
1561 	} else {
1562 		/* TODO: Also check for a PSP version that supports NPS switch;
1563 		 * otherwise keep supported modes as 0.
1564 		 */
1565 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1566 		case IP_VERSION(9, 4, 3):
1567 		case IP_VERSION(9, 4, 4):
1568 			adev->gmc.supported_nps_modes =
1569 				BIT(AMDGPU_NPS1_PARTITION_MODE) |
1570 				BIT(AMDGPU_NPS4_PARTITION_MODE);
1571 			break;
1572 		default:
1573 			break;
1574 		}
1575 	}
1576 }
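/*
 * Editor's sketch, not part of the driver: the detection loop above walks
 * the set bits of supp_modes with ffs() (1-based index of the lowest set
 * bit, 0 when no bit is set) and the classic "x &= x - 1" lowest-bit-clear
 * trick. In isolation:
 */
static void walk_set_bits_sketch(uint32_t mask)
{
	int i;

	while ((i = ffs(mask))) {
		/* bit (i - 1) of mask is the lowest one still set */
		mask &= mask - 1;	/* clear it and continue */
	}
}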
1577 
1578 static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
1579 {
1580 	struct amdgpu_device *adev = ip_block->adev;
1581 
1582 	/*
1583 	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
1584 	 * in their IP discovery tables
1585 	 */
1586 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
1587 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1588 	    amdgpu_is_multi_aid(adev))
1589 		adev->gmc.xgmi.supported = true;
1590 
1591 	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
1592 		adev->gmc.xgmi.supported = true;
1593 		adev->gmc.xgmi.connected_to_cpu =
1594 			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
1595 	}
1596 
1597 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
1598 		enum amdgpu_pkg_type pkg_type =
1599 			adev->smuio.funcs->get_pkg_type(adev);
1600 		/* On a GFXIP 9.4.3 APU there is no physical VRAM domain
1601 		 * present, and the APU can be used in two possible modes:
1602 		 *  - carveout mode
1603 		 *  - native APU mode
1604 		 * "is_app_apu" can be used to identify an APU running in
1605 		 * native mode.
1606 		 */
1607 		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
1608 					!pci_resource_len(adev->pdev, 0));
1609 	}
1610 
1611 	gmc_v9_0_set_gmc_funcs(adev);
1612 	gmc_v9_0_set_irq_funcs(adev);
1613 	gmc_v9_0_set_umc_funcs(adev);
1614 	gmc_v9_0_set_mmhub_funcs(adev);
1615 	gmc_v9_0_set_mmhub_ras_funcs(adev);
1616 	gmc_v9_0_set_gfxhub_funcs(adev);
1617 	gmc_v9_0_set_hdp_ras_funcs(adev);
1618 	gmc_v9_0_set_mca_ras_funcs(adev);
1619 	gmc_v9_0_set_xgmi_ras_funcs(adev);
1620 
1621 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1622 	adev->gmc.shared_aperture_end =
1623 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1624 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1625 	adev->gmc.private_aperture_end =
1626 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1627 	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
1628 
1629 	return 0;
1630 }
1631 
1632 static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
1633 {
1634 	struct amdgpu_device *adev = ip_block->adev;
1635 	int r;
1636 
1637 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1638 	if (r)
1639 		return r;
1640 
1641 	/*
1642 	 * Workaround for a performance drop issue: the VBIOS enables partial
1643 	 * writes while disabling HBM ECC on vega10.
1644 	 */
1645 	if (!amdgpu_sriov_vf(adev) &&
1646 	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
1647 		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1648 			if (adev->df.funcs &&
1649 			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
1650 				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1651 		}
1652 	}
1653 
1654 	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1655 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
1656 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
1657 	}
1658 
1659 	r = amdgpu_gmc_ras_late_init(adev);
1660 	if (r)
1661 		return r;
1662 
1663 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1664 }
1665 
1666 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1667 					struct amdgpu_gmc *mc)
1668 {
1669 	u64 base = adev->mmhub.funcs->get_fb_location(adev);
1670 
1671 	amdgpu_gmc_set_agp_default(adev, mc);
1672 
1673 	/* add the xgmi offset of the physical node */
1674 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1675 	if (amdgpu_gmc_is_pdb0_enabled(adev)) {
1676 		amdgpu_gmc_sysvm_location(adev, mc);
1677 	} else {
1678 		amdgpu_gmc_vram_location(adev, mc, base);
1679 		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
1680 		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
1681 			amdgpu_gmc_agp_location(adev, mc);
1682 	}
1683 	/* base offset of vram pages */
1684 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1685 
1686 	/* XXX: add the xgmi offset of the physical node? */
1687 	adev->vm_manager.vram_base_offset +=
1688 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1689 }
1690 
1691 /**
1692  * gmc_v9_0_mc_init - initialize the memory controller driver params
1693  *
1694  * @adev: amdgpu_device pointer
1695  *
1696  * Look up the amount of vram, vram width, and decide how to place
1697  * vram and gart within the GPU's physical address space.
1698  * Returns 0 for success.
1699  */
1700 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1701 {
1702 	int r;
1703 
1704 	/* get_memsize() returns the VRAM size in MB */
1705 	if (!adev->gmc.is_app_apu) {
1706 		adev->gmc.mc_vram_size =
1707 			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1708 	} else {
1709 		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
1710 		adev->gmc.mc_vram_size = 0;
1711 	}
1712 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1713 
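	/* Where possible, resize BAR0 so that all of VRAM is CPU-visible.
	 * APUs and CPU-connected xgmi parts are skipped, since their VRAM is
	 * already reachable through the host address space.
	 */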
1714 	if (!(adev->flags & AMD_IS_APU) &&
1715 	    !adev->gmc.xgmi.connected_to_cpu) {
1716 		r = amdgpu_device_resize_fb_bar(adev);
1717 		if (r)
1718 			return r;
1719 	}
1720 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1721 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1722 
1723 #ifdef CONFIG_X86_64
1724 	/*
1725 	 * An AMD Accelerated Processing Platform (APP) that supports the
1726 	 * GPU-HOST xgmi interface can use VRAM through here, as it appears
1727 	 * as system-reserved memory in the host address space.
1728 	 *
1729 	 * For APUs, VRAM is just the stolen system memory and can be accessed
1730 	 * directly.
1731 	 *
1732 	 * Otherwise, use the legacy Host Data Path (HDP) through the PCIe BAR.
1733 	 */
1734 
1735 	/* direct access for native APUs, or when both host-gpu and gpu-gpu xgmi links exist */
1736 	if ((!amdgpu_sriov_vf(adev) &&
1737 		(adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
1738 	    (adev->gmc.xgmi.supported &&
1739 	     adev->gmc.xgmi.connected_to_cpu)) {
1740 		adev->gmc.aper_base =
1741 			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
1742 			adev->gmc.xgmi.physical_node_id *
1743 			adev->gmc.xgmi.node_segment_size;
1744 		adev->gmc.aper_size = adev->gmc.real_vram_size;
1745 	}
1746 
1747 #endif
1748 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
1749 
1750 	/* set the gart size */
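	/* APUs with DCE scatter/gather display support get a larger default
	 * GART below, since displayable surfaces may be scanned out of GTT.
	 */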
1751 	if (amdgpu_gart_size == -1) {
1752 		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1753 		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
1754 		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
1755 		case IP_VERSION(9, 4, 0):
1756 		case IP_VERSION(9, 4, 1):
1757 		case IP_VERSION(9, 4, 2):
1758 		case IP_VERSION(9, 4, 3):
1759 		case IP_VERSION(9, 4, 4):
1760 		case IP_VERSION(9, 5, 0):
1761 		default:
1762 			adev->gmc.gart_size = 512ULL << 20;
1763 			break;
1764 		case IP_VERSION(9, 1, 0):   /* DCE SG support */
1765 		case IP_VERSION(9, 2, 2):   /* DCE SG support */
1766 		case IP_VERSION(9, 3, 0):
1767 			adev->gmc.gart_size = 1024ULL << 20;
1768 			break;
1769 		}
1770 	} else {
1771 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1772 	}
1773 
1774 	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1775 
1776 	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1777 
1778 	return 0;
1779 }
1780 
1781 static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1782 {
1783 	int r;
1784 
1785 	if (adev->gart.bo) {
1786 		WARN(1, "VEGA10 PCIE GART already initialized\n");
1787 		return 0;
1788 	}
1789 
1790 	if (amdgpu_gmc_is_pdb0_enabled(adev)) {
1791 		adev->gmc.vmid0_page_table_depth = 1;
1792 		adev->gmc.vmid0_page_table_block_size = 12;
1793 	} else {
1794 		adev->gmc.vmid0_page_table_depth = 0;
1795 		adev->gmc.vmid0_page_table_block_size = 0;
1796 	}
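	/* When PDB0 is enabled, VMID0 uses a shallow (depth-1) page table
	 * rooted at PDB0; see amdgpu_gmc_init_pdb0() for how it covers VRAM
	 * plus a GART window over system memory.
	 */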
1797 
1798 	/* Initialize common gart structure */
1799 	r = amdgpu_gart_init(adev);
1800 	if (r)
1801 		return r;
1802 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
1803 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) |
1804 				 AMDGPU_PTE_EXECUTABLE;
1805 
1806 	if (!adev->gmc.real_vram_size) {
1807 		dev_info(adev->dev, "Put GART in system memory for APU\n");
1808 		r = amdgpu_gart_table_ram_alloc(adev);
1809 		if (r)
1810 			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
1811 	} else {
1812 		r = amdgpu_gart_table_vram_alloc(adev);
1813 		if (r)
1814 			return r;
1815 
1816 		if (amdgpu_gmc_is_pdb0_enabled(adev))
1817 			r = amdgpu_gmc_pdb0_alloc(adev);
1818 	}
1819 
1820 	return r;
1821 }
1822 
1823 /**
1824  * gmc_v9_0_save_registers - saves regs
1825  *
1826  * @adev: amdgpu_device pointer
1827  *
1828  * This saves register values that may need to be
1829  * restored upon resume.
1830  */
1831 static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1832 {
1833 	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
1834 	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
1835 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1836 }
1837 
1838 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
1839 {
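	/* HBM on multi-AID parts: 128 bits x 64 channels, matching the
	 * 8192-bit width set below.
	 */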
1840 	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
1841 	adev->gmc.vram_width = 128 * 64;
1842 
1843 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
1844 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
1845 }
1846 
1847 static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
1848 {
1849 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
1850 	struct amdgpu_device *adev = ip_block->adev;
1851 	unsigned long inst_mask = adev->aid_mask;
1852 
1853 	adev->gfxhub.funcs->init(adev);
1854 
1855 	adev->mmhub.funcs->init(adev);
1856 
1857 	spin_lock_init(&adev->gmc.invalidate_lock);
1858 
1859 	if (amdgpu_is_multi_aid(adev)) {
1860 		gmc_v9_4_3_init_vram_info(adev);
1861 	} else if (!adev->bios) {
1862 		if (adev->flags & AMD_IS_APU) {
1863 			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
1864 			adev->gmc.vram_width = 64 * 64;
1865 		} else {
1866 			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
1867 			adev->gmc.vram_width = 128 * 64;
1868 		}
1869 	} else {
1870 		r = amdgpu_atomfirmware_get_vram_info(adev,
1871 			&vram_width, &vram_type, &vram_vendor);
1872 		if (amdgpu_sriov_vf(adev))
1873 			/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
1874 			 * RAVEN, and DF-related registers are not readable; hardcoding
1875 			 * seems to be the only way to set the correct vram_width.
1876 			 */
1877 			adev->gmc.vram_width = 2048;
1878 		else if (amdgpu_emu_mode != 1)
1879 			adev->gmc.vram_width = vram_width;
1880 
1881 		if (!adev->gmc.vram_width) {
1882 			int chansize, numchan;
1883 
1884 			/* hbm memory channel size */
1885 			if (adev->flags & AMD_IS_APU)
1886 				chansize = 64;
1887 			else
1888 				chansize = 128;
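			/* Total width = number of channels x channel size; e.g. 16
			 * HBM2 channels x 128 bits would give a 2048-bit interface
			 * (illustrative).
			 */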
1889 			if (adev->df.funcs &&
1890 			    adev->df.funcs->get_hbm_channel_number) {
1891 				numchan = adev->df.funcs->get_hbm_channel_number(adev);
1892 				adev->gmc.vram_width = numchan * chansize;
1893 			}
1894 		}
1895 
1896 		adev->gmc.vram_type = vram_type;
1897 		adev->gmc.vram_vendor = vram_vendor;
1898 	}
1899 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1900 	case IP_VERSION(9, 1, 0):
1901 	case IP_VERSION(9, 2, 2):
1902 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1903 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1904 
1905 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1906 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1907 		} else {
1908 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
1909 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1910 			adev->gmc.translate_further =
1911 				adev->vm_manager.num_level > 1;
1912 		}
1913 		break;
1914 	case IP_VERSION(9, 0, 1):
1915 	case IP_VERSION(9, 2, 1):
1916 	case IP_VERSION(9, 4, 0):
1917 	case IP_VERSION(9, 3, 0):
1918 	case IP_VERSION(9, 4, 2):
1919 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1920 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1921 
1922 		/*
1923 		 * To fulfill 4-level page table support,
1924 		 * vm size is 256TB (48 bit), the maximum for Vega10,
1925 		 * with a block size of 512 (9 bit)
1926 		 */
1927 
1928 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1929 		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
1930 			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1931 		break;
1932 	case IP_VERSION(9, 4, 1):
1933 		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
1934 		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
1935 		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);
1936 
1937 		/* Keep the vm size same with Vega20 */
1938 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1939 		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1940 		break;
1941 	case IP_VERSION(9, 4, 3):
1942 	case IP_VERSION(9, 4, 4):
1943 	case IP_VERSION(9, 5, 0):
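		/* One GFX hub per XCC and one MMHUB instance per AID. */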
1944 		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
1945 				  NUM_XCC(adev->gfx.xcc_mask));
1946 
1947 		inst_mask <<= AMDGPU_MMHUB0(0);
1948 		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);
1949 
1950 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1951 		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
1952 		break;
1953 	default:
1954 		break;
1955 	}
1956 
1957 	/* This is the VMC page fault interrupt. */
1958 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1959 				&adev->gmc.vm_fault);
1960 	if (r)
1961 		return r;
1962 
1963 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
1964 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
1965 					&adev->gmc.vm_fault);
1966 		if (r)
1967 			return r;
1968 	}
1969 
1970 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1971 				&adev->gmc.vm_fault);
1972 
1973 	if (r)
1974 		return r;
1975 
1976 	if (!amdgpu_sriov_vf(adev) &&
1977 	    !adev->gmc.xgmi.connected_to_cpu &&
1978 	    !adev->gmc.is_app_apu) {
1979 		/* interrupt sent to DF. */
1980 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1981 				      &adev->gmc.ecc_irq);
1982 		if (r)
1983 			return r;
1984 	}
1985 
1986 	/* Set the internal MC address mask
1987 	 * This is the max address of the GPU's
1988 	 * internal address space.
1989 	 */
1990 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
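	/* GC 9.4.2 and newer support 48-bit DMA addressing; older GMC v9
	 * parts are limited to 44 bits.
	 */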
1991 
1992 	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
1993 					IP_VERSION(9, 4, 2) ?
1994 				48 :
1995 				44;
1996 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
1997 	if (r) {
1998 		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
1999 		return r;
2000 	}
2001 	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
2002 
2003 	r = gmc_v9_0_mc_init(adev);
2004 	if (r)
2005 		return r;
2006 
2007 	amdgpu_gmc_get_vbios_allocations(adev);
2008 
2009 	if (amdgpu_is_multi_aid(adev)) {
2010 		r = amdgpu_gmc_init_mem_ranges(adev);
2011 		if (r)
2012 			return r;
2013 	}
2014 
2015 	/* Memory manager */
2016 	r = amdgpu_bo_init(adev);
2017 	if (r)
2018 		return r;
2019 
2020 	r = gmc_v9_0_gart_init(adev);
2021 	if (r)
2022 		return r;
2023 
2024 	gmc_v9_0_init_nps_details(adev);
2025 	/*
2026 	 * number of VMs
2027 	 * VMID 0 is reserved for System
2028 	 * amdgpu graphics/compute will use VMIDs 1..n-1
2029 	 * amdkfd will use VMIDs n..15
2030 	 *
2031 	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
2032 	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
2033 	 * for video processing.
2034 	 */
2035 	adev->vm_manager.first_kfd_vmid =
2036 		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
2037 		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
2038 		 amdgpu_is_multi_aid(adev)) ?
2039 			3 :
2040 			8;
2041 
2042 	amdgpu_vm_manager_init(adev);
2043 
2044 	gmc_v9_0_save_registers(adev);
2045 
2046 	r = amdgpu_gmc_ras_sw_init(adev);
2047 	if (r)
2048 		return r;
2049 
2050 	if (amdgpu_is_multi_aid(adev))
2051 		amdgpu_gmc_sysfs_init(adev);
2052 
2053 	return 0;
2054 }
2055 
2056 static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
2057 {
2058 	struct amdgpu_device *adev = ip_block->adev;
2059 
2060 	if (amdgpu_is_multi_aid(adev))
2061 		amdgpu_gmc_sysfs_fini(adev);
2062 
2063 	amdgpu_gmc_ras_fini(adev);
2064 	amdgpu_gem_force_release(adev);
2065 	amdgpu_vm_manager_fini(adev);
2066 	if (!adev->gmc.real_vram_size) {
2067 		dev_info(adev->dev, "Freeing GART in system memory for APU\n");
2068 		amdgpu_gart_table_ram_free(adev);
2069 	} else {
2070 		amdgpu_gart_table_vram_free(adev);
2071 	}
2072 	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
2073 	amdgpu_bo_fini(adev);
2074 
2075 	adev->gmc.num_mem_partitions = 0;
2076 	kfree(adev->gmc.mem_partitions);
2077 
2078 	return 0;
2079 }
2080 
2081 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
2082 {
2083 	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
2084 	case IP_VERSION(9, 0, 0):
2085 		if (amdgpu_sriov_vf(adev))
2086 			break;
2087 		fallthrough;
2088 	case IP_VERSION(9, 4, 0):
2089 		soc15_program_register_sequence(adev,
2090 						golden_settings_mmhub_1_0_0,
2091 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
2092 		soc15_program_register_sequence(adev,
2093 						golden_settings_athub_1_0_0,
2094 						ARRAY_SIZE(golden_settings_athub_1_0_0));
2095 		break;
2096 	case IP_VERSION(9, 1, 0):
2097 	case IP_VERSION(9, 2, 0):
2098 		/* TODO for renoir */
2099 		soc15_program_register_sequence(adev,
2100 						golden_settings_athub_1_0_0,
2101 						ARRAY_SIZE(golden_settings_athub_1_0_0));
2102 		break;
2103 	default:
2104 		break;
2105 	}
2106 }
2107 
2108 /**
2109  * gmc_v9_0_restore_registers - restores regs
2110  *
2111  * @adev: amdgpu_device pointer
2112  *
2113  * This restores register values, saved at suspend.
2114  */
2115 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2116 {
2117 	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
2118 	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
2119 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2120 		WARN_ON(adev->gmc.sdpif_register !=
2121 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2122 	}
2123 }
2124 
2125 /**
2126  * gmc_v9_0_gart_enable - gart enable
2127  *
2128  * @adev: amdgpu_device pointer
2129  */
2130 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
2131 {
2132 	int r;
2133 
2134 	if (amdgpu_gmc_is_pdb0_enabled(adev))
2135 		amdgpu_gmc_init_pdb0(adev);
2136 
2137 	if (adev->gart.bo == NULL) {
2138 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
2139 		return -EINVAL;
2140 	}
2141 
2142 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
2143 
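	/* In S0ix, GFX stays in gfxoff and the GFX hub is expected to retain
	 * its state, so it is not reprogrammed here.
	 */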
2144 	if (!adev->in_s0ix) {
2145 		r = adev->gfxhub.funcs->gart_enable(adev);
2146 		if (r)
2147 			return r;
2148 	}
2149 
2150 	r = adev->mmhub.funcs->gart_enable(adev);
2151 	if (r)
2152 		return r;
2153 
2154 	DRM_INFO("PCIE GART of %uM enabled.\n",
2155 		 (unsigned int)(adev->gmc.gart_size >> 20));
2156 	if (adev->gmc.pdb0_bo)
2157 		DRM_INFO("PDB0 located at 0x%016llX\n",
2158 				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
2159 	DRM_INFO("PTB located at 0x%016llX\n",
2160 			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
2161 
2162 	return 0;
2163 }
2164 
2165 static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
2166 {
2167 	struct amdgpu_device *adev = ip_block->adev;
2168 	bool value;
2169 	int i, r;
2170 
2171 	adev->gmc.flush_pasid_uses_kiq = true;
2172 
2173 	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
2174 	 * (type 2), which flushes both. Due to a race condition with
2175 	 * concurrent memory accesses using the same TLB cache line, we still
2176 	 * need a second TLB flush after this.
2177 	 */
2178 	adev->gmc.flush_tlb_needs_extra_type_2 =
2179 		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
2180 		adev->gmc.xgmi.num_physical_nodes;
2181 
2182 	/* The ordering of these init calls matters. */
2183 	gmc_v9_0_init_golden_registers(adev);
2184 
2185 	if (adev->mode_info.num_crtc) {
2186 		/* Lockout access through VGA aperture*/
2187 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
2188 		/* disable VGA render */
2189 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
2190 	}
2191 
2192 	if (adev->mmhub.funcs->update_power_gating)
2193 		adev->mmhub.funcs->update_power_gating(adev, true);
2194 
2195 	adev->hdp.funcs->init_registers(adev);
2196 
2197 	/* After HDP is initialized, flush HDP.*/
2198 	amdgpu_device_flush_hdp(adev, NULL);
2199 
2200 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
2201 		value = false;
2202 	else
2203 		value = true;
2204 
2205 	if (!amdgpu_sriov_vf(adev)) {
2206 		if (!adev->in_s0ix)
2207 			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
2208 		adev->mmhub.funcs->set_fault_enable_default(adev, value);
2209 	}
2210 	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
2211 		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
2212 			continue;
2213 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
2214 	}
2215 
2216 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
2217 		adev->umc.funcs->init_registers(adev);
2218 
2219 	r = gmc_v9_0_gart_enable(adev);
2220 	if (r)
2221 		return r;
2222 
2223 	if (amdgpu_emu_mode == 1)
2224 		return amdgpu_gmc_vram_checking(adev);
2225 
2226 	return 0;
2227 }
2228 
2229 /**
2230  * gmc_v9_0_gart_disable - gart disable
2231  *
2232  * @adev: amdgpu_device pointer
2233  *
2234  * This disables all VM page tables.
2235  */
2236 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
2237 {
2238 	if (!adev->in_s0ix)
2239 		adev->gfxhub.funcs->gart_disable(adev);
2240 	adev->mmhub.funcs->gart_disable(adev);
2241 }
2242 
2243 static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
2244 {
2245 	struct amdgpu_device *adev = ip_block->adev;
2246 
2247 	gmc_v9_0_gart_disable(adev);
2248 
2249 	if (amdgpu_sriov_vf(adev)) {
2250 		/* full access mode, so don't touch any GMC register */
2251 		DRM_DEBUG("For SR-IOV clients, nothing else to do.\n");
2252 		return 0;
2253 	}
2254 
2255 	/*
2256 	 * Pair the operations done in gmc_v9_0_hw_init and thus maintain
2257 	 * a correct cached state for GMC. Otherwise, gating again on S3
2258 	 * resume will fail due to the wrong cached state.
2259 	 */
2260 	if (adev->mmhub.funcs->update_power_gating)
2261 		adev->mmhub.funcs->update_power_gating(adev, false);
2262 
2263 	/*
2264 	 * For minimal init, late_init is not called, hence VM fault/RAS irqs
2265 	 * are not enabled.
2266 	 */
2267 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
2268 		amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
2269 
2270 		if (adev->gmc.ecc_irq.funcs &&
2271 		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
2272 			amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
2273 	}
2274 
2275 	return 0;
2276 }
2277 
2278 static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
2279 {
2280 	return gmc_v9_0_hw_fini(ip_block);
2281 }
2282 
2283 static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
2284 {
2285 	struct amdgpu_device *adev = ip_block->adev;
2286 	int r;
2287 
2288 	/* If a reset is done for NPS mode switch, read the memory range
2289 	 * information again.
2290 	 */
2291 	if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
2292 		amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
2293 		adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
2294 	}
2295 
2296 	r = gmc_v9_0_hw_init(ip_block);
2297 	if (r)
2298 		return r;
2299 
2300 	amdgpu_vmid_reset_all(ip_block->adev);
2301 
2302 	return 0;
2303 }
2304 
2305 static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
2306 {
2307 	/* MC is always ready in GMC v9.*/
2308 	return true;
2309 }
2310 
2311 static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
2312 {
2313 	/* There is no need to wait for MC idle in GMC v9.*/
2314 	return 0;
2315 }
2316 
2317 static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
2318 {
2319 	/* XXX for emulation.*/
2320 	return 0;
2321 }
2322 
2323 static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2324 					enum amd_clockgating_state state)
2325 {
2326 	struct amdgpu_device *adev = ip_block->adev;
2327 
2328 	adev->mmhub.funcs->set_clockgating(adev, state);
2329 
2330 	athub_v1_0_set_clockgating(adev, state);
2331 
2332 	return 0;
2333 }
2334 
2335 static void gmc_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2336 {
2337 	struct amdgpu_device *adev = ip_block->adev;
2338 
2339 	adev->mmhub.funcs->get_clockgating(adev, flags);
2340 
2341 	athub_v1_0_get_clockgating(adev, flags);
2342 }
2343 
2344 static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
2345 					enum amd_powergating_state state)
2346 {
2347 	return 0;
2348 }
2349 
2350 const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2351 	.name = "gmc_v9_0",
2352 	.early_init = gmc_v9_0_early_init,
2353 	.late_init = gmc_v9_0_late_init,
2354 	.sw_init = gmc_v9_0_sw_init,
2355 	.sw_fini = gmc_v9_0_sw_fini,
2356 	.hw_init = gmc_v9_0_hw_init,
2357 	.hw_fini = gmc_v9_0_hw_fini,
2358 	.suspend = gmc_v9_0_suspend,
2359 	.resume = gmc_v9_0_resume,
2360 	.is_idle = gmc_v9_0_is_idle,
2361 	.wait_for_idle = gmc_v9_0_wait_for_idle,
2362 	.soft_reset = gmc_v9_0_soft_reset,
2363 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
2364 	.set_powergating_state = gmc_v9_0_set_powergating_state,
2365 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
2366 };
2367 
2368 const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
2369 	.type = AMD_IP_BLOCK_TYPE_GMC,
2370 	.major = 9,
2371 	.minor = 0,
2372 	.rev = 0,
2373 	.funcs = &gmc_v9_0_ip_funcs,
2374 };
2375