1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25 #include <linux/pci.h>
26
27 #include <drm/drm_cache.h>
28
29 #include "amdgpu.h"
30 #include "gmc_v9_0.h"
31 #include "amdgpu_atomfirmware.h"
32 #include "amdgpu_gem.h"
33
34 #include "gc/gc_9_0_sh_mask.h"
35 #include "dce/dce_12_0_offset.h"
36 #include "dce/dce_12_0_sh_mask.h"
37 #include "vega10_enum.h"
38 #include "mmhub/mmhub_1_0_offset.h"
39 #include "athub/athub_1_0_sh_mask.h"
40 #include "athub/athub_1_0_offset.h"
41 #include "oss/osssys_4_0_offset.h"
42
43 #include "soc15.h"
44 #include "soc15d.h"
45 #include "soc15_common.h"
46 #include "umc/umc_6_0_sh_mask.h"
47
48 #include "gfxhub_v1_0.h"
49 #include "mmhub_v1_0.h"
50 #include "athub_v1_0.h"
51 #include "gfxhub_v1_1.h"
52 #include "gfxhub_v1_2.h"
53 #include "mmhub_v9_4.h"
54 #include "mmhub_v1_7.h"
55 #include "mmhub_v1_8.h"
56 #include "umc_v6_1.h"
57 #include "umc_v6_0.h"
58 #include "umc_v6_7.h"
59 #include "umc_v12_0.h"
60 #include "hdp_v4_0.h"
61 #include "mca_v3_0.h"
62
63 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
64
65 #include "amdgpu_ras.h"
66 #include "amdgpu_xgmi.h"
67
68 /* add these here since we already include dce12 headers and these are for DCN */
69 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
70 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
71 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
74 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
75 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
76 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
77
78 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
79 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
80
81 #define MAX_MEM_RANGES 8
82
83 static const char * const gfxhub_client_ids[] = {
84 "CB",
85 "DB",
86 "IA",
87 "WD",
88 "CPF",
89 "CPC",
90 "CPG",
91 "RLC",
92 "TCP",
93 "SQC (inst)",
94 "SQC (data)",
95 "SQG",
96 "PA",
97 };
98
99 static const char *mmhub_client_ids_raven[][2] = {
100 [0][0] = "MP1",
101 [1][0] = "MP0",
102 [2][0] = "VCN",
103 [3][0] = "VCNU",
104 [4][0] = "HDP",
105 [5][0] = "DCE",
106 [13][0] = "UTCL2",
107 [19][0] = "TLS",
108 [26][0] = "OSS",
109 [27][0] = "SDMA0",
110 [0][1] = "MP1",
111 [1][1] = "MP0",
112 [2][1] = "VCN",
113 [3][1] = "VCNU",
114 [4][1] = "HDP",
115 [5][1] = "XDP",
116 [6][1] = "DBGU0",
117 [7][1] = "DCE",
118 [8][1] = "DCEDWB0",
119 [9][1] = "DCEDWB1",
120 [26][1] = "OSS",
121 [27][1] = "SDMA0",
122 };
123
124 static const char *mmhub_client_ids_renoir[][2] = {
125 [0][0] = "MP1",
126 [1][0] = "MP0",
127 [2][0] = "HDP",
128 [4][0] = "DCEDMC",
129 [5][0] = "DCEVGA",
130 [13][0] = "UTCL2",
131 [19][0] = "TLS",
132 [26][0] = "OSS",
133 [27][0] = "SDMA0",
134 [28][0] = "VCN",
135 [29][0] = "VCNU",
136 [30][0] = "JPEG",
137 [0][1] = "MP1",
138 [1][1] = "MP0",
139 [2][1] = "HDP",
140 [3][1] = "XDP",
141 [6][1] = "DBGU0",
142 [7][1] = "DCEDMC",
143 [8][1] = "DCEVGA",
144 [9][1] = "DCEDWB",
145 [26][1] = "OSS",
146 [27][1] = "SDMA0",
147 [28][1] = "VCN",
148 [29][1] = "VCNU",
149 [30][1] = "JPEG",
150 };
151
152 static const char *mmhub_client_ids_vega10[][2] = {
153 [0][0] = "MP0",
154 [1][0] = "UVD",
155 [2][0] = "UVDU",
156 [3][0] = "HDP",
157 [13][0] = "UTCL2",
158 [14][0] = "OSS",
159 [15][0] = "SDMA1",
160 [32+0][0] = "VCE0",
161 [32+1][0] = "VCE0U",
162 [32+2][0] = "XDMA",
163 [32+3][0] = "DCE",
164 [32+4][0] = "MP1",
165 [32+14][0] = "SDMA0",
166 [0][1] = "MP0",
167 [1][1] = "UVD",
168 [2][1] = "UVDU",
169 [3][1] = "DBGU0",
170 [4][1] = "HDP",
171 [5][1] = "XDP",
172 [14][1] = "OSS",
173 [15][1] = "SDMA0",
174 [32+0][1] = "VCE0",
175 [32+1][1] = "VCE0U",
176 [32+2][1] = "XDMA",
177 [32+3][1] = "DCE",
178 [32+4][1] = "DCEDWB",
179 [32+5][1] = "MP1",
180 [32+6][1] = "DBGU1",
181 [32+14][1] = "SDMA1",
182 };
183
184 static const char *mmhub_client_ids_vega12[][2] = {
185 [0][0] = "MP0",
186 [1][0] = "VCE0",
187 [2][0] = "VCE0U",
188 [3][0] = "HDP",
189 [13][0] = "UTCL2",
190 [14][0] = "OSS",
191 [15][0] = "SDMA1",
192 [32+0][0] = "DCE",
193 [32+1][0] = "XDMA",
194 [32+2][0] = "UVD",
195 [32+3][0] = "UVDU",
196 [32+4][0] = "MP1",
197 [32+15][0] = "SDMA0",
198 [0][1] = "MP0",
199 [1][1] = "VCE0",
200 [2][1] = "VCE0U",
201 [3][1] = "DBGU0",
202 [4][1] = "HDP",
203 [5][1] = "XDP",
204 [14][1] = "OSS",
205 [15][1] = "SDMA0",
206 [32+0][1] = "DCE",
207 [32+1][1] = "DCEDWB",
208 [32+2][1] = "XDMA",
209 [32+3][1] = "UVD",
210 [32+4][1] = "UVDU",
211 [32+5][1] = "MP1",
212 [32+6][1] = "DBGU1",
213 [32+15][1] = "SDMA1",
214 };
215
216 static const char *mmhub_client_ids_vega20[][2] = {
217 [0][0] = "XDMA",
218 [1][0] = "DCE",
219 [2][0] = "VCE0",
220 [3][0] = "VCE0U",
221 [4][0] = "UVD",
222 [5][0] = "UVD1U",
223 [13][0] = "OSS",
224 [14][0] = "HDP",
225 [15][0] = "SDMA0",
226 [32+0][0] = "UVD",
227 [32+1][0] = "UVDU",
228 [32+2][0] = "MP1",
229 [32+3][0] = "MP0",
230 [32+12][0] = "UTCL2",
231 [32+14][0] = "SDMA1",
232 [0][1] = "XDMA",
233 [1][1] = "DCE",
234 [2][1] = "DCEDWB",
235 [3][1] = "VCE0",
236 [4][1] = "VCE0U",
237 [5][1] = "UVD1",
238 [6][1] = "UVD1U",
239 [7][1] = "DBGU0",
240 [8][1] = "XDP",
241 [13][1] = "OSS",
242 [14][1] = "HDP",
243 [15][1] = "SDMA0",
244 [32+0][1] = "UVD",
245 [32+1][1] = "UVDU",
246 [32+2][1] = "DBGU1",
247 [32+3][1] = "MP1",
248 [32+4][1] = "MP0",
249 [32+14][1] = "SDMA1",
250 };
251
252 static const char *mmhub_client_ids_arcturus[][2] = {
253 [0][0] = "DBGU1",
254 [1][0] = "XDP",
255 [2][0] = "MP1",
256 [14][0] = "HDP",
257 [171][0] = "JPEG",
258 [172][0] = "VCN",
259 [173][0] = "VCNU",
260 [203][0] = "JPEG1",
261 [204][0] = "VCN1",
262 [205][0] = "VCN1U",
263 [256][0] = "SDMA0",
264 [257][0] = "SDMA1",
265 [258][0] = "SDMA2",
266 [259][0] = "SDMA3",
267 [260][0] = "SDMA4",
268 [261][0] = "SDMA5",
269 [262][0] = "SDMA6",
270 [263][0] = "SDMA7",
271 [384][0] = "OSS",
272 [0][1] = "DBGU1",
273 [1][1] = "XDP",
274 [2][1] = "MP1",
275 [14][1] = "HDP",
276 [171][1] = "JPEG",
277 [172][1] = "VCN",
278 [173][1] = "VCNU",
279 [203][1] = "JPEG1",
280 [204][1] = "VCN1",
281 [205][1] = "VCN1U",
282 [256][1] = "SDMA0",
283 [257][1] = "SDMA1",
284 [258][1] = "SDMA2",
285 [259][1] = "SDMA3",
286 [260][1] = "SDMA4",
287 [261][1] = "SDMA5",
288 [262][1] = "SDMA6",
289 [263][1] = "SDMA7",
290 [384][1] = "OSS",
291 };
292
293 static const char *mmhub_client_ids_aldebaran[][2] = {
294 [2][0] = "MP1",
295 [3][0] = "MP0",
296 [32+1][0] = "DBGU_IO0",
297 [32+2][0] = "DBGU_IO2",
298 [32+4][0] = "MPIO",
299 [96+11][0] = "JPEG0",
300 [96+12][0] = "VCN0",
301 [96+13][0] = "VCNU0",
302 [128+11][0] = "JPEG1",
303 [128+12][0] = "VCN1",
304 [128+13][0] = "VCNU1",
305 [160+1][0] = "XDP",
306 [160+14][0] = "HDP",
307 [256+0][0] = "SDMA0",
308 [256+1][0] = "SDMA1",
309 [256+2][0] = "SDMA2",
310 [256+3][0] = "SDMA3",
311 [256+4][0] = "SDMA4",
312 [384+0][0] = "OSS",
313 [2][1] = "MP1",
314 [3][1] = "MP0",
315 [32+1][1] = "DBGU_IO0",
316 [32+2][1] = "DBGU_IO2",
317 [32+4][1] = "MPIO",
318 [96+11][1] = "JPEG0",
319 [96+12][1] = "VCN0",
320 [96+13][1] = "VCNU0",
321 [128+11][1] = "JPEG1",
322 [128+12][1] = "VCN1",
323 [128+13][1] = "VCNU1",
324 [160+1][1] = "XDP",
325 [160+14][1] = "HDP",
326 [256+0][1] = "SDMA0",
327 [256+1][1] = "SDMA1",
328 [256+2][1] = "SDMA2",
329 [256+3][1] = "SDMA3",
330 [256+4][1] = "SDMA4",
331 [384+0][1] = "OSS",
332 };
333
334 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
335 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
336 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
337 };
338
339 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
340 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
341 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
342 };
343
344 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
345 (0x000143c0 + 0x00000000),
346 (0x000143c0 + 0x00000800),
347 (0x000143c0 + 0x00001000),
348 (0x000143c0 + 0x00001800),
349 (0x000543c0 + 0x00000000),
350 (0x000543c0 + 0x00000800),
351 (0x000543c0 + 0x00001000),
352 (0x000543c0 + 0x00001800),
353 (0x000943c0 + 0x00000000),
354 (0x000943c0 + 0x00000800),
355 (0x000943c0 + 0x00001000),
356 (0x000943c0 + 0x00001800),
357 (0x000d43c0 + 0x00000000),
358 (0x000d43c0 + 0x00000800),
359 (0x000d43c0 + 0x00001000),
360 (0x000d43c0 + 0x00001800),
361 (0x001143c0 + 0x00000000),
362 (0x001143c0 + 0x00000800),
363 (0x001143c0 + 0x00001000),
364 (0x001143c0 + 0x00001800),
365 (0x001543c0 + 0x00000000),
366 (0x001543c0 + 0x00000800),
367 (0x001543c0 + 0x00001000),
368 (0x001543c0 + 0x00001800),
369 (0x001943c0 + 0x00000000),
370 (0x001943c0 + 0x00000800),
371 (0x001943c0 + 0x00001000),
372 (0x001943c0 + 0x00001800),
373 (0x001d43c0 + 0x00000000),
374 (0x001d43c0 + 0x00000800),
375 (0x001d43c0 + 0x00001000),
376 (0x001d43c0 + 0x00001800),
377 };
378
379 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
380 (0x000143e0 + 0x00000000),
381 (0x000143e0 + 0x00000800),
382 (0x000143e0 + 0x00001000),
383 (0x000143e0 + 0x00001800),
384 (0x000543e0 + 0x00000000),
385 (0x000543e0 + 0x00000800),
386 (0x000543e0 + 0x00001000),
387 (0x000543e0 + 0x00001800),
388 (0x000943e0 + 0x00000000),
389 (0x000943e0 + 0x00000800),
390 (0x000943e0 + 0x00001000),
391 (0x000943e0 + 0x00001800),
392 (0x000d43e0 + 0x00000000),
393 (0x000d43e0 + 0x00000800),
394 (0x000d43e0 + 0x00001000),
395 (0x000d43e0 + 0x00001800),
396 (0x001143e0 + 0x00000000),
397 (0x001143e0 + 0x00000800),
398 (0x001143e0 + 0x00001000),
399 (0x001143e0 + 0x00001800),
400 (0x001543e0 + 0x00000000),
401 (0x001543e0 + 0x00000800),
402 (0x001543e0 + 0x00001000),
403 (0x001543e0 + 0x00001800),
404 (0x001943e0 + 0x00000000),
405 (0x001943e0 + 0x00000800),
406 (0x001943e0 + 0x00001000),
407 (0x001943e0 + 0x00001800),
408 (0x001d43e0 + 0x00000000),
409 (0x001d43e0 + 0x00000800),
410 (0x001d43e0 + 0x00001000),
411 (0x001d43e0 + 0x00001800),
412 };
413
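/**
 * gmc_v9_0_ecc_interrupt_state - enable/disable ECC error interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: interrupt state to set
 *
 * Program the UMC MCUMC control registers to enable or disable ECC error
 * interrupts. On ASICs newer than VEGA10/12 this is handled by the PSP
 * bootloader, so the registers are only programmed on older parts.
 */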
414 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
415 struct amdgpu_irq_src *src,
416 unsigned int type,
417 enum amdgpu_interrupt_state state)
418 {
419 u32 bits, i, tmp, reg;
420
421 /* Devices newer than VEGA10/12 shall have these programming
422 * sequences performed by PSP BL
423 */
424 if (adev->asic_type >= CHIP_VEGA20)
425 return 0;
426
427 bits = 0x7f;
428
429 switch (state) {
430 case AMDGPU_IRQ_STATE_DISABLE:
431 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
432 reg = ecc_umc_mcumc_ctrl_addrs[i];
433 tmp = RREG32(reg);
434 tmp &= ~bits;
435 WREG32(reg, tmp);
436 }
437 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
438 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
439 tmp = RREG32(reg);
440 tmp &= ~bits;
441 WREG32(reg, tmp);
442 }
443 break;
444 case AMDGPU_IRQ_STATE_ENABLE:
445 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
446 reg = ecc_umc_mcumc_ctrl_addrs[i];
447 tmp = RREG32(reg);
448 tmp |= bits;
449 WREG32(reg, tmp);
450 }
451 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
452 reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
453 tmp = RREG32(reg);
454 tmp |= bits;
455 WREG32(reg, tmp);
456 }
457 break;
458 default:
459 break;
460 }
461
462 return 0;
463 }
464
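/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type
 * @state: interrupt state to set
 *
 * Set or clear the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of every VM hub.
 */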
465 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
466 struct amdgpu_irq_src *src,
467 unsigned int type,
468 enum amdgpu_interrupt_state state)
469 {
470 struct amdgpu_vmhub *hub;
471 u32 tmp, reg, bits, i, j;
472
473 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
474 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
475 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
477 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
478 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
479 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
480
481 switch (state) {
482 case AMDGPU_IRQ_STATE_DISABLE:
483 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
484 hub = &adev->vmhub[j];
485 for (i = 0; i < 16; i++) {
486 reg = hub->vm_context0_cntl + i;
487
488 /* This works because this interrupt is only
489 * enabled at init/resume and disabled in
490 * fini/suspend, so the overall state doesn't
491 * change over the course of suspend/resume.
492 */
493 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
494 continue;
495
496 if (j >= AMDGPU_MMHUB0(0))
497 tmp = RREG32_SOC15_IP(MMHUB, reg);
498 else
499 tmp = RREG32_XCC(reg, j);
500
501 tmp &= ~bits;
502
503 if (j >= AMDGPU_MMHUB0(0))
504 WREG32_SOC15_IP(MMHUB, reg, tmp);
505 else
506 WREG32_XCC(reg, tmp, j);
507 }
508 }
509 break;
510 case AMDGPU_IRQ_STATE_ENABLE:
511 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
512 hub = &adev->vmhub[j];
513 for (i = 0; i < 16; i++) {
514 reg = hub->vm_context0_cntl + i;
515
516 /* This works because this interrupt is only
517 * enabled at init/resume and disabled in
518 * fini/suspend, so the overall state doesn't
519 * change over the course of suspend/resume.
520 */
521 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
522 continue;
523
524 if (j >= AMDGPU_MMHUB0(0))
525 tmp = RREG32_SOC15_IP(MMHUB, reg);
526 else
527 tmp = RREG32_XCC(reg, j);
528
529 tmp |= bits;
530
531 if (j >= AMDGPU_MMHUB0(0))
532 WREG32_SOC15_IP(MMHUB, reg, tmp);
533 else
534 WREG32_XCC(reg, tmp, j);
535 }
536 }
537 break;
538 default:
539 break;
540 }
541
542 return 0;
543 }
544
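/**
 * gmc_v9_0_process_interrupt - handle a VM page fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Decode the faulting hub and address, let the VM code try to handle
 * recoverable retry faults, and otherwise log the decoded
 * VM_L2_PROTECTION_FAULT_STATUS information.
 */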
545 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
546 struct amdgpu_irq_src *source,
547 struct amdgpu_iv_entry *entry)
548 {
549 bool retry_fault = !!(entry->src_data[1] & 0x80);
550 bool write_fault = !!(entry->src_data[1] & 0x20);
551 uint32_t status = 0, cid = 0, rw = 0, fed = 0;
552 struct amdgpu_task_info *task_info;
553 struct amdgpu_vmhub *hub;
554 const char *mmhub_cid;
555 const char *hub_name;
556 unsigned int vmhub;
557 u64 addr;
558 uint32_t cam_index = 0;
559 int ret, xcc_id = 0;
560 uint32_t node_id;
561
562 node_id = entry->node_id;
563
564 addr = (u64)entry->src_data[0] << 12;
565 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
566
567 if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
568 hub_name = "mmhub0";
569 vmhub = AMDGPU_MMHUB0(node_id / 4);
570 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
571 hub_name = "mmhub1";
572 vmhub = AMDGPU_MMHUB1(0);
573 } else {
574 hub_name = "gfxhub0";
575 if (adev->gfx.funcs->ih_node_to_logical_xcc) {
576 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
577 node_id);
578 if (xcc_id < 0)
579 xcc_id = 0;
580 }
581 vmhub = xcc_id;
582 }
583 hub = &adev->vmhub[vmhub];
584
585 if (retry_fault) {
586 if (adev->irq.retry_cam_enabled) {
587 /* Delegate it to a different ring if the hardware hasn't
588 * already done it.
589 */
590 if (entry->ih == &adev->irq.ih) {
591 amdgpu_irq_delegate(adev, entry, 8);
592 return 1;
593 }
594
595 cam_index = entry->src_data[2] & 0x3ff;
596
597 ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
598 addr, entry->timestamp, write_fault);
599 WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
600 if (ret)
601 return 1;
602 } else {
603 /* Process it only if it's the first fault for this address */
604 if (entry->ih != &adev->irq.ih_soft &&
605 amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
606 entry->timestamp))
607 return 1;
608
609 /* Delegate it to a different ring if the hardware hasn't
610 * already done it.
611 */
612 if (entry->ih == &adev->irq.ih) {
613 amdgpu_irq_delegate(adev, entry, 8);
614 return 1;
615 }
616
617 /* Try to handle the recoverable page faults by filling page
618 * tables
619 */
620 if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
621 addr, entry->timestamp, write_fault))
622 return 1;
623 }
624 }
625
626 if (!printk_ratelimit())
627 return 0;
628
629 dev_err(adev->dev,
630 "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
631 retry_fault ? "retry" : "no-retry",
632 entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
633
634 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
635 if (task_info) {
636 dev_err(adev->dev,
637 " for process %s pid %d thread %s pid %d)\n",
638 task_info->process_name, task_info->tgid,
639 task_info->task_name, task_info->pid);
640 amdgpu_vm_put_task_info(task_info);
641 }
642
643 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
644 addr, entry->client_id,
645 soc15_ih_clientid_name[entry->client_id]);
646
647 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
648 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
649 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
650 node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
651 node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
652
653 if (amdgpu_sriov_vf(adev))
654 return 0;
655
656 /*
657 * Issue a dummy read to wait for the status register to
658 * be updated to avoid reading an incorrect value due to
659 * the new fast GRBM interface.
660 */
661 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
662 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
663 RREG32(hub->vm_l2_pro_fault_status);
664
665 status = RREG32(hub->vm_l2_pro_fault_status);
666 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
667 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
668 fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
669
670 /* for fed error, kfd will handle it, return directly */
671 if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
672 (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
673 return 0;
674
675 /* Only print L2 fault status if the status register could be read and
676 * contains useful information
677 */
678 if (!status)
679 return 0;
680
681 if (!amdgpu_sriov_vf(adev))
682 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
683
684 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);
685
686 dev_err(adev->dev,
687 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
688 status);
689 if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
690 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
691 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
692 gfxhub_client_ids[cid],
693 cid);
694 } else {
695 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
696 case IP_VERSION(9, 0, 0):
697 mmhub_cid = mmhub_client_ids_vega10[cid][rw];
698 break;
699 case IP_VERSION(9, 3, 0):
700 mmhub_cid = mmhub_client_ids_vega12[cid][rw];
701 break;
702 case IP_VERSION(9, 4, 0):
703 mmhub_cid = mmhub_client_ids_vega20[cid][rw];
704 break;
705 case IP_VERSION(9, 4, 1):
706 mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
707 break;
708 case IP_VERSION(9, 1, 0):
709 case IP_VERSION(9, 2, 0):
710 mmhub_cid = mmhub_client_ids_raven[cid][rw];
711 break;
712 case IP_VERSION(1, 5, 0):
713 case IP_VERSION(2, 4, 0):
714 mmhub_cid = mmhub_client_ids_renoir[cid][rw];
715 break;
716 case IP_VERSION(1, 8, 0):
717 case IP_VERSION(9, 4, 2):
718 mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
719 break;
720 default:
721 mmhub_cid = NULL;
722 break;
723 }
724 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
725 mmhub_cid ? mmhub_cid : "unknown", cid);
726 }
727 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
728 REG_GET_FIELD(status,
729 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
730 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
731 REG_GET_FIELD(status,
732 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
733 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
734 REG_GET_FIELD(status,
735 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
736 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
737 REG_GET_FIELD(status,
738 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
739 dev_err(adev->dev, "\t RW: 0x%x\n", rw);
740 return 0;
741 }
742
743 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
744 .set = gmc_v9_0_vm_fault_interrupt_state,
745 .process = gmc_v9_0_process_interrupt,
746 };
747
748
749 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
750 .set = gmc_v9_0_ecc_interrupt_state,
751 .process = amdgpu_umc_process_ecc_irq,
752 };
753
754 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
755 {
756 adev->gmc.vm_fault.num_types = 1;
757 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
758
759 if (!amdgpu_sriov_vf(adev) &&
760 !adev->gmc.xgmi.connected_to_cpu &&
761 !adev->gmc.is_app_apu) {
762 adev->gmc.ecc_irq.num_types = 1;
763 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
764 }
765 }
766
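/**
 * gmc_v9_0_get_invalidate_req - build a TLB invalidation request
 *
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Build the VM_INVALIDATE_ENG*_REQ register value that requests
 * invalidation of the L1/L2 PTE and PDE entries of the given VMID.
 */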
767 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
768 uint32_t flush_type)
769 {
770 u32 req = 0;
771
772 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
773 PER_VMID_INVALIDATE_REQ, 1 << vmid);
774 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
775 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
776 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
777 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
778 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
779 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
780 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
781 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
782
783 return req;
784 }
785
786 /**
787 * gmc_v9_0_use_invalidate_semaphore - check whether to use the invalidation semaphore
788 *
789 * @adev: amdgpu_device pointer
790 * @vmhub: vmhub type
791 *
792 */
793 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
794 uint32_t vmhub)
795 {
796 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
797 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
798 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
799 return false;
800
801 return ((vmhub == AMDGPU_MMHUB0(0) ||
802 vmhub == AMDGPU_MMHUB1(0)) &&
803 (!amdgpu_sriov_vf(adev)) &&
804 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
805 (adev->apu_flags & AMD_APU_IS_PICASSO))));
806 }
807
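/**
 * gmc_v9_0_get_atc_vmid_pasid_mapping_info - look up the PASID of a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to query
 * @p_pasid: returned pasid
 *
 * Read the ATC VMID-to-PASID mapping register and return true if the
 * mapping is valid.
 */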
808 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
809 uint8_t vmid, uint16_t *p_pasid)
810 {
811 uint32_t value;
812
813 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
814 + vmid);
815 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
816
817 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
818 }
819
820 /*
821 * GART
822 * VMID 0 is the physical GPU addresses as used by the kernel.
823 * VMIDs 1-15 are used for userspace clients and are handled
824 * by the amdgpu vm/hsa code.
825 */
826
827 /**
828 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
829 *
830 * @adev: amdgpu_device pointer
831 * @vmid: vm instance to flush
832 * @vmhub: which hub to flush
833 * @flush_type: the flush type
834 *
835 * Flush the TLB for the requested page table using a certain flush type.
836 */
837 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
838 uint32_t vmhub, uint32_t flush_type)
839 {
840 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
841 u32 j, inv_req, tmp, sem, req, ack, inst;
842 const unsigned int eng = 17;
843 struct amdgpu_vmhub *hub;
844
845 BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);
846
847 hub = &adev->vmhub[vmhub];
848 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
849 sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
850 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
851 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
852
853 if (vmhub >= AMDGPU_MMHUB0(0))
854 inst = 0;
855 else
856 inst = vmhub;
857
858 /* This is necessary for SRIOV as well as for GFXOFF to function
859 * properly under bare metal
860 */
861 if (adev->gfx.kiq[inst].ring.sched.ready &&
862 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
863 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
864 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
865
866 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
867 1 << vmid, inst);
868 return;
869 }
870
871 /* This path is needed before KIQ/MES/GFXOFF are set up */
872 spin_lock(&adev->gmc.invalidate_lock);
873
874 /*
875 * The GPUVM invalidate acknowledge state may be lost across a
876 * power-gating off cycle. Acquire the semaphore before the invalidation
877 * and release it afterwards to avoid entering a power-gated state, as a
878 * workaround for the issue.
879 */
880
881 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
882 if (use_semaphore) {
883 for (j = 0; j < adev->usec_timeout; j++) {
884 /* a read return value of 1 means semaphore acquire */
885 if (vmhub >= AMDGPU_MMHUB0(0))
886 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst));
887 else
888 tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst));
889 if (tmp & 0x1)
890 break;
891 udelay(1);
892 }
893
894 if (j >= adev->usec_timeout)
895 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
896 }
897
898 if (vmhub >= AMDGPU_MMHUB0(0))
899 WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst));
900 else
901 WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst));
902
903 /*
904 * Issue a dummy read to wait for the ACK register to
905 * be cleared to avoid a false ACK due to the new fast
906 * GRBM interface.
907 */
908 if ((vmhub == AMDGPU_GFXHUB(0)) &&
909 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
910 RREG32_NO_KIQ(req);
911
912 for (j = 0; j < adev->usec_timeout; j++) {
913 if (vmhub >= AMDGPU_MMHUB0(0))
914 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst));
915 else
916 tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst));
917 if (tmp & (1 << vmid))
918 break;
919 udelay(1);
920 }
921
922 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
923 if (use_semaphore) {
924 /*
925 * add semaphore release after invalidation,
926 * write with 0 means semaphore release
927 */
928 if (vmhub >= AMDGPU_MMHUB0(0))
929 WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst));
930 else
931 WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst));
932 }
933
934 spin_unlock(&adev->gmc.invalidate_lock);
935
936 if (j < adev->usec_timeout)
937 return;
938
939 DRM_ERROR("Timeout waiting for VM flush ACK!\n");
940 }
941
942 /**
943 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
944 *
945 * @adev: amdgpu_device pointer
946 * @pasid: pasid to be flushed
947 * @flush_type: the flush type
948 * @all_hub: flush all hubs
949 * @inst: which instance of KIQ to use for the invalidation
950 *
951 * Flush the TLB for the requested pasid.
952 */
953 static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
954 uint16_t pasid, uint32_t flush_type,
955 bool all_hub, uint32_t inst)
956 {
957 uint16_t queried;
958 int i, vmid;
959
960 for (vmid = 1; vmid < 16; vmid++) {
961 bool valid;
962
963 valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
964 &queried);
965 if (!valid || queried != pasid)
966 continue;
967
968 if (all_hub) {
969 for_each_set_bit(i, adev->vmhubs_mask,
970 AMDGPU_MAX_VMHUBS)
971 gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
972 flush_type);
973 } else {
974 gmc_v9_0_flush_gpu_tlb(adev, vmid,
975 AMDGPU_GFXHUB(0),
976 flush_type);
977 }
978 }
979 }
980
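/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: vm instance to flush
 * @pd_addr: page directory base address for the VMID
 *
 * Emit the register writes and waits that update the page table base
 * address of the VMID and flush its TLB entries as part of command
 * submission.
 */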
981 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
982 unsigned int vmid, uint64_t pd_addr)
983 {
984 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
985 struct amdgpu_device *adev = ring->adev;
986 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
987 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
988 unsigned int eng = ring->vm_inv_eng;
989
990 /*
991 * The GPUVM invalidate acknowledge state may be lost across a
992 * power-gating off cycle. Acquire the semaphore before the invalidation
993 * and release it afterwards to avoid entering a power-gated state, as a
994 * workaround for the issue.
995 */
996
997 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
998 if (use_semaphore)
999 /* a read return value of 1 means semaphore acquire */
1000 amdgpu_ring_emit_reg_wait(ring,
1001 hub->vm_inv_eng0_sem +
1002 hub->eng_distance * eng, 0x1, 0x1);
1003
1004 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
1005 (hub->ctx_addr_distance * vmid),
1006 lower_32_bits(pd_addr));
1007
1008 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
1009 (hub->ctx_addr_distance * vmid),
1010 upper_32_bits(pd_addr));
1011
1012 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
1013 hub->eng_distance * eng,
1014 hub->vm_inv_eng0_ack +
1015 hub->eng_distance * eng,
1016 req, 1 << vmid);
1017
1018 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
1019 if (use_semaphore)
1020 /*
1021 * add semaphore release after invalidation,
1022 * write with 0 means semaphore release
1023 */
1024 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1025 hub->eng_distance * eng, 0);
1026
1027 return pd_addr;
1028 }
1029
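/**
 * gmc_v9_0_emit_pasid_mapping - emit a VMID to PASID mapping update
 *
 * @ring: ring to emit the write on
 * @vmid: vm instance to map
 * @pasid: pasid to associate with the VMID
 *
 * Emit a write to the IH VMID lookup table so interrupt entries can be
 * attributed to the right PASID.
 */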
1030 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
1031 unsigned int pasid)
1032 {
1033 struct amdgpu_device *adev = ring->adev;
1034 uint32_t reg;
1035
1036 /* Do nothing because there's no lut register for mmhub1. */
1037 if (ring->vm_hub == AMDGPU_MMHUB1(0))
1038 return;
1039
1040 if (ring->vm_hub == AMDGPU_GFXHUB(0))
1041 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1042 else
1043 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1044
1045 amdgpu_ring_emit_wreg(ring, reg, pasid);
1046 }
1047
1048 /*
1049 * PTE format on VEGA 10:
1050 * 63:59 reserved
1051 * 58:57 mtype
1052 * 56 F
1053 * 55 L
1054 * 54 P
1055 * 53 SW
1056 * 52 T
1057 * 50:48 reserved
1058 * 47:12 4k physical page base address
1059 * 11:7 fragment
1060 * 6 write
1061 * 5 read
1062 * 4 exe
1063 * 3 Z
1064 * 2 snooped
1065 * 1 system
1066 * 0 valid
1067 *
1068 * PDE format on VEGA 10:
1069 * 63:59 block fragment size
1070 * 58:55 reserved
1071 * 54 P
1072 * 53:48 reserved
1073 * 47:6 physical base address of PD or PTE
1074 * 5:3 reserved
1075 * 2 C
1076 * 1 system
1077 * 0 valid
1078 */
1079
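/**
 * gmc_v9_0_map_mtype - map UAPI memory type flags to hardware PTE bits
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_MTYPE_* flags
 *
 * Translate the generic memory type requested by userspace into the
 * VEGA10 PTE MTYPE field, defaulting to MTYPE_NC.
 */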
1080 static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1081
1082 {
1083 switch (flags) {
1084 case AMDGPU_VM_MTYPE_DEFAULT:
1085 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1086 case AMDGPU_VM_MTYPE_NC:
1087 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1088 case AMDGPU_VM_MTYPE_WC:
1089 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
1090 case AMDGPU_VM_MTYPE_RW:
1091 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
1092 case AMDGPU_VM_MTYPE_CC:
1093 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
1094 case AMDGPU_VM_MTYPE_UC:
1095 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
1096 default:
1097 return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
1098 }
1099 }
1100
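/**
 * gmc_v9_0_get_vm_pde - adjust PDE address and flags
 *
 * @adev: amdgpu_device pointer
 * @level: page directory level
 * @addr: address to adjust
 * @flags: flags to adjust
 *
 * Convert VRAM addresses from MC space and, when translate_further is
 * enabled, set the block fragment size on PDB1 entries and the
 * translate-further bit on PDB0 entries.
 */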
1101 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
1102 uint64_t *addr, uint64_t *flags)
1103 {
1104 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
1105 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
1106 BUG_ON(*addr & 0xFFFF00000000003FULL);
1107
1108 if (!adev->gmc.translate_further)
1109 return;
1110
1111 if (level == AMDGPU_VM_PDB1) {
1112 /* Set the block fragment size */
1113 if (!(*flags & AMDGPU_PDE_PTE))
1114 *flags |= AMDGPU_PDE_BFS(0x9);
1115
1116 } else if (level == AMDGPU_VM_PDB0) {
1117 if (*flags & AMDGPU_PDE_PTE) {
1118 *flags &= ~AMDGPU_PDE_PTE;
1119 if (!(*flags & AMDGPU_PTE_VALID))
1120 *addr |= 1 << PAGE_SHIFT;
1121 } else {
1122 *flags |= AMDGPU_PTE_TF;
1123 }
1124 }
1125 }
1126
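/**
 * gmc_v9_0_get_coherence_flags - pick MTYPE and snoop bits for a mapping
 *
 * @adev: amdgpu_device pointer
 * @bo: buffer object being mapped
 * @mapping: the VA mapping
 * @flags: PTE flags to update
 *
 * Choose the PTE MTYPE and SNOOPED bits based on where the BO is placed,
 * which device owns it and the coherence flags it was created with.
 */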
1127 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
1128 struct amdgpu_bo *bo,
1129 struct amdgpu_bo_va_mapping *mapping,
1130 uint64_t *flags)
1131 {
1132 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1133 bool is_vram = bo->tbo.resource &&
1134 bo->tbo.resource->mem_type == TTM_PL_VRAM;
1135 bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
1136 AMDGPU_GEM_CREATE_EXT_COHERENT);
1137 bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
1138 bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
1139 struct amdgpu_vm *vm = mapping->bo_va->base.vm;
1140 unsigned int mtype_local, mtype;
1141 bool snoop = false;
1142 bool is_local;
1143
1144 dma_resv_assert_held(bo->tbo.base.resv);
1145
1146 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1147 case IP_VERSION(9, 4, 1):
1148 case IP_VERSION(9, 4, 2):
1149 if (is_vram) {
1150 if (bo_adev == adev) {
1151 if (uncached)
1152 mtype = MTYPE_UC;
1153 else if (coherent)
1154 mtype = MTYPE_CC;
1155 else
1156 mtype = MTYPE_RW;
1157 /* FIXME: is this still needed? Or does
1158 * amdgpu_ttm_tt_pde_flags already handle this?
1159 */
1160 if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
1161 IP_VERSION(9, 4, 2) ||
1162 amdgpu_ip_version(adev, GC_HWIP, 0) ==
1163 IP_VERSION(9, 4, 3)) &&
1164 adev->gmc.xgmi.connected_to_cpu)
1165 snoop = true;
1166 } else {
1167 if (uncached || coherent)
1168 mtype = MTYPE_UC;
1169 else
1170 mtype = MTYPE_NC;
1171 if (mapping->bo_va->is_xgmi)
1172 snoop = true;
1173 }
1174 } else {
1175 if (uncached || coherent)
1176 mtype = MTYPE_UC;
1177 else
1178 mtype = MTYPE_NC;
1179 /* FIXME: is this still needed? Or does
1180 * amdgpu_ttm_tt_pde_flags already handle this?
1181 */
1182 snoop = true;
1183 }
1184 break;
1185 case IP_VERSION(9, 4, 3):
1186 case IP_VERSION(9, 4, 4):
1187 /* Only local VRAM BOs or system memory on non-NUMA APUs
1188 * can be assumed to be local in their entirety. Choose
1189 * MTYPE_NC as safe fallback for all system memory BOs on
1190 * NUMA systems. Their MTYPE can be overridden per-page in
1191 * gmc_v9_0_override_vm_pte_flags.
1192 */
1193 mtype_local = MTYPE_RW;
1194 if (amdgpu_mtype_local == 1) {
1195 DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
1196 mtype_local = MTYPE_NC;
1197 } else if (amdgpu_mtype_local == 2) {
1198 DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
1199 mtype_local = MTYPE_CC;
1200 } else {
1201 DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
1202 }
1203 is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
1204 num_possible_nodes() <= 1) ||
1205 (is_vram && adev == bo_adev &&
1206 KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
1207 snoop = true;
1208 if (uncached) {
1209 mtype = MTYPE_UC;
1210 } else if (ext_coherent) {
1211 if (adev->rev_id)
1212 mtype = is_local ? MTYPE_CC : MTYPE_UC;
1213 else
1214 mtype = MTYPE_UC;
1215 } else if (adev->flags & AMD_IS_APU) {
1216 mtype = is_local ? mtype_local : MTYPE_NC;
1217 } else {
1218 /* dGPU */
1219 if (is_local)
1220 mtype = mtype_local;
1221 else if (is_vram)
1222 mtype = MTYPE_NC;
1223 else
1224 mtype = MTYPE_UC;
1225 }
1226
1227 break;
1228 default:
1229 if (uncached || coherent)
1230 mtype = MTYPE_UC;
1231 else
1232 mtype = MTYPE_NC;
1233
1234 /* FIXME: is this still needed? Or does
1235 * amdgpu_ttm_tt_pde_flags already handle this?
1236 */
1237 if (!is_vram)
1238 snoop = true;
1239 }
1240
1241 if (mtype != MTYPE_NC)
1242 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype);
1243
1244 *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1245 }
1246
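/**
 * gmc_v9_0_get_vm_pte - compute PTE flags for a mapping
 *
 * @adev: amdgpu_device pointer
 * @mapping: the VA mapping
 * @flags: PTE flags to update
 *
 * Apply the executable, MTYPE and PRT bits requested by the mapping and
 * then refine the coherence related bits for valid mappings backed by a BO.
 */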
1247 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1248 struct amdgpu_bo_va_mapping *mapping,
1249 uint64_t *flags)
1250 {
1251 struct amdgpu_bo *bo = mapping->bo_va->base.bo;
1252
1253 *flags &= ~AMDGPU_PTE_EXECUTABLE;
1254 *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1255
1256 *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1257 *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1258
1259 if (mapping->flags & AMDGPU_PTE_PRT) {
1260 *flags |= AMDGPU_PTE_PRT;
1261 *flags &= ~AMDGPU_PTE_VALID;
1262 }
1263
1264 if ((*flags & AMDGPU_PTE_VALID) && bo)
1265 gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
1266 }
1267
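/**
 * gmc_v9_0_override_vm_pte_flags - per-page MTYPE override
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM the mapping belongs to
 * @addr: system address of the page
 * @flags: PTE flags to update
 *
 * On GFX 9.4.3/9.4.4 APUs, upgrade the MTYPE of system memory pages that
 * turn out to be on the GPU's local NUMA node.
 */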
1268 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
1269 struct amdgpu_vm *vm,
1270 uint64_t addr, uint64_t *flags)
1271 {
1272 int local_node, nid;
1273
1274 /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
1275 * memory can use more efficient MTYPEs.
1276 */
1277 if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
1278 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
1279 return;
1280
1281 /* Only direct-mapped memory allows us to determine the NUMA node from
1282 * the DMA address.
1283 */
1284 if (!adev->ram_is_direct_mapped) {
1285 dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
1286 return;
1287 }
1288
1289 /* MTYPE_NC is the same default and can be overridden.
1290 * MTYPE_UC will be present if the memory is extended-coherent
1291 * and can also be overridden.
1292 */
1293 if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1294 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) &&
1295 (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
1296 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) {
1297 dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
1298 return;
1299 }
1300
1301 /* FIXME: Only supported on native mode for now. For carve-out, the
1302 * NUMA affinity of the GPU/VM needs to come from the PCI info because
1303 * memory partitions are not associated with different NUMA nodes.
1304 */
1305 if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
1306 local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
1307 } else {
1308 dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
1309 return;
1310 }
1311
1312 /* Only handle real RAM. Mappings of PCIe resources don't have struct
1313 * page or NUMA nodes.
1314 */
1315 if (!page_is_ram(addr >> PAGE_SHIFT)) {
1316 dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
1317 return;
1318 }
1319 nid = pfn_to_nid(addr >> PAGE_SHIFT);
1320 dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
1321 vm->mem_id, local_node, nid);
1322 if (nid == local_node) {
1323 uint64_t old_flags = *flags;
1324 if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
1325 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) {
1326 unsigned int mtype_local = MTYPE_RW;
1327
1328 if (amdgpu_mtype_local == 1)
1329 mtype_local = MTYPE_NC;
1330 else if (amdgpu_mtype_local == 2)
1331 mtype_local = MTYPE_CC;
1332
1333 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
1334 } else if (adev->rev_id) {
1335 /* MTYPE_UC case */
1336 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
1337 }
1338
1339 dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
1340 old_flags, *flags);
1341 }
1342 }
1343
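/**
 * gmc_v9_0_get_vbios_fb_size - get the size of the VBIOS framebuffer
 *
 * @adev: amdgpu_device pointer
 *
 * Read back the VGA state or display viewport to estimate how much VRAM
 * is used by the pre-OS console framebuffer so it can be reserved.
 */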
1344 static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
1345 {
1346 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
1347 unsigned int size;
1348
1349 /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1350
1351 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
1352 size = AMDGPU_VBIOS_VGA_ALLOCATION;
1353 } else {
1354 u32 viewport;
1355
1356 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1357 case IP_VERSION(1, 0, 0):
1358 case IP_VERSION(1, 0, 1):
1359 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
1360 size = (REG_GET_FIELD(viewport,
1361 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1362 REG_GET_FIELD(viewport,
1363 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1364 4);
1365 break;
1366 case IP_VERSION(2, 1, 0):
1367 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1368 size = (REG_GET_FIELD(viewport,
1369 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1370 REG_GET_FIELD(viewport,
1371 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1372 4);
1373 break;
1374 default:
1375 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
1376 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
1377 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
1378 4);
1379 break;
1380 }
1381 }
1382
1383 return size;
1384 }
1385
1386 static enum amdgpu_memory_partition
1387 gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
1388 {
1389 enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
1390
1391 if (adev->nbio.funcs->get_memory_partition_mode)
1392 mode = adev->nbio.funcs->get_memory_partition_mode(adev,
1393 supp_modes);
1394
1395 return mode;
1396 }
1397
1398 static enum amdgpu_memory_partition
1399 gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
1400 {
1401 switch (adev->gmc.num_mem_partitions) {
1402 case 0:
1403 return UNKNOWN_MEMORY_PARTITION_MODE;
1404 case 1:
1405 return AMDGPU_NPS1_PARTITION_MODE;
1406 case 2:
1407 return AMDGPU_NPS2_PARTITION_MODE;
1408 case 4:
1409 return AMDGPU_NPS4_PARTITION_MODE;
1410 default:
1411 return AMDGPU_NPS1_PARTITION_MODE;
1412 }
1413
1414 return AMDGPU_NPS1_PARTITION_MODE;
1415 }
1416
1417 static enum amdgpu_memory_partition
1418 gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
1419 {
1420 if (amdgpu_sriov_vf(adev))
1421 return gmc_v9_0_query_vf_memory_partition(adev);
1422
1423 return gmc_v9_0_get_memory_partition(adev, NULL);
1424 }
1425
1426 static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
1427 {
1428 if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
1429 adev->nbio.funcs->is_nps_switch_requested(adev)) {
1430 adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
1431 return true;
1432 }
1433
1434 return false;
1435 }
1436
1437 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1438 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1439 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
1440 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1441 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
1442 .map_mtype = gmc_v9_0_map_mtype,
1443 .get_vm_pde = gmc_v9_0_get_vm_pde,
1444 .get_vm_pte = gmc_v9_0_get_vm_pte,
1445 .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
1446 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1447 .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
1448 .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
1449 .need_reset_on_init = &gmc_v9_0_need_reset_on_init,
1450 };
1451
1452 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1453 {
1454 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1455 }
1456
1457 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
1458 {
1459 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1460 case IP_VERSION(6, 0, 0):
1461 adev->umc.funcs = &umc_v6_0_funcs;
1462 break;
1463 case IP_VERSION(6, 1, 1):
1464 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1465 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1466 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1467 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
1468 adev->umc.retire_unit = 1;
1469 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1470 adev->umc.ras = &umc_v6_1_ras;
1471 break;
1472 case IP_VERSION(6, 1, 2):
1473 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
1474 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
1475 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
1476 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
1477 adev->umc.retire_unit = 1;
1478 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1479 adev->umc.ras = &umc_v6_1_ras;
1480 break;
1481 case IP_VERSION(6, 7, 0):
1482 adev->umc.max_ras_err_cnt_per_query =
1483 UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1484 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1485 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1486 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1487 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
1488 if (!adev->gmc.xgmi.connected_to_cpu)
1489 adev->umc.ras = &umc_v6_7_ras;
1490 if (1 & adev->smuio.funcs->get_die_id(adev))
1491 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1492 else
1493 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1494 break;
1495 case IP_VERSION(12, 0, 0):
1496 adev->umc.max_ras_err_cnt_per_query =
1497 UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
1498 adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
1499 adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
1500 adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
1501 adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
1502 adev->umc.active_mask = adev->aid_mask;
1503 adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
1504 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1505 adev->umc.ras = &umc_v12_0_ras;
1506 break;
1507 default:
1508 break;
1509 }
1510 }
1511
1512 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
1513 {
1514 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1515 case IP_VERSION(9, 4, 1):
1516 adev->mmhub.funcs = &mmhub_v9_4_funcs;
1517 break;
1518 case IP_VERSION(9, 4, 2):
1519 adev->mmhub.funcs = &mmhub_v1_7_funcs;
1520 break;
1521 case IP_VERSION(1, 8, 0):
1522 adev->mmhub.funcs = &mmhub_v1_8_funcs;
1523 break;
1524 default:
1525 adev->mmhub.funcs = &mmhub_v1_0_funcs;
1526 break;
1527 }
1528 }
1529
1530 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1531 {
1532 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
1533 case IP_VERSION(9, 4, 0):
1534 adev->mmhub.ras = &mmhub_v1_0_ras;
1535 break;
1536 case IP_VERSION(9, 4, 1):
1537 adev->mmhub.ras = &mmhub_v9_4_ras;
1538 break;
1539 case IP_VERSION(9, 4, 2):
1540 adev->mmhub.ras = &mmhub_v1_7_ras;
1541 break;
1542 case IP_VERSION(1, 8, 0):
1543 adev->mmhub.ras = &mmhub_v1_8_ras;
1544 break;
1545 default:
1546 /* mmhub ras is not available */
1547 break;
1548 }
1549 }
1550
1551 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
1552 {
1553 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1554 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
1555 adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
1556 else
1557 adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
1558 }
1559
1560 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
1561 {
1562 adev->hdp.ras = &hdp_v4_0_ras;
1563 }
1564
1565 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
1566 {
1567 struct amdgpu_mca *mca = &adev->mca;
1568
1569 /* is UMC the right IP to check for MCA? Maybe DF? */
1570 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
1571 case IP_VERSION(6, 7, 0):
1572 if (!adev->gmc.xgmi.connected_to_cpu) {
1573 mca->mp0.ras = &mca_v3_0_mp0_ras;
1574 mca->mp1.ras = &mca_v3_0_mp1_ras;
1575 mca->mpio.ras = &mca_v3_0_mpio_ras;
1576 }
1577 break;
1578 default:
1579 break;
1580 }
1581 }
1582
1583 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
1584 {
1585 if (!adev->gmc.xgmi.connected_to_cpu)
1586 adev->gmc.xgmi.ras = &xgmi_ras;
1587 }
1588
1589 static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
1590 {
1591 adev->gmc.supported_nps_modes = 0;
1592
1593 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
1594 return;
1595
1596 /* TODO: Also check that the PSP version supports NPS switch. Otherwise
1597 * keep the supported modes as 0.
1598 */
1599 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1600 case IP_VERSION(9, 4, 3):
1601 case IP_VERSION(9, 4, 4):
1602 adev->gmc.supported_nps_modes =
1603 BIT(AMDGPU_NPS1_PARTITION_MODE) |
1604 BIT(AMDGPU_NPS4_PARTITION_MODE);
1605 break;
1606 default:
1607 break;
1608 }
1609 }
1610
1611 static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
1612 {
1613 struct amdgpu_device *adev = ip_block->adev;
1614
1615 /*
1616 * 9.4.0, 9.4.1, 9.4.3 and 9.4.4 don't have XGMI defined
1617 * in their IP discovery tables
1618 */
1619 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
1620 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
1621 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1622 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
1623 adev->gmc.xgmi.supported = true;
1624
1625 if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
1626 adev->gmc.xgmi.supported = true;
1627 adev->gmc.xgmi.connected_to_cpu =
1628 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
1629 }
1630
1631 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1632 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
1633 enum amdgpu_pkg_type pkg_type =
1634 adev->smuio.funcs->get_pkg_type(adev);
1635 /* On GFXIP 9.4.3 APUs, there is no physical VRAM domain present,
1636 * and the APU can be used in two possible modes:
1637 * - carveout mode
1638 * - native APU mode
1639 * "is_app_apu" can be used to identify an APU running in the
1640 * native mode.
1641 */
1642 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
1643 !pci_resource_len(adev->pdev, 0));
1644 }
1645
1646 gmc_v9_0_set_gmc_funcs(adev);
1647 gmc_v9_0_set_irq_funcs(adev);
1648 gmc_v9_0_set_umc_funcs(adev);
1649 gmc_v9_0_set_mmhub_funcs(adev);
1650 gmc_v9_0_set_mmhub_ras_funcs(adev);
1651 gmc_v9_0_set_gfxhub_funcs(adev);
1652 gmc_v9_0_set_hdp_ras_funcs(adev);
1653 gmc_v9_0_set_mca_ras_funcs(adev);
1654 gmc_v9_0_set_xgmi_ras_funcs(adev);
1655
1656 adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1657 adev->gmc.shared_aperture_end =
1658 adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1659 adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1660 adev->gmc.private_aperture_end =
1661 adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1662 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
1663
1664 return 0;
1665 }
1666
1667 static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
1668 {
1669 struct amdgpu_device *adev = ip_block->adev;
1670 int r;
1671
1672 r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1673 if (r)
1674 return r;
1675
1676 /*
1677 * Work around a performance drop issue seen when the VBIOS enables
1678 * partial writes but disables HBM ECC for vega10.
1679 */
1680 if (!amdgpu_sriov_vf(adev) &&
1681 (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
1682 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1683 if (adev->df.funcs &&
1684 adev->df.funcs->enable_ecc_force_par_wr_rmw)
1685 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
1686 }
1687 }
1688
1689 if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1690 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
1691 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
1692 }
1693
1694 r = amdgpu_gmc_ras_late_init(adev);
1695 if (r)
1696 return r;
1697
1698 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1699 }
1700
1701 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1702 struct amdgpu_gmc *mc)
1703 {
1704 u64 base = adev->mmhub.funcs->get_fb_location(adev);
1705
1706 amdgpu_gmc_set_agp_default(adev, mc);
1707
1708 /* add the xgmi offset of the physical node */
1709 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1710 if (adev->gmc.xgmi.connected_to_cpu) {
1711 amdgpu_gmc_sysvm_location(adev, mc);
1712 } else {
1713 amdgpu_gmc_vram_location(adev, mc, base);
1714 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
1715 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
1716 amdgpu_gmc_agp_location(adev, mc);
1717 }
1718 /* base offset of vram pages */
1719 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
1720
1721 /* XXX: add the xgmi offset of the physical node? */
1722 adev->vm_manager.vram_base_offset +=
1723 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1724 }
1725
1726 /**
1727 * gmc_v9_0_mc_init - initialize the memory controller driver params
1728 *
1729 * @adev: amdgpu_device pointer
1730 *
1731 * Look up the amount of vram, vram width, and decide how to place
1732 * vram and gart within the GPU's physical address space.
1733 * Returns 0 for success.
1734 */
1735 static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1736 {
1737 int r;
1738
1739 /* get_memsize() reports the VRAM size in MB; convert it to bytes */
1740 if (!adev->gmc.is_app_apu) {
1741 adev->gmc.mc_vram_size =
1742 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1743 } else {
1744 DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
1745 adev->gmc.mc_vram_size = 0;
1746 }
1747 adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1748
1749 if (!(adev->flags & AMD_IS_APU) &&
1750 !adev->gmc.xgmi.connected_to_cpu) {
1751 r = amdgpu_device_resize_fb_bar(adev);
1752 if (r)
1753 return r;
1754 }
1755 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1756 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1757
1758 #ifdef CONFIG_X86_64
1759 /*
1760 * An AMD Accelerated Processing Platform (APP) supporting a GPU-HOST
1761 * xgmi interface can use VRAM through here, as it appears as
1762 * system-reserved memory in the host address space.
1763 *
1764 * For APUs, VRAM is just the stolen system memory and can be accessed
1765 * directly.
1766 *
1767 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
1768 */
1769
1770 /* check whether both host-gpu and gpu-gpu xgmi links exist */
1771 if ((!amdgpu_sriov_vf(adev) &&
1772 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
1773 (adev->gmc.xgmi.supported &&
1774 adev->gmc.xgmi.connected_to_cpu)) {
1775 adev->gmc.aper_base =
1776 adev->gfxhub.funcs->get_mc_fb_offset(adev) +
1777 adev->gmc.xgmi.physical_node_id *
1778 adev->gmc.xgmi.node_segment_size;
1779 adev->gmc.aper_size = adev->gmc.real_vram_size;
1780 }
1781
1782 #endif
1783 adev->gmc.visible_vram_size = adev->gmc.aper_size;
1784
1785 /* set the gart size */
1786 if (amdgpu_gart_size == -1) {
1787 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1788 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
1789 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
1790 case IP_VERSION(9, 4, 0):
1791 case IP_VERSION(9, 4, 1):
1792 case IP_VERSION(9, 4, 2):
1793 case IP_VERSION(9, 4, 3):
1794 case IP_VERSION(9, 4, 4):
1795 default:
1796 adev->gmc.gart_size = 512ULL << 20;
1797 break;
1798 case IP_VERSION(9, 1, 0): /* DCE SG support */
1799 case IP_VERSION(9, 2, 2): /* DCE SG support */
1800 case IP_VERSION(9, 3, 0):
1801 adev->gmc.gart_size = 1024ULL << 20;
1802 break;
1803 }
1804 } else {
1805 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1806 }
1807
1808 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1809
1810 gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1811
1812 return 0;
1813 }
1814
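/**
 * gmc_v9_0_gart_init - initialize the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the VMID0 page table attributes, allocates the GART page table
 * (in system memory for APUs without dedicated VRAM, otherwise in VRAM)
 * and, when connected to the CPU over XGMI, allocates PDB0.
 * Returns 0 for success, error for failure.
 */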
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	if (!adev->gmc.real_vram_size) {
		dev_info(adev->dev, "Put GART in system memory for APU\n");
		r = amdgpu_gart_table_ram_alloc(adev);
		if (r)
			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
	} else {
		r = amdgpu_gart_table_vram_alloc(adev);
		if (r)
			return r;

		if (adev->gmc.xgmi.connected_to_cpu)
			r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}

/**
 * gmc_v9_0_save_registers - save registers
 *
 * @adev: amdgpu_device pointer
 *
 * This saves register values that should be
 * restored upon resume.
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

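/*
 * Check that the number of memory partitions set up by the driver matches
 * the NPS (memory partition) mode detected from hardware.
 */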
static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
{
	enum amdgpu_memory_partition mode;
	u32 supp_modes;
	bool valid;

	mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);

	/* Mode detected by hardware not present in supported modes */
	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
	    !(BIT(mode - 1) & supp_modes))
		return false;

	switch (mode) {
	case UNKNOWN_MEMORY_PARTITION_MODE:
	case AMDGPU_NPS1_PARTITION_MODE:
		valid = (adev->gmc.num_mem_partitions == 1);
		break;
	case AMDGPU_NPS2_PARTITION_MODE:
		valid = (adev->gmc.num_mem_partitions == 2);
		break;
	case AMDGPU_NPS4_PARTITION_MODE:
		valid = (adev->gmc.num_mem_partitions == 3 ||
			 adev->gmc.num_mem_partitions == 4);
		break;
	default:
		valid = false;
	}

	return valid;
}

static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
{
	int i;

	/* Check if node with id 'nid' is present in 'node_ids' array */
	for (i = 0; i < num_ids; ++i)
		if (node_ids[i] == nid)
			return true;

	return false;
}

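/*
 * Build memory partition ranges from ACPI NUMA information, creating one
 * range per NUMA node that backs the device's XCCs. If no NUMA node is
 * reported, a single range covering the reported size is used.
 */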
static void
gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
			      struct amdgpu_mem_partition_info *mem_ranges)
{
	struct amdgpu_numa_info numa_info;
	int node_ids[MAX_MEM_RANGES];
	int num_ranges = 0, ret;
	int num_xcc, xcc_id;
	uint32_t xcc_mask;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	xcc_mask = (1U << num_xcc) - 1;

	for_each_inst(xcc_id, xcc_mask) {
		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
		if (ret)
			continue;

		if (numa_info.nid == NUMA_NO_NODE) {
			mem_ranges[0].size = numa_info.size;
			mem_ranges[0].numa.node = numa_info.nid;
			num_ranges = 1;
			break;
		}

		if (gmc_v9_0_is_node_present(node_ids, num_ranges,
					     numa_info.nid))
			continue;

		node_ids[num_ranges] = numa_info.nid;
		mem_ranges[num_ranges].numa.node = numa_info.nid;
		mem_ranges[num_ranges].size = numa_info.size;
		++num_ranges;
	}

	adev->gmc.num_mem_partitions = num_ranges;
}

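/*
 * Derive memory partition ranges from the NPS mode. Firmware-provided NPS
 * range information is used when available; otherwise VRAM is split evenly
 * across the partitions and the last range is adjusted to cover the
 * remainder of VRAM.
 */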
static void
gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
			    struct amdgpu_mem_partition_info *mem_ranges)
{
	enum amdgpu_memory_partition mode;
	u32 start_addr = 0, size;
	int i, r, l;

	mode = gmc_v9_0_query_memory_partition(adev);

	switch (mode) {
	case UNKNOWN_MEMORY_PARTITION_MODE:
		adev->gmc.num_mem_partitions = 0;
		break;
	case AMDGPU_NPS1_PARTITION_MODE:
		adev->gmc.num_mem_partitions = 1;
		break;
	case AMDGPU_NPS2_PARTITION_MODE:
		adev->gmc.num_mem_partitions = 2;
		break;
	case AMDGPU_NPS4_PARTITION_MODE:
		if (adev->flags & AMD_IS_APU)
			adev->gmc.num_mem_partitions = 3;
		else
			adev->gmc.num_mem_partitions = 4;
		break;
	default:
		adev->gmc.num_mem_partitions = 1;
		break;
	}

	/* Use NPS range info, if populated */
	r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
					 &adev->gmc.num_mem_partitions);
	if (!r) {
		l = 0;
		for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
			if (mem_ranges[i].range.lpfn >
			    mem_ranges[i - 1].range.lpfn)
				l = i;
		}

	} else {
		if (!adev->gmc.num_mem_partitions) {
			dev_err(adev->dev,
				"Not able to detect NPS mode, fall back to NPS1");
			adev->gmc.num_mem_partitions = 1;
		}
		/* Fall back to software-based calculation */
		size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
		size /= adev->gmc.num_mem_partitions;

		for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
			mem_ranges[i].range.fpfn = start_addr;
			mem_ranges[i].size =
				((u64)size << AMDGPU_GPU_PAGE_SHIFT);
			mem_ranges[i].range.lpfn = start_addr + size - 1;
			start_addr += size;
		}

		l = adev->gmc.num_mem_partitions - 1;
	}

	/* Adjust the last one */
	mem_ranges[l].range.lpfn =
		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
	mem_ranges[l].size =
		adev->gmc.real_vram_size -
		((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
}

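/*
 * Allocate and populate the memory partition table, using ACPI NUMA info
 * for APP APUs and the NPS-mode based software ranges otherwise, then
 * sanity-check the result against the hardware configuration.
 */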
static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
{
	bool valid;

	adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
					   sizeof(struct amdgpu_mem_partition_info),
					   GFP_KERNEL);
	if (!adev->gmc.mem_partitions)
		return -ENOMEM;

	/* TODO : Get the range from PSP/Discovery for dGPU */
	if (adev->gmc.is_app_apu)
		gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
	else
		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);

	if (amdgpu_sriov_vf(adev))
		valid = true;
	else
		valid = gmc_v9_0_validate_partition_info(adev);
	if (!valid) {
		/* TODO: handle invalid case */
		dev_WARN(adev->dev,
			 "Mem ranges not matching with hardware config");
	}

	return 0;
}

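/*
 * For GC 9.4.3/9.4.4 the VRAM type and width are set directly here
 * (HBM, 128 * 64 bits) rather than being queried from the VBIOS.
 */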
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
	adev->gmc.vram_width = 128 * 64;
}

static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
	struct amdgpu_device *adev = ip_block->adev;
	unsigned long inst_mask = adev->aid_mask;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
		gmc_v9_4_3_init_vram_info(adev);
	} else if (!adev->bios) {
		if (adev->flags & AMD_IS_APU) {
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
			adev->gmc.vram_width = 64 * 64;
		} else {
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
			adev->gmc.vram_width = 128 * 64;
		}
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
			&vram_width, &vram_type, &vram_vendor);
		if (amdgpu_sriov_vf(adev))
			/* For Vega10 SR-IOV, vram_width can't be read from
			 * ATOM as on Raven, and the DF related registers are
			 * not readable; hardcoding seems to be the only way
			 * to set the correct vram_width.
			 */
			adev->gmc.vram_width = 2048;
		else if (amdgpu_emu_mode != 1)
			adev->gmc.vram_width = vram_width;

		if (!adev->gmc.vram_width) {
			int chansize, numchan;

			/* hbm memory channel size */
			if (adev->flags & AMD_IS_APU)
				chansize = 64;
			else
				chansize = 128;
			if (adev->df.funcs &&
			    adev->df.funcs->get_hbm_channel_number) {
				numchan = adev->df.funcs->get_hbm_channel_number(adev);
				adev->gmc.vram_width = numchan * chansize;
			}
		}

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */

		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 1):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
			   NUM_XCC(adev->gfx.xcc_mask));

		inst_mask <<= AMDGPU_MMHUB0(0);
		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);

		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu &&
	    !adev->gmc.is_app_apu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

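	/*
	 * GC 9.4.2 and newer support a 48-bit DMA address width; older parts
	 * are limited to 44 bits.
	 */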
	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
				IP_VERSION(9, 4, 2) ? 48 : 44;
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
		r = gmc_v9_0_init_mem_ranges(adev);
		if (r)
			return r;
	}

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	gmc_v9_0_init_nps_details(adev);
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
			3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		amdgpu_gmc_sysfs_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		amdgpu_gmc_sysfs_fini(adev);

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	if (!adev->gmc.real_vram_size) {
		dev_info(adev->dev, "Free GART placed in system memory for APU\n");
		amdgpu_gart_table_ram_free(adev);
	} else {
		amdgpu_gart_table_vram_free(adev);
	}
	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
	amdgpu_bo_fini(adev);

	adev->gmc.num_mem_partitions = 0;
	kfree(adev->gmc.mem_partitions);

	return 0;
}

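/*
 * Program the golden register settings for MMHUB and ATHUB according to
 * the MMHUB IP version.
 */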
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_restore_registers - restore registers
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned int)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
			 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

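/*
 * Program golden registers, lock out VGA access, enable MMHUB power gating,
 * initialize and flush HDP, set the default VM fault behavior, flush the
 * TLBs, program the UMC registers and enable the GART.
 */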
static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool value;
	int i, r;

	adev->gmc.flush_pasid_uses_kiq = true;

	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
	 * (type 2), which flushes both. Due to a race condition with
	 * concurrent memory accesses using the same TLB cache line, we still
	 * need a second TLB flush after this.
	 */
	adev->gmc.flush_tlb_needs_extra_type_2 =
		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
		adev->gmc.xgmi.num_physical_nodes;
	/*
	 * TODO: This workaround is badly documented and had a buggy
	 * implementation. We should probably verify what we do here.
	 */
	adev->gmc.flush_tlb_needs_extra_type_0 =
		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
		adev->rev_id == 0;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lock out access through the VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		if (!adev->in_s0ix)
			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
			continue;
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

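/*
 * Disable the GART and, on bare metal, undo the power gating and release
 * the VM fault and ECC interrupts that were enabled during init.
 */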
static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v9_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * Pair the operations done in gmc_v9_0_hw_init to maintain a correct
	 * cached state for GMC. Otherwise, re-gating on S3 resume will fail
	 * due to a wrong cached state.
	 */
	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, false);

	/*
	 * For minimal init, late_init is not called, hence VM fault/RAS irqs
	 * are not enabled.
	 */
	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
		amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

		if (adev->gmc.ecc_irq.funcs &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
			amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	}

	return 0;
}

static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return gmc_v9_0_hw_fini(ip_block);
}

static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	/* If a reset was done for an NPS mode switch, read the memory range
	 * information again.
	 */
	if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
		adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
	}

	r = gmc_v9_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
