/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gfxhub_v1_2.h"
#include "mmhub_v9_4.h"
#include "mmhub_v1_7.h"
#include "mmhub_v1_8.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
#include "umc_v6_7.h"
#include "umc_v12_0.h"
#include "hdp_v4_0.h"
#include "mca_v3_0.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0					0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX				2

#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2			0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX		2

static const char * const gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};

static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};

static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};

static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};

static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};

static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};

static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[32+1][0] = "DBGU_IO0",
	[32+2][0] = "DBGU_IO2",
	[32+4][0] = "MPIO",
	[96+11][0] = "JPEG0",
	[96+12][0] = "VCN0",
	[96+13][0] = "VCNU0",
	[128+11][0] = "JPEG1",
	[128+12][0] = "VCN1",
	[128+13][0] = "VCNU1",
	[160+1][0] = "XDP",
	[160+14][0] = "HDP",
	[256+0][0] = "SDMA0",
	[256+1][0] = "SDMA1",
	[256+2][0] = "SDMA2",
	[256+3][0] = "SDMA3",
	[256+4][0] = "SDMA4",
	[384+0][0] = "OSS",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[32+1][1] = "DBGU_IO0",
	[32+2][1] = "DBGU_IO2",
	[32+4][1] = "MPIO",
	[96+11][1] = "JPEG0",
	[96+12][1] = "VCN0",
	[96+13][1] = "VCNU0",
	[128+11][1] = "JPEG1",
	[128+12][1] = "VCN1",
	[128+13][1] = "VCNU1",
	[160+1][1] = "XDP",
	[160+14][1] = "HDP",
	[256+0][1] = "SDMA0",
	[256+1][1] = "SDMA1",
	[256+2][1] = "SDMA2",
	[256+3][1] = "SDMA3",
	[256+4][1] = "SDMA4",
	[384+0][1] = "OSS",
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

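/*
 * UMC EccCtrl register addresses, one per channel. From the address
 * pattern, each group of four entries appears to step through the four
 * channel instances of one UMC instance (+0x800 per channel), with
 * successive UMC instances at a +0x40000 stride.
 */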
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

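/*
 * The corresponding mask registers, at a +0x20 offset from the control
 * registers above, following the same per-channel and per-instance strides.
 */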
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	/* Devices newer than VEGA10/12 shall have these programming
	 * sequences performed by PSP BL
	 */
	if (adev->asic_type >= CHIP_VEGA20)
		return 0;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp &= ~bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* This works because this interrupt is only
				 * enabled at init/resume and disabled in
				 * fini/suspend, so the overall state doesn't
				 * change over the course of suspend/resume.
				 */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp |= bits;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] &
			      AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
	bool write_fault = !!(entry->src_data[1] &
			      AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
	uint32_t status = 0, cid = 0, rw = 0, fed = 0;
	struct amdgpu_task_info *task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	unsigned int vmhub;
	u64 addr;
	uint32_t cam_index = 0;
	int ret, xcc_id = 0;
	uint32_t node_id;

	node_id = entry->node_id;

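	/*
	 * Reassemble the page-aligned fault address: src_data[0] carries
	 * VA bits 43:12 and the low nibble of src_data[1] carries bits
	 * 47:44, giving a 48-bit virtual address.
	 */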
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		vmhub = AMDGPU_MMHUB0(node_id / 4);
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		vmhub = AMDGPU_MMHUB1(0);
	} else {
		hub_name = "gfxhub0";
		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
									 node_id);
			if (xcc_id < 0)
				xcc_id = 0;
		}
		vmhub = xcc_id;
	}
	hub = &adev->vmhub[vmhub];

	if (retry_fault) {
		cam_index = entry->src_data[2] & 0x3ff;

		ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, cam_index, node_id,
						    write_fault);
		/* Returning 1 here also prevents sending the IV to the KFD */
		if (ret == 1)
			return 1;
	}

	if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
		return 1;

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
		retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);

	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		amdgpu_vm_print_task_info(adev, task_info);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

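	/*
	 * On multi-AID parts the IH cookie's node_id identifies the die:
	 * node_id / 4 selects the AID and node_id % 4 the unit within it
	 * (1 and 2 map to XCD0/XCD1, 3 is apparently reserved).
	 */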
	if (amdgpu_is_multi_aid(adev))
		dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");

	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);

	/* For a FED error, the KFD will handle it; return directly */
	if (fed && amdgpu_ras_is_poison_mode_supported(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
		return 0;

	/* Only print L2 fault status if the status register could be read and
	 * contains useful information
	 */
	if (!status)
		return 0;

	if (!amdgpu_sriov_vf(adev))
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		mmhub_cid = amdgpu_mmhub_client_name(&adev->mmhub, cid, rw);
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
			      VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};


static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu &&
	    !adev->gmc.is_app_apu) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
	}
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
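	/*
	 * flush_type selects how thorough the invalidation is (type 0
	 * appears to be the legacy flush). The bits below request
	 * invalidation of the L2 PTE and PDE0/1/2 caches as well as the
	 * L1 PTEs, i.e. a full walker-cache flush for the given VMID.
	 */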
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v9_0_use_invalidate_semaphore - determine whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					      uint32_t vmhub)
{
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
	    amdgpu_is_multi_aid(adev))
		return false;

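	/*
	 * Otherwise use the semaphore only for the MMHUBs, only on bare
	 * metal, and not on Picasso parts unless they also carry the
	 * Raven2 rework, which is presumably where the workaround applies.
	 */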
	return ((vmhub == AMDGPU_MMHUB0(0) ||
		 vmhub == AMDGPU_MMHUB1(0)) &&
		(!amdgpu_sriov_vf(adev)) &&
		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
}

static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						     uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 maps the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	u32 j, inv_req, tmp, sem, req, ack, inst;
	const unsigned int eng = 17;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

	hub = &adev->vmhub[vmhub];
	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	if (vmhub >= AMDGPU_MMHUB0(0))
		inst = 0;
	else
		inst = vmhub;

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal
	 */
	if (adev->gfx.kiq[inst].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, inst);
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state in between, as a workaround for the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst));
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst));
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	if (vmhub >= AMDGPU_MMHUB0(0))
		WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst));
	else
		WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst));

	/*
	 * Issue a dummy read to wait for the ACK register to
	 * be cleared to avoid a false ACK due to the new fast
	 * GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB(0)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
		RREG32_NO_KIQ(req);

	for (j = 0; j < adev->usec_timeout; j++) {
		if (vmhub >= AMDGPU_MMHUB0(0))
			tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst));
		else
			tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst));
		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst));
		else
			WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst));
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int i, vmid;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								 &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v9_0_flush_gpu_tlb(adev, vmid, i,
						       flush_type);
		} else {
			gmc_v9_0_flush_gpu_tlb(adev, vmid,
					       AMDGPU_GFXHUB(0),
					       flush_type);
		}
	}
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the gpuvm invalidate acknowledge state across
	 * power-gating off cycles, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power-gated state in between, as a workaround for the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->vm_hub == AMDGPU_MMHUB1(0))
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
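
/*
 * As an illustrative example derived from the layout above: a valid,
 * readable, writable, executable and snooped PTE for a system page at
 * physical address 0x12345000 would be encoded as
 *	0x12345000 | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | 1
 *	= 0x12345077
 * (write, read, exe, snooped, system, valid).
 */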

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE) {
			*flags &= ~AMDGPU_PDE_PTE;
			if (!(*flags & AMDGPU_PTE_VALID))
				*addr |= 1 << PAGE_SHIFT;
		} else {
			*flags |= AMDGPU_PTE_TF;
		}
	}
}

static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 struct amdgpu_bo *bo,
					 uint32_t vm_flags,
					 uint64_t *flags)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource &&
		bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
				     AMDGPU_GEM_CREATE_EXT_COHERENT);
	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	unsigned int mtype_local, mtype;
	uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
	bool snoop = false;
	bool is_local;

	dma_resv_assert_held(bo->tbo.base.resv);

	switch (gc_ip_version) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		if (is_vram) {
			if (bo_adev == adev) {
				if (uncached)
					mtype = MTYPE_UC;
				else if (coherent)
					mtype = MTYPE_CC;
				else
					mtype = MTYPE_RW;
				/* FIXME: is this still needed? Or does
				 * amdgpu_ttm_tt_pde_flags already handle this?
				 */
				if (gc_ip_version == IP_VERSION(9, 4, 2) &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				if (uncached || coherent)
					mtype = MTYPE_UC;
				else
					mtype = MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			if (uncached || coherent)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
			/* FIXME: is this still needed? Or does
			 * amdgpu_ttm_tt_pde_flags already handle this?
			 */
			snoop = true;
		}
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		/* Only local VRAM BOs or system memory on non-NUMA APUs
		 * can be assumed to be local in their entirety. Choose
		 * MTYPE_NC as safe fallback for all system memory BOs on
		 * NUMA systems. Their MTYPE can be overridden per-page in
		 * gmc_v9_0_override_vm_pte_flags.
		 */
		mtype_local = MTYPE_RW;
		if (amdgpu_mtype_local == 1) {
			drm_info_once(adev_to_drm(adev), "Using MTYPE_NC for local memory\n");
			mtype_local = MTYPE_NC;
		} else if (amdgpu_mtype_local == 2) {
			drm_info_once(adev_to_drm(adev), "Using MTYPE_CC for local memory\n");
			mtype_local = MTYPE_CC;
		} else {
			drm_info_once(adev_to_drm(adev), "Using MTYPE_RW for local memory\n");
		}
		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
			    num_possible_nodes() <= 1) ||
			   (is_vram && adev == bo_adev &&
			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
		snoop = true;
		if (uncached) {
			mtype = MTYPE_UC;
		} else if (ext_coherent) {
			mtype = is_local ? MTYPE_CC : MTYPE_UC;
		} else if (adev->flags & AMD_IS_APU) {
			mtype = is_local ? mtype_local : MTYPE_NC;
		} else {
			/* dGPU */
			if (is_local)
				mtype = mtype_local;
			else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
		}

		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;

		/* FIXME: is this still needed? Or does
		 * amdgpu_ttm_tt_pde_flags already handle this?
		 */
		if (!is_vram)
			snoop = true;
	}

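	/* MTYPE_NC is apparently the default (zero) encoding, so only
	 * other memory types need to be written into the PTE flags.
	 */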
	if (mtype != MTYPE_NC)
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype);

	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}

static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct amdgpu_bo *bo,
				uint32_t vm_flags,
				uint64_t *flags)
{
	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
		*flags |= AMDGPU_PTE_EXECUTABLE;
	else
		*flags &= ~AMDGPU_PTE_EXECUTABLE;

	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
	case AMDGPU_VM_MTYPE_NC:
	default:
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_RW:
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
		break;
	case AMDGPU_VM_MTYPE_CC:
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
		break;
	}

	if (vm_flags & AMDGPU_VM_PAGE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if ((*flags & AMDGPU_PTE_VALID) && bo)
		gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}

static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm,
					   uint64_t addr, uint64_t *flags)
{
	int local_node, nid;

	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
	 * memory can use more efficient MTYPEs.
	 */
	if (!(adev->flags & AMD_IS_APU) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3))
		return;

	/* Only direct-mapped memory allows us to determine the NUMA node from
	 * the DMA address.
	 */
	if (!adev->ram_is_direct_mapped) {
		dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n");
		return;
	}

	/* MTYPE_NC is the same default and can be overridden.
	 * MTYPE_UC will be present if the memory is extended-coherent
	 * and can also be overridden.
	 */
	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) &&
	    (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) {
		dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
		return;
	}

	/* FIXME: Only supported on native mode for now. For carve-out, the
	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
	 * memory partitions are not associated with different NUMA nodes.
	 */
	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
	} else {
		dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n");
		return;
	}

	/* Only handle real RAM. Mappings of PCIe resources don't have struct
	 * page or NUMA nodes.
	 */
	if (!page_is_ram(addr >> PAGE_SHIFT)) {
		dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n");
		return;
	}
	nid = pfn_to_nid(addr >> PAGE_SHIFT);
	dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
			    vm->mem_id, local_node, nid);
	if (nid == local_node) {
		uint64_t old_flags = *flags;

		if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
		    AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) {
			unsigned int mtype_local = MTYPE_RW;

			if (amdgpu_mtype_local == 1)
				mtype_local = MTYPE_NC;
			else if (amdgpu_mtype_local == 2)
				mtype_local = MTYPE_CC;

			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local);
		} else {
			/* MTYPE_UC case */
			*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
		}

		dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
				    old_flags, *flags);
	}
}

static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned int size;

	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;

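		/*
		 * Estimate the pre-OS framebuffer as the primary viewport
		 * area times 4 bytes per pixel, i.e. assuming a 32bpp
		 * scanout surface.
		 */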
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case IP_VERSION(2, 1, 0):
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}

	return size;
}

static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
	if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
	    adev->nbio.funcs->is_nps_switch_requested(adev)) {
		adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
		return true;
	}

	return false;
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pde = gmc_v9_0_get_vm_pde,
	.get_vm_pte = gmc_v9_0_get_vm_pte,
	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
	.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 5, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
			adev->umc.ras = &umc_v12_0_ras;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_init_mmhub_client_info(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_vega10,
					      ARRAY_SIZE(mmhub_client_ids_vega10));
		break;
	case IP_VERSION(9, 3, 0):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_vega12,
					      ARRAY_SIZE(mmhub_client_ids_vega12));
		break;
	case IP_VERSION(9, 4, 0):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_vega20,
					      ARRAY_SIZE(mmhub_client_ids_vega20));
		break;
	case IP_VERSION(9, 4, 1):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_arcturus,
					      ARRAY_SIZE(mmhub_client_ids_arcturus));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_raven,
					      ARRAY_SIZE(mmhub_client_ids_raven));
		break;
	case IP_VERSION(1, 5, 0):
	case IP_VERSION(2, 4, 0):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_renoir,
					      ARRAY_SIZE(mmhub_client_ids_renoir));
		break;
	case IP_VERSION(1, 8, 0):
	case IP_VERSION(9, 4, 2):
		amdgpu_mmhub_init_client_info(&adev->mmhub,
					      mmhub_client_ids_aldebaran,
					      ARRAY_SIZE(mmhub_client_ids_aldebaran));
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	case IP_VERSION(1, 8, 0):
	case IP_VERSION(1, 8, 1):
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}

	gmc_v9_0_init_mmhub_client_info(adev);
}

static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	case IP_VERSION(1, 8, 0):
	case IP_VERSION(1, 8, 1):
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}

static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	if (amdgpu_is_multi_aid(adev))
		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
	else
		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
}

static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
}

static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	/* is UMC the right IP to check for MCA? Maybe DF? */
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		if (!adev->gmc.xgmi.connected_to_cpu) {
			mca->mp0.ras = &mca_v3_0_mp0_ras;
			mca->mp1.ras = &mca_v3_0_mp1_ras;
			mca->mpio.ras = &mca_v3_0_mpio_ras;
		}
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras = &xgmi_ras;
}

static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
{
	enum amdgpu_memory_partition mode;
	uint32_t supp_modes;
	int i;

	adev->gmc.supported_nps_modes = 0;

	if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
		return;

	mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);

	/* Mode detected by hardware and supported modes available */
	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
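		/*
		 * Walk supp_modes one bit at a time: ffs() returns the
		 * 1-based index of the lowest set bit, and the
		 * "supp_modes &= supp_modes - 1" below clears that bit.
		 */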
		while ((i = ffs(supp_modes))) {
			if (AMDGPU_ALL_NPS_MASK & BIT(i))
				adev->gmc.supported_nps_modes |= BIT(i);
			supp_modes &= supp_modes - 1;
		}
	} else {
		/* TODO: Also check for a PSP version that supports NPS
		 * switch; otherwise keep supported modes as 0.
		 */
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
			adev->gmc.supported_nps_modes =
				BIT(AMDGPU_NPS1_PARTITION_MODE) |
				BIT(AMDGPU_NPS4_PARTITION_MODE);
			break;
		default:
			break;
		}
	}
}

static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
	 * in their IP discovery tables
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
	    amdgpu_is_multi_aid(adev))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
		enum amdgpu_pkg_type pkg_type =
			adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3 APUs there is no physical VRAM domain
		 * present, and the APU can be used in two possible modes:
		 * - carveout mode
		 * - native APU mode
		 * "is_app_apu" can be used to identify the APU in the native
		 * mode.
		 */
		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
					!pci_resource_len(adev->pdev, 0));
	}

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_ras_funcs(adev);
	gmc_v9_0_set_xgmi_ras_funcs(adev);

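	/*
	 * Carve out the 4 GiB (4ULL << 30) HSA shared and private
	 * apertures at their conventional GFX9 base addresses.
	 */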
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Work around a performance drop issue on Vega10 when the VBIOS
	 * enables partial writes while disabling HBM ECC.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (amdgpu_gmc_is_pdb0_enabled(adev)) {
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
		if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
			amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB */
	if (!adev->gmc.is_app_apu) {
		adev->gmc.mc_vram_size =
			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	} else {
		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
		adev->gmc.mc_vram_size = 0;
	}
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here as it appears system reserved
	 * memory in host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((!amdgpu_sriov_vf(adev) &&
	     (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
		case IP_VERSION(9, 5, 0):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):  /* DCE SG support */
		case IP_VERSION(9, 2, 2):  /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	if (amdgpu_gmc_is_pdb0_enabled(adev)) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}
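
	/*
	 * A depth of 1 gives VMID0 (the GART/system context) a page
	 * directory (PDB0) above its page tables, while a depth of 0 keeps
	 * a single flat page table; the block size is a log2 value consumed
	 * by the common GMC setup code (the exact per-entry coverage is
	 * determined there).
	 */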

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
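
	/*
	 * Each GART entry is a 64-bit PTE, hence table_size is
	 * num_gpu_pages * 8 bytes. The default PTE flags map GART pages as
	 * uncached (MTYPE_UC) and executable.
	 */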

	if (!adev->gmc.real_vram_size) {
		dev_info(adev->dev, "Put GART in system memory for APU\n");
		r = amdgpu_gart_table_ram_alloc(adev);
		if (r)
			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
	} else {
		r = amdgpu_gart_table_vram_alloc(adev);
		if (r)
			return r;

		if (amdgpu_gmc_is_pdb0_enabled(adev))
			r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}

/**
 * gmc_v9_0_save_registers - save registers
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume.
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
	    (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1)))
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}

static void gmc_v9_0_init_vram_info(struct amdgpu_device *adev)
{
	static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
	int dev_var = adev->pdev->device & 0xF;
	u32 vram_info;

	if (adev->gmc.is_app_apu) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		adev->gmc.vram_width = 128 * 64;
	} else if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64 * 64;
	} else if (amdgpu_is_multi_aid(adev)) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		adev->gmc.vram_width = 128 * 64;

		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;

		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
		    adev->rev_id == 0x3)
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;

		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
		    (dev_var == 0x5))
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;

		if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
			vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
			adev->gmc.vram_vendor = vram_info & 0xF;
		}
	}
}
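
/*
 * A note on the widths above: 128 * 64 models 64 HBM channels of 128 bits
 * each and 64 * 64 models 64-bit DDR4 channels; these are fallback values
 * used when no VBIOS is available, and actual channel counts vary per SKU.
 * With a VBIOS present, gmc_v9_0_sw_init() reads the width from ATOM
 * instead.
 */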

static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
	struct amdgpu_device *adev = ip_block->adev;
	unsigned long inst_mask = adev->aid_mask;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if (!adev->bios) {
		gmc_v9_0_init_vram_info(adev);
	} else {
		r = amdgpu_gmc_get_vram_info(adev,
					     &vram_width, &vram_type, &vram_vendor);
		if (amdgpu_sriov_vf(adev))
			/* For Vega10 SR-IOV, vram_width can't be read from
			 * ATOM as it can be on RAVEN, and the DF-related
			 * registers are not readable; hardcoding seems to be
			 * the only way to set the correct vram_width.
			 */
			adev->gmc.vram_width = 2048;
		else if (amdgpu_emu_mode != 1)
			adev->gmc.vram_width = vram_width;

		if (!adev->gmc.vram_width) {
			int chansize, numchan;

			/* hbm memory channel size */
			if (adev->flags & AMD_IS_APU)
				chansize = 64;
			else
				chansize = 128;
			if (adev->df.funcs &&
			    adev->df.funcs->get_hbm_channel_number) {
				numchan = adev->df.funcs->get_hbm_channel_number(adev);
				adev->gmc.vram_width = numchan * chansize;
			}
		}

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */

		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 1):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);

		/* Keep the vm size the same as Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
	case IP_VERSION(9, 5, 0):
		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
			   NUM_XCC(adev->gfx.xcc_mask));

		inst_mask <<= AMDGPU_MMHUB0(0);
		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);

		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	default:
		break;
	}
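
	/*
	 * Reading amdgpu_vm_adjust_size(adev, size, fragment, levels, bits):
	 * the size argument is in GiB, so 256 * 1024 is 256 TiB (the full
	 * 48-bit space) and 128 * 1024 + 512 is 128 TiB + 512 GiB; a
	 * fragment size of 9 corresponds to 2^9 4 KiB pages (2 MiB); the
	 * last two arguments bound the page-table depth and the virtual
	 * address width.
	 */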

	/* This interrupt is the VMC page fault interrupt. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
				      &adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu &&
	    !adev->gmc.is_app_apu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >=
			IP_VERSION(9, 4, 2) ? 48 : 44;
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
	if (r) {
		drm_warn(adev_to_drm(adev), "No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
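
	/*
	 * GC 9.4.2 and newer can generate 48-bit DMA addresses; older parts
	 * are limited to 44 bits (16 TiB). DMA_BIT_MASK(44) expands to
	 * (1ULL << 44) - 1, and swiotlb bounce buffering is enabled when
	 * system memory can lie beyond what that mask reaches.
	 */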

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	if (amdgpu_is_multi_aid(adev)) {
		r = amdgpu_gmc_init_mem_ranges(adev);
		if (r)
			return r;
	}

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	gmc_v9_0_init_nps_details(adev);
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
		 amdgpu_is_multi_aid(adev)) ? 3 : 8;
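
	/*
	 * Example: with first_kfd_vmid = 8, graphics/compute uses VMIDs 1-7
	 * and KFD uses 8-15; with first_kfd_vmid = 3 on compute-only parts,
	 * two of the upper VMIDs remain for video processing, per the
	 * comment above (VMID 0 is always reserved for the system).
	 */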

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	if (amdgpu_is_multi_aid(adev))
		amdgpu_gmc_sysfs_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_is_multi_aid(adev))
		amdgpu_gmc_sysfs_fini(adev);

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	if (!adev->gmc.real_vram_size) {
		dev_info(adev->dev, "Free GART placed in system memory for APU\n");
		amdgpu_gart_table_ram_free(adev);
	} else {
		amdgpu_gart_table_vram_free(adev);
	}
	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
	amdgpu_bo_fini(adev);

	adev->gmc.num_mem_partitions = 0;
	kfree(adev->gmc.mem_partitions);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
2111
2112 /**
2113 * gmc_v9_0_restore_registers - restores regs
2114 *
2115 * @adev: amdgpu_device pointer
2116 *
2117 * This restores register values, saved at suspend.
2118 */
gmc_v9_0_restore_registers(struct amdgpu_device * adev)2119 void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2120 {
2121 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) ||
2122 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) {
2123 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
2124 WARN_ON(adev->gmc.sdpif_register !=
2125 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
2126 }
2127 }
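
/*
 * The WARN_ON read-back above catches a write that did not stick (e.g. if
 * the register is inaccessible at restore time). The saved value comes
 * from gmc_v9_0_save_registers(), called during sw_init.
 */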

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_gmc_is_pdb0_enabled(adev))
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled.\n",
		 (unsigned int)(adev->gmc.gart_size >> 20));
	if (adev->gmc.pdb0_bo)
		drm_info(adev_to_drm(adev), "PDB0 located at 0x%016llX\n",
			 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	drm_info(adev_to_drm(adev), "PTB located at 0x%016llX\n",
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool value;
	int i, r;

	adev->gmc.flush_pasid_uses_kiq = true;

	/* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush
	 * (type 2), which flushes both. Due to a race condition with
	 * concurrent memory accesses using the same TLB cache line, we still
	 * need a second TLB flush after this.
	 */
	adev->gmc.flush_tlb_needs_extra_type_2 =
		amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) &&
		adev->gmc.xgmi.num_physical_nodes;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP. */
	amdgpu_device_flush_hdp(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		if (!adev->in_s0ix)
			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
			continue;
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
	}
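
	/*
	 * As with gart_enable() and set_fault_enable_default() above, the
	 * GFX hub is skipped in S0ix: the GFX block is expected to retain
	 * its state across S0ix and is left untouched by the driver.
	 */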

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v9_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * Pair the operations done in gmc_v9_0_hw_init so that GMC keeps a
	 * correct cached state. Otherwise, re-gating on S3 resume will fail
	 * due to the wrong cached state.
	 */
	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, false);

	/*
	 * For minimal init, late_init is not called, hence VM fault/RAS irqs
	 * are not enabled.
	 */
	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
		amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

		if (adev->gmc.ecc_irq.funcs &&
		    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
			amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	}

	return 0;
}

static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return gmc_v9_0_hw_fini(ip_block);
}

static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	/* If a reset is done for NPS mode switch, read the memory range
	 * information again.
	 */
	if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
		amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
		adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
	}

	r = gmc_v9_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};