1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <linux/pci.h> 26 27 #include <drm/drm_cache.h> 28 29 #include "amdgpu.h" 30 #include "gmc_v9_0.h" 31 #include "amdgpu_atomfirmware.h" 32 #include "amdgpu_gem.h" 33 34 #include "gc/gc_9_0_sh_mask.h" 35 #include "dce/dce_12_0_offset.h" 36 #include "dce/dce_12_0_sh_mask.h" 37 #include "vega10_enum.h" 38 #include "mmhub/mmhub_1_0_offset.h" 39 #include "athub/athub_1_0_sh_mask.h" 40 #include "athub/athub_1_0_offset.h" 41 #include "oss/osssys_4_0_offset.h" 42 43 #include "soc15.h" 44 #include "soc15d.h" 45 #include "soc15_common.h" 46 #include "umc/umc_6_0_sh_mask.h" 47 48 #include "gfxhub_v1_0.h" 49 #include "mmhub_v1_0.h" 50 #include "athub_v1_0.h" 51 #include "gfxhub_v1_1.h" 52 #include "gfxhub_v1_2.h" 53 #include "mmhub_v9_4.h" 54 #include "mmhub_v1_7.h" 55 #include "mmhub_v1_8.h" 56 #include "umc_v6_1.h" 57 #include "umc_v6_0.h" 58 #include "umc_v6_7.h" 59 #include "umc_v12_0.h" 60 #include "hdp_v4_0.h" 61 #include "mca_v3_0.h" 62 63 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" 64 65 #include "amdgpu_ras.h" 66 #include "amdgpu_xgmi.h" 67 68 /* add these here since we already include dce12 headers and these are for DCN */ 69 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d 70 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 71 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0 72 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 73 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL 74 #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L 75 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d 76 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2 77 78 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea 79 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 80 81 static const char * const gfxhub_client_ids[] = { 82 "CB", 83 "DB", 84 "IA", 85 "WD", 86 "CPF", 87 "CPC", 88 "CPG", 89 "RLC", 90 "TCP", 91 "SQC (inst)", 92 "SQC (data)", 93 "SQG", 94 "PA", 95 }; 96 97 static const char *mmhub_client_ids_raven[][2] = { 98 [0][0] = "MP1", 99 [1][0] = "MP0", 100 [2][0] = "VCN", 101 [3][0] = "VCNU", 102 [4][0] = "HDP", 103 [5][0] = "DCE", 104 [13][0] = "UTCL2", 105 [19][0] = "TLS", 106 [26][0] = "OSS", 107 [27][0] = "SDMA0", 108 [0][1] = "MP1", 109 [1][1] = "MP0", 110 [2][1] = "VCN", 111 [3][1] = "VCNU", 112 [4][1] = "HDP", 113 [5][1] = "XDP", 114 [6][1] = "DBGU0", 115 [7][1] = "DCE", 116 [8][1] = "DCEDWB0", 117 [9][1] = "DCEDWB1", 118 [26][1] = "OSS", 119 [27][1] = "SDMA0", 120 }; 121 122 static const char *mmhub_client_ids_renoir[][2] = { 123 [0][0] = "MP1", 124 [1][0] = "MP0", 125 [2][0] = "HDP", 126 [4][0] = "DCEDMC", 127 [5][0] = "DCEVGA", 128 [13][0] = "UTCL2", 129 [19][0] = "TLS", 130 [26][0] = "OSS", 131 [27][0] = "SDMA0", 132 [28][0] = "VCN", 133 [29][0] = "VCNU", 134 [30][0] = "JPEG", 135 [0][1] = "MP1", 136 [1][1] = "MP0", 137 [2][1] = "HDP", 138 [3][1] = "XDP", 139 [6][1] = "DBGU0", 140 [7][1] = "DCEDMC", 141 [8][1] = "DCEVGA", 142 [9][1] = "DCEDWB", 143 [26][1] = "OSS", 144 [27][1] = "SDMA0", 145 [28][1] = "VCN", 146 [29][1] = "VCNU", 147 [30][1] = "JPEG", 148 }; 149 150 static const char *mmhub_client_ids_vega10[][2] = { 151 [0][0] = "MP0", 152 [1][0] = "UVD", 153 [2][0] = "UVDU", 154 [3][0] = "HDP", 155 [13][0] = "UTCL2", 156 [14][0] = "OSS", 157 [15][0] = "SDMA1", 158 [32+0][0] = "VCE0", 159 [32+1][0] = "VCE0U", 160 [32+2][0] = "XDMA", 161 [32+3][0] = "DCE", 162 [32+4][0] = "MP1", 163 [32+14][0] = "SDMA0", 164 [0][1] = "MP0", 165 [1][1] = "UVD", 166 [2][1] = "UVDU", 167 [3][1] = "DBGU0", 168 [4][1] = "HDP", 169 [5][1] = "XDP", 170 [14][1] = "OSS", 171 [15][1] = "SDMA0", 172 [32+0][1] = "VCE0", 173 [32+1][1] = "VCE0U", 174 [32+2][1] = "XDMA", 175 [32+3][1] = "DCE", 176 [32+4][1] = "DCEDWB", 177 [32+5][1] = "MP1", 178 [32+6][1] = "DBGU1", 179 [32+14][1] = "SDMA1", 180 }; 181 182 static const char *mmhub_client_ids_vega12[][2] = { 183 [0][0] = "MP0", 184 [1][0] = "VCE0", 185 [2][0] = "VCE0U", 186 [3][0] = "HDP", 187 [13][0] = "UTCL2", 188 [14][0] = "OSS", 189 [15][0] = "SDMA1", 190 [32+0][0] = "DCE", 191 [32+1][0] = "XDMA", 192 [32+2][0] = "UVD", 193 [32+3][0] = "UVDU", 194 [32+4][0] = "MP1", 195 [32+15][0] = "SDMA0", 196 [0][1] = "MP0", 197 [1][1] = "VCE0", 198 [2][1] = "VCE0U", 199 [3][1] = "DBGU0", 200 [4][1] = "HDP", 201 [5][1] = "XDP", 202 [14][1] = "OSS", 203 [15][1] = "SDMA0", 204 [32+0][1] = "DCE", 205 [32+1][1] = "DCEDWB", 206 [32+2][1] = "XDMA", 207 [32+3][1] = "UVD", 208 [32+4][1] = "UVDU", 209 [32+5][1] = "MP1", 210 [32+6][1] = "DBGU1", 211 [32+15][1] = "SDMA1", 212 }; 213 214 static const char *mmhub_client_ids_vega20[][2] = { 215 [0][0] = "XDMA", 216 [1][0] = "DCE", 217 [2][0] = "VCE0", 218 [3][0] = "VCE0U", 219 [4][0] = "UVD", 220 [5][0] = "UVD1U", 221 [13][0] = "OSS", 222 [14][0] = "HDP", 223 [15][0] = "SDMA0", 224 [32+0][0] = "UVD", 225 [32+1][0] = "UVDU", 226 [32+2][0] = "MP1", 227 [32+3][0] = "MP0", 228 [32+12][0] = "UTCL2", 229 [32+14][0] = "SDMA1", 230 [0][1] = "XDMA", 231 [1][1] = "DCE", 232 [2][1] = "DCEDWB", 233 [3][1] = "VCE0", 234 [4][1] = "VCE0U", 235 [5][1] = "UVD1", 236 [6][1] = "UVD1U", 237 [7][1] = "DBGU0", 238 [8][1] = "XDP", 239 [13][1] = "OSS", 240 [14][1] = "HDP", 241 [15][1] = "SDMA0", 242 [32+0][1] = "UVD", 243 [32+1][1] = "UVDU", 244 [32+2][1] = "DBGU1", 245 [32+3][1] = "MP1", 246 [32+4][1] = "MP0", 247 [32+14][1] = "SDMA1", 248 }; 249 250 static const char *mmhub_client_ids_arcturus[][2] = { 251 [0][0] = "DBGU1", 252 [1][0] = "XDP", 253 [2][0] = "MP1", 254 [14][0] = "HDP", 255 [171][0] = "JPEG", 256 [172][0] = "VCN", 257 [173][0] = "VCNU", 258 [203][0] = "JPEG1", 259 [204][0] = "VCN1", 260 [205][0] = "VCN1U", 261 [256][0] = "SDMA0", 262 [257][0] = "SDMA1", 263 [258][0] = "SDMA2", 264 [259][0] = "SDMA3", 265 [260][0] = "SDMA4", 266 [261][0] = "SDMA5", 267 [262][0] = "SDMA6", 268 [263][0] = "SDMA7", 269 [384][0] = "OSS", 270 [0][1] = "DBGU1", 271 [1][1] = "XDP", 272 [2][1] = "MP1", 273 [14][1] = "HDP", 274 [171][1] = "JPEG", 275 [172][1] = "VCN", 276 [173][1] = "VCNU", 277 [203][1] = "JPEG1", 278 [204][1] = "VCN1", 279 [205][1] = "VCN1U", 280 [256][1] = "SDMA0", 281 [257][1] = "SDMA1", 282 [258][1] = "SDMA2", 283 [259][1] = "SDMA3", 284 [260][1] = "SDMA4", 285 [261][1] = "SDMA5", 286 [262][1] = "SDMA6", 287 [263][1] = "SDMA7", 288 [384][1] = "OSS", 289 }; 290 291 static const char *mmhub_client_ids_aldebaran[][2] = { 292 [2][0] = "MP1", 293 [3][0] = "MP0", 294 [32+1][0] = "DBGU_IO0", 295 [32+2][0] = "DBGU_IO2", 296 [32+4][0] = "MPIO", 297 [96+11][0] = "JPEG0", 298 [96+12][0] = "VCN0", 299 [96+13][0] = "VCNU0", 300 [128+11][0] = "JPEG1", 301 [128+12][0] = "VCN1", 302 [128+13][0] = "VCNU1", 303 [160+1][0] = "XDP", 304 [160+14][0] = "HDP", 305 [256+0][0] = "SDMA0", 306 [256+1][0] = "SDMA1", 307 [256+2][0] = "SDMA2", 308 [256+3][0] = "SDMA3", 309 [256+4][0] = "SDMA4", 310 [384+0][0] = "OSS", 311 [2][1] = "MP1", 312 [3][1] = "MP0", 313 [32+1][1] = "DBGU_IO0", 314 [32+2][1] = "DBGU_IO2", 315 [32+4][1] = "MPIO", 316 [96+11][1] = "JPEG0", 317 [96+12][1] = "VCN0", 318 [96+13][1] = "VCNU0", 319 [128+11][1] = "JPEG1", 320 [128+12][1] = "VCN1", 321 [128+13][1] = "VCNU1", 322 [160+1][1] = "XDP", 323 [160+14][1] = "HDP", 324 [256+0][1] = "SDMA0", 325 [256+1][1] = "SDMA1", 326 [256+2][1] = "SDMA2", 327 [256+3][1] = "SDMA3", 328 [256+4][1] = "SDMA4", 329 [384+0][1] = "OSS", 330 }; 331 332 static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = { 333 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa), 334 SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565) 335 }; 336 337 static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = { 338 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800), 339 SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008) 340 }; 341 342 static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = { 343 (0x000143c0 + 0x00000000), 344 (0x000143c0 + 0x00000800), 345 (0x000143c0 + 0x00001000), 346 (0x000143c0 + 0x00001800), 347 (0x000543c0 + 0x00000000), 348 (0x000543c0 + 0x00000800), 349 (0x000543c0 + 0x00001000), 350 (0x000543c0 + 0x00001800), 351 (0x000943c0 + 0x00000000), 352 (0x000943c0 + 0x00000800), 353 (0x000943c0 + 0x00001000), 354 (0x000943c0 + 0x00001800), 355 (0x000d43c0 + 0x00000000), 356 (0x000d43c0 + 0x00000800), 357 (0x000d43c0 + 0x00001000), 358 (0x000d43c0 + 0x00001800), 359 (0x001143c0 + 0x00000000), 360 (0x001143c0 + 0x00000800), 361 (0x001143c0 + 0x00001000), 362 (0x001143c0 + 0x00001800), 363 (0x001543c0 + 0x00000000), 364 (0x001543c0 + 0x00000800), 365 (0x001543c0 + 0x00001000), 366 (0x001543c0 + 0x00001800), 367 (0x001943c0 + 0x00000000), 368 (0x001943c0 + 0x00000800), 369 (0x001943c0 + 0x00001000), 370 (0x001943c0 + 0x00001800), 371 (0x001d43c0 + 0x00000000), 372 (0x001d43c0 + 0x00000800), 373 (0x001d43c0 + 0x00001000), 374 (0x001d43c0 + 0x00001800), 375 }; 376 377 static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { 378 (0x000143e0 + 0x00000000), 379 (0x000143e0 + 0x00000800), 380 (0x000143e0 + 0x00001000), 381 (0x000143e0 + 0x00001800), 382 (0x000543e0 + 0x00000000), 383 (0x000543e0 + 0x00000800), 384 (0x000543e0 + 0x00001000), 385 (0x000543e0 + 0x00001800), 386 (0x000943e0 + 0x00000000), 387 (0x000943e0 + 0x00000800), 388 (0x000943e0 + 0x00001000), 389 (0x000943e0 + 0x00001800), 390 (0x000d43e0 + 0x00000000), 391 (0x000d43e0 + 0x00000800), 392 (0x000d43e0 + 0x00001000), 393 (0x000d43e0 + 0x00001800), 394 (0x001143e0 + 0x00000000), 395 (0x001143e0 + 0x00000800), 396 (0x001143e0 + 0x00001000), 397 (0x001143e0 + 0x00001800), 398 (0x001543e0 + 0x00000000), 399 (0x001543e0 + 0x00000800), 400 (0x001543e0 + 0x00001000), 401 (0x001543e0 + 0x00001800), 402 (0x001943e0 + 0x00000000), 403 (0x001943e0 + 0x00000800), 404 (0x001943e0 + 0x00001000), 405 (0x001943e0 + 0x00001800), 406 (0x001d43e0 + 0x00000000), 407 (0x001d43e0 + 0x00000800), 408 (0x001d43e0 + 0x00001000), 409 (0x001d43e0 + 0x00001800), 410 }; 411 412 static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, 413 struct amdgpu_irq_src *src, 414 unsigned int type, 415 enum amdgpu_interrupt_state state) 416 { 417 u32 bits, i, tmp, reg; 418 419 /* Devices newer then VEGA10/12 shall have these programming 420 * sequences performed by PSP BL 421 */ 422 if (adev->asic_type >= CHIP_VEGA20) 423 return 0; 424 425 bits = 0x7f; 426 427 switch (state) { 428 case AMDGPU_IRQ_STATE_DISABLE: 429 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) { 430 reg = ecc_umc_mcumc_ctrl_addrs[i]; 431 tmp = RREG32(reg); 432 tmp &= ~bits; 433 WREG32(reg, tmp); 434 } 435 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) { 436 reg = ecc_umc_mcumc_ctrl_mask_addrs[i]; 437 tmp = RREG32(reg); 438 tmp &= ~bits; 439 WREG32(reg, tmp); 440 } 441 break; 442 case AMDGPU_IRQ_STATE_ENABLE: 443 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) { 444 reg = ecc_umc_mcumc_ctrl_addrs[i]; 445 tmp = RREG32(reg); 446 tmp |= bits; 447 WREG32(reg, tmp); 448 } 449 for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) { 450 reg = ecc_umc_mcumc_ctrl_mask_addrs[i]; 451 tmp = RREG32(reg); 452 tmp |= bits; 453 WREG32(reg, tmp); 454 } 455 break; 456 default: 457 break; 458 } 459 460 return 0; 461 } 462 463 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, 464 struct amdgpu_irq_src *src, 465 unsigned int type, 466 enum amdgpu_interrupt_state state) 467 { 468 struct amdgpu_vmhub *hub; 469 u32 tmp, reg, bits, i, j; 470 471 bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 472 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 473 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 474 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 475 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 476 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 477 VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; 478 479 switch (state) { 480 case AMDGPU_IRQ_STATE_DISABLE: 481 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 482 hub = &adev->vmhub[j]; 483 for (i = 0; i < 16; i++) { 484 reg = hub->vm_context0_cntl + i; 485 486 /* This works because this interrupt is only 487 * enabled at init/resume and disabled in 488 * fini/suspend, so the overall state doesn't 489 * change over the course of suspend/resume. 490 */ 491 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) 492 continue; 493 494 if (j >= AMDGPU_MMHUB0(0)) 495 tmp = RREG32_SOC15_IP(MMHUB, reg); 496 else 497 tmp = RREG32_XCC(reg, j); 498 499 tmp &= ~bits; 500 501 if (j >= AMDGPU_MMHUB0(0)) 502 WREG32_SOC15_IP(MMHUB, reg, tmp); 503 else 504 WREG32_XCC(reg, tmp, j); 505 } 506 } 507 break; 508 case AMDGPU_IRQ_STATE_ENABLE: 509 for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 510 hub = &adev->vmhub[j]; 511 for (i = 0; i < 16; i++) { 512 reg = hub->vm_context0_cntl + i; 513 514 /* This works because this interrupt is only 515 * enabled at init/resume and disabled in 516 * fini/suspend, so the overall state doesn't 517 * change over the course of suspend/resume. 518 */ 519 if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) 520 continue; 521 522 if (j >= AMDGPU_MMHUB0(0)) 523 tmp = RREG32_SOC15_IP(MMHUB, reg); 524 else 525 tmp = RREG32_XCC(reg, j); 526 527 tmp |= bits; 528 529 if (j >= AMDGPU_MMHUB0(0)) 530 WREG32_SOC15_IP(MMHUB, reg, tmp); 531 else 532 WREG32_XCC(reg, tmp, j); 533 } 534 } 535 break; 536 default: 537 break; 538 } 539 540 return 0; 541 } 542 543 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, 544 struct amdgpu_irq_src *source, 545 struct amdgpu_iv_entry *entry) 546 { 547 bool retry_fault = !!(entry->src_data[1] & 548 AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY); 549 bool write_fault = !!(entry->src_data[1] & 550 AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE); 551 uint32_t status = 0, cid = 0, rw = 0, fed = 0; 552 struct amdgpu_task_info *task_info; 553 struct amdgpu_vmhub *hub; 554 const char *mmhub_cid; 555 const char *hub_name; 556 unsigned int vmhub; 557 u64 addr; 558 uint32_t cam_index = 0; 559 int ret, xcc_id = 0; 560 uint32_t node_id; 561 562 node_id = entry->node_id; 563 564 addr = (u64)entry->src_data[0] << 12; 565 addr |= ((u64)entry->src_data[1] & 0xf) << 44; 566 567 if (entry->client_id == SOC15_IH_CLIENTID_VMC) { 568 hub_name = "mmhub0"; 569 vmhub = AMDGPU_MMHUB0(node_id / 4); 570 } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { 571 hub_name = "mmhub1"; 572 vmhub = AMDGPU_MMHUB1(0); 573 } else { 574 hub_name = "gfxhub0"; 575 if (adev->gfx.funcs->ih_node_to_logical_xcc) { 576 xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev, 577 node_id); 578 if (xcc_id < 0) 579 xcc_id = 0; 580 } 581 vmhub = xcc_id; 582 } 583 hub = &adev->vmhub[vmhub]; 584 585 if (retry_fault) { 586 cam_index = entry->src_data[2] & 0x3ff; 587 588 ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, cam_index, node_id, 589 write_fault); 590 /* Returning 1 here also prevents sending the IV to the KFD */ 591 if (ret == 1) 592 return 1; 593 } 594 595 if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault)) 596 return 1; 597 598 if (!printk_ratelimit()) 599 return 0; 600 601 dev_err(adev->dev, 602 "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name, 603 retry_fault ? "retry" : "no-retry", 604 entry->src_id, entry->ring_id, entry->vmid, entry->pasid); 605 606 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid); 607 if (task_info) { 608 amdgpu_vm_print_task_info(adev, task_info); 609 amdgpu_vm_put_task_info(task_info); 610 } 611 612 dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n", 613 addr, entry->client_id, 614 soc15_ih_clientid_name[entry->client_id]); 615 616 if (amdgpu_is_multi_aid(adev)) 617 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", 618 node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, 619 node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : ""); 620 621 if (amdgpu_sriov_vf(adev)) 622 return 0; 623 624 /* 625 * Issue a dummy read to wait for the status register to 626 * be updated to avoid reading an incorrect value due to 627 * the new fast GRBM interface. 628 */ 629 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && 630 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) 631 RREG32(hub->vm_l2_pro_fault_status); 632 633 status = RREG32(hub->vm_l2_pro_fault_status); 634 cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID); 635 rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); 636 fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); 637 638 /* for fed error, kfd will handle it, return directly */ 639 if (fed && amdgpu_ras_is_poison_mode_supported(adev) && 640 (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) 641 return 0; 642 643 /* Only print L2 fault status if the status register could be read and 644 * contains useful information 645 */ 646 if (!status) 647 return 0; 648 649 if (!amdgpu_sriov_vf(adev)) 650 WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); 651 652 amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); 653 654 dev_err(adev->dev, 655 "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 656 status); 657 if (entry->vmid_src == AMDGPU_GFXHUB(0)) { 658 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", 659 cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : 660 gfxhub_client_ids[cid], 661 cid); 662 } else { 663 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 664 case IP_VERSION(9, 0, 0): 665 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega10) ? 666 mmhub_client_ids_vega10[cid][rw] : NULL; 667 break; 668 case IP_VERSION(9, 3, 0): 669 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega12) ? 670 mmhub_client_ids_vega12[cid][rw] : NULL; 671 break; 672 case IP_VERSION(9, 4, 0): 673 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega20) ? 674 mmhub_client_ids_vega20[cid][rw] : NULL; 675 break; 676 case IP_VERSION(9, 4, 1): 677 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_arcturus) ? 678 mmhub_client_ids_arcturus[cid][rw] : NULL; 679 break; 680 case IP_VERSION(9, 1, 0): 681 case IP_VERSION(9, 2, 0): 682 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_raven) ? 683 mmhub_client_ids_raven[cid][rw] : NULL; 684 break; 685 case IP_VERSION(1, 5, 0): 686 case IP_VERSION(2, 4, 0): 687 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_renoir) ? 688 mmhub_client_ids_renoir[cid][rw] : NULL; 689 break; 690 case IP_VERSION(1, 8, 0): 691 case IP_VERSION(9, 4, 2): 692 mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_aldebaran) ? 693 mmhub_client_ids_aldebaran[cid][rw] : NULL; 694 break; 695 default: 696 mmhub_cid = NULL; 697 break; 698 } 699 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", 700 mmhub_cid ? mmhub_cid : "unknown", cid); 701 } 702 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", 703 REG_GET_FIELD(status, 704 VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); 705 dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", 706 REG_GET_FIELD(status, 707 VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); 708 dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", 709 REG_GET_FIELD(status, 710 VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); 711 dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", 712 REG_GET_FIELD(status, 713 VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); 714 dev_err(adev->dev, "\t RW: 0x%x\n", rw); 715 return 0; 716 } 717 718 static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = { 719 .set = gmc_v9_0_vm_fault_interrupt_state, 720 .process = gmc_v9_0_process_interrupt, 721 }; 722 723 724 static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = { 725 .set = gmc_v9_0_ecc_interrupt_state, 726 .process = amdgpu_umc_process_ecc_irq, 727 }; 728 729 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) 730 { 731 adev->gmc.vm_fault.num_types = 1; 732 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 733 734 if (!amdgpu_sriov_vf(adev) && 735 !adev->gmc.xgmi.connected_to_cpu && 736 !adev->gmc.is_app_apu) { 737 adev->gmc.ecc_irq.num_types = 1; 738 adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; 739 } 740 } 741 742 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, 743 uint32_t flush_type) 744 { 745 u32 req = 0; 746 747 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 748 PER_VMID_INVALIDATE_REQ, 1 << vmid); 749 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 750 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 751 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 752 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); 753 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); 754 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); 755 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 756 CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); 757 758 return req; 759 } 760 761 /** 762 * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore 763 * 764 * @adev: amdgpu_device pointer 765 * @vmhub: vmhub type 766 * 767 */ 768 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, 769 uint32_t vmhub) 770 { 771 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 772 amdgpu_is_multi_aid(adev)) 773 return false; 774 775 return ((vmhub == AMDGPU_MMHUB0(0) || 776 vmhub == AMDGPU_MMHUB1(0)) && 777 (!amdgpu_sriov_vf(adev)) && 778 (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && 779 (adev->apu_flags & AMD_APU_IS_PICASSO)))); 780 } 781 782 static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, 783 uint8_t vmid, uint16_t *p_pasid) 784 { 785 uint32_t value; 786 787 value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) 788 + vmid); 789 *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; 790 791 return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); 792 } 793 794 /* 795 * GART 796 * VMID 0 is the physical GPU addresses as used by the kernel. 797 * VMIDs 1-15 are used for userspace clients and are handled 798 * by the amdgpu vm/hsa code. 799 */ 800 801 /** 802 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type 803 * 804 * @adev: amdgpu_device pointer 805 * @vmid: vm instance to flush 806 * @vmhub: which hub to flush 807 * @flush_type: the flush type 808 * 809 * Flush the TLB for the requested page table using certain type. 810 */ 811 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, 812 uint32_t vmhub, uint32_t flush_type) 813 { 814 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); 815 u32 j, inv_req, tmp, sem, req, ack, inst; 816 const unsigned int eng = 17; 817 struct amdgpu_vmhub *hub; 818 819 BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS); 820 821 hub = &adev->vmhub[vmhub]; 822 inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); 823 sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng; 824 req = hub->vm_inv_eng0_req + hub->eng_distance * eng; 825 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; 826 827 if (vmhub >= AMDGPU_MMHUB0(0)) 828 inst = 0; 829 else 830 inst = vmhub; 831 832 /* This is necessary for SRIOV as well as for GFXOFF to function 833 * properly under bare metal 834 */ 835 if (adev->gfx.kiq[inst].ring.sched.ready && 836 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { 837 uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng; 838 uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; 839 840 amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req, 841 1 << vmid, inst); 842 return; 843 } 844 845 /* This path is needed before KIQ/MES/GFXOFF are set up */ 846 spin_lock(&adev->gmc.invalidate_lock); 847 848 /* 849 * It may lose gpuvm invalidate acknowldege state across power-gating 850 * off cycle, add semaphore acquire before invalidation and semaphore 851 * release after invalidation to avoid entering power gated state 852 * to WA the Issue 853 */ 854 855 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 856 if (use_semaphore) { 857 for (j = 0; j < adev->usec_timeout; j++) { 858 /* a read return value of 1 means semaphore acquire */ 859 if (vmhub >= AMDGPU_MMHUB0(0)) 860 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, GET_INST(GC, inst)); 861 else 862 tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, GET_INST(GC, inst)); 863 if (tmp & 0x1) 864 break; 865 udelay(1); 866 } 867 868 if (j >= adev->usec_timeout) 869 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); 870 } 871 872 if (vmhub >= AMDGPU_MMHUB0(0)) 873 WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, GET_INST(GC, inst)); 874 else 875 WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, GET_INST(GC, inst)); 876 877 /* 878 * Issue a dummy read to wait for the ACK register to 879 * be cleared to avoid a false ACK due to the new fast 880 * GRBM interface. 881 */ 882 if ((vmhub == AMDGPU_GFXHUB(0)) && 883 (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) 884 RREG32_NO_KIQ(req); 885 886 for (j = 0; j < adev->usec_timeout; j++) { 887 if (vmhub >= AMDGPU_MMHUB0(0)) 888 tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, GET_INST(GC, inst)); 889 else 890 tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, GET_INST(GC, inst)); 891 if (tmp & (1 << vmid)) 892 break; 893 udelay(1); 894 } 895 896 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 897 if (use_semaphore) { 898 /* 899 * add semaphore release after invalidation, 900 * write with 0 means semaphore release 901 */ 902 if (vmhub >= AMDGPU_MMHUB0(0)) 903 WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, GET_INST(GC, inst)); 904 else 905 WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, GET_INST(GC, inst)); 906 } 907 908 spin_unlock(&adev->gmc.invalidate_lock); 909 910 if (j < adev->usec_timeout) 911 return; 912 913 DRM_ERROR("Timeout waiting for VM flush ACK!\n"); 914 } 915 916 /** 917 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid 918 * 919 * @adev: amdgpu_device pointer 920 * @pasid: pasid to be flush 921 * @flush_type: the flush type 922 * @all_hub: flush all hubs 923 * @inst: is used to select which instance of KIQ to use for the invalidation 924 * 925 * Flush the TLB for the requested pasid. 926 */ 927 static void gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, 928 uint16_t pasid, uint32_t flush_type, 929 bool all_hub, uint32_t inst) 930 { 931 uint16_t queried; 932 int i, vmid; 933 934 for (vmid = 1; vmid < 16; vmid++) { 935 bool valid; 936 937 valid = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid, 938 &queried); 939 if (!valid || queried != pasid) 940 continue; 941 942 if (all_hub) { 943 for_each_set_bit(i, adev->vmhubs_mask, 944 AMDGPU_MAX_VMHUBS) 945 gmc_v9_0_flush_gpu_tlb(adev, vmid, i, 946 flush_type); 947 } else { 948 gmc_v9_0_flush_gpu_tlb(adev, vmid, 949 AMDGPU_GFXHUB(0), 950 flush_type); 951 } 952 } 953 } 954 955 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, 956 unsigned int vmid, uint64_t pd_addr) 957 { 958 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub); 959 struct amdgpu_device *adev = ring->adev; 960 struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; 961 uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); 962 unsigned int eng = ring->vm_inv_eng; 963 964 /* 965 * It may lose gpuvm invalidate acknowldege state across power-gating 966 * off cycle, add semaphore acquire before invalidation and semaphore 967 * release after invalidation to avoid entering power gated state 968 * to WA the Issue 969 */ 970 971 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 972 if (use_semaphore) 973 /* a read return value of 1 means semaphore acuqire */ 974 amdgpu_ring_emit_reg_wait(ring, 975 hub->vm_inv_eng0_sem + 976 hub->eng_distance * eng, 0x1, 0x1); 977 978 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 979 (hub->ctx_addr_distance * vmid), 980 lower_32_bits(pd_addr)); 981 982 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 983 (hub->ctx_addr_distance * vmid), 984 upper_32_bits(pd_addr)); 985 986 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + 987 hub->eng_distance * eng, 988 hub->vm_inv_eng0_ack + 989 hub->eng_distance * eng, 990 req, 1 << vmid); 991 992 /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ 993 if (use_semaphore) 994 /* 995 * add semaphore release after invalidation, 996 * write with 0 means semaphore release 997 */ 998 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + 999 hub->eng_distance * eng, 0); 1000 1001 return pd_addr; 1002 } 1003 1004 static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid, 1005 unsigned int pasid) 1006 { 1007 struct amdgpu_device *adev = ring->adev; 1008 uint32_t reg; 1009 1010 /* Do nothing because there's no lut register for mmhub1. */ 1011 if (ring->vm_hub == AMDGPU_MMHUB1(0)) 1012 return; 1013 1014 if (ring->vm_hub == AMDGPU_GFXHUB(0)) 1015 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; 1016 else 1017 reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; 1018 1019 amdgpu_ring_emit_wreg(ring, reg, pasid); 1020 } 1021 1022 /* 1023 * PTE format on VEGA 10: 1024 * 63:59 reserved 1025 * 58:57 mtype 1026 * 56 F 1027 * 55 L 1028 * 54 P 1029 * 53 SW 1030 * 52 T 1031 * 50:48 reserved 1032 * 47:12 4k physical page base address 1033 * 11:7 fragment 1034 * 6 write 1035 * 5 read 1036 * 4 exe 1037 * 3 Z 1038 * 2 snooped 1039 * 1 system 1040 * 0 valid 1041 * 1042 * PDE format on VEGA 10: 1043 * 63:59 block fragment size 1044 * 58:55 reserved 1045 * 54 P 1046 * 53:48 reserved 1047 * 47:6 physical base address of PD or PTE 1048 * 5:3 reserved 1049 * 2 C 1050 * 1 system 1051 * 0 valid 1052 */ 1053 1054 static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, 1055 uint64_t *addr, uint64_t *flags) 1056 { 1057 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 1058 *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); 1059 BUG_ON(*addr & 0xFFFF00000000003FULL); 1060 1061 if (!adev->gmc.translate_further) 1062 return; 1063 1064 if (level == AMDGPU_VM_PDB1) { 1065 /* Set the block fragment size */ 1066 if (!(*flags & AMDGPU_PDE_PTE)) 1067 *flags |= AMDGPU_PDE_BFS(0x9); 1068 1069 } else if (level == AMDGPU_VM_PDB0) { 1070 if (*flags & AMDGPU_PDE_PTE) { 1071 *flags &= ~AMDGPU_PDE_PTE; 1072 if (!(*flags & AMDGPU_PTE_VALID)) 1073 *addr |= 1 << PAGE_SHIFT; 1074 } else { 1075 *flags |= AMDGPU_PTE_TF; 1076 } 1077 } 1078 } 1079 1080 static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, 1081 struct amdgpu_vm *vm, 1082 struct amdgpu_bo *bo, 1083 uint32_t vm_flags, 1084 uint64_t *flags) 1085 { 1086 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); 1087 bool is_vram = bo->tbo.resource && 1088 bo->tbo.resource->mem_type == TTM_PL_VRAM; 1089 bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | 1090 AMDGPU_GEM_CREATE_EXT_COHERENT); 1091 bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT; 1092 bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; 1093 unsigned int mtype_local, mtype; 1094 uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); 1095 bool snoop = false; 1096 bool is_local; 1097 1098 dma_resv_assert_held(bo->tbo.base.resv); 1099 1100 switch (gc_ip_version) { 1101 case IP_VERSION(9, 4, 1): 1102 case IP_VERSION(9, 4, 2): 1103 if (is_vram) { 1104 if (bo_adev == adev) { 1105 if (uncached) 1106 mtype = MTYPE_UC; 1107 else if (coherent) 1108 mtype = MTYPE_CC; 1109 else 1110 mtype = MTYPE_RW; 1111 /* FIXME: is this still needed? Or does 1112 * amdgpu_ttm_tt_pde_flags already handle this? 1113 */ 1114 if (gc_ip_version == IP_VERSION(9, 4, 2) && 1115 adev->gmc.xgmi.connected_to_cpu) 1116 snoop = true; 1117 } else { 1118 if (uncached || coherent) 1119 mtype = MTYPE_UC; 1120 else 1121 mtype = MTYPE_NC; 1122 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 1123 snoop = true; 1124 } 1125 } else { 1126 if (uncached || coherent) 1127 mtype = MTYPE_UC; 1128 else 1129 mtype = MTYPE_NC; 1130 /* FIXME: is this still needed? Or does 1131 * amdgpu_ttm_tt_pde_flags already handle this? 1132 */ 1133 snoop = true; 1134 } 1135 break; 1136 case IP_VERSION(9, 4, 3): 1137 case IP_VERSION(9, 4, 4): 1138 case IP_VERSION(9, 5, 0): 1139 /* Only local VRAM BOs or system memory on non-NUMA APUs 1140 * can be assumed to be local in their entirety. Choose 1141 * MTYPE_NC as safe fallback for all system memory BOs on 1142 * NUMA systems. Their MTYPE can be overridden per-page in 1143 * gmc_v9_0_override_vm_pte_flags. 1144 */ 1145 mtype_local = MTYPE_RW; 1146 if (amdgpu_mtype_local == 1) { 1147 drm_info_once(adev_to_drm(adev), "Using MTYPE_NC for local memory\n"); 1148 mtype_local = MTYPE_NC; 1149 } else if (amdgpu_mtype_local == 2) { 1150 drm_info_once(adev_to_drm(adev), "Using MTYPE_CC for local memory\n"); 1151 mtype_local = MTYPE_CC; 1152 } else { 1153 drm_info_once(adev_to_drm(adev), "Using MTYPE_RW for local memory\n"); 1154 } 1155 is_local = (!is_vram && (adev->flags & AMD_IS_APU) && 1156 num_possible_nodes() <= 1) || 1157 (is_vram && adev == bo_adev && 1158 KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); 1159 snoop = true; 1160 if (uncached) { 1161 mtype = MTYPE_UC; 1162 } else if (ext_coherent) { 1163 mtype = is_local ? MTYPE_CC : MTYPE_UC; 1164 } else if (adev->flags & AMD_IS_APU) { 1165 mtype = is_local ? mtype_local : MTYPE_NC; 1166 } else { 1167 /* dGPU */ 1168 if (is_local) 1169 mtype = mtype_local; 1170 else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram) 1171 mtype = MTYPE_UC; 1172 else 1173 mtype = MTYPE_NC; 1174 } 1175 1176 break; 1177 default: 1178 if (uncached || coherent) 1179 mtype = MTYPE_UC; 1180 else 1181 mtype = MTYPE_NC; 1182 1183 /* FIXME: is this still needed? Or does 1184 * amdgpu_ttm_tt_pde_flags already handle this? 1185 */ 1186 if (!is_vram) 1187 snoop = true; 1188 } 1189 1190 if (mtype != MTYPE_NC) 1191 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype); 1192 1193 *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; 1194 } 1195 1196 static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, 1197 struct amdgpu_vm *vm, 1198 struct amdgpu_bo *bo, 1199 uint32_t vm_flags, 1200 uint64_t *flags) 1201 { 1202 if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE) 1203 *flags |= AMDGPU_PTE_EXECUTABLE; 1204 else 1205 *flags &= ~AMDGPU_PTE_EXECUTABLE; 1206 1207 switch (vm_flags & AMDGPU_VM_MTYPE_MASK) { 1208 case AMDGPU_VM_MTYPE_DEFAULT: 1209 case AMDGPU_VM_MTYPE_NC: 1210 default: 1211 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC); 1212 break; 1213 case AMDGPU_VM_MTYPE_WC: 1214 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC); 1215 break; 1216 case AMDGPU_VM_MTYPE_RW: 1217 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW); 1218 break; 1219 case AMDGPU_VM_MTYPE_CC: 1220 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC); 1221 break; 1222 case AMDGPU_VM_MTYPE_UC: 1223 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC); 1224 break; 1225 } 1226 1227 if (vm_flags & AMDGPU_VM_PAGE_PRT) { 1228 *flags |= AMDGPU_PTE_PRT; 1229 *flags &= ~AMDGPU_PTE_VALID; 1230 } 1231 1232 if ((*flags & AMDGPU_PTE_VALID) && bo) 1233 gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags); 1234 } 1235 1236 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, 1237 struct amdgpu_vm *vm, 1238 uint64_t addr, uint64_t *flags) 1239 { 1240 int local_node, nid; 1241 1242 /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system 1243 * memory can use more efficient MTYPEs. 1244 */ 1245 if (!(adev->flags & AMD_IS_APU) || 1246 amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) 1247 return; 1248 1249 /* Only direct-mapped memory allows us to determine the NUMA node from 1250 * the DMA address. 1251 */ 1252 if (!adev->ram_is_direct_mapped) { 1253 dev_dbg_ratelimited(adev->dev, "RAM is not direct mapped\n"); 1254 return; 1255 } 1256 1257 /* MTYPE_NC is the same default and can be overridden. 1258 * MTYPE_UC will be present if the memory is extended-coherent 1259 * and can also be overridden. 1260 */ 1261 if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != 1262 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC) && 1263 (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != 1264 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC)) { 1265 dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n"); 1266 return; 1267 } 1268 1269 /* FIXME: Only supported on native mode for now. For carve-out, the 1270 * NUMA affinity of the GPU/VM needs to come from the PCI info because 1271 * memory partitions are not associated with different NUMA nodes. 1272 */ 1273 if (adev->gmc.is_app_apu && vm->mem_id >= 0) { 1274 local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node; 1275 } else { 1276 dev_dbg_ratelimited(adev->dev, "Only native mode APU is supported.\n"); 1277 return; 1278 } 1279 1280 /* Only handle real RAM. Mappings of PCIe resources don't have struct 1281 * page or NUMA nodes. 1282 */ 1283 if (!page_is_ram(addr >> PAGE_SHIFT)) { 1284 dev_dbg_ratelimited(adev->dev, "Page is not RAM.\n"); 1285 return; 1286 } 1287 nid = pfn_to_nid(addr >> PAGE_SHIFT); 1288 dev_dbg_ratelimited(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n", 1289 vm->mem_id, local_node, nid); 1290 if (nid == local_node) { 1291 uint64_t old_flags = *flags; 1292 if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) == 1293 AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC)) { 1294 unsigned int mtype_local = MTYPE_RW; 1295 1296 if (amdgpu_mtype_local == 1) 1297 mtype_local = MTYPE_NC; 1298 else if (amdgpu_mtype_local == 2) 1299 mtype_local = MTYPE_CC; 1300 1301 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, mtype_local); 1302 } else { 1303 /* MTYPE_UC case */ 1304 *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC); 1305 } 1306 1307 dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n", 1308 old_flags, *flags); 1309 } 1310 } 1311 1312 static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) 1313 { 1314 u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); 1315 unsigned int size; 1316 1317 /* TODO move to DC so GMC doesn't need to hard-code DCN registers */ 1318 1319 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { 1320 size = AMDGPU_VBIOS_VGA_ALLOCATION; 1321 } else { 1322 u32 viewport; 1323 1324 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1325 case IP_VERSION(1, 0, 0): 1326 case IP_VERSION(1, 0, 1): 1327 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); 1328 size = (REG_GET_FIELD(viewport, 1329 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * 1330 REG_GET_FIELD(viewport, 1331 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 1332 4); 1333 break; 1334 case IP_VERSION(2, 1, 0): 1335 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2); 1336 size = (REG_GET_FIELD(viewport, 1337 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * 1338 REG_GET_FIELD(viewport, 1339 HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 1340 4); 1341 break; 1342 default: 1343 viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); 1344 size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * 1345 REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * 1346 4); 1347 break; 1348 } 1349 } 1350 1351 return size; 1352 } 1353 1354 static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) 1355 { 1356 if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && 1357 adev->nbio.funcs->is_nps_switch_requested(adev)) { 1358 adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS; 1359 return true; 1360 } 1361 1362 return false; 1363 } 1364 1365 static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { 1366 .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, 1367 .flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid, 1368 .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, 1369 .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, 1370 .get_vm_pde = gmc_v9_0_get_vm_pde, 1371 .get_vm_pte = gmc_v9_0_get_vm_pte, 1372 .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, 1373 .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, 1374 .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition, 1375 .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, 1376 .need_reset_on_init = &gmc_v9_0_need_reset_on_init, 1377 }; 1378 1379 static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) 1380 { 1381 adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; 1382 } 1383 1384 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) 1385 { 1386 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 1387 case IP_VERSION(6, 0, 0): 1388 adev->umc.funcs = &umc_v6_0_funcs; 1389 break; 1390 case IP_VERSION(6, 1, 1): 1391 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; 1392 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; 1393 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; 1394 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; 1395 adev->umc.retire_unit = 1; 1396 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; 1397 adev->umc.ras = &umc_v6_1_ras; 1398 break; 1399 case IP_VERSION(6, 1, 2): 1400 adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; 1401 adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; 1402 adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; 1403 adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; 1404 adev->umc.retire_unit = 1; 1405 adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; 1406 adev->umc.ras = &umc_v6_1_ras; 1407 break; 1408 case IP_VERSION(6, 7, 0): 1409 adev->umc.max_ras_err_cnt_per_query = 1410 UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL; 1411 adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; 1412 adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; 1413 adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; 1414 adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2); 1415 if (!adev->gmc.xgmi.connected_to_cpu) 1416 adev->umc.ras = &umc_v6_7_ras; 1417 if (1 & adev->smuio.funcs->get_die_id(adev)) 1418 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; 1419 else 1420 adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0]; 1421 break; 1422 case IP_VERSION(12, 0, 0): 1423 case IP_VERSION(12, 5, 0): 1424 adev->umc.max_ras_err_cnt_per_query = 1425 UMC_V12_0_TOTAL_CHANNEL_NUM(adev) * UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; 1426 adev->umc.channel_inst_num = UMC_V12_0_CHANNEL_INSTANCE_NUM; 1427 adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM; 1428 adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM; 1429 adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; 1430 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) 1431 adev->umc.ras = &umc_v12_0_ras; 1432 break; 1433 default: 1434 break; 1435 } 1436 } 1437 1438 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) 1439 { 1440 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 1441 case IP_VERSION(9, 4, 1): 1442 adev->mmhub.funcs = &mmhub_v9_4_funcs; 1443 break; 1444 case IP_VERSION(9, 4, 2): 1445 adev->mmhub.funcs = &mmhub_v1_7_funcs; 1446 break; 1447 case IP_VERSION(1, 8, 0): 1448 case IP_VERSION(1, 8, 1): 1449 adev->mmhub.funcs = &mmhub_v1_8_funcs; 1450 break; 1451 default: 1452 adev->mmhub.funcs = &mmhub_v1_0_funcs; 1453 break; 1454 } 1455 } 1456 1457 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) 1458 { 1459 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 1460 case IP_VERSION(9, 4, 0): 1461 adev->mmhub.ras = &mmhub_v1_0_ras; 1462 break; 1463 case IP_VERSION(9, 4, 1): 1464 adev->mmhub.ras = &mmhub_v9_4_ras; 1465 break; 1466 case IP_VERSION(9, 4, 2): 1467 adev->mmhub.ras = &mmhub_v1_7_ras; 1468 break; 1469 case IP_VERSION(1, 8, 0): 1470 case IP_VERSION(1, 8, 1): 1471 adev->mmhub.ras = &mmhub_v1_8_ras; 1472 break; 1473 default: 1474 /* mmhub ras is not available */ 1475 break; 1476 } 1477 } 1478 1479 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) 1480 { 1481 if (amdgpu_is_multi_aid(adev)) 1482 adev->gfxhub.funcs = &gfxhub_v1_2_funcs; 1483 else 1484 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; 1485 } 1486 1487 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) 1488 { 1489 adev->hdp.ras = &hdp_v4_0_ras; 1490 } 1491 1492 static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) 1493 { 1494 struct amdgpu_mca *mca = &adev->mca; 1495 1496 /* is UMC the right IP to check for MCA? Maybe DF? */ 1497 switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 1498 case IP_VERSION(6, 7, 0): 1499 if (!adev->gmc.xgmi.connected_to_cpu) { 1500 mca->mp0.ras = &mca_v3_0_mp0_ras; 1501 mca->mp1.ras = &mca_v3_0_mp1_ras; 1502 mca->mpio.ras = &mca_v3_0_mpio_ras; 1503 } 1504 break; 1505 default: 1506 break; 1507 } 1508 } 1509 1510 static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) 1511 { 1512 if (!adev->gmc.xgmi.connected_to_cpu) 1513 adev->gmc.xgmi.ras = &xgmi_ras; 1514 } 1515 1516 static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) 1517 { 1518 enum amdgpu_memory_partition mode; 1519 uint32_t supp_modes; 1520 int i; 1521 1522 adev->gmc.supported_nps_modes = 0; 1523 1524 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) 1525 return; 1526 1527 mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); 1528 1529 /* Mode detected by hardware and supported modes available */ 1530 if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) { 1531 while ((i = ffs(supp_modes))) { 1532 if (AMDGPU_ALL_NPS_MASK & BIT(i)) 1533 adev->gmc.supported_nps_modes |= BIT(i); 1534 supp_modes &= supp_modes - 1; 1535 } 1536 } else { 1537 /*TODO: Check PSP version also which supports NPS switch. Otherwise keep 1538 * supported modes as 0. 1539 */ 1540 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1541 case IP_VERSION(9, 4, 3): 1542 case IP_VERSION(9, 4, 4): 1543 adev->gmc.supported_nps_modes = 1544 BIT(AMDGPU_NPS1_PARTITION_MODE) | 1545 BIT(AMDGPU_NPS4_PARTITION_MODE); 1546 break; 1547 default: 1548 break; 1549 } 1550 } 1551 } 1552 1553 static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) 1554 { 1555 struct amdgpu_device *adev = ip_block->adev; 1556 1557 /* 1558 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined 1559 * in their IP discovery tables 1560 */ 1561 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || 1562 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 1563 amdgpu_is_multi_aid(adev)) 1564 adev->gmc.xgmi.supported = true; 1565 1566 if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { 1567 adev->gmc.xgmi.supported = true; 1568 adev->gmc.xgmi.connected_to_cpu = 1569 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); 1570 } 1571 1572 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { 1573 enum amdgpu_pkg_type pkg_type = 1574 adev->smuio.funcs->get_pkg_type(adev); 1575 /* On GFXIP 9.4.3. APU, there is no physical VRAM domain present 1576 * and the APU, can be in used two possible modes: 1577 * - carveout mode 1578 * - native APU mode 1579 * "is_app_apu" can be used to identify the APU in the native 1580 * mode. 1581 */ 1582 adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU && 1583 !pci_resource_len(adev->pdev, 0)); 1584 } 1585 1586 gmc_v9_0_set_gmc_funcs(adev); 1587 gmc_v9_0_set_irq_funcs(adev); 1588 gmc_v9_0_set_umc_funcs(adev); 1589 gmc_v9_0_set_mmhub_funcs(adev); 1590 gmc_v9_0_set_mmhub_ras_funcs(adev); 1591 gmc_v9_0_set_gfxhub_funcs(adev); 1592 gmc_v9_0_set_hdp_ras_funcs(adev); 1593 gmc_v9_0_set_mca_ras_funcs(adev); 1594 gmc_v9_0_set_xgmi_ras_funcs(adev); 1595 1596 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; 1597 adev->gmc.shared_aperture_end = 1598 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; 1599 adev->gmc.private_aperture_start = 0x1000000000000000ULL; 1600 adev->gmc.private_aperture_end = 1601 adev->gmc.private_aperture_start + (4ULL << 30) - 1; 1602 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; 1603 1604 return 0; 1605 } 1606 1607 static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block) 1608 { 1609 struct amdgpu_device *adev = ip_block->adev; 1610 int r; 1611 1612 r = amdgpu_gmc_allocate_vm_inv_eng(adev); 1613 if (r) 1614 return r; 1615 1616 /* 1617 * Workaround performance drop issue with VBIOS enables partial 1618 * writes, while disables HBM ECC for vega10. 1619 */ 1620 if (!amdgpu_sriov_vf(adev) && 1621 (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) { 1622 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { 1623 if (adev->df.funcs && 1624 adev->df.funcs->enable_ecc_force_par_wr_rmw) 1625 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); 1626 } 1627 } 1628 1629 if (!amdgpu_persistent_edc_harvesting_supported(adev)) { 1630 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB); 1631 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__HDP); 1632 } 1633 1634 r = amdgpu_gmc_ras_late_init(adev); 1635 if (r) 1636 return r; 1637 1638 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); 1639 } 1640 1641 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, 1642 struct amdgpu_gmc *mc) 1643 { 1644 u64 base = adev->mmhub.funcs->get_fb_location(adev); 1645 1646 amdgpu_gmc_set_agp_default(adev, mc); 1647 1648 /* add the xgmi offset of the physical node */ 1649 base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 1650 if (amdgpu_gmc_is_pdb0_enabled(adev)) { 1651 amdgpu_gmc_sysvm_location(adev, mc); 1652 } else { 1653 amdgpu_gmc_vram_location(adev, mc, base); 1654 amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT); 1655 if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1)) 1656 amdgpu_gmc_agp_location(adev, mc); 1657 } 1658 /* base offset of vram pages */ 1659 adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev); 1660 1661 /* XXX: add the xgmi offset of the physical node? */ 1662 adev->vm_manager.vram_base_offset += 1663 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 1664 } 1665 1666 /** 1667 * gmc_v9_0_mc_init - initialize the memory controller driver params 1668 * 1669 * @adev: amdgpu_device pointer 1670 * 1671 * Look up the amount of vram, vram width, and decide how to place 1672 * vram and gart within the GPU's physical address space. 1673 * Returns 0 for success. 1674 */ 1675 static int gmc_v9_0_mc_init(struct amdgpu_device *adev) 1676 { 1677 int r; 1678 1679 /* size in MB on si */ 1680 if (!adev->gmc.is_app_apu) { 1681 adev->gmc.mc_vram_size = 1682 adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; 1683 } else { 1684 DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n"); 1685 adev->gmc.mc_vram_size = 0; 1686 } 1687 adev->gmc.real_vram_size = adev->gmc.mc_vram_size; 1688 1689 if (!(adev->flags & AMD_IS_APU) && 1690 !adev->gmc.xgmi.connected_to_cpu) { 1691 r = amdgpu_device_resize_fb_bar(adev); 1692 if (r) 1693 return r; 1694 } 1695 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); 1696 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); 1697 1698 #ifdef CONFIG_X86_64 1699 /* 1700 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi 1701 * interface can use VRAM through here as it appears system reserved 1702 * memory in host address space. 1703 * 1704 * For APUs, VRAM is just the stolen system memory and can be accessed 1705 * directly. 1706 * 1707 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR. 1708 */ 1709 1710 /* check whether both host-gpu and gpu-gpu xgmi links exist */ 1711 if ((!amdgpu_sriov_vf(adev) && 1712 (adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) || 1713 (adev->gmc.xgmi.supported && 1714 adev->gmc.xgmi.connected_to_cpu)) { 1715 adev->gmc.aper_base = 1716 adev->gfxhub.funcs->get_mc_fb_offset(adev) + 1717 adev->gmc.xgmi.physical_node_id * 1718 adev->gmc.xgmi.node_segment_size; 1719 adev->gmc.aper_size = adev->gmc.real_vram_size; 1720 } 1721 1722 #endif 1723 adev->gmc.visible_vram_size = adev->gmc.aper_size; 1724 1725 /* set the gart size */ 1726 if (amdgpu_gart_size == -1) { 1727 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1728 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */ 1729 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */ 1730 case IP_VERSION(9, 4, 0): 1731 case IP_VERSION(9, 4, 1): 1732 case IP_VERSION(9, 4, 2): 1733 case IP_VERSION(9, 4, 3): 1734 case IP_VERSION(9, 4, 4): 1735 case IP_VERSION(9, 5, 0): 1736 default: 1737 adev->gmc.gart_size = 512ULL << 20; 1738 break; 1739 case IP_VERSION(9, 1, 0): /* DCE SG support */ 1740 case IP_VERSION(9, 2, 2): /* DCE SG support */ 1741 case IP_VERSION(9, 3, 0): 1742 adev->gmc.gart_size = 1024ULL << 20; 1743 break; 1744 } 1745 } else { 1746 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; 1747 } 1748 1749 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; 1750 1751 gmc_v9_0_vram_gtt_location(adev, &adev->gmc); 1752 1753 return 0; 1754 } 1755 1756 static int gmc_v9_0_gart_init(struct amdgpu_device *adev) 1757 { 1758 int r; 1759 1760 if (adev->gart.bo) { 1761 WARN(1, "VEGA10 PCIE GART already initialized\n"); 1762 return 0; 1763 } 1764 1765 if (amdgpu_gmc_is_pdb0_enabled(adev)) { 1766 adev->gmc.vmid0_page_table_depth = 1; 1767 adev->gmc.vmid0_page_table_block_size = 12; 1768 } else { 1769 adev->gmc.vmid0_page_table_depth = 0; 1770 adev->gmc.vmid0_page_table_block_size = 0; 1771 } 1772 1773 /* Initialize common gart structure */ 1774 r = amdgpu_gart_init(adev); 1775 if (r) 1776 return r; 1777 adev->gart.table_size = adev->gart.num_gpu_pages * 8; 1778 adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC) | 1779 AMDGPU_PTE_EXECUTABLE; 1780 1781 if (!adev->gmc.real_vram_size) { 1782 dev_info(adev->dev, "Put GART in system memory for APU\n"); 1783 r = amdgpu_gart_table_ram_alloc(adev); 1784 if (r) 1785 dev_err(adev->dev, "Failed to allocate GART in system memory\n"); 1786 } else { 1787 r = amdgpu_gart_table_vram_alloc(adev); 1788 if (r) 1789 return r; 1790 1791 if (amdgpu_gmc_is_pdb0_enabled(adev)) 1792 r = amdgpu_gmc_pdb0_alloc(adev); 1793 } 1794 1795 return r; 1796 } 1797 1798 /** 1799 * gmc_v9_0_save_registers - saves regs 1800 * 1801 * @adev: amdgpu_device pointer 1802 * 1803 * This saves potential register values that should be 1804 * restored upon resume 1805 */ 1806 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) 1807 { 1808 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1809 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) 1810 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); 1811 } 1812 1813 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) 1814 { 1815 static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; 1816 u32 vram_info; 1817 1818 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1819 adev->gmc.vram_width = 128 * 64; 1820 1821 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1822 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1823 1824 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && 1825 adev->rev_id == 0x3) 1826 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1827 1828 if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1829 vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1830 adev->gmc.vram_vendor = vram_info & 0xF; 1831 } 1832 } 1833 1834 static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) 1835 { 1836 int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits; 1837 struct amdgpu_device *adev = ip_block->adev; 1838 unsigned long inst_mask = adev->aid_mask; 1839 1840 adev->gfxhub.funcs->init(adev); 1841 1842 adev->mmhub.funcs->init(adev); 1843 1844 spin_lock_init(&adev->gmc.invalidate_lock); 1845 1846 if (amdgpu_is_multi_aid(adev)) { 1847 gmc_v9_4_3_init_vram_info(adev); 1848 } else if (!adev->bios) { 1849 if (adev->flags & AMD_IS_APU) { 1850 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; 1851 adev->gmc.vram_width = 64 * 64; 1852 } else { 1853 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1854 adev->gmc.vram_width = 128 * 64; 1855 } 1856 } else { 1857 r = amdgpu_atomfirmware_get_vram_info(adev, 1858 &vram_width, &vram_type, &vram_vendor); 1859 if (amdgpu_sriov_vf(adev)) 1860 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, 1861 * and DF related registers is not readable, seems hardcord is the 1862 * only way to set the correct vram_width 1863 */ 1864 adev->gmc.vram_width = 2048; 1865 else if (amdgpu_emu_mode != 1) 1866 adev->gmc.vram_width = vram_width; 1867 1868 if (!adev->gmc.vram_width) { 1869 int chansize, numchan; 1870 1871 /* hbm memory channel size */ 1872 if (adev->flags & AMD_IS_APU) 1873 chansize = 64; 1874 else 1875 chansize = 128; 1876 if (adev->df.funcs && 1877 adev->df.funcs->get_hbm_channel_number) { 1878 numchan = adev->df.funcs->get_hbm_channel_number(adev); 1879 adev->gmc.vram_width = numchan * chansize; 1880 } 1881 } 1882 1883 adev->gmc.vram_type = vram_type; 1884 adev->gmc.vram_vendor = vram_vendor; 1885 } 1886 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1887 case IP_VERSION(9, 1, 0): 1888 case IP_VERSION(9, 2, 2): 1889 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 1890 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 1891 1892 if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { 1893 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1894 } else { 1895 /* vm_size is 128TB + 512GB for legacy 3-level page support */ 1896 amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48); 1897 adev->gmc.translate_further = 1898 adev->vm_manager.num_level > 1; 1899 } 1900 break; 1901 case IP_VERSION(9, 0, 1): 1902 case IP_VERSION(9, 2, 1): 1903 case IP_VERSION(9, 4, 0): 1904 case IP_VERSION(9, 3, 0): 1905 case IP_VERSION(9, 4, 2): 1906 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 1907 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 1908 1909 /* 1910 * To fulfill 4-level page support, 1911 * vm size is 256TB (48bit), maximum size of Vega10, 1912 * block size 512 (9bit) 1913 */ 1914 1915 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1916 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 1917 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 1918 break; 1919 case IP_VERSION(9, 4, 1): 1920 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); 1921 set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask); 1922 set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask); 1923 1924 /* Keep the vm size same with Vega20 */ 1925 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1926 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 1927 break; 1928 case IP_VERSION(9, 4, 3): 1929 case IP_VERSION(9, 4, 4): 1930 case IP_VERSION(9, 5, 0): 1931 bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), 1932 NUM_XCC(adev->gfx.xcc_mask)); 1933 1934 inst_mask <<= AMDGPU_MMHUB0(0); 1935 bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32); 1936 1937 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); 1938 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 1939 break; 1940 default: 1941 break; 1942 } 1943 1944 /* This interrupt is VMC page fault.*/ 1945 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT, 1946 &adev->gmc.vm_fault); 1947 if (r) 1948 return r; 1949 1950 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) { 1951 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, 1952 &adev->gmc.vm_fault); 1953 if (r) 1954 return r; 1955 } 1956 1957 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, 1958 &adev->gmc.vm_fault); 1959 1960 if (r) 1961 return r; 1962 1963 if (!amdgpu_sriov_vf(adev) && 1964 !adev->gmc.xgmi.connected_to_cpu && 1965 !adev->gmc.is_app_apu) { 1966 /* interrupt sent to DF. */ 1967 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, 1968 &adev->gmc.ecc_irq); 1969 if (r) 1970 return r; 1971 } 1972 1973 /* Set the internal MC address mask 1974 * This is the max address of the GPU's 1975 * internal address space. 1976 */ 1977 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ 1978 1979 dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >= 1980 IP_VERSION(9, 4, 2) ? 1981 48 : 1982 44; 1983 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); 1984 if (r) { 1985 drm_warn(adev_to_drm(adev), "No suitable DMA available.\n"); 1986 return r; 1987 } 1988 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits); 1989 1990 r = gmc_v9_0_mc_init(adev); 1991 if (r) 1992 return r; 1993 1994 amdgpu_gmc_get_vbios_allocations(adev); 1995 1996 if (amdgpu_is_multi_aid(adev)) { 1997 r = amdgpu_gmc_init_mem_ranges(adev); 1998 if (r) 1999 return r; 2000 } 2001 2002 /* Memory manager */ 2003 r = amdgpu_bo_init(adev); 2004 if (r) 2005 return r; 2006 2007 r = gmc_v9_0_gart_init(adev); 2008 if (r) 2009 return r; 2010 2011 gmc_v9_0_init_nps_details(adev); 2012 /* 2013 * number of VMs 2014 * VMID 0 is reserved for System 2015 * amdgpu graphics/compute will use VMIDs 1..n-1 2016 * amdkfd will use VMIDs n..15 2017 * 2018 * The first KFD VMID is 8 for GPUs with graphics, 3 for 2019 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs 2020 * for video processing. 2021 */ 2022 adev->vm_manager.first_kfd_vmid = 2023 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 2024 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 2025 amdgpu_is_multi_aid(adev)) ? 2026 3 : 2027 8; 2028 2029 amdgpu_vm_manager_init(adev); 2030 2031 gmc_v9_0_save_registers(adev); 2032 2033 r = amdgpu_gmc_ras_sw_init(adev); 2034 if (r) 2035 return r; 2036 2037 if (amdgpu_is_multi_aid(adev)) 2038 amdgpu_gmc_sysfs_init(adev); 2039 2040 return 0; 2041 } 2042 2043 static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) 2044 { 2045 struct amdgpu_device *adev = ip_block->adev; 2046 2047 if (amdgpu_is_multi_aid(adev)) 2048 amdgpu_gmc_sysfs_fini(adev); 2049 2050 amdgpu_gmc_ras_fini(adev); 2051 amdgpu_gem_force_release(adev); 2052 amdgpu_vm_manager_fini(adev); 2053 if (!adev->gmc.real_vram_size) { 2054 dev_info(adev->dev, "Put GART in system memory for APU free\n"); 2055 amdgpu_gart_table_ram_free(adev); 2056 } else { 2057 amdgpu_gart_table_vram_free(adev); 2058 } 2059 amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0); 2060 amdgpu_bo_fini(adev); 2061 2062 adev->gmc.num_mem_partitions = 0; 2063 kfree(adev->gmc.mem_partitions); 2064 2065 return 0; 2066 } 2067 2068 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) 2069 { 2070 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 2071 case IP_VERSION(9, 0, 0): 2072 if (amdgpu_sriov_vf(adev)) 2073 break; 2074 fallthrough; 2075 case IP_VERSION(9, 4, 0): 2076 soc15_program_register_sequence(adev, 2077 golden_settings_mmhub_1_0_0, 2078 ARRAY_SIZE(golden_settings_mmhub_1_0_0)); 2079 soc15_program_register_sequence(adev, 2080 golden_settings_athub_1_0_0, 2081 ARRAY_SIZE(golden_settings_athub_1_0_0)); 2082 break; 2083 case IP_VERSION(9, 1, 0): 2084 case IP_VERSION(9, 2, 0): 2085 /* TODO for renoir */ 2086 soc15_program_register_sequence(adev, 2087 golden_settings_athub_1_0_0, 2088 ARRAY_SIZE(golden_settings_athub_1_0_0)); 2089 break; 2090 default: 2091 break; 2092 } 2093 } 2094 2095 /** 2096 * gmc_v9_0_restore_registers - restores regs 2097 * 2098 * @adev: amdgpu_device pointer 2099 * 2100 * This restores register values, saved at suspend. 2101 */ 2102 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) 2103 { 2104 if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 2105 (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) { 2106 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); 2107 WARN_ON(adev->gmc.sdpif_register != 2108 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0)); 2109 } 2110 } 2111 2112 /** 2113 * gmc_v9_0_gart_enable - gart enable 2114 * 2115 * @adev: amdgpu_device pointer 2116 */ 2117 static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) 2118 { 2119 int r; 2120 2121 if (amdgpu_gmc_is_pdb0_enabled(adev)) 2122 amdgpu_gmc_init_pdb0(adev); 2123 2124 if (adev->gart.bo == NULL) { 2125 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); 2126 return -EINVAL; 2127 } 2128 2129 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); 2130 2131 if (!adev->in_s0ix) { 2132 r = adev->gfxhub.funcs->gart_enable(adev); 2133 if (r) 2134 return r; 2135 } 2136 2137 r = adev->mmhub.funcs->gart_enable(adev); 2138 if (r) 2139 return r; 2140 2141 drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled.\n", 2142 (unsigned int)(adev->gmc.gart_size >> 20)); 2143 if (adev->gmc.pdb0_bo) 2144 drm_info(adev_to_drm(adev), "PDB0 located at 0x%016llX\n", 2145 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); 2146 drm_info(adev_to_drm(adev), "PTB located at 0x%016llX\n", 2147 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 2148 2149 return 0; 2150 } 2151 2152 static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block) 2153 { 2154 struct amdgpu_device *adev = ip_block->adev; 2155 bool value; 2156 int i, r; 2157 2158 adev->gmc.flush_pasid_uses_kiq = true; 2159 2160 /* Vega20+XGMI caches PTEs in TC and TLB. Add a heavy-weight TLB flush 2161 * (type 2), which flushes both. Due to a race condition with 2162 * concurrent memory accesses using the same TLB cache line, we still 2163 * need a second TLB flush after this. 2164 */ 2165 adev->gmc.flush_tlb_needs_extra_type_2 = 2166 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) && 2167 adev->gmc.xgmi.num_physical_nodes; 2168 2169 /* The sequence of these two function calls matters.*/ 2170 gmc_v9_0_init_golden_registers(adev); 2171 2172 if (adev->mode_info.num_crtc) { 2173 /* Lockout access through VGA aperture*/ 2174 WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 2175 /* disable VGA render */ 2176 WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 2177 } 2178 2179 if (adev->mmhub.funcs->update_power_gating) 2180 adev->mmhub.funcs->update_power_gating(adev, true); 2181 2182 adev->hdp.funcs->init_registers(adev); 2183 2184 /* After HDP is initialized, flush HDP.*/ 2185 amdgpu_device_flush_hdp(adev, NULL); 2186 2187 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 2188 value = false; 2189 else 2190 value = true; 2191 2192 if (!amdgpu_sriov_vf(adev)) { 2193 if (!adev->in_s0ix) 2194 adev->gfxhub.funcs->set_fault_enable_default(adev, value); 2195 adev->mmhub.funcs->set_fault_enable_default(adev, value); 2196 } 2197 for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) { 2198 if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) 2199 continue; 2200 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); 2201 } 2202 2203 if (adev->umc.funcs && adev->umc.funcs->init_registers) 2204 adev->umc.funcs->init_registers(adev); 2205 2206 r = gmc_v9_0_gart_enable(adev); 2207 if (r) 2208 return r; 2209 2210 if (amdgpu_emu_mode == 1) 2211 return amdgpu_gmc_vram_checking(adev); 2212 2213 return 0; 2214 } 2215 2216 /** 2217 * gmc_v9_0_gart_disable - gart disable 2218 * 2219 * @adev: amdgpu_device pointer 2220 * 2221 * This disables all VM page table. 2222 */ 2223 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) 2224 { 2225 if (!adev->in_s0ix) 2226 adev->gfxhub.funcs->gart_disable(adev); 2227 adev->mmhub.funcs->gart_disable(adev); 2228 } 2229 2230 static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block) 2231 { 2232 struct amdgpu_device *adev = ip_block->adev; 2233 2234 gmc_v9_0_gart_disable(adev); 2235 2236 if (amdgpu_sriov_vf(adev)) { 2237 /* full access mode, so don't touch any GMC register */ 2238 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); 2239 return 0; 2240 } 2241 2242 /* 2243 * Pair the operations did in gmc_v9_0_hw_init and thus maintain 2244 * a correct cached state for GMC. Otherwise, the "gate" again 2245 * operation on S3 resuming will fail due to wrong cached state. 2246 */ 2247 if (adev->mmhub.funcs->update_power_gating) 2248 adev->mmhub.funcs->update_power_gating(adev, false); 2249 2250 /* 2251 * For minimal init, late_init is not called, hence VM fault/RAS irqs 2252 * are not enabled. 2253 */ 2254 if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) { 2255 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 2256 2257 if (adev->gmc.ecc_irq.funcs && 2258 amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) 2259 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); 2260 } 2261 2262 return 0; 2263 } 2264 2265 static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block) 2266 { 2267 return gmc_v9_0_hw_fini(ip_block); 2268 } 2269 2270 static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) 2271 { 2272 struct amdgpu_device *adev = ip_block->adev; 2273 int r; 2274 2275 /* If a reset is done for NPS mode switch, read the memory range 2276 * information again. 2277 */ 2278 if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { 2279 amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); 2280 adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; 2281 } 2282 2283 r = gmc_v9_0_hw_init(ip_block); 2284 if (r) 2285 return r; 2286 2287 amdgpu_vmid_reset_all(ip_block->adev); 2288 2289 return 0; 2290 } 2291 2292 static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block) 2293 { 2294 /* MC is always ready in GMC v9.*/ 2295 return true; 2296 } 2297 2298 static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 2299 { 2300 /* There is no need to wait for MC idle in GMC v9.*/ 2301 return 0; 2302 } 2303 2304 static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block) 2305 { 2306 /* XXX for emulation.*/ 2307 return 0; 2308 } 2309 2310 static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 2311 enum amd_clockgating_state state) 2312 { 2313 struct amdgpu_device *adev = ip_block->adev; 2314 2315 adev->mmhub.funcs->set_clockgating(adev, state); 2316 2317 athub_v1_0_set_clockgating(adev, state); 2318 2319 return 0; 2320 } 2321 2322 static void gmc_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags) 2323 { 2324 struct amdgpu_device *adev = ip_block->adev; 2325 2326 adev->mmhub.funcs->get_clockgating(adev, flags); 2327 2328 athub_v1_0_get_clockgating(adev, flags); 2329 } 2330 2331 static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 2332 enum amd_powergating_state state) 2333 { 2334 return 0; 2335 } 2336 2337 const struct amd_ip_funcs gmc_v9_0_ip_funcs = { 2338 .name = "gmc_v9_0", 2339 .early_init = gmc_v9_0_early_init, 2340 .late_init = gmc_v9_0_late_init, 2341 .sw_init = gmc_v9_0_sw_init, 2342 .sw_fini = gmc_v9_0_sw_fini, 2343 .hw_init = gmc_v9_0_hw_init, 2344 .hw_fini = gmc_v9_0_hw_fini, 2345 .suspend = gmc_v9_0_suspend, 2346 .resume = gmc_v9_0_resume, 2347 .is_idle = gmc_v9_0_is_idle, 2348 .wait_for_idle = gmc_v9_0_wait_for_idle, 2349 .soft_reset = gmc_v9_0_soft_reset, 2350 .set_clockgating_state = gmc_v9_0_set_clockgating_state, 2351 .set_powergating_state = gmc_v9_0_set_powergating_state, 2352 .get_clockgating_state = gmc_v9_0_get_clockgating_state, 2353 }; 2354 2355 const struct amdgpu_ip_block_version gmc_v9_0_ip_block = { 2356 .type = AMD_IP_BLOCK_TYPE_GMC, 2357 .major = 9, 2358 .minor = 0, 2359 .rev = 0, 2360 .funcs = &gmc_v9_0_ip_funcs, 2361 }; 2362