/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1
#define mmSMNAID_AID0_MCA_SMU	0x03b30400

static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
					uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base;
	u32 inst_mask;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);

	/* If using GART for FB translation, the vmid0 page table covers both
	 * VRAM and system memory (GART).
	 */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));

		} else {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

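/* Aperture granularity notes (derived from the shifts used below): the AGP
 * BOT/TOP registers take 16 MB units (addr >> 24), while the system aperture
 * LOW/HIGH registers take 256 KB logical page numbers (addr >> 18).
 * Illustrative only, assuming a hypothetical FB window at MC address
 * 0x8000000000: 0x8000000000 >> 18 == 0x200000 is the value that would land
 * in MC_VM_SYSTEM_APERTURE_LOW_ADDR.
 */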
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* When squeezing VRAM into the GART aperture, the FB and AGP
		 * apertures are not used; disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	/* Setup TLB control */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);	/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

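/* L2 cache setup note: on our reading of the field encoding (the 1.8
 * sh_mask header is authoritative), L2_CACHE_BIGK_FRAGMENT_SIZE is a log2
 * count of 4 KB pages, so the values programmed below correspond to
 * 4K << 9 == 2 MB fragments when translate_further is enabled and
 * 4K << 6 == 256 KB fragments otherwise.
 */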
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}

static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				    adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp,
				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
	}
}

static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

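/* Context 0, programmed above, serves kernel/GART translation; the loop
 * below configures contexts 1-15 for user VMIDs. On our reading of the
 * math below (not separately documented here), PAGE_TABLE_BLOCK_SIZE is
 * encoded relative to the default 512-entry (2 MB) granularity, which is
 * why 9 is subtracted from the driver's block_size when translate_further
 * is not in use.
 */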
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 i, j, inst_mask;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, j,
					    regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, j,
					    regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}

static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j, inst_mask;

	/* Disable all tables */
	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);

		if (!amdgpu_sriov_vf(adev)) {
			/* Setup L2 cache */
			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
					    0);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
		}
	}
}

/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

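/* Cache per-hub register offsets and strides so generic GMC code can
 * address any context or engine as base + index * distance; e.g. context
 * N's CNTL register is vm_context0_cntl + N * ctx_distance (a sketch of
 * the scheme, per the distances computed below).
 */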
static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
			regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	return 0;
}

static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{

}

static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev,
						 int hub_inst)
{
	u32 fed, status;

	status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS);
	fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
	/* reset page fault status */
	WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst,
		 regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1);

	return fed;
}

const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
	.query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status,
};

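/* RAS error status registers, per MMHUB instance: five MMEA endpoints plus
 * MM_CANE. The CE list below is mirrored by the UE list that follows; the
 * MM_CANE entries set no INFO/STATUS validity-check flags.
 */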
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
	{AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
	{AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
	{AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
	{AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
	{AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
	{AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
	{AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
	{AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
	{AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
	{AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
	{AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
	{AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
	{AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
	{AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
	{AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
	{AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
	{AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
};

static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
	unsigned long ue_count = 0, ce_count = 0;

	/* NOTE: mmhub instances are enumerated from aid_mask, so the
	 * instance number is in the range 0-3 and can be used as the die ID
	 * directly.
	 */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = mmhub_inst,
	};

	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);
	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);

	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
}

static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_inst);
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_inst);
}

static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_8_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
};

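/* ACA bank handling: the parser below logs UE banks with a fixed count of
 * one and CE banks with the MISC0 ERRCNT field; bank filtering accepts
 * only banks whose IPID instance matches the AID0 SMU and whose error code
 * appears in mmhub_v1_8_err_codes.
 */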
static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];
	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
						     1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/* Error codes, per the SMU driver interface header file. */
static int mmhub_v1_8_err_codes[] = {
	0, 1, 2, 3, 4, /* CODE_DAGB0 - CODE_DAGB4 */
	5, 6, 7, 8, 9, /* CODE_EA0 - CODE_EA4 */
	10, /* CODE_UTCL2_ROUTER */
	11, /* CODE_VML2 */
	12, /* CODE_VML2_WALKER */
	13, /* CODE_MMCANE */
};

static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);

	if (instlo != mmSMNAID_AID0_MCA_SMU)
		return false;

	if (aca_bank_check_error_codes(handle->adev, bank,
				       mmhub_v1_8_err_codes,
				       ARRAY_SIZE(mmhub_v1_8_err_codes)))
		return false;

	return true;
}

static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
	.aca_bank_parser = mmhub_v1_8_aca_bank_parser,
	.aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
};

static const struct aca_info mmhub_v1_8_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK,
	.bank_ops = &mmhub_v1_8_aca_bank_ops,
};

static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
				&mmhub_v1_8_aca_info, NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_8_ras_hw_ops,
		.ras_late_init = mmhub_v1_8_ras_late_init,
	},
};