/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
				       amdgpu_in_reset(adev)))
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}
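/*
 * The faulting page address arrives split across the IV ring entry and is
 * reassembled in the handler below as:
 *
 *	addr  = (u64)src_data[0] << 12;          (VA bits 43:12)
 *	addr |= ((u64)src_data[1] & 0xf) << 44;  (VA bits 47:44)
 *
 * For example, src_data[0] = 0x12345 with (src_data[1] & 0xf) == 0 decodes
 * to the 4K page at 0x12345000.
 */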
static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			dev_err(adev->dev,
				" in process %s pid %d thread %s pid %d\n",
				task_info->process_name, task_info->tgid,
				task_info->task_name, task_info->pid);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the TLB flush for @vmhub should be bracketed by the
 * invalidation semaphore.
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}
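/*
 * Register layout note for the flush paths below: each hub exposes a bank
 * of invalidation engines whose registers sit at a fixed stride from the
 * engine 0 registers, so engine N's request register is
 * vm_inv_eng0_req + eng_distance * N (likewise for the sem/ack registers).
 * Engine 17 is reserved for GART flushes; the engines used by the rings
 * are handed out by amdgpu_gmc_allocate_vm_inv_eng() at late init.
 */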
/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use invalidation engine 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards so the block
	 * does not enter the power-gated state in between.
	 */

	/* TODO: Debugging of semaphore use for the GFXHUB is still ongoing. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/* Wait for the ACK, polling with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Debugging of semaphore use for the GFXHUB is still ongoing. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}
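/*
 * The pasid flush below works by reverse lookup: it walks VMIDs 1..15,
 * reads each VMID's pasid back from the IH LUT via
 * gmc_v11_0_get_vmid_pasid_mapping_info(), and flushes every VMID that is
 * currently mapped to the requested pasid.
 */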
/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}
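/*
 * The ring-emitted variant below performs the same invalidation as
 * gmc_v11_0_flush_gpu_tlb(), but as commands on the ring, so the flush is
 * ordered against the submission that switches page tables: it writes the
 * new page-directory base to the per-VMID context registers (e.g. the low
 * 32 bits go to ctx0_ptb_addr_lo32 + ctx_addr_distance * vmid) and then
 * requests and waits on the invalidation engine assigned to the ring.
 */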
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. As a workaround, acquire the semaphore
	 * before the invalidation and release it afterwards so the block
	 * does not enter the power-gated state in between.
	 */

	/* TODO: Debugging of semaphore use for the GFXHUB is still ongoing. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Debugging of semaphore use for the GFXHUB is still ongoing. */
	if (use_semaphore)
		/*
		 * add the semaphore release after the invalidation,
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
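/*
 * Worked example for the PTE layout above: a PTE of 0x0000001234567071
 * has bits 0, 4, 5 and 6 set (valid, executable, readable and writeable),
 * a fragment field of 0, mtype 0, and maps the physical page at
 * 0x1234567000 (bits 47:12).
 */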
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
}
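/*
 * The framebuffer carve-out reserved by the VBIOS is sized below from the
 * active display: either the fixed VGA allocation, or viewport height *
 * surface pitch * 4 bytes per pixel. A 1080-line scanout with a pitch of
 * 1920, for instance, works out to 1920 * 1080 * 4 = ~8 MB.
 */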
static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}
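/*
 * Note on the GPUVM apertures set up in early init below: the shared and
 * private apertures are each 4 GiB, so e.g. the shared aperture spans
 * 0x2000000000000000..0x20000000ffffffff.
 */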
static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
	    (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
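/*
 * The GART page table needs 8 bytes per 4K GPU page. With the default
 * 512 MB GART chosen in gmc_v11_0_mc_init(), that is 131072 pages and
 * therefore a 1 MB table allocated in VRAM.
 */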
static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}
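/*
 * Bring-up ordering in the function below: the hub is programmed first,
 * then the HDP cache is flushed so that CPU writes to the GART table in
 * VRAM are visible, and only then is the MMHUB TLB flushed. The default
 * fault behaviour is derived from the amdgpu_vm_fault_stop module
 * parameter: AMDGPU_VM_FAULT_STOP_ALWAYS disables it, presumably so that
 * faulting clients are halted for debugging rather than silently serviced.
 */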
/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};