/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v12_0.h"
#include "athub/athub_4_1_0_sh_mask.h"
#include "athub/athub_4_1_0_offset.h"
#include "oss/osssys_7_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "soc24_enum.h"
#include "soc24.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbif_v6_3_1.h"
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"
#include "umc_v8_14.h"

static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src, unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}
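/**
 * gmc_v12_0_process_interrupt - process a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source this handler is registered for
 * @entry: decoded interrupt vector entry
 *
 * Reconstruct the faulting GPU virtual address from the IV entry, read
 * and clear the hub's L2 protection fault status on bare metal, and log
 * the fault together with the owning process, rate limited.
 */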
static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->client_id == SOC21_IH_CLIENTID_VMC)
		hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	else
		hub = &adev->vmhub[AMDGPU_GFXHUB(0)];

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			dev_err(adev->dev,
				" in process %s pid %d thread %s pid %d\n",
				task_info->process_name, task_info->tgid,
				task_info->task_name, task_info->pid);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		/* Only print the L2 fault status if the status register could
		 * be read and contains useful information.
		 */
		if (status != 0)
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
	.set = gmc_v12_0_vm_fault_interrupt_state,
	.process = gmc_v12_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
	.set = gmc_v12_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
	}
}

/**
 * gmc_v12_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Returns true if the invalidation semaphore should be used for @vmhub.
 */
static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v12_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
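/**
 * gmc_v12_0_flush_vm_hub - flush the TLB of a single hub via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Issue the invalidation request directly through the hub's invalidation
 * engine registers and poll for the per-VMID acknowledge bit. This is the
 * fallback path used when neither the KIQ nor the MES ring is available.
 */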
static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards so the hub
	 * does not enter the power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			dev_err(adev->dev,
				"Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	/* Issue an additional private vm invalidation to MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}
/**
 * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	amdgpu_device_flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating off cycle. As a workaround, acquire a semaphore
	 * before the invalidation and release it afterwards so the hub
	 * does not enter the power-gated state in between.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
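/**
 * gmc_v12_0_emit_pasid_mapping - update the VMID/PASID LUT from a ring
 *
 * @ring: ring on which to emit the register write
 * @vmid: vm instance to map
 * @pasid: process address space identifier to associate with @vmid
 *
 * Write @pasid into the IH VMID lookup table entry for @vmid so faults
 * can later be attributed to the owning process; the GFX hub and MM hub
 * each have their own LUT.
 */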
static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63 P
 * 62:59 reserved
 * 58 D
 * 57 G
 * 56 T
 * 55:54 M
 * 53:52 SW
 * 51:48 reserved for future
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63 P
 * 62:58 block fragment size
 * 57 reserved
 * 56 A
 * 55:54 M
 * 53:52 reserved
 * 51:48 reserved for future
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
	}
}

static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE_GFX12)
			*flags &= ~AMDGPU_PDE_PTE_GFX12;
	}
}

static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
		*flags |= AMDGPU_PTE_PRT_GFX12;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags |= AMDGPU_PTE_IS_PTE;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
		*flags |= AMDGPU_PTE_DCC;

	if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}

static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	return 0;
}
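/**
 * gmc_v12_0_get_dcc_alignment - minimum alignment for DCC buffers
 *
 * @adev: amdgpu_device pointer
 *
 * Derive the required DCC surface alignment in bytes from the number of
 * texture channel caches: the channel-cache count is scaled to a
 * power-of-two alignment factor, then multiplied out to a KiB-granular
 * byte alignment. Returns 0 on chips other than GC 12.0.0/12.0.1.
 */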
static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
{
	unsigned int max_tex_channel_caches, alignment;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
		return 0;

	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
	if (is_power_of_2(max_tex_channel_caches))
		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
	else
		alignment = roundup_pow_of_two(max_tex_channel_caches);

	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
}

static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
	.map_mtype = gmc_v12_0_map_mtype,
	.get_vm_pde = gmc_v12_0_get_vm_pde,
	.get_vm_pte = gmc_v12_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
};

static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
}

static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 14, 0):
		adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
		adev->umc.node_inst_num = 0;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
		adev->umc.ras = &umc_v8_14_ras;
		break;
	default:
		break;
	}
}

static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v12_0_set_gfxhub_funcs(adev);
	gmc_v12_0_set_mmhub_funcs(adev);
	gmc_v12_0_set_gmc_funcs(adev);
	gmc_v12_0_set_irq_funcs(adev);
	gmc_v12_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
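/**
 * gmc_v12_0_vram_gtt_location - place VRAM, GART and AGP apertures
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller parameters
 *
 * Program the driver's view of the GPU physical address space: VRAM at
 * the framebuffer location reported by the MMHUB, GART placed low, and
 * the AGP aperture only on bare metal when amdgpu_agp is enabled.
 */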
static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
	if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v12_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* memsize is reported in MB, convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}

	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v12_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE |
				    AMDGPU_PTE_IS_PTE;

	return amdgpu_gart_table_vram_alloc(adev);
}
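/**
 * gmc_v12_0_sw_init - GMC software init
 *
 * @ip_block: pointer to the IP block this callback services
 *
 * Query VRAM parameters from the vBIOS, register the VM fault and ECC
 * interrupt sources, size the GART and VM address space, and bring up
 * the memory and VM managers.
 */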
static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;
	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * For 4-level page support use the maximum vm size,
		 * 256TB (48 bits), with a block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask.  This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v12_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v12_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v12_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v12_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v12_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	amdgpu_device_flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	/* The sequence of these two function calls matters. */
	gmc_v12_0_init_golden_registers(adev);

	r = gmc_v12_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v12_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}
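/**
 * gmc_v12_0_hw_fini - GMC hardware fini
 *
 * @ip_block: pointer to the IP block this callback services
 *
 * Release the fault and ECC interrupts and disable the GART. Skipped
 * entirely under SR-IOV, where the guest must not touch GMC registers.
 */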
static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v12_0_gart_disable(adev);

	return 0;
}

static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v12_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v12_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* The MC is always ready in GMC v12. */
	return true;
}

static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v12. */
	return 0;
}

static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v4_1_0_set_clockgating(adev, state);
}

static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v4_1_0_get_clockgating(adev, flags);
}

static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
	.name = "gmc_v12_0",
	.early_init = gmc_v12_0_early_init,
	.sw_init = gmc_v12_0_sw_init,
	.hw_init = gmc_v12_0_hw_init,
	.late_init = gmc_v12_0_late_init,
	.sw_fini = gmc_v12_0_sw_fini,
	.hw_fini = gmc_v12_0_hw_fini,
	.suspend = gmc_v12_0_suspend,
	.resume = gmc_v12_0_resume,
	.is_idle = gmc_v12_0_is_idle,
	.wait_for_idle = gmc_v12_0_wait_for_idle,
	.set_clockgating_state = gmc_v12_0_set_clockgating_state,
	.set_powergating_state = gmc_v12_0_set_powergating_state,
	.get_clockgating_state = gmc_v12_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v12_0_ip_funcs,
};