/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "gmc_v12_1.h"
#include "soc15_common.h"
#include "soc_v1_0_enum.h"
#include "oss/osssys_7_1_0_offset.h"
#include "oss/osssys_7_1_0_sh_mask.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

static int gmc_v12_1_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *src,
                                              unsigned int type,
                                              enum amdgpu_interrupt_state state)
{
        struct amdgpu_vmhub *hub;
        u32 tmp, reg, i, j;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;

                                /* This works because this interrupt is only
                                 * enabled at init/resume and disabled in
                                 * fini/suspend, so the overall state doesn't
                                 * change over the course of suspend/resume.
                                 */
                                if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
                                        continue;

                                if (j >= AMDGPU_MMHUB0(0))
                                        tmp = RREG32_SOC15_IP(MMHUB, reg);
                                else
                                        tmp = RREG32_XCC(reg, j);

                                tmp &= ~hub->vm_cntx_cntl_vm_fault;

                                if (j >= AMDGPU_MMHUB0(0))
                                        WREG32_SOC15_IP(MMHUB, reg, tmp);
                                else
                                        WREG32_XCC(reg, tmp, j);
                        }
                }
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
                        hub = &adev->vmhub[j];
                        for (i = 0; i < 16; i++) {
                                reg = hub->vm_context0_cntl + i;

                                /* This works because this interrupt is only
                                 * enabled at init/resume and disabled in
                                 * fini/suspend, so the overall state doesn't
                                 * change over the course of suspend/resume.
                                 */
                                if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
                                        continue;

                                if (j >= AMDGPU_MMHUB0(0))
                                        tmp = RREG32_SOC15_IP(MMHUB, reg);
                                else
                                        tmp = RREG32_XCC(reg, j);

                                tmp |= hub->vm_cntx_cntl_vm_fault;

                                if (j >= AMDGPU_MMHUB0(0))
                                        WREG32_SOC15_IP(MMHUB, reg, tmp);
                                else
                                        WREG32_XCC(reg, tmp, j);
                        }
                }
                break;
        default:
                break;
        }

        return 0;
}
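
/**
 * gmc_v12_1_process_interrupt - process a VM fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source the fault was routed through
 * @entry: decoded IH ring entry for the fault
 *
 * Decode the faulting address and hub from the IH entry, try to service
 * recoverable retry faults by filling page tables, and otherwise log the
 * fault details. Returns 1 if the fault was fully handled here, 0 to let
 * generic processing continue.
 */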
static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
{
        struct amdgpu_task_info *task_info;
        bool retry_fault = false, write_fault = false;
        unsigned int vmhub, node_id;
        struct amdgpu_vmhub *hub;
        uint32_t cam_index = 0;
        const char *hub_name;
        int ret, xcc_id = 0;
        uint32_t status = 0;
        u64 addr;

        node_id = entry->node_id;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0x1fff) << 44;

        if (entry->src_id == UTCL2_1_0__SRCID__RETRY) {
                retry_fault = true;
                write_fault = !!(entry->src_data[1] & 0x200000);
        }

        if (entry->client_id == SOC_V1_0_IH_CLIENTID_VMC) {
                hub_name = "mmhub0";
                vmhub = AMDGPU_MMHUB0(node_id / 4);
        } else {
                hub_name = "gfxhub0";
                if (adev->gfx.funcs->ih_node_to_logical_xcc) {
                        xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
                                node_id);
                        if (xcc_id < 0)
                                xcc_id = 0;
                }
                vmhub = xcc_id;
        }

        hub = &adev->vmhub[vmhub];

        if (retry_fault) {
                if (adev->irq.retry_cam_enabled) {
                        /* Delegate it to a different ring if the hardware hasn't
                         * already done it.
                         */
                        if (entry->ih == &adev->irq.ih) {
                                amdgpu_irq_delegate(adev, entry, 8);
                                return 1;
                        }

                        cam_index = entry->src_data[3] & 0x3ff;

                        ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
                                                     addr, entry->timestamp, write_fault);
                        WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
                        if (ret)
                                return 1;
                } else {
                        /* Process it only if it's the first fault for this address */
                        if (entry->ih != &adev->irq.ih_soft &&
                            amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
                                                     entry->timestamp))
                                return 1;

                        /* Delegate it to a different ring if the hardware hasn't
                         * already done it.
                         */
                        if (entry->ih == &adev->irq.ih) {
                                amdgpu_irq_delegate(adev, entry, 8);
                                return 1;
                        }

                        /* Try to handle the recoverable page faults by filling page
                         * tables
                         */
                        if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
                                                   addr, entry->timestamp, write_fault))
                                return 1;
                }
        }

        if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
                return 1;

        if (!printk_ratelimit())
                return 0;

        dev_err(adev->dev,
                "[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
                retry_fault ? "retry" : "no-retry",
                entry->src_id, entry->ring_id, entry->vmid, entry->pasid);

        task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
        if (task_info) {
                amdgpu_vm_print_task_info(adev, task_info);
                amdgpu_vm_put_task_info(task_info);
        }

        dev_err(adev->dev, " in page starting at address 0x%016llx from IH client %d (%s)\n",
                addr, entry->client_id, soc_v1_0_ih_clientid_name[entry->client_id]);

        if (amdgpu_sriov_vf(adev))
                return 0;

        /*
         * Issue a dummy read to wait for the status register to
         * be updated to avoid reading an incorrect value due to
         * the new fast GRBM interface.
         */
        if (entry->vmid_src == AMDGPU_GFXHUB(0))
                RREG32(hub->vm_l2_pro_fault_status);

        status = RREG32(hub->vm_l2_pro_fault_status);

        /* Only print L2 fault status if the status register could be read and
         * contains useful information
         */
        if (!status)
                return 0;

        WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

        amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

        hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);

        return 0;
}
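
/**
 * gmc_v12_1_get_vmid_pasid_mapping_info - query the vmid/pasid mapping
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to query
 * @inst: hub instance used to select the IH VMID LUT
 * @p_pasid: pasid to be returned
 *
 * Read the pasid currently bound to @vmid from the IH VMID LUT and return
 * true if the mapping is valid (a non-zero pasid).
 */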
static bool gmc_v12_1_get_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                                  uint8_t vmid, uint8_t inst,
                                                  uint16_t *p_pasid)
{
        uint16_t index;

        if (inst / 4)
                index = 0xA + inst % 4;
        else
                index = 0x2 + inst % 4;

        WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), index);

        *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

        return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
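
/**
 * gmc_v12_1_flush_vm_hub - VM flush via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Write the invalidation request directly to the hub's invalidation engine
 * and poll for the ACK, issuing an additional private invalidation on MMHUB.
 */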
static void gmc_v12_1_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;
        unsigned char hub_ip = 0;

        hub_ip = (AMDGPU_IS_GFXHUB(vmhub)) ?
                 GC_HWIP : MMHUB_HWIP;

        spin_lock(&adev->gmc.invalidate_lock);

        WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

        /* Wait for ACK with a delay. */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
                                        hub->eng_distance * eng, hub_ip);
                tmp &= 1 << vmid;
                if (tmp)
                        break;

                udelay(1);
        }

        /* Issue additional private vm invalidation to MMHUB */
        if (!AMDGPU_IS_GFXHUB(vmhub) &&
            (hub->vm_l2_bank_select_reserved_cid2) &&
            !amdgpu_sriov_vf(adev)) {
                inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
                /* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
                inv_req |= (1 << 25);
                /* Issue private invalidation */
                WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
                /* Read back to ensure invalidation is done */
                RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
        }

        spin_unlock(&adev->gmc.invalidate_lock);

        if (i < adev->usec_timeout)
                return;

        dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v12_1_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v12_1_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                    uint32_t vmhub, uint32_t flush_type)
{
        u32 inst;

        if (vmhub >= AMDGPU_MMHUB0(0))
                inst = 0;
        else
                inst = vmhub;

        /* This is necessary for SRIOV as well as for GFXOFF to function
         * properly under bare metal
         */
        if ((adev->gfx.kiq[inst].ring.sched.ready ||
             adev->mes.ring[MES_PIPE_INST(inst, 0)].sched.ready) &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
                const unsigned eng = 17;
                u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
                u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
                                                 1 << vmid, inst);
                return;
        }

        mutex_lock(&adev->mman.gtt_window_lock);
        gmc_v12_1_flush_vm_hub(adev, vmid, vmhub, 0);
        mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v12_1_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v12_1_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                          uint16_t pasid, uint32_t flush_type,
                                          bool all_hub, uint32_t inst)
{
        uint16_t queried;
        int vmid, i;

        for (vmid = 1; vmid < 16; vmid++) {
                bool valid;

                valid = gmc_v12_1_get_vmid_pasid_mapping_info(adev, vmid, inst,
                                                              &queried);
                if (!valid || queried != pasid)
                        continue;

                if (all_hub) {
                        for_each_set_bit(i, adev->vmhubs_mask,
                                         AMDGPU_MAX_VMHUBS)
                                gmc_v12_1_flush_gpu_tlb(adev, vmid, i,
                                                        flush_type);
                } else {
                        gmc_v12_1_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(inst),
                                                flush_type);
                }
        }
}

static uint64_t gmc_v12_1_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
        unsigned eng = ring->vm_inv_eng;

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        return pd_addr;
}
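
/**
 * gmc_v12_1_emit_pasid_mapping - update the vmid/pasid mapping from a ring
 *
 * @ring: amdgpu ring to emit the register write on
 * @vmid: vm instance the mapping is updated for
 * @pasid: pasid to bind to @vmid
 *
 * Emit a register write that updates the IH VMID LUT of the ring's hub so
 * that fault handling can translate the vmid back to a pasid.
 */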
static void gmc_v12_1_emit_pasid_mapping(struct amdgpu_ring *ring,
                                         unsigned vmid, unsigned pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        if (ring->vm_hub == AMDGPU_GFXHUB(0))
                reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63 P
 * 62:59 reserved
 * 58 D
 * 57 G
 * 56 T
 * 55:54 M
 * 53:52 SW
 * 51:48 reserved for future
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63 P
 * 62:58 block fragment size
 * 57 reserved
 * 56 A
 * 55:54 M
 * 53:52 reserved
 * 51:48 reserved for future
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static void gmc_v12_1_get_vm_pde(struct amdgpu_device *adev, int level,
                                 uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        *flags |= AMDGPU_PTE_SNOOPED;

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE_GFX12))
                        *flags |= AMDGPU_PDE_BFS_GFX12(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE_GFX12)
                        *flags &= ~AMDGPU_PDE_PTE_GFX12;
        }
}

static void gmc_v12_1_get_coherence_flags(struct amdgpu_device *adev,
                                          struct amdgpu_bo *bo,
                                          uint64_t *flags)
{
        struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
        bool is_vram = bo->tbo.resource &&
                bo->tbo.resource->mem_type == TTM_PL_VRAM;
        bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
                                     AMDGPU_GEM_CREATE_EXT_COHERENT);
        bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
        uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
        bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
        unsigned int mtype, mtype_local;
        bool snoop = false;
        bool is_local;

        switch (gc_ip_version) {
        case IP_VERSION(12, 1, 0):
                mtype_local = MTYPE_RW;
                if (amdgpu_mtype_local == 1) {
                        DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
                        mtype_local = MTYPE_NC;
                } else if (amdgpu_mtype_local == 2) {
                        DRM_INFO_ONCE("MTYPE_CC not supported, using MTYPE_RW instead for local memory\n");
                } else {
                        DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
                }

                is_local = (is_vram && adev == bo_adev);
                snoop = true;
                if (uncached) {
                        mtype = MTYPE_UC;
                } else if (ext_coherent) {
                        mtype = is_local ? mtype_local : MTYPE_UC;
                } else {
                        if (is_local)
                                mtype = mtype_local;
                        else
                                mtype = MTYPE_NC;
                }
                break;
        default:
                if (uncached || coherent)
                        mtype = MTYPE_UC;
                else
                        mtype = MTYPE_NC;
                break;
        }

        if (mtype != MTYPE_NC)
                *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, mtype);

        *flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}
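
/**
 * gmc_v12_1_get_vm_pte - compute the hardware PTE flags for a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: VM the mapping belongs to
 * @bo: buffer object backing the mapping, may be NULL for PRT mappings
 * @vm_flags: the AMDGPU_VM_PAGE_* and AMDGPU_VM_MTYPE_* flags requested
 * @flags: PTE flags to be updated
 *
 * Translate the generic mapping flags into GFX12 PTE bits, including MTYPE
 * selection, NOALLOC and PRT handling.
 */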
static void gmc_v12_1_get_vm_pte(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm,
                                 struct amdgpu_bo *bo,
                                 uint32_t vm_flags,
                                 uint64_t *flags)
{
        if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
                *flags |= AMDGPU_PTE_EXECUTABLE;
        else
                *flags &= ~AMDGPU_PTE_EXECUTABLE;

        switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
                break;
        case AMDGPU_VM_MTYPE_UC:
                *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
                break;
        case AMDGPU_VM_MTYPE_NC:
        default:
                *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
                break;
        }

        if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
                *flags |= AMDGPU_PTE_NOALLOC;
        else
                *flags &= ~AMDGPU_PTE_NOALLOC;

        if (vm_flags & AMDGPU_VM_PAGE_PRT) {
                *flags |= AMDGPU_PTE_SNOOPED;
                *flags |= AMDGPU_PTE_SYSTEM;
                *flags |= AMDGPU_PTE_IS_PTE;
                *flags &= ~AMDGPU_PTE_VALID;
        }

        if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
                               AMDGPU_GEM_CREATE_EXT_COHERENT |
                               AMDGPU_GEM_CREATE_UNCACHED))
                *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);

        if (adev->have_atomics_support)
                *flags |= AMDGPU_PTE_BUS_ATOMICS;

        if ((*flags & AMDGPU_PTE_VALID) && bo)
                gmc_v12_1_get_coherence_flags(adev, bo, flags);
}

static const struct amdgpu_gmc_funcs gmc_v12_1_gmc_funcs = {
        .flush_gpu_tlb = gmc_v12_1_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v12_1_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v12_1_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v12_1_emit_pasid_mapping,
        .get_vm_pde = gmc_v12_1_get_vm_pde,
        .get_vm_pte = gmc_v12_1_get_vm_pte,
        .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
        .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
};

void gmc_v12_1_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v12_1_gmc_funcs;
}

static const struct amdgpu_irq_src_funcs gmc_v12_1_irq_funcs = {
        .set = gmc_v12_1_vm_fault_interrupt_state,
        .process = gmc_v12_1_process_interrupt,
};

void gmc_v12_1_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v12_1_irq_funcs;
}

void gmc_v12_1_init_vram_info(struct amdgpu_device *adev)
{
        /* TODO: query vram_info from ip discovery binary */
        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM4;
        adev->gmc.vram_width = 384 * 64;
}