/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/io-64-nonatomic-lo-hi.h>
#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include <drm/drm_drv.h>

/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}
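
/*
 * Sizing sketch for the allocation above, under assumed values (not taken
 * from any particular ASIC): a 4-node hive with a 32 GiB node_segment_size
 * and vmid0_page_table_block_size = 9 gives pde0_page_shift = 9 + 21 = 30,
 * so each PDE0 entry spans 1 GiB and npdes = 128 GiB >> 30 = 128. With one
 * extra entry reserved for the GART PTB pointer, bp.size becomes
 * PAGE_ALIGN(129 * 8) = 4 KiB, i.e. a single page.
 */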

/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (Page Directory Entry).
 */
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}

/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}

/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
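
/*
 * Worked example for amdgpu_gmc_set_pte_pde() with hypothetical values:
 * gpu_page_idx = 2, addr = 0x12345678000 and
 * flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM. The mask keeps address
 * bits 12..47 (the address is page aligned, so nothing is lost), the flags
 * occupy the low bits, and the combined value 0x12345678003 is written at
 * byte offset 2 * 8 = 16 of the mapped page table.
 */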

/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}

/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gmc_sysvm_location - place VRAM and GART in the sysvm aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In such
 * a case, we use the sysvm aperture (vmid0 page tables) for both VRAM
 * and GART (aka system memory) access.
 *
 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
 * aperture to be placed at a location aligned to 8 times the native
 * page size. For example, if vm_context0_cntl.page_table_block_size
 * is 12, the native page size is 8G (2M * 2^12), so sysvm must start
 * at a 64G aligned address. For simplicity, we just put sysvm at
 * address 0, so VRAM starts at address 0 and GART follows right after it.
 */
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
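
/*
 * Layout sketch for amdgpu_gmc_sysvm_location() under assumed values: a
 * 4-node hive with a 32 GiB node_segment_size puts the shared FB at
 * [0, 128 GiB); node 2 sees its local VRAM window at [64 GiB, 96 GiB), and
 * the GART aperture starts at 128 GiB, right after the hive-wide VRAM range.
 */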

/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GART before or after VRAM.
 * If the GART size is bigger than the space left, we adjust the GART size.
 * Thus the function never fails.
 */
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

/**
 * amdgpu_gmc_agp_location - try to find AGP location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to find a place for the AGP BAR in the MC address
 * space.
 *
 * AGP BAR will be assigned the largest available hole in the address space.
 * Should be called after VRAM and GART locations are setup.
 */
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
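
/*
 * Example for amdgpu_gmc_agp_location() under an assumed layout: with
 * fb_start = 0 and GART placed at the top of the MC range, size_bf is 0
 * and size_af is the gap between the 16 GiB aligned end of the FB and the
 * 16 GiB aligned start of GART, so the AGP window lands directly after
 * the FB, rounded up to the next 16 GiB boundary.
 */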

/**
 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
 *
 * @addr: 48 bit physical address, page aligned (36 significant bits)
 * @pasid: 16 bit process address space identifier
 */
static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
{
	return addr << 4 | pasid;
}
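
/*
 * Note on the packing above: @addr is page aligned, so its low 12 bits are
 * zero; shifting left by 4 therefore clears the low 16 bits, which exactly
 * hold the 16-bit PASID without overlap. For example, addr = 0x1000 and
 * pasid = 0x42 give key = 0x10042.
 */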

/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @ih: interrupt ring that the fault received from
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* Stale retry fault if timestamp goes backward */
	if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
		return true;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (atomic64_read(&fault->key) == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	atomic64_set(&fault->key, key);
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
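
/*
 * Behavior sketch of the filter above: ring entries older than
 * AMDGPU_GMC_FAULT_TIMEOUT relative to the newest timestamp are treated as
 * expired, so a fault that recurs after the timeout is reported again
 * instead of being silently dropped, and the hash chain walk stops as soon
 * as it reaches a ring entry that has been reused.
 */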

/**
 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 *
 * Remove the address from the fault filter, so that future VM faults on
 * this address are passed to the retry fault handler to recover.
 */
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
				     uint16_t pasid)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;
	uint64_t tmp;

	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	do {
		if (atomic64_cmpxchg(&fault->key, key, 0) == key)
			break;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];
	} while (fault->timestamp < tmp);
}

int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.ras = &xgmi_ras;
		amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
		adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
	}

	return 0;
}

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	return 0;
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{

}

/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 * subject to change when ring number changes
 * Engine 17: Gart flushes
 */
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		 GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	if (adev->enable_mes) {
		/* reserve engine 5 for firmware */
		for (vmhub = 0; vmhub < AMDGPU_MAX_VMHUBS; vmhub++)
			vm_inv_engs[vmhub] &= ~(1 << 5);
	}

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
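
/*
 * Example for the bitmaps above: 0x1FFF3 has bits 0, 1 and 4..16 set, i.e.
 * engines 0, 1 and 4..16 are free while engines 2 and 3 stay reserved for
 * firmware. Since ffs() returns the lowest set bit, the first two rings on
 * a hub get engines 0 and 1, and the third ring gets engine 4.
 */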

/**
 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	/* RAVEN */
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 1, 0):
	/* RENOIR looks like RAVEN */
	case IP_VERSION(9, 3, 0):
	/* GC 10.3.7 */
	case IP_VERSION(10, 3, 7):
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	/* VANGOGH */
	case IP_VERSION(10, 3, 1):
	/* YELLOW_CARP */
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		/* Don't enable it by default yet.
		 */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_info(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}

/**
 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
	bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
				gc_ver == IP_VERSION(9, 3, 0) ||
				gc_ver == IP_VERSION(9, 4, 0) ||
				gc_ver == IP_VERSION(9, 4, 1) ||
				gc_ver == IP_VERSION(9, 4, 2) ||
				gc_ver >= IP_VERSION(10, 3, 0));

	gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
}
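
/*
 * Both decisions above honor their module parameters: amdgpu_tmz
 * (-1 = auto, 0 = off, 1 = on) and amdgpu_noretry (-1 = use the per-ASIC
 * default computed above, 0 = retry enabled, 1 = retry disabled).
 */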

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = (hub_type == AMDGPU_GFXHUB_0) ?
			RREG32_SOC15_IP(GC, reg) :
			RREG32_SOC15_IP(MMHUB, reg);

		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		(hub_type == AMDGPU_GFXHUB_0) ?
			WREG32_SOC15_IP(GC, reg, tmp) :
			WREG32_SOC15_IP(MMHUB, reg, tmp);
	}
}

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * Some ASICs need to reserve a region of video memory to avoid access
	 * from the driver
	 */
	adev->mman.stolen_reserved_offset = 0;
	adev->mman.stolen_reserved_size = 0;

	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first 8M,
	 * and causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation while this is not solved.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->mman.keep_stolen_vga_memory = true;
		/*
		 * VEGA10 SRIOV VF with MS_HYPERV host needs some firmware reserved area.
		 */
#ifdef CONFIG_X86
		if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) {
			adev->mman.stolen_reserved_offset = 0x500000;
			adev->mman.stolen_reserved_size = 0x200000;
		}
#endif
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	case CHIP_YELLOW_CARP:
		if (amdgpu_discovery == 0) {
			adev->mman.stolen_reserved_offset = 0x1ffb0000;
			adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
		}
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_has_display_hardware(adev)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}
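
/*
 * Example of the split above (hypothetical size): if the reported pre-OS
 * framebuffer exceeds AMDGPU_VBIOS_VGA_ALLOCATION, only that much is kept
 * as stolen_vga_size and the remainder becomes stolen_extended_size, which
 * is tracked separately so it can be freed independently of the VGA portion.
 */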

/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTE by setting
 * the P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 */
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
	 */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTE,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB that has more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 * address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of buffer
 */
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}

/**
 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 * GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}

/**
 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 * from CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}

int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
	struct amdgpu_bo *vram_bo = NULL;
	uint64_t vram_gpu = 0;
	void *vram_ptr = NULL;

	int ret, size = 0x100000;
	uint8_t cptr[10];

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &vram_bo,
				      &vram_gpu,
				      &vram_ptr);
	if (ret)
		return ret;

	memset(vram_ptr, 0x86, size);
	memset(cptr, 0x86, 10);

	/*
	 * Check the start, the mid and the end of the memory against the
	 * pattern 0x86. If all three match, we assume the VRAM BO is
	 * workable.
	 *
	 * Note: checking every byte of the whole 1M BO would take too many
	 * seconds, so here we just pick three spots for the check.
	 */
	ret = memcmp(vram_ptr, cptr, 10);
	if (ret)
		goto release_buffer;

	ret = memcmp(vram_ptr + (size / 2), cptr, 10);
	if (ret)
		goto release_buffer;

	ret = memcmp(vram_ptr + size - 10, cptr, 10);

release_buffer:
	/* Free the BO on both the success and the failure paths, so a
	 * failed check no longer leaks the test buffer.
	 */
	amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
			      &vram_ptr);

	return ret;
}