/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/dma-buf.h>
#include <linux/sizes.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"

MODULE_IMPORT_NS("DMA_BUF");

#define AMDGPU_TTM_VRAM_MAX_DW_READ	((size_t)128)

static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm);

static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
				   unsigned int type,
				   uint64_t size_in_page)
{
	return ttm_range_man_init(&adev->mman.bdev, type,
				  false, size_in_page);
}

/**
 * amdgpu_evict_flags - Compute placement flags
 *
 * @bo: The buffer object to evict
 * @placement: Possible destination(s) for evicted BO
 *
 * Fill in placement data when ttm_bo_evict() is called
 */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	/* Don't handle scatter gather BOs */
	if (bo->type == ttm_bo_type_sg) {
		placement->num_placement = 0;
		return;
	}

	/* Object isn't an AMDGPU object so ignore */
	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->num_placement = 1;
		return;
	}

	abo = ttm_to_amdgpu_bo(bo);
	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
		placement->num_placement = 0;
		return;
	}

	switch (bo->resource->mem_type) {
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
	case AMDGPU_PL_DOORBELL:
	case AMDGPU_PL_MMIO_REMAP:
		placement->num_placement = 0;
		return;

	case TTM_PL_VRAM:
		if (!adev->mman.buffer_funcs_enabled) {
			/* Move to system memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);

		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
			   amdgpu_res_cpu_visible(adev, bo->resource)) {

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
			abo->placements[0].lpfn = 0;
			abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
		} else {
			/* Move to GTT memory */
			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		}
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
	default:
		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
		break;
	}
	*placement = abo->placement;
}
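
/**
 * amdgpu_ttm_job_submit - Pad the IB and submit a prepared SDMA job
 * @adev: amdgpu device
 * @entity: buffer entity the job belongs to; entity->lock must be held
 * @job: job whose first IB has already been filled in
 * @num_dw: number of DWs the IB was sized for, used to sanity-check it
 *
 * Returns: the fence of the submitted job.
 */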
static struct dma_fence *
amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_ttm_buffer_entity *entity,
		      struct amdgpu_job *job, u32 num_dw)
{
	struct amdgpu_ring *ring;

	ring = adev->mman.buffer_funcs_ring;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	lockdep_assert_held(&entity->lock);

	return amdgpu_job_submit(job);
}
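
/*
 * Each GART window used below spans AMDGPU_GTT_MAX_TRANSFER_SIZE GPU pages.
 * amdgpu_ttm_map_buffer() generates the 8-byte window PTEs on the CPU into
 * the job's IB and has the same submission copy them into the GART page
 * table; the PTE-update job and the copy that uses the window go to the same
 * entity under entity->lock, so ring ordering keeps the mapping valid.
 */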

/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @entity: entity to run the window setup job
 * @bo: buffer object to map
 * @mem: memory object to map
 * @mm_cur: range to map
 * @window: which GART window to use
 * @tmz: if we should setup a TMZ enabled mapping
 * @size: in number of bytes to map, out number of bytes mapped
 * @addr: resulting address inside the MC address space
 *
 * Setup one of the GART windows to access a specific piece of memory or return
 * the physical address for local memory.
 */
static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
				 struct ttm_buffer_object *bo,
				 struct ttm_resource *mem,
				 struct amdgpu_res_cursor *mm_cur,
				 unsigned int window,
				 bool tmz, uint64_t *size, uint64_t *addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	unsigned int offset, num_pages, num_dw, num_bytes;
	uint64_t src_addr, dst_addr;
	struct amdgpu_job *job;
	void *cpu_addr;
	uint64_t flags;
	int r;

	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);

	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
		return -EINVAL;

	/* Map only what can't be accessed directly */
	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
			mm_cur->start;
		return 0;
	}

	/*
	 * If start begins at an offset inside the page, then adjust the size
	 * and addr accordingly
	 */
	offset = mm_cur->start & ~PAGE_MASK;

	num_pages = PFN_UP(*size + offset);
	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);

	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);

	*addr = amdgpu_compute_gart_address(&adev->gmc, entity, window);
	*addr += offset;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	dst_addr += (entity->gart_window_offs[window] >> AMDGPU_GPU_PAGE_SHIFT) * 8;
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
	if (tmz)
		flags |= AMDGPU_PTE_TMZ;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	if (mem->mem_type == TTM_PL_TT) {
		dma_addr_t *dma_addr;

		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
	} else {
		u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;

		amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
	}

	dma_fence_put(amdgpu_ttm_job_submit(adev, entity, job, num_dw));
	return 0;
}

/**
 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 * @adev: amdgpu device
 * @entity: entity to run the jobs
 * @src: buffer/address where to read from
 * @dst: buffer/address where to write to
 * @size: number of bytes to copy
 * @tmz: if a secure copy should be used
 * @resv: resv object to sync to
 * @f: Returns the last fence if multiple jobs are submitted.
 *
 * The function copies @size bytes from {src->mem + src->offset} to
 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
 * move and different for a BO to BO copy.
 */
__attribute__((nonnull))
static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
				      struct amdgpu_ttm_buffer_entity *entity,
				      const struct amdgpu_copy_mem *src,
				      const struct amdgpu_copy_mem *dst,
				      uint64_t size, bool tmz,
				      struct dma_resv *resv,
				      struct dma_fence **f)
{
	struct amdgpu_res_cursor src_mm, dst_mm;
	struct dma_fence *fence = NULL;
	int r = 0;
	uint32_t copy_flags = 0;
	struct amdgpu_bo *abo_src, *abo_dst;

	if (!adev->mman.buffer_funcs_enabled) {
		dev_err(adev->dev,
			"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);

	mutex_lock(&entity->lock);
	while (src_mm.remaining) {
		uint64_t from, to, cur_size, tiling_flags;
		uint32_t num_type, data_format, max_com, write_compress_disable;
		struct dma_fence *next;

		/* Never copy more than 256MiB at once to avoid a timeout */
		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);

		/* Map src to window 0 and dst to window 1. */
		r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
					  0, tmz, &cur_size, &from);
		if (r)
			goto error;

		r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
					  1, tmz, &cur_size, &to);
		if (r)
			goto error;

		abo_src = ttm_to_amdgpu_bo(src->bo);
		abo_dst = ttm_to_amdgpu_bo(dst->bo);
		if (tmz)
			copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
		if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
			copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
		if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
		    (dst->mem->mem_type == TTM_PL_VRAM)) {
			copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
			amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
			max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
			num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
			data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
			write_compress_disable =
				AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
			copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
				       AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
				       AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
				       AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
							     write_compress_disable));
		}

		r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
				       &next, true, copy_flags);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&src_mm, cur_size);
		amdgpu_res_next(&dst_mm, cur_size);
	}
error:
	mutex_unlock(&entity->lock);
	*f = fence;
	return r;
}

/*
 * amdgpu_move_blit - Copy an entire buffer to another buffer
 *
 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
 * help move buffers to and from VRAM.
 */
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *new_mem,
			    struct ttm_resource *old_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_copy_mem src, dst;
	struct dma_fence *fence = NULL;
	int r;

	src.bo = bo;
	dst.bo = bo;
	src.mem = old_mem;
	dst.mem = new_mem;
	src.offset = 0;
	dst.offset = 0;

	r = amdgpu_ttm_copy_mem_to_mem(adev,
				       &adev->mman.move_entity,
				       &src, &dst,
				       new_mem->size,
				       amdgpu_bo_encrypted(abo),
				       bo->base.resv, &fence);
	if (r)
		goto error;

	/* clear the space being freed */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
		struct dma_fence *wipe_fence = NULL;

		r = amdgpu_fill_buffer(&adev->mman.move_entity,
				       abo, 0, NULL, &wipe_fence,
				       AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
		if (r) {
			goto error;
		} else if (wipe_fence) {
			amdgpu_vram_mgr_set_cleared(bo->resource);
			dma_fence_put(fence);
			fence = wipe_fence;
		}
	}

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

error:
	if (fence)
		dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
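
/*
 * On boards where the CPU-visible BAR covers only part of VRAM, a VRAM
 * resource is CPU accessible only if every block of it lies below
 * adev->gmc.visible_vram_size, which is what the walk below verifies.
 */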
/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	if (!res)
		return false;

	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
	    res->mem_type == AMDGPU_PL_MMIO_REMAP)
		return true;

	if (res->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}

/*
 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
 *
 * Called by amdgpu_bo_move()
 */
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
				struct ttm_resource *mem)
{
	if (!amdgpu_res_cpu_visible(adev, mem))
		return false;

	/* ttm_resource_ioremap only supports contiguous memory */
	if (mem->mem_type == TTM_PL_VRAM &&
	    !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
		return false;

	return true;
}

/*
 * amdgpu_bo_move - Move a buffer object to a new memory location
 *
 * Called by ttm_bo_handle_move_mem()
 */
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = bo->resource;
	int r;

	if (new_mem->mem_type == TTM_PL_TT ||
	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	abo = ttm_to_amdgpu_bo(bo);
	adev = amdgpu_ttm_adev(bo->bdev);

	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
			 bo->ttm == NULL)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    (new_mem->mem_type == TTM_PL_TT ||
	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT ||
	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = ttm_bo_wait_ctx(bo, ctx);
		if (r)
			return r;

		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	}

	if (old_mem->mem_type == AMDGPU_PL_GDS ||
	    old_mem->mem_type == AMDGPU_PL_GWS ||
	    old_mem->mem_type == AMDGPU_PL_OA ||
	    old_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
	    new_mem->mem_type == AMDGPU_PL_GDS ||
	    new_mem->mem_type == AMDGPU_PL_GWS ||
	    new_mem->mem_type == AMDGPU_PL_OA ||
	    new_mem->mem_type == AMDGPU_PL_DOORBELL ||
	    new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
		/* Nothing to save here */
		amdgpu_bo_move_notify(bo, evict, new_mem);
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	if (bo->type == ttm_bo_type_device &&
	    new_mem->mem_type == TTM_PL_VRAM &&
	    old_mem->mem_type != TTM_PL_VRAM) {
		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
		 * accesses the BO after it's moved.
		 */
		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	}

	if (adev->mman.buffer_funcs_enabled &&
	    ((old_mem->mem_type == TTM_PL_SYSTEM &&
	      new_mem->mem_type == TTM_PL_VRAM) ||
	     (old_mem->mem_type == TTM_PL_VRAM &&
	      new_mem->mem_type == TTM_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		return -EMULTIHOP;
	}

	amdgpu_bo_move_notify(bo, evict, new_mem);
	if (adev->mman.buffer_funcs_enabled)
		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
	else
		r = -ENODEV;

	if (r) {
		/* Check that all memory is CPU accessible */
		if (!amdgpu_res_copyable(adev, old_mem) ||
		    !amdgpu_res_copyable(adev, new_mem)) {
			pr_err("Move buffer fallback to memcpy unavailable\n");
			return r;
		}

		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

	/* update statistics after the move */
	if (evict)
		atomic64_inc(&adev->num_evictions);
	atomic64_add(bo->base.size, &adev->num_bytes_moved);
	return 0;
}

/*
 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
				     struct ttm_resource *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
	case AMDGPU_PL_PREEMPT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (adev->mman.aper_base_kaddr &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
					mem->bus.offset;

		mem->bus.offset += adev->gmc.aper_base;
		mem->bus.is_iomem = true;
		break;
	case AMDGPU_PL_DOORBELL:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->doorbell.base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	case AMDGPU_PL_MMIO_REMAP:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.offset += adev->rmmio_remap.bus_addr;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_uncached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					   unsigned long page_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
			 &cursor);

	if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
		return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
	else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
		return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;

	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}

/**
 * amdgpu_ttm_domain_start - Returns GPU start address
 * @adev: amdgpu device object
 * @type: type of the memory
 *
 * Returns:
 * GPU start address of a memory domain
 */
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
	switch (type) {
	case TTM_PL_TT:
		return adev->gmc.gart_start;
	case TTM_PL_VRAM:
		return adev->gmc.vram_start;
	}

	return 0;
}
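
/*
 * For example, the MC address of a byte offset inside a GTT resource is
 * amdgpu_ttm_domain_start(adev, TTM_PL_TT) + (mem->start << PAGE_SHIFT) + off;
 * this is how amdgpu_ttm_map_buffer() resolves memory the GPU can already
 * address directly.
 */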

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_tt {
	struct ttm_tt		ttm;
	struct drm_gem_object	*gobj;
	u64			offset;
	uint64_t		userptr;
	struct task_struct	*usertask;
	uint32_t		userflags;
	bool			bound;
	int32_t			pool_id;
};

#define ttm_to_amdgpu_ttm_tt(ptr)	container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 * memory and start HMM tracking CPU page table update
 *
 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
 * once afterwards to stop HMM tracking. It is the caller's responsibility to
 * ensure that the range is valid memory and that it is freed as well.
 */
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
				 struct amdgpu_hmm_range *range)
{
	struct ttm_tt *ttm = bo->tbo.ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long start = gtt->userptr;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	bool readonly;
	int r = 0;

	mm = bo->notifier.mm;
	if (unlikely(!mm)) {
		DRM_DEBUG_DRIVER("BO is not registered?\n");
		return -EFAULT;
	}

	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
		return -ESRCH;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, start);
	if (unlikely(!vma)) {
		r = -EFAULT;
		goto out_unlock;
	}
	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
		     vma->vm_file)) {
		r = -EPERM;
		goto out_unlock;
	}

	readonly = amdgpu_ttm_tt_is_readonly(ttm);
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, range);
out_unlock:
	mmap_read_unlock(mm);
	if (r)
		pr_debug("failed %d to get user pages 0x%lx\n", r, start);

	mmput(mm);

	return r;
}

#endif

/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}

/*
 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 *
 * Called by amdgpu_ttm_backend_bind()
 */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
					    DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	int r;

	/* Allocate an SG array and squash pages into it */
	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      (u64)ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg_table;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
				       ttm->num_pages);

	return 0;

release_sg_table:
	sg_free_table(ttm->sg);
release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;
	return r;
}

/*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
					struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
					    DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg || !ttm->sg->sgl)
		return;

	/* unmap the pages mapped to the device */
	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
	sg_free_table(ttm->sg);
}

/*
 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
 * MQDn+CtrlStackn where n is the number of XCCs per partition.
 * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD
 * and uses memory type default, UC. The rest of pages_per_xcc are
 * Ctrl stack and modify their memory type to NC.
 */
static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
					  struct ttm_tt *ttm, uint64_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint64_t total_pages = ttm->num_pages;
	int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
	uint64_t page_idx, pages_per_xcc;
	int i;
	uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);

	pages_per_xcc = total_pages;
	do_div(pages_per_xcc, num_xcc);

	for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
		/* MQD page: use default flags */
		amdgpu_gart_bind(adev,
				 gtt->offset + (page_idx << PAGE_SHIFT),
				 1, &gtt->ttm.dma_address[page_idx], flags);
		/*
		 * Ctrl pages - modify the memory type to NC (ctrl_flags) from
		 * the second page of the BO onward.
		 */
		amdgpu_gart_bind(adev,
				 gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
				 pages_per_xcc - 1,
				 &gtt->ttm.dma_address[page_idx + 1],
				 ctrl_flags);
	}
}

static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
				 struct ttm_buffer_object *tbo,
				 uint64_t flags)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
	struct ttm_tt *ttm = tbo->ttm;
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (amdgpu_bo_encrypted(abo))
		flags |= AMDGPU_PTE_TMZ;

	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
		amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
	} else {
		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
				 gtt->ttm.dma_address, flags);
	}
	gtt->bound = true;
}

/*
 * amdgpu_ttm_backend_bind - Bind GTT memory
 *
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	uint64_t flags;
	int r;

	if (!bo_mem)
		return -EINVAL;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
		if (r) {
			dev_err(adev->dev, "failed to pin userptr\n");
			return r;
		}
	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
		if (!ttm->sg) {
			struct dma_buf_attachment *attach;
			struct sg_table *sgt;

			attach = gtt->gobj->import_attach;
			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
			if (IS_ERR(sgt))
				return PTR_ERR(sgt);

			ttm->sg = sgt;
		}

		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
					       ttm->num_pages);
	}

	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type != TTM_PL_TT ||
	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
		return 0;
	}

	/* compute PTE flags relevant to this BO memory */
	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);

	/* bind pages into GART page tables */
	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			 gtt->ttm.dma_address, flags);
	gtt->bound = true;
	return 0;
}

/*
 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
 * through AGP or GART aperture.
 *
 * If bo is accessible through AGP aperture, then use AGP aperture
 * to access bo; otherwise allocate logical space in GART aperture
 * and map bo to GART aperture.
 */
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	struct ttm_placement placement;
	struct ttm_place placements;
	struct ttm_resource *tmp;
	uint64_t addr, flags;
	int r;

	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	addr = amdgpu_gmc_agp_addr(bo);
	if (addr != AMDGPU_BO_INVALID_OFFSET)
		return 0;

	/* allocate GART space */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
	placements.mem_type = TTM_PL_TT;
	placements.flags = bo->resource->placement;

	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
	if (unlikely(r))
		return r;

	/* compute PTE flags for this buffer object */
	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);

	/* Bind pages */
	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
	amdgpu_ttm_gart_bind(adev, bo, flags);
	amdgpu_gart_invalidate_tlb(adev);
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, tmp);

	return 0;
}

/*
 * amdgpu_ttm_recover_gart - Rebind GTT pages
 *
 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
 * rebind GTT pages during a GPU reset.
 */
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	uint64_t flags;

	if (!tbo->ttm)
		return;

	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
	amdgpu_ttm_gart_bind(adev, tbo, flags);
}

/*
 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
 *
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
				      struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	/* if the pages have userptr pinning then clear that first */
	if (gtt->userptr) {
		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
	} else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
		struct dma_buf_attachment *attach;

		attach = gtt->gobj->import_attach;
		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
		ttm->sg = NULL;
	}

	if (!gtt->bound)
		return;

	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
		return;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
	gtt->bound = false;
}

static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
				       struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt->usertask)
		put_task_struct(gtt->usertask);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

/**
 * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for the MMIO_REMAP I/O aperture
 * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
 * @res: TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
 * @dev: importing device to map for (typically @attach->dev in dma-buf paths)
 * @dir: DMA data direction for the importer (passed to dma_map_resource())
 * @sgt: output; on success, set to a newly allocated sg_table describing the I/O span
 *
 * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in
 * a PCI BAR; there are no struct pages to back it. Importers still need a DMA
 * address list, so we synthesize a minimal sg_table and populate it from
 * dma_map_resource(), not from pages. Using the common amdgpu_res_cursor
 * walker keeps the offset/size math consistent with other TTM/manager users.
 *
 * - @res is assumed to be a small, contiguous I/O region (typically a single
 *   4 KiB page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement
 *   before calling.
 * - The sg entry is created with sg_set_page(sg, NULL, …) to reflect I/O space.
 * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
 * - Peer reachability / p2pdma policy checks must be done by the caller.
 *
 * Return:
 * * 0 on success, with *@sgt set to a valid table that must be freed via
 *   amdgpu_ttm_mmio_remap_free_sgt().
 * * -ENOMEM if allocation of the sg_table fails.
 * * -EIO if dma_map_resource() fails.
 */
int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
				    struct ttm_resource *res,
				    struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table **sgt)
{
	struct amdgpu_res_cursor cur;
	dma_addr_t dma;
	resource_size_t phys;
	struct scatterlist *sg;
	int r;

	/* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
	amdgpu_res_first(res, 0, res->size, &cur);

	/* Translate byte offset in the remap window into a host physical BAR address. */
	phys = adev->rmmio_remap.bus_addr + cur.start;

	/* Build a single-entry sg_table mapped as I/O (no struct page backing). */
	*sgt = kzalloc_obj(**sgt);
	if (!*sgt)
		return -ENOMEM;
	r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
	if (r) {
		kfree(*sgt);
		return r;
	}
	sg = (*sgt)->sgl;
	sg_set_page(sg, NULL, cur.size, 0); /* I/O space: no struct pages */

	dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma)) {
		sg_free_table(*sgt);
		kfree(*sgt);
		return -EIO;
	}
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = cur.size;
	return 0;
}

void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table *sgt)
{
	struct scatterlist *sg = sgt->sgl;

	dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
			   dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

/**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
 * @bo: The buffer object to create a GTT ttm_tt object around
 * @page_flags: Page flags to be added to the ttm_tt object
 *
 * Called by ttm_tt_create().
 */
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_ttm_tt *gtt;
	enum ttm_caching caching;

	gtt = kzalloc_obj(struct amdgpu_ttm_tt);
	if (!gtt)
		return NULL;

	gtt->gobj = &bo->base;
	if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
		gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
	else
		gtt->pool_id = abo->xcp_id;

	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	/* allocate space for the uninitialized page entries */
	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		ttm->sg = kzalloc_obj(struct sg_table);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}

/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	amdgpu_ttm_backend_unbind(bdev, ttm);

	if (gtt->userptr) {
		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->sg = NULL;
		return;
	}

	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}

/**
 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
 * task
 *
 * @tbo: The ttm_buffer_object that contains the userptr
 * @user_addr: The returned value
 */
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr)
{
	struct amdgpu_ttm_tt *gtt;

	if (!tbo->ttm)
		return -EINVAL;

	gtt = (void *)tbo->ttm;
	*user_addr = gtt->userptr;
	return 0;
}
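
/*
 * Userptr life cycle in this file: amdgpu_ttm_tt_set_userptr() records the
 * address and flags, amdgpu_ttm_tt_get_user_pages() faults the pages in via
 * HMM, and amdgpu_ttm_backend_bind() pins and DMA-maps them through
 * amdgpu_ttm_tt_pin_userptr().
 */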

/**
 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
 * task
 *
 * @bo: The ttm_buffer_object to bind this userptr to
 * @addr: The address in the current tasks VM space to use
 * @flags: Requirements of userptr object.
 *
 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
 * initialize GPU VM for a KFD process.
 */
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt;

	if (!bo->ttm) {
		/* TODO: We want a separate TTM object type for userptrs */
		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
		if (bo->ttm == NULL)
			return -ENOMEM;
	}

	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

	gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
	gtt->userptr = addr;
	gtt->userflags = flags;

	if (gtt->usertask)
		put_task_struct(gtt->usertask);
	gtt->usertask = current->group_leader;
	get_task_struct(gtt->usertask);

	return 0;
}

/*
 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return NULL;

	if (gtt->usertask == NULL)
		return NULL;

	return gtt->usertask->mm;
}

/*
 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
 * address range for the current task.
 */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	/* Return false if no part of the ttm_tt object lies within
	 * the range
	 */
	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	if (userptr)
		*userptr = gtt->userptr;
	return true;
}

/*
 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL || !gtt->userptr)
		return false;

	return true;
}

/*
 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

/**
 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
 *
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PDE (Page Directory Entry).
 */
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
	uint64_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && (mem->mem_type == TTM_PL_TT ||
		    mem->mem_type == AMDGPU_PL_DOORBELL ||
		    mem->mem_type == AMDGPU_PL_PREEMPT ||
		    mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm && ttm->caching == ttm_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (mem && mem->mem_type == TTM_PL_VRAM &&
	    mem->bus.caching == ttm_cached)
		flags |= AMDGPU_PTE_SNOOPED;

	return flags;
}

/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	flags |= adev->gart.gart_pte_flags;
	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

/*
 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
 * object.
 *
 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
 * it can find space for a new object and by ttm_bo_force_list_clean() which is
 * used to clean out a memory space.
 */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
					    const struct ttm_place *place)
{
	struct dma_resv_iter resv_cursor;
	struct dma_fence *f;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return ttm_bo_eviction_valuable(bo, place);

	/* Swapout? */
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	if (bo->type == ttm_bo_type_kernel &&
	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
		return false;

	/* If bo is a KFD BO, check if the bo belongs to the current process.
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm) &&
		    !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
			return false;
	}

	/* Preemptible BOs don't own system resources managed by the
	 * driver (pages, VRAM, GART space). They point to resources
	 * owned by someone else (e.g. pageable memory in user mode
	 * or a DMABuf). They are used in a preemptible context so we
	 * can guarantee no deadlocks and good QoS in case of MMU
	 * notifiers or DMABuf move notifiers from the resource owner.
	 */
	if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
		return false;

	if (bo->resource->mem_type == TTM_PL_TT &&
	    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}
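
/*
 * amdgpu_ttm_vram_mm_access - access VRAM through the MM register aperture
 *
 * Accesses are done in aligned 4-byte units; unaligned head and tail bytes
 * are handled with a read-modify-write of the containing DW.
 */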
static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
				      void *buf, size_t size, bool write)
{
	while (size) {
		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
		uint64_t bytes = 4 - (pos & 0x3);
		uint32_t shift = (pos & 0x3) * 8;
		uint32_t mask = 0xffffffff << shift;
		uint32_t value = 0;

		if (size < bytes) {
			mask &= 0xffffffff >> (bytes - size) * 8;
			bytes = size;
		}

		if (mask != 0xffffffff) {
			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
			if (write) {
				value &= ~mask;
				value |= (*(uint32_t *)buf << shift) & mask;
				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
			} else {
				value = (value & mask) >> shift;
				memcpy(buf, &value, bytes);
			}
		} else {
			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
		}

		pos += bytes;
		buf += bytes;
		size -= bytes;
	}
}

static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
					 unsigned long offset, void *buf,
					 int len, int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor src_mm;
	struct amdgpu_job *job;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	unsigned int num_dw;
	int r, idx;

	if (len != PAGE_SIZE)
		return -EINVAL;

	if (!adev->mman.sdma_access_ptr)
		return -EACCES;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	if (write)
		memcpy(adev->mman.sdma_access_ptr, buf, len);

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
				     &job,
				     AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
	if (r)
		goto out;

	mutex_lock(&adev->mman.default_entity.lock);
	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
		   src_mm.start;
	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
	if (write)
		swap(src_addr, dst_addr);

	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
				PAGE_SIZE, 0);

	fence = amdgpu_ttm_job_submit(adev, &adev->mman.default_entity, job, num_dw);
	mutex_unlock(&adev->mman.default_entity.lock);

	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
		r = -ETIMEDOUT;
	dma_fence_put(fence);

	if (!(r || write))
		memcpy(buf, adev->mman.sdma_access_ptr, len);
out:
	drm_dev_exit(idx);
	return r;
}
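
/*
 * The SDMA path above copies a whole page through the bounce buffer at
 * adev->mman.sdma_access_ptr; amdgpu_ttm_access_memory() below prefers it
 * when timeouts are enabled and falls back to MMIO aperture access.
 */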
/**
 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
 *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
 * @buf: Secondary buffer to write/read from
 * @len: Length in bytes of access
 * @write: true if writing
 *
 * This is used to access VRAM that backs a buffer object via MMIO
 * access for debugging purposes.
 */
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
				    unsigned long offset, void *buf, int len,
				    int write)
{
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_res_cursor cursor;
	int ret = 0;

	if (bo->resource->mem_type != TTM_PL_VRAM)
		return -EIO;

	if (amdgpu_device_has_timeouts_enabled(adev) &&
	    !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
		return len;

	amdgpu_res_first(bo->resource, offset, len, &cursor);
	while (cursor.remaining) {
		size_t count, size = cursor.size;
		loff_t pos = cursor.start;

		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
		size -= count;
		if (size) {
			/* use MM register access for the rest of VRAM and
			 * for unaligned addresses
			 */
			pos += count;
			buf += count;
			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
		}

		ret += cursor.size;
		buf += cursor.size;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return ret;
}

static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	amdgpu_bo_move_notify(bo, false, NULL);
}

static struct ttm_device_funcs amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
	.access_memory = &amdgpu_ttm_access_memory,
};

/*
 * Firmware Reservation functions
 */
/**
 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free fw reserved vram if it has been reserved.
 */
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
			      NULL, &adev->mman.fw_vram_usage_va);
}

/*
 * Driver Reservation functions
 */
/**
 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free drv reserved vram if it has been reserved.
 */
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
			      NULL,
			      &adev->mman.drv_vram_usage_va);
}

/**
 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from fw.
 */
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
	uint64_t vram_size = adev->gmc.visible_vram_size;

	adev->mman.fw_vram_usage_va = NULL;
	adev->mman.fw_vram_usage_reserved_bo = NULL;

	if (adev->mman.fw_vram_usage_size == 0 ||
	    adev->mman.fw_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.fw_vram_usage_start_offset,
					  adev->mman.fw_vram_usage_size,
					  &adev->mman.fw_vram_usage_reserved_bo,
					  &adev->mman.fw_vram_usage_va);
}

/**
 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
 *
 * @adev: amdgpu_device pointer
 *
 * create bo vram reservation from drv.
 */
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
	u64 vram_size = adev->gmc.visible_vram_size;

	adev->mman.drv_vram_usage_va = NULL;
	adev->mman.drv_vram_usage_reserved_bo = NULL;

	if (adev->mman.drv_vram_usage_size == 0 ||
	    adev->mman.drv_vram_usage_size > vram_size)
		return 0;

	return amdgpu_bo_create_kernel_at(adev,
					  adev->mman.drv_vram_usage_start_offset,
					  adev->mman.drv_vram_usage_size,
					  &adev->mman.drv_vram_usage_reserved_bo,
					  &adev->mman.drv_vram_usage_va);
}

/*
 * Memory training reservation functions
 */

/**
 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
 *
 * @adev: amdgpu_device pointer
 *
 * free memory training reserved vram if it has been reserved.
 */
static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
	ctx->c2p_bo = NULL;

	return 0;
}

static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
						uint32_t reserve_size)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;

	memset(ctx, 0, sizeof(*ctx));

	ctx->c2p_train_data_offset =
		ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
	ctx->p2c_train_data_offset =
		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
	ctx->train_data_size =
		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
}

/*
 * reserve TMR memory at the top of VRAM which holds
 * IP Discovery data and is protected by PSP.
 */
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
	bool mem_train_support = false;
	uint32_t reserve_size = 0;
	int ret;

	if (adev->bios && !amdgpu_sriov_vf(adev)) {
		if (amdgpu_atomfirmware_mem_training_supported(adev))
			mem_train_support = true;
		else
			DRM_DEBUG("memory training is not supported!\n");
	}

	/*
	 * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all
	 * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc)
	 *
	 * Otherwise, fall back to the legacy approach to check and reserve the tmr block for ip
	 * discovery data and G6 memory training data respectively
	 */
	if (adev->bios)
		reserve_size =
			amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);

	if (!adev->bios &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
		reserve_size = max(reserve_size, (uint32_t)280 << 20);
	else if (!adev->bios &&
		 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
		if (hweight32(adev->aid_mask) == 1)
			reserve_size = max(reserve_size, (uint32_t)128 << 20);
		else
			reserve_size = max(reserve_size, (uint32_t)144 << 20);
	} else if (!reserve_size)
		reserve_size = DISCOVERY_TMR_OFFSET;

	if (mem_train_support) {
		/* reserve vram for mem train according to TMR location */
		amdgpu_ttm_training_data_block_init(adev, reserve_size);
		ret = amdgpu_bo_create_kernel_at(adev,
						 ctx->c2p_train_data_offset,
						 ctx->train_data_size,
						 &ctx->c2p_bo,
						 NULL);
		if (ret) {
			dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
			amdgpu_ttm_training_reserve_vram_fini(adev);
			return ret;
		}
		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
	}

	ret = amdgpu_bo_create_kernel_at(
		adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
		&adev->mman.fw_reserved_memory, NULL);
	if (ret) {
		dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
				      NULL);
		return ret;
	}

	return 0;
}
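
/*
 * On app APUs with multiple memory partitions, create one TTM pool per
 * partition so page allocations land on the partition's NUMA node.
 */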
/**
 * amdgpu_ttm_alloc_mmio_remap_bo - Allocate the singleton MMIO_REMAP BO
 * @adev: amdgpu device
 *
 * Allocates a global BO with backing AMDGPU_PL_MMIO_REMAP when the
 * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
 * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
 * GEM object (amdgpu_bo_create).
 *
 * Return:
 * * 0 on success or intentional skip (feature not present/unsupported)
 * * negative errno on allocation failure
 */
static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement;
	struct ttm_buffer_object *tbo;
	struct ttm_place placements;
	struct amdgpu_bo_param bp;
	struct ttm_resource *tmp;
	int r;

	/* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
	if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
		return 0;

	/*
	 * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
	 * The initial TTM resource assigned by amdgpu_bo_create() is
	 * replaced below with a fixed MMIO_REMAP placement.
	 */
	memset(&bp, 0, sizeof(bp));
	bp.type = ttm_bo_type_device;
	bp.size = AMDGPU_GPU_PAGE_SIZE;
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp.domain = 0;
	bp.flags = 0;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
	r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
	if (r)
		goto err_unref;

	tbo = &adev->rmmio_remap.bo->tbo;

	/*
	 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
	 */
	placement.num_placement = 1;
	placement.placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.mem_type = AMDGPU_PL_MMIO_REMAP;
	placements.flags = 0;
	/* Force the BO into the fixed MMIO_REMAP placement */
	r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
	if (unlikely(r))
		goto err_unlock;

	ttm_resource_free(tbo, &tbo->resource);
	ttm_bo_assign_mem(tbo, tmp);
	ttm_bo_pin(tbo);

	amdgpu_bo_unreserve(adev->rmmio_remap.bo);
	return 0;

err_unlock:
	amdgpu_bo_unreserve(adev->rmmio_remap.bo);

err_unref:
	amdgpu_bo_unref(&adev->rmmio_remap.bo);
	adev->rmmio_remap.bo = NULL;
	return r;
}
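/*
 * Minimal sketch of the "create, then force a fixed placement" pattern
 * used above, generalized to an arbitrary single-slot memory type.
 * Illustrative only: the helper name is hypothetical, nothing in the
 * driver calls it, and the BO must already be reserved by the caller.
 */
static int __maybe_unused
amdgpu_ttm_force_placement_example(struct ttm_buffer_object *tbo,
				   uint32_t mem_type)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_place place = { .mem_type = mem_type };
	struct ttm_placement placement;
	struct ttm_resource *res;
	int r;

	placement.num_placement = 1;
	placement.placement = &place;

	/* find space in the target manager, then swap the resource */
	r = ttm_bo_mem_space(tbo, &placement, &res, &ctx);
	if (r)
		return r;

	ttm_resource_free(tbo, &tbo->resource);
	ttm_bo_assign_mem(tbo, res);
	ttm_bo_pin(tbo);
	return 0;
}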
/**
 * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
 * @adev: amdgpu device
 *
 * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
 * amdgpu_ttm_alloc_mmio_remap_bo().
 */
static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
{
	if (!adev->rmmio_remap.bo)
		return;

	if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
		ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
		amdgpu_bo_unreserve(adev->rmmio_remap.bo);
	}

	/*
	 * At this point we rely on normal DRM teardown ordering:
	 * no new user ioctls can access the global MMIO_REMAP BO
	 * once TTM teardown begins.
	 */
	amdgpu_bo_unref(&adev->rmmio_remap.bo);
	adev->rmmio_remap.bo = NULL;
}

static int amdgpu_ttm_buffer_entity_init(struct amdgpu_gtt_mgr *mgr,
					 struct amdgpu_ttm_buffer_entity *entity,
					 enum drm_sched_priority prio,
					 struct drm_gpu_scheduler **scheds,
					 int num_schedulers,
					 u32 num_gart_windows)
{
	int i, r, num_pages;

	/* Validate the window count up front so we don't have to unwind
	 * the scheduler entity on failure.
	 */
	if (ARRAY_SIZE(entity->gart_window_offs) < num_gart_windows)
		return -EINVAL;

	r = drm_sched_entity_init(&entity->base, prio, scheds, num_schedulers, NULL);
	if (r)
		return r;

	mutex_init(&entity->lock);

	if (num_gart_windows == 0)
		return 0;

	num_pages = num_gart_windows * AMDGPU_GTT_MAX_TRANSFER_SIZE;
	r = amdgpu_gtt_mgr_alloc_entries(mgr, &entity->gart_node, num_pages,
					 DRM_MM_INSERT_BEST);
	if (r) {
		drm_sched_entity_destroy(&entity->base);
		return r;
	}

	for (i = 0; i < num_gart_windows; i++) {
		entity->gart_window_offs[i] =
			amdgpu_gtt_node_to_byte_offset(&entity->gart_node) +
			i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE;
	}

	return 0;
}

static void amdgpu_ttm_buffer_entity_fini(struct amdgpu_gtt_mgr *mgr,
					  struct amdgpu_ttm_buffer_entity *entity)
{
	amdgpu_gtt_mgr_free_entries(mgr, &entity->gart_node);
	drm_sched_entity_destroy(&entity->base);
}
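/*
 * Worked example (illustrative; never called): each entity that
 * requests GART windows gets a contiguous GTT node of
 * num_gart_windows * AMDGPU_GTT_MAX_TRANSFER_SIZE pages, and window i
 * starts i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE bytes into it.
 * Assuming a transfer size of 512 pages and 4 KiB pages (values used
 * here only for the sake of the example), consecutive windows would be
 * 2 MiB apart.
 */
static u64 __maybe_unused
amdgpu_ttm_gart_window_off_example(u64 node_base, u32 i)
{
	return node_base + (u64)i * AMDGPU_GTT_MAX_TRANSFER_SIZE * PAGE_SIZE;
}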
/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;

	dma_set_max_seg_size(adev->dev, UINT_MAX);
	/* No other users of the address space, so set it to 0 */
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			    adev_to_drm(adev)->anon_inode->i_mapping,
			    adev_to_drm(adev)->vma_offset_manager,
			    (adev->need_swiotlb ?
			     TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
			    (dma_addressing_limited(adev->dev) ?
			     TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
			    TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
	if (r) {
		dev_err(adev->dev,
			"failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	r = amdgpu_ttm_pools_init(adev);
	if (r) {
		dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	if (!adev->gmc.is_app_apu) {
		/* Initialize VRAM pool with all of VRAM divided into pages */
		r = amdgpu_vram_mgr_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed initializing VRAM heap.\n");
			return r;
		}
	}

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
#ifdef CONFIG_X86
	if (adev->gmc.xgmi.connected_to_cpu)
		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);

	else if (adev->gmc.is_app_apu)
		DRM_DEBUG_DRIVER(
			"No need to ioremap when real vram size is 0\n");
	else
#endif
		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved VRAM for firmware must be pinned to the specified
	 * place in VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved VRAM for the driver must be pinned to a specific
	 * location in VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_drv_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * Only NAVI10 and later ASICs support IP discovery.
	 * If IP discovery is enabled, a block of memory should be
	 * reserved for it.
	 */
	if (adev->discovery.reserve_tmr) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* Allocate memory as required for VGA.
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	if (!adev->gmc.is_app_apu) {
		r = amdgpu_bo_create_kernel_at(adev, 0,
					       adev->mman.stolen_vga_size,
					       &adev->mman.stolen_vga_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
					       adev->mman.stolen_extended_size,
					       &adev->mman.stolen_extended_memory,
					       NULL);

		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev,
					       adev->mman.stolen_reserved_offset,
					       adev->mman.stolen_reserved_size,
					       &adev->mman.stolen_reserved_memory,
					       NULL);
		if (r)
			return r;
	} else {
		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
	}

	dev_info(adev->dev, " %uM of VRAM memory ready\n",
		 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));

	/* Compute GTT size, either based on the TTM limit
	 * or whatever the user passed on module init.
	 */
	gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
	if (amdgpu_gtt_size != -1) {
		uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;

		drm_warn(&adev->ddev,
			 "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
		if (gtt_size != configured_size)
			drm_warn(&adev->ddev,
				 "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
				 configured_size, gtt_size);

		gtt_size = configured_size;
	}
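	/*
	 * Worked example (illustrative helper below; never called): by
	 * default the GTT pool is sized from the global TTM pages limit,
	 * i.e. ttm_tt_pages_limit() << PAGE_SHIFT bytes.  The legacy
	 * amdgpu.gttsize module parameter is given in MiB, so e.g.
	 * gttsize=8192 yields 8192ULL << 20 = 8 GiB and overrides the
	 * TTM-derived default, as sketched here:
	 *
	 *	static uint64_t amdgpu_ttm_gtt_size_example(void)
	 *	{
	 *		if (amdgpu_gtt_size != -1)
	 *			return (uint64_t)amdgpu_gtt_size << 20;
	 *
	 *		return ttm_tt_pages_limit() << PAGE_SHIFT;
	 *	}
	 */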
	/* Initialize GTT memory pool */
	r = amdgpu_gtt_mgr_init(adev, gtt_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing GTT heap.\n");
		return r;
	}
	dev_info(adev->dev, " %uM of GTT memory ready.\n",
		 (unsigned int)(gtt_size / (1024 * 1024)));

	if (adev->flags & AMD_IS_APU) {
		if (adev->gmc.real_vram_size < gtt_size)
			adev->apu_prefer_gtt = true;
	}

	/* Initialize doorbell pool on PCI BAR */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
	if (r) {
		dev_err(adev->dev, "Failed initializing doorbell heap.\n");
		return r;
	}

	/* Create a doorbell page for kernel use */
	r = amdgpu_doorbell_create_kernel_doorbells(adev);
	if (r) {
		dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
		return r;
	}

	/* Initialize MMIO-remap pool (single 4K page) */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
	if (r) {
		dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
		return r;
	}

	/* Allocate the singleton MMIO_REMAP BO if supported */
	r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
	if (r)
		return r;

	/* Initialize preemptible memory pool */
	r = amdgpu_preempt_mgr_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
		return r;
	}

	/* Initialize various on-chip memory pools */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing GDS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing GWS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing OA heap.\n");
		return r;
	}
	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mman.sdma_access_bo, NULL,
				    &adev->mman.sdma_access_ptr))
		drm_warn(adev_to_drm(adev),
			 "Debug VRAM access will use slowpath MM access\n");

	return 0;
}
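/*
 * Usage sketch (hypothetical pool type; this helper is illustrative and
 * never called): the on-chip/fixed pools above are plain TTM range
 * managers, and amdgpu_ttm_init_on_chip() takes its size in pages, not
 * bytes — which is why the MMIO-remap pool is created with a size of 1
 * and the doorbell pool with adev->doorbell.size / PAGE_SIZE.
 */
static int __maybe_unused
amdgpu_ttm_one_page_pool_example(struct amdgpu_device *adev,
				 unsigned int pool_type)
{
	/* a one-page pool, mirroring the AMDGPU_PL_MMIO_REMAP setup */
	return amdgpu_ttm_init_on_chip(adev, pool_type, 1);
}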
/*
 * amdgpu_ttm_fini - De-initialize the TTM memory pools
 */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int idx;

	if (!adev->mman.initialized)
		return;

	amdgpu_ttm_pools_fini(adev);

	amdgpu_ttm_training_reserve_vram_fini(adev);
	/* return the stolen vga memory back to VRAM */
	if (!adev->gmc.is_app_apu) {
		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
		/* return the FW reserved memory back to VRAM */
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
				      NULL);
		amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
				      NULL);
		if (adev->mman.stolen_reserved_size)
			amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
					      NULL, NULL);
	}
	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
			      &adev->mman.sdma_access_ptr);

	amdgpu_ttm_free_mmio_remap_bo(adev);
	amdgpu_ttm_fw_reserve_vram_fini(adev);
	amdgpu_ttm_drv_reserve_vram_fini(adev);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {

		if (adev->mman.aper_base_kaddr)
			iounmap(adev->mman.aper_base_kaddr);
		adev->mman.aper_base_kaddr = NULL;

		drm_dev_exit(idx);
	}

	if (!adev->gmc.is_app_apu)
		amdgpu_vram_mgr_fini(adev);
	amdgpu_gtt_mgr_fini(adev);
	amdgpu_preempt_mgr_fini(adev);
	amdgpu_doorbell_fini(adev);

	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
	ttm_device_fini(&adev->mman.bdev);
	adev->mman.initialized = false;
	dev_info(adev->dev, " ttm finalized\n");
}
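/*
 * Minimal sketch (illustrative; never called) of the
 * drm_dev_enter()/drm_dev_exit() idiom used in amdgpu_ttm_fini() above:
 * the critical section only runs while the DRM device has not been
 * unplugged, which makes the MMIO unmap safe against hot-unplug racing
 * with teardown.
 */
static void __maybe_unused
amdgpu_ttm_unplug_guard_example(struct amdgpu_device *adev)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;	/* device already unplugged, skip HW access */

	/* ... touch MMIO / device memory here ... */

	drm_dev_exit(idx);
}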
/**
 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
 *
 * @adev: amdgpu_device pointer
 * @enable: true when we can use buffer functions.
 *
 * Enable/disable use of buffer functions during suspend/resume. This should
 * only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	uint64_t size;
	int r;

	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
	    adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
		return;

	if (enable) {
		struct amdgpu_ring *ring;
		struct drm_gpu_scheduler *sched;

		if (!adev->mman.buffer_funcs_ring || !adev->mman.buffer_funcs_ring->sched.ready) {
			dev_warn(adev->dev, "Not enabling DMA transfers for in-kernel use");
			return;
		}

		ring = adev->mman.buffer_funcs_ring;
		sched = &ring->sched;
		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
						  &adev->mman.default_entity,
						  DRM_SCHED_PRIORITY_KERNEL,
						  &sched, 1, 0);
		if (r < 0) {
			dev_err(adev->dev,
				"Failed setting up TTM entity (%d)\n", r);
			return;
		}

		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
						  &adev->mman.clear_entity,
						  DRM_SCHED_PRIORITY_NORMAL,
						  &sched, 1, 1);
		if (r < 0) {
			dev_err(adev->dev,
				"Failed setting up TTM BO clear entity (%d)\n", r);
			goto error_free_default_entity;
		}

		r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
						  &adev->mman.move_entity,
						  DRM_SCHED_PRIORITY_NORMAL,
						  &sched, 1, 2);
		if (r < 0) {
			dev_err(adev->dev,
				"Failed setting up TTM BO move entity (%d)\n", r);
			goto error_free_clear_entity;
		}
	} else {
		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
					      &adev->mman.default_entity);
		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
					      &adev->mman.clear_entity);
		amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
					      &adev->mman.move_entity);
		/* Drop all the old fences since re-creating the scheduler entities
		 * will allocate new contexts.
		 */
		ttm_resource_manager_cleanup(man);
	}

	/* This just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
	if (enable)
		size = adev->gmc.real_vram_size;
	else
		size = adev->gmc.visible_vram_size;
	man->size = size;
	adev->mman.buffer_funcs_enabled = enable;

	return;

error_free_clear_entity:
	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
				      &adev->mman.clear_entity);
error_free_default_entity:
	amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr,
				      &adev->mman.default_entity);
}

static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
				  struct amdgpu_ttm_buffer_entity *entity,
				  unsigned int num_dw,
				  struct dma_resv *resv,
				  bool vm_needs_flush,
				  struct amdgpu_job **job,
				  u64 k_job_id)
{
	enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
	int r;

	r = amdgpu_job_alloc_with_ib(adev, &entity->base,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, pool, job, k_job_id);
	if (r)
		return r;

	if (vm_needs_flush) {
		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
							adev->gmc.pdb0_bo :
							adev->gart.bo);
		(*job)->vm_needs_flush = true;
	}
	if (!resv)
		return 0;

	r = drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
						DMA_RESV_USAGE_BOOKKEEP);
	if (r) {
		/* Don't leak the job on failure; callers only get a
		 * valid job back when this function succeeds.
		 */
		amdgpu_job_free(*job);
		*job = NULL;
	}

	return r;
}

int amdgpu_copy_buffer(struct amdgpu_device *adev,
		       struct amdgpu_ttm_buffer_entity *entity,
		       uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence,
		       bool vm_needs_flush, uint32_t copy_flags)
{
	unsigned int num_loops, num_dw;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	ring = adev->mman.buffer_funcs_ring;

	if (!ring->sched.ready) {
		dev_err(adev->dev,
			"Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
				   resv, vm_needs_flush, &job,
				   AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
	if (r) {
		dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
		return r;
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes, copy_flags);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	*fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);

	return 0;
}
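/*
 * Worked example (illustrative; never called): sizing the copy IB.
 * A 100 MiB copy with a hypothetical copy_max_bytes of 4 MiB needs
 * DIV_ROUND_UP(100M, 4M) = 25 copy packets; the dword count is the
 * per-packet copy_num_dw times the loop count, aligned up to 8 dwords
 * for ring padding.
 */
static unsigned int __maybe_unused
amdgpu_ttm_copy_num_dw_example(struct amdgpu_device *adev,
			       uint32_t byte_count)
{
	uint32_t max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	unsigned int num_loops = DIV_ROUND_UP(byte_count, max_bytes);

	return ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
}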
static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
			       struct amdgpu_ttm_buffer_entity *entity,
			       uint32_t src_data,
			       uint64_t dst_addr, uint32_t byte_count,
			       struct dma_resv *resv,
			       struct dma_fence **fence,
			       bool vm_needs_flush,
			       u64 k_job_id)
{
	unsigned int num_loops, num_dw;
	struct amdgpu_job *job;
	uint32_t max_bytes;
	unsigned int i;
	int r;

	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
	r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
				   vm_needs_flush, &job, k_job_id);
	if (r)
		return r;

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size = min(byte_count, max_bytes);

		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
					cur_size);

		dst_addr += cur_size;
		byte_count -= cur_size;
	}

	*fence = amdgpu_ttm_job_submit(adev, entity, job, num_dw);
	return 0;
}

/**
 * amdgpu_ttm_clear_buffer - clear memory buffers
 * @bo: amdgpu buffer object
 * @resv: reservation object
 * @fence: dma_fence associated with the operation
 *
 * Clear the memory buffer resource.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
			    struct dma_resv *resv,
			    struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_ttm_buffer_entity *entity;
	struct amdgpu_res_cursor cursor;
	u64 addr;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled)
		return -EINVAL;

	if (!fence)
		return -EINVAL;

	entity = &adev->mman.clear_entity;
	*fence = dma_fence_get_stub();

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);

	mutex_lock(&entity->lock);
	while (cursor.remaining) {
		struct dma_fence *next = NULL;
		u64 size;

		if (amdgpu_res_cleared(&cursor)) {
			amdgpu_res_next(&cursor, cursor.size);
			continue;
		}

		/* Never clear more than 256MiB at once to avoid timeouts */
		size = min(cursor.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &cursor,
					  0, false, &size, &addr);
		if (r)
			goto err;

		r = amdgpu_ttm_fill_mem(adev, entity, 0, addr, size, resv,
					&next, true,
					AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
		if (r)
			goto err;

		dma_fence_put(*fence);
		*fence = next;

		amdgpu_res_next(&cursor, size);
	}
err:
	mutex_unlock(&entity->lock);

	return r;
}

int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
		       struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **f,
		       u64 k_job_id)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct dma_fence *fence = NULL;
	struct amdgpu_res_cursor dst;
	int r = 0;

	if (!adev->mman.buffer_funcs_enabled) {
		dev_err(adev->dev,
			"Trying to clear memory with ring turned off.\n");
		return -EINVAL;
	}

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);

	mutex_lock(&entity->lock);
	while (dst.remaining) {
		struct dma_fence *next;
		uint64_t cur_size, to;

		/* Never fill more than 256MiB at once to avoid timeouts */
		cur_size = min(dst.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
					  0, false, &cur_size, &to);
		if (r)
			goto error;

		r = amdgpu_ttm_fill_mem(adev, entity,
					src_data, to, cur_size, resv,
					&next, true, k_job_id);
		if (r)
			goto error;

		dma_fence_put(fence);
		fence = next;

		amdgpu_res_next(&dst, cur_size);
	}
error:
	mutex_unlock(&entity->lock);
	if (f)
		*f = dma_fence_get(fence);
	dma_fence_put(fence);
	return r;
}
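/*
 * Minimal sketch (illustrative; never called) of the fence-chaining
 * idiom used by amdgpu_ttm_clear_buffer() and amdgpu_fill_buffer()
 * above: each chunk submission returns a fence, the previous fence is
 * dropped, and only the last one — which orders after all earlier jobs
 * on the same entity — is handed back to the caller.  The callback
 * type here is hypothetical.
 */
static struct dma_fence *__maybe_unused
amdgpu_ttm_fence_chain_example(struct dma_fence *(*submit_one)(void *),
			       void *ctx, unsigned int chunks)
{
	struct dma_fence *fence = dma_fence_get_stub();
	unsigned int i;

	for (i = 0; i < chunks; i++) {
		struct dma_fence *next = submit_one(ctx);

		dma_fence_put(fence);	/* drop the previous chunk's fence */
		fence = next;
	}

	return fence;	/* caller owns the final reference */
}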
/**
 * amdgpu_ttm_evict_resources - evict memory buffers
 * @adev: amdgpu device object
 * @mem_type: evicted BO's memory type
 *
 * Evicts all @mem_type buffers on the LRU list of the memory type.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
{
	struct ttm_resource_manager *man;

	switch (mem_type) {
	case TTM_PL_VRAM:
	case TTM_PL_TT:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_OA:
		man = ttm_manager_type(&adev->mman.bdev, mem_type);
		break;
	default:
		dev_err(adev->dev, "Trying to evict invalid memory type\n");
		return -EINVAL;
	}

	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);

/*
 * amdgpu_ttm_vram_read - Linear read access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
	while (size) {
		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];

		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
		if (copy_to_user(buf, value, bytes))
			return -EFAULT;

		result += bytes;
		buf += bytes;
		*pos += bytes;
		size -= bytes;
	}

	return result;
}

/*
 * amdgpu_ttm_vram_write - Linear write access to VRAM
 *
 * Accesses VRAM via MMIO for debugging purposes.
 */
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
				     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	if (*pos >= adev->gmc.mc_vram_size)
		return -ENXIO;

	while (size) {
		uint32_t value;

		if (*pos >= adev->gmc.mc_vram_size)
			return result;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		amdgpu_device_mm_access(adev, *pos, &value, 4, true);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.write = amdgpu_ttm_vram_write,
	.llseek = default_llseek,
};
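/*
 * Userspace usage sketch for the amdgpu_vram debugfs file above (the
 * path and card index are assumptions for a typical single-GPU system;
 * offsets and sizes must be 4-byte aligned or the access fails with
 * -EINVAL):
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);
 *	uint32_t dw;
 *
 *	pread(fd, &dw, sizeof(dw), 0x1000);	// read one dword at 4 KiB
 *	close(fd);
 */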
/*
 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
 *
 * This function is used to read memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
				 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	/* retrieve the IOMMU domain if any for this device */
	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		/* Translate the bus address to a physical address.  If
		 * the domain is NULL it means there is no IOMMU active
		 * and the address translation is the identity.
		 */
		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap_local_page(p);
		r = copy_to_user(buf, ptr + off, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		size -= bytes;
		buf += bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}

/*
 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
 *
 * This function is used to write memory that has been mapped to the
 * GPU and the known addresses are not physical addresses but instead
 * bus addresses (e.g., what you'd put in an IB or ring buffer).
 */
static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
				  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	struct iommu_domain *dom;
	ssize_t result = 0;
	int r;

	dom = iommu_get_domain_for_dev(adev->dev);

	while (size) {
		phys_addr_t addr = *pos & PAGE_MASK;
		loff_t off = *pos & ~PAGE_MASK;
		size_t bytes = PAGE_SIZE - off;
		unsigned long pfn;
		struct page *p;
		void *ptr;

		bytes = min(bytes, size);

		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;

		pfn = addr >> PAGE_SHIFT;
		if (!pfn_valid(pfn))
			return -EPERM;

		p = pfn_to_page(pfn);
		if (p->mapping != adev->mman.bdev.dev_mapping)
			return -EPERM;

		ptr = kmap_local_page(p);
		r = copy_from_user(ptr + off, buf, bytes);
		kunmap_local(ptr);
		if (r)
			return -EFAULT;

		size -= bytes;
		buf += bytes;
		*pos += bytes;
		result += bytes;
	}

	return result;
}
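/*
 * Minimal sketch (illustrative; never called) of the IOVA-to-physical
 * translation used by both debugfs accessors above: with no IOMMU
 * domain attached, the DMA address already is the physical address;
 * otherwise iommu_iova_to_phys() walks the IOMMU page tables.
 */
static phys_addr_t __maybe_unused
amdgpu_iomem_iova_to_phys_example(struct device *dev, dma_addr_t iova)
{
	struct iommu_domain *dom = iommu_get_domain_for_dev(dev);

	return dom ? iommu_iova_to_phys(dom, iova) : (phys_addr_t)iova;
}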
static const struct file_operations amdgpu_ttm_iomem_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_iomem_read,
	.write = amdgpu_iomem_write,
	.llseek = default_llseek
};

#endif

void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
			    &amdgpu_ttm_iomem_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, adev,
			    &amdgpu_ttm_page_pool_fops);
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_VRAM),
					    root, "amdgpu_vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_TT),
					    root, "amdgpu_gtt_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GDS),
					    root, "amdgpu_gds_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GWS),
					    root, "amdgpu_gws_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_OA),
					    root, "amdgpu_oa_mm");

#endif
}