/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
#include "amdgpu_ttm.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				 AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				    AMDGPU_PTE_PRT)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype)	((uint64_t)(mtype) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_VG10(flags, mtype)	\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_VG10_MASK)) |	\
	 AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype))

#define AMDGPU_MTYPE_NC		0
#define AMDGPU_MTYPE_CC		2
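/*
 * Illustrative sketch (not part of this header): building a GFX9 PTE value
 * for a readable/writeable system page with the cache-coherent memory type.
 * The MTYPE macro masks out any previously set MTYPE bits before or-ing in
 * the new one:
 *
 *	uint64_t pte = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *		       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
 *	pte = AMDGPU_PTE_MTYPE_VG10(pte, AMDGPU_MTYPE_CC);
 */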
#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(0ULL, AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype)	((uint64_t)(mtype) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10_SHIFT(7ULL)
#define AMDGPU_PTE_MTYPE_NV10(flags, mtype)	\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_NV10_MASK)) |	\
	 AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype))

/* gfx12 */
#define AMDGPU_PTE_PRT_GFX12	(1ULL << 56)
#define AMDGPU_PTE_PRT_FLAG(adev)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? \
	 AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)

#define AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype)	((uint64_t)(mtype) << 54)
#define AMDGPU_PTE_MTYPE_GFX12_MASK	AMDGPU_PTE_MTYPE_GFX12_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_GFX12(flags, mtype)	\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_GFX12_MASK)) |	\
	 AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype))

#define AMDGPU_PTE_DCC		(1ULL << 58)
#define AMDGPU_PTE_IS_PTE	(1ULL << 63)

/* PDE Block Fragment Size for gfx v12 */
#define AMDGPU_PDE_BFS_GFX12(a)	((uint64_t)((a) & 0x1fULL) << 58)
#define AMDGPU_PDE_BFS_FLAG(adev, a)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? \
	 AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))

/* PDE is handled as PTE for gfx v12 */
#define AMDGPU_PDE_PTE_GFX12	(1ULL << 63)
#define AMDGPU_PDE_PTE_FLAG(adev)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? \
	 AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* How much VRAM is reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS	13
#define AMDGPU_GFXHUB_START	0
#define AMDGPU_MMHUB0_START	8
#define AMDGPU_MMHUB1_START	12
#define AMDGPU_GFXHUB(x)	(AMDGPU_GFXHUB_START + (x))
#define AMDGPU_MMHUB0(x)	(AMDGPU_MMHUB0_START + (x))
#define AMDGPU_MMHUB1(x)	(AMDGPU_MMHUB1_START + (x))

#define AMDGPU_IS_GFXHUB(x)	((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
#define AMDGPU_IS_MMHUB0(x)	((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x)	((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)

/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev)	(((adev)->vm_manager.max_pfn \
						  << AMDGPU_GPU_PAGE_SHIFT)  \
						 - AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev)	(AMDGPU_VA_RESERVED_CSA_START(adev) \
						 - AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE		(2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev)	(AMDGPU_VA_RESERVED_SEQ64_START(adev) \
						 - AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM		(1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP			(AMDGPU_VA_RESERVED_TRAP_SIZE + \
						 AMDGPU_VA_RESERVED_SEQ64_SIZE + \
						 AMDGPU_VA_RESERVED_CSA_SIZE)

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
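/*
 * Illustrative sketch (not part of this header): vm_update_mode in
 * struct amdgpu_vm_manager combines the two flags above, so a check
 * whether compute page tables should be updated by the CPU could look
 * like:
 *
 *	if (adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE)
 *		vm->use_cpu_for_update = true;
 */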
/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by vm reservation and invalidated_lock */
	struct list_head		vm_status;

	/* true if the bo is counted as shared in the mem stats,
	 * protected by the VM BO being reserved */
	bool				shared;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	struct drm_wedge_task_info task;
	char			process_name[TASK_COMM_LEN];
	pid_t			tgid;
	struct kref		refcount;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @needs_flush: true whenever we need to invalidate the TLB
	 */
	bool needs_flush;

	/**
	 * @allow_override: true for memory that is not uncached: allows MTYPE
	 * to be overridden for NUMA local memory.
	 */
	bool allow_override;

	/**
	 * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
	 */
	struct list_head tlb_flush_waitlist;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p,
		       struct amdgpu_sync *sync, u64 k_job_id);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
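/*
 * Illustrative sketch (not part of this header): both backends
 * (amdgpu_vm_cpu_funcs and amdgpu_vm_sdma_funcs) are driven through the
 * same prepare/update/commit sequence, roughly:
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev,
 *		.vm = vm,
 *	};
 *	int r = vm->update_funcs->prepare(&params, sync, k_job_id);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, bo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 */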
struct amdgpu_vm_fault_info {
	/* fault address */
	uint64_t	addr;
	/* fault status register */
	uint32_t	status;
	/* which vmhub? gfxhub, mmhub, etc. */
	unsigned int	vmhub;
};

struct amdgpu_mem_stats {
	struct drm_memory_stats drm;

	/* buffers that requested this placement but are currently evicted */
	uint64_t evicted;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Memory statistics for this vm, protected by stats_lock */
	spinlock_t		stats_lock;
	struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];

	/*
	 * The following lists contain amdgpu_vm_bo_base objects for either
	 * PDs, PTs or per VM BOs. The state transitions are:
	 *
	 * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
	 *
	 * Lists are protected by the root PD dma_resv lock.
	 */

	/* Per-VM and PT BOs that need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/*
	 * The following lists contain amdgpu_vm_bo_base objects for BOs which
	 * have their own dma_resv object and don't depend on the root PD.
	 * Their state transitions are:
	 *
	 * evicted_user or invalidated -> done
	 *
	 * Lists are protected by the invalidated_lock.
	 */
	spinlock_t		invalidated_lock;

	/* BOs for user mode queues that need a validation */
	struct list_head	evicted_user;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs which were invalidated and have been updated in the PTs */
	struct list_head	done;

	/*
	 * This list contains amdgpu_bo_va_mapping objects which have been freed
	 * but not updated in the PTs
	 */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_bo_base root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;
	atomic64_t		kfd_last_flushed_seq;
	uint64_t		tlb_fence_context;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info *task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;

	/* cached fault info */
	struct amdgpu_vm_fault_info fault_info;
};
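/*
 * Illustrative sketch (not part of this header): a typical command
 * submission drives the state machine above roughly like this, with the
 * root PD reserved (exact ordering depends on the caller):
 *
 *	r = amdgpu_vm_validate(adev, vm, ticket, callback, param);
 *	if (!r)
 *		r = amdgpu_vm_clear_freed(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm, ticket);
 *	if (!r)
 *		r = amdgpu_vm_update_pdes(adev, vm, false);
 */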
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct xarray				pasids;
	/* Global registration of recent page fault information */
	struct amdgpu_vm_fault_info	fault_info;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
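/*
 * Illustrative sketch (not part of this header): writing a linearly
 * increasing range of PTEs through the per-ASIC backend, one GPU page per
 * entry:
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count,
 *			      AMDGPU_GPU_PAGE_SIZE,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE);
 */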
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
			     unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*callback)(void *p, struct amdgpu_bo *bo),
		       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket);
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   bool allow_override, struct amdgpu_sync *sync,
			   uint64_t start, uint64_t last, uint64_t flags,
			   uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
			    struct ttm_resource *new_res, int sign);
void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
		       bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);

void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);

bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
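/*
 * Illustrative sketch (not part of this header): mapping a BO into a VM
 * typically pairs the functions above like this, with the BO and the VM
 * root PD reserved (gpu_addr is a caller-chosen virtual address):
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 */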
/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number; the VM TLBs need to be invalidated
 * whenever this number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}

void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
				  unsigned int pasid,
				  uint64_t addr,
				  uint32_t status,
				  unsigned int vmhub);
void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct dma_fence **fence);

void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
			       struct amdgpu_task_info *task_info);

#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
	list_for_each_entry(mapping, &(bo_va)->valids, list)
#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
	list_for_each_entry(mapping, &(bo_va)->invalids, list)

#endif