/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
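
/*
 * Illustrative note (block_size is ASIC/configuration dependent): with the
 * common block_size of 9 this evaluates to 1 << 9 = 512 PTEs per page table.
 */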

#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ (1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)

#define AMDGPU_PTE_READABLE (1ULL << 5)
#define AMDGPU_PTE_WRITEABLE (1ULL << 6)

#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE (1ULL << 54)

#define AMDGPU_PTE_LOG (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF (1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC (1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
                                 AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
                                    AMDGPU_PTE_PRT)
/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype) ((uint64_t)(mtype) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_VG10(flags, mtype) \
        (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_VG10_MASK)) | \
         AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype))

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2
#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
                                | AMDGPU_PTE_SNOOPED \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE \
                                | AMDGPU_PTE_WRITEABLE \
                                | AMDGPU_PTE_MTYPE_VG10(0ULL, AMDGPU_MTYPE_CC))
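
/*
 * Illustrative example (not from the driver): the MTYPE helpers replace the
 * mtype field inside an existing flag word rather than just OR-ing bits in:
 *
 *   uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *                    AMDGPU_PTE_WRITEABLE;
 *   flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
 */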

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype) ((uint64_t)(mtype) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10_SHIFT(7ULL)
#define AMDGPU_PTE_MTYPE_NV10(flags, mtype) \
        (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_NV10_MASK)) | \
         AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype))

/* gfx12 */
#define AMDGPU_PTE_PRT_GFX12 (1ULL << 56)
#define AMDGPU_PTE_PRT_FLAG(adev) \
        ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)

#define AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype) ((uint64_t)(mtype) << 54)
#define AMDGPU_PTE_MTYPE_GFX12_MASK AMDGPU_PTE_MTYPE_GFX12_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_GFX12(flags, mtype) \
        (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_GFX12_MASK)) | \
         AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype))

#define AMDGPU_PTE_DCC (1ULL << 58)
#define AMDGPU_PTE_IS_PTE (1ULL << 63)

/* PDE Block Fragment Size for gfx v12 */
#define AMDGPU_PDE_BFS_GFX12(a) ((uint64_t)((a) & 0x1fULL) << 58)
#define AMDGPU_PDE_BFS_FLAG(adev, a) \
        ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))
/* PDE is handled as PTE for gfx v12 */
#define AMDGPU_PDE_PTE_GFX12 (1ULL << 63)
#define AMDGPU_PDE_PTE_FLAG(adev) \
        ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)
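
/*
 * Illustrative example: the *_FLAG helpers dispatch on the GC IP version so
 * one code path serves gfx12 and older generations alike (pde_flags is a
 * hypothetical variable):
 *
 *   pde_flags |= AMDGPU_PDE_PTE_FLAG(adev);
 */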

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2

/* How much VRAM is reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS 13
#define AMDGPU_GFXHUB_START 0
#define AMDGPU_MMHUB0_START 8
#define AMDGPU_MMHUB1_START 12
#define AMDGPU_GFXHUB(x) (AMDGPU_GFXHUB_START + (x))
#define AMDGPU_MMHUB0(x) (AMDGPU_MMHUB0_START + (x))
#define AMDGPU_MMHUB1(x) (AMDGPU_MMHUB1_START + (x))

#define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
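
/*
 * Illustrative example (a sketch, not driver code): a fault handler can
 * classify the reporting hub from its index:
 *
 *   if (AMDGPU_IS_GFXHUB(vmhub))
 *           hub_name = "gfxhub";
 *   else if (AMDGPU_IS_MMHUB0(vmhub))
 *           hub_name = "mmhub0";
 */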

/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \
                                             << AMDGPU_GPU_PAGE_SHIFT) \
                                            - AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
                                              - AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
                                             - AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \
                                AMDGPU_VA_RESERVED_SEQ64_SIZE + \
                                AMDGPU_VA_RESERVED_CSA_SIZE)
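
/*
 * Layout sketch derived from the definitions above: the CSA, seq64 and trap
 * regions stack downwards from the top of the VA space, so the range left
 * for user mappings is
 *   [AMDGPU_VA_RESERVED_BOTTOM,
 *    (max_pfn << AMDGPU_GPU_PAGE_SHIFT) - AMDGPU_VA_RESERVED_TOP).
 */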

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VM page table levels; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
        AMDGPU_VM_PDB2,
        AMDGPU_VM_PDB1,
        AMDGPU_VM_PDB0,
        AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
        /* constant after initialization */
        struct amdgpu_vm *vm;
        struct amdgpu_bo *bo;

        /* protected by bo being reserved */
        struct amdgpu_vm_bo_base *next;

        /* protected by spinlock */
        struct list_head vm_status;

        /* protected by the BO being reserved */
        bool moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
        /* number of dw to reserve per operation */
        unsigned copy_pte_num_dw;

        /* copy pte entries from GART */
        void (*copy_pte)(struct amdgpu_ib *ib,
                         uint64_t pe, uint64_t src,
                         unsigned count);

        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
                          uint64_t value, unsigned count,
                          uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
        char process_name[TASK_COMM_LEN];
        char task_name[TASK_COMM_LEN];
        pid_t pid;
        pid_t tgid;
        struct kref refcount;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

        /**
         * @adev: amdgpu device we do this update for
         */
        struct amdgpu_device *adev;

        /**
         * @vm: optional amdgpu_vm we do this update for
         */
        struct amdgpu_vm *vm;

        /**
         * @immediate: if changes should be made immediately
         */
        bool immediate;

        /**
         * @unlocked: true if the root BO is not locked
         */
        bool unlocked;

        /**
         * @pages_addr:
         *
         * DMA addresses to use for mapping
         */
        dma_addr_t *pages_addr;

        /**
         * @job: job to be used for hw submission
         */
        struct amdgpu_job *job;

        /**
         * @num_dw_left: number of dw left for the IB
         */
        unsigned int num_dw_left;

        /**
         * @needs_flush: true whenever we need to invalidate the TLB
         */
        bool needs_flush;

        /**
         * @allow_override: true for memory that is not uncached: allows MTYPE
         * to be overridden for NUMA local memory.
         */
        bool allow_override;

        /**
         * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
         */
        struct list_head tlb_flush_waitlist;
};

struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo_vm *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p,
                       struct amdgpu_sync *sync);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
};
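
/*
 * Minimal sketch of how an update backend is driven (assumed typical flow,
 * not a verbatim driver path; p, sync, bo_vm, pe, addr, count, incr, flags
 * and fence come from the caller):
 *
 *   r = vm->update_funcs->prepare(&p, &sync);
 *   if (!r)
 *           r = vm->update_funcs->update(&p, bo_vm, pe, addr, count,
 *                                        incr, flags);
 *   if (!r)
 *           r = vm->update_funcs->commit(&p, &fence);
 */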

struct amdgpu_vm_fault_info {
        /* fault address */
        uint64_t addr;
        /* fault status register */
        uint32_t status;
        /* which vmhub? gfxhub, mmhub, etc. */
        unsigned int vmhub;
};

struct amdgpu_mem_stats {
        struct drm_memory_stats drm;

        /* buffers that requested this placement */
        uint64_t requested;
        /* buffers that requested this placement
         * but are currently evicted */
        uint64_t evicted;
};

struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root_cached va;

        /* Lock to prevent eviction while we are updating page tables
         * use vm_eviction_lock/unlock(vm)
         */
        struct mutex eviction_lock;
        bool evicting;
        unsigned int saved_flags;

        /* Lock to protect vm_bo add/del/move on all lists of vm */
        spinlock_t status_lock;
        /* Per-VM and PT BOs that need a validation */
        struct list_head evicted;

        /* BOs for user mode queues that need a validation */
        struct list_head evicted_user;

        /* PT BOs which relocated and their parent need an update */
        struct list_head relocated;

        /* per VM BOs moved, but not yet updated in the PT */
        struct list_head moved;

        /* All BOs of this VM not currently in the state machine */
        struct list_head idle;

        /* regular invalidated BOs, but not yet updated in the PT */
        struct list_head invalidated;

        /* BO mappings freed, but not yet updated in the PT */
        struct list_head freed;

        /* BOs which are invalidated and have been updated in the PTs */
        struct list_head done;

        /* PT BOs scheduled to be freed and zero-filled if the VM resv is not held */
        struct list_head pt_freed;
        struct work_struct pt_free_work;

        /* contains the page directory */
        struct amdgpu_vm_bo_base root;
        struct dma_fence *last_update;

        /* Scheduler entities for page table updates */
        struct drm_sched_entity immediate;
        struct drm_sched_entity delayed;

        /* Last finished delayed update */
        atomic64_t tlb_seq;
        struct dma_fence *last_tlb_flush;
        atomic64_t kfd_last_flushed_seq;
        uint64_t tlb_fence_context;

        /* How many times we had to re-generate the page tables */
        uint64_t generation;

        /* Last unlocked submission to the scheduler entities */
        struct dma_fence *last_unlocked;

        unsigned int pasid;
        bool reserved_vmid[AMDGPU_MAX_VMHUBS];

        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool use_cpu_for_update;

        /* Functions to use for VM table updates */
        const struct amdgpu_vm_update_funcs *update_funcs;

        /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);

        /* Points to the KFD process VM info */
        struct amdkfd_process_info *process_info;

        /* List node in amdkfd_process_info.vm_list_head */
        struct list_head vm_list_node;

        /* Valid while the PD is reserved or fenced */
        uint64_t pd_phys_addr;

        /* Some basic info about the task */
        struct amdgpu_task_info *task_info;

        /* Store positions of group of BOs */
        struct ttm_lru_bulk_move lru_bulk_move;
        /* Flag to indicate if VM is used for compute */
        bool is_compute_context;

        /* Memory partition number, -1 means any partition */
        int8_t mem_id;

        /* cached fault info */
        struct amdgpu_vm_fault_info fault_info;
};

struct amdgpu_vm_manager {
        /* Handling of VMIDs */
        struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
        unsigned int first_kfd_vmid;
        bool concurrent_flush;

        /* Handling of VM fences */
        u64 fence_context;
        unsigned seqno[AMDGPU_MAX_RINGS];

        uint64_t max_pfn;
        uint32_t num_level;
        uint32_t block_size;
        uint32_t fragment_size;
        enum amdgpu_vm_level root_level;
        /* vram base address for page table entry */
        u64 vram_base_offset;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
        struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
        unsigned vm_pte_num_scheds;
        struct amdgpu_ring *page_fault;

        /* partial resident texture handling */
        spinlock_t prt_lock;
        atomic_t num_prt_users;

        /* controls how VM page tables are updated for Graphics and Compute.
         * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
         * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
         */
        int vm_update_mode;
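
        /*
         * Illustrative check (a sketch, not driver code): graphics page
         * tables are CPU updated when the corresponding bit is set:
         *
         *   if (adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX)
         *           ... use the CPU update backend ...
         */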

        /* PASID to VM mapping, will be used in interrupt context to
         * look up VM of a page fault
         */
        struct xarray pasids;
        /* Global registration of recent page fault information */
        struct amdgpu_vm_fault_info fault_info;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
                      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct ww_acquire_ctx *ticket,
                       int (*callback)(void *p, struct amdgpu_bo *bo),
                       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm,
                           struct ww_acquire_ctx *ticket);
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint32_t flush_type,
                                uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                           bool immediate, bool unlocked, bool flush_tlb,
                           bool allow_override, struct amdgpu_sync *sync,
                           uint64_t start, uint64_t last, uint64_t flags,
                           uint64_t offset, uint64_t vram_base,
                           struct ttm_resource *res, dma_addr_t *pages_addr,
                           struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             struct amdgpu_bo_va *bo_va,
                             uint64_t addr, uint64_t offset,
                             uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);

void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
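
/*
 * Illustrative usage (error handling elided): task info is refcounted, so
 * every successful get must be paired with a put:
 *
 *   struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
 *
 *   if (ti) {
 *           pr_info("process %s pid %d\n", ti->process_name, ti->pid);
 *           amdgpu_vm_put_task_info(ti);
 *   }
 */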

bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
                            bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
                          struct amdgpu_mem_stats *stats,
                          unsigned int size);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                        int level, bool immediate, struct amdgpu_bo_vm **vmbo,
                        int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
                         struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
                          uint64_t start, uint64_t end,
                          uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
                            struct amdgpu_vm_update_params *params);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
        unsigned long flags;
        spinlock_t *lock;

        /*
         * Workaround to stop racing between the fence signaling and handling
         * the cb. The lock is static after initially setting it up, just make
         * sure that the dma_fence structure isn't freed up.
         */
        rcu_read_lock();
        lock = vm->last_tlb_flush->lock;
        rcu_read_unlock();

        spin_lock_irqsave(lock, flags);
        spin_unlock_irqrestore(lock, flags);

        return atomic64_read(&vm->tlb_seq);
}
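
/*
 * Typical (illustrative) usage: cache the sequence number alongside a
 * mapping and flush the TLB once it changes; cached_seq is hypothetical:
 *
 *   if (amdgpu_vm_tlb_seq(vm) != cached_seq) {
 *           ... flush the TLB ...
 *           cached_seq = amdgpu_vm_tlb_seq(vm);
 *   }
 */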

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
        mutex_lock(&vm->eviction_lock);
        vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
        if (mutex_trylock(&vm->eviction_lock)) {
                vm->saved_flags = memalloc_noreclaim_save();
                return true;
        }
        return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
        memalloc_noreclaim_restore(vm->saved_flags);
        mutex_unlock(&vm->eviction_lock);
}
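
/*
 * Illustrative pairing (reclaim-FS is suppressed between lock and unlock,
 * matching the warning above):
 *
 *   amdgpu_vm_eviction_lock(vm);
 *   ... update page tables ...
 *   amdgpu_vm_eviction_unlock(vm);
 */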

void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
                                  unsigned int pasid,
                                  uint64_t addr,
                                  uint32_t status,
                                  unsigned int vmhub);
void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                struct dma_fence **fence);

#endif