/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
#include "amdgpu_ttm.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
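
/*
 * Illustrative sketch (not driver code): with a typical block_size of
 * 9 bits, a page table holds 1 << 9 = 512 entries:
 *
 *	unsigned int nptes = AMDGPU_VM_PTE_COUNT(adev);
 */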

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)
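
/*
 * Usage sketch (illustrative only): a valid, readable and writable
 * system page with a 2MB fragment (frag value 9, i.e. 4KB << 9) could
 * combine the flags above as:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_FRAG(9);
 */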

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG          (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				   AMDGPU_PTE_PRT)
/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype)	((uint64_t)(mtype) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_VG10(flags, mtype)			\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_VG10_MASK)) |	\
	  AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype))

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2
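
/*
 * Usage sketch: AMDGPU_PTE_MTYPE_VG10() masks out the old GFX9 MTYPE
 * field before inserting the new one, so it is safe on flags that
 * already carry an MTYPE:
 *
 *	flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
 *
 * The NV10 and GFX12 variants below follow the same mask-and-replace
 * pattern at their respective bit positions.
 */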

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE_VG10(0ULL, AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype)	((uint64_t)(mtype) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10_SHIFT(7ULL)
#define AMDGPU_PTE_MTYPE_NV10(flags, mtype)			\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_NV10_MASK)) |	\
	  AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype))

/* gfx12 */
#define AMDGPU_PTE_PRT_GFX12		(1ULL << 56)
#define AMDGPU_PTE_PRT_FLAG(adev)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)

#define AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype)	((uint64_t)(mtype) << 54)
#define AMDGPU_PTE_MTYPE_GFX12_MASK	AMDGPU_PTE_MTYPE_GFX12_SHIFT(3ULL)
#define AMDGPU_PTE_MTYPE_GFX12(flags, mtype)				\
	(((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_GFX12_MASK)) |	\
	  AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype))

#define AMDGPU_PTE_DCC			(1ULL << 58)
#define AMDGPU_PTE_IS_PTE		(1ULL << 63)

/* PDE Block Fragment Size for gfx v12 */
#define AMDGPU_PDE_BFS_GFX12(a)		((uint64_t)((a) & 0x1fULL) << 58)
#define AMDGPU_PDE_BFS_FLAG(adev, a)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))
/* PDE is handled as PTE for gfx v12 */
#define AMDGPU_PDE_PTE_GFX12		(1ULL << 63)
#define AMDGPU_PDE_PTE_FLAG(adev)	\
	((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* How much VRAM is reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS			13
#define AMDGPU_GFXHUB_START			0
#define AMDGPU_MMHUB0_START			8
#define AMDGPU_MMHUB1_START			12
#define AMDGPU_GFXHUB(x)			(AMDGPU_GFXHUB_START + (x))
#define AMDGPU_MMHUB0(x)			(AMDGPU_MMHUB0_START + (x))
#define AMDGPU_MMHUB1(x)			(AMDGPU_MMHUB1_START + (x))

#define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
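
/*
 * Illustrative values derived from the layout above:
 *
 *	AMDGPU_GFXHUB(0) == 0	first GFXHUB instance
 *	AMDGPU_MMHUB0(1) == 9	second MMHUB0 instance
 *	AMDGPU_MMHUB1(0) == 12	the single MMHUB1 instance
 *
 * so e.g. AMDGPU_IS_MMHUB0(9) evaluates to true.
 */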

/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev)	(((adev)->vm_manager.max_pfn \
						  << AMDGPU_GPU_PAGE_SHIFT)  \
						 - AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev)	(AMDGPU_VA_RESERVED_CSA_START(adev) \
						 - AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE		(2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev)	(AMDGPU_VA_RESERVED_SEQ64_START(adev) \
						 - AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM		(1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP			(AMDGPU_VA_RESERVED_TRAP_SIZE + \
						 AMDGPU_VA_RESERVED_SEQ64_SIZE + \
						 AMDGPU_VA_RESERVED_CSA_SIZE)
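
/*
 * Sketch of the resulting top-of-address-space layout (highest
 * addresses first), derived from the macros above:
 *
 *	CSA:	2MB, ends at max_pfn << AMDGPU_GPU_PAGE_SHIFT
 *	seq64:	2MB, directly below the CSA
 *	trap:	8KB, directly below the seq64 area
 *
 * AMDGPU_VA_RESERVED_TOP is the sum of the three sizes, and
 * AMDGPU_VA_RESERVED_BOTTOM additionally keeps the lowest 64KB unused.
 */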

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by vm status_lock */
	struct list_head		vm_status;

	/* true if the BO is counted as shared in mem stats;
	 * protected by vm status_lock */
	bool				shared;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	struct drm_wedge_task_info task;
	char		process_name[TASK_COMM_LEN];
	pid_t		tgid;
	struct kref	refcount;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @needs_flush: true whenever we need to invalidate the TLB
	 */
	bool needs_flush;

	/**
	 * @allow_override: true for memory that is not uncached: allows MTYPE
	 * to be overridden for NUMA local memory.
	 */
	bool allow_override;

	/**
	 * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
	 */
	struct list_head tlb_flush_waitlist;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p,
		       struct amdgpu_sync *sync, u64 k_job_id);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};

struct amdgpu_vm_fault_info {
	/* fault address */
	uint64_t	addr;
	/* fault status register */
	uint32_t	status;
	/* which vmhub? gfxhub, mmhub, etc. */
	unsigned int	vmhub;
};

struct amdgpu_mem_stats {
	struct drm_memory_stats drm;

	/* buffers that requested this placement but are currently evicted */
	uint64_t evicted;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables;
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t		status_lock;

	/* Memory statistics for this vm, protected by status_lock */
	struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];

	/*
	 * The following lists contain amdgpu_vm_bo_base objects for either
	 * PDs, PTs or per VM BOs. The state transitions are:
	 *
	 * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
	 */

	/* Per-VM and PT BOs that need validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/*
	 * The following lists contain amdgpu_vm_bo_base objects for BOs which
	 * have their own dma_resv object and do not depend on the root PD.
	 * Their state transitions are:
	 *
	 * evicted_user or invalidated -> done
	 */

	/* BOs for user mode queues that need validation */
	struct list_head	evicted_user;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* invalidated BOs which have already been updated in the PTs */
	struct list_head        done;

	/*
	 * This list contains amdgpu_bo_va_mapping objects which have been freed
	 * but not updated in the PTs
	 */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_bo_base     root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;
	atomic64_t		kfd_last_flushed_seq;
	uint64_t		tlb_fence_context;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool					use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info *task_info;

	/* Stores the LRU positions of a group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;

	/* cached fault info */
	struct amdgpu_vm_fault_info fault_info;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct xarray				pasids;
	/* Global registration of recent page fault information */
	struct amdgpu_vm_fault_info	fault_info;
};

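/*
 * Illustrative check (not driver code): testing whether compute VMs
 * update their page tables with the CPU, per the vm_update_mode bits
 * documented in struct amdgpu_vm_manager above:
 *
 *	bool compute_uses_cpu = !!(adev->vm_manager.vm_update_mode &
 *				   AMDGPU_VM_USE_CPU_FOR_COMPUTE);
 */
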
struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

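/*
 * Usage sketch: the macros above dispatch to the per-ASIC callbacks in
 * struct amdgpu_vm_pte_funcs, e.g. writing `count' PTEs starting at
 * page-table address `pe', each subsequent PTE pointing `incr' bytes
 * further:
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags);
 */
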
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
			     unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*callback)(void *p, struct amdgpu_bo *bo),
		       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket);
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   bool allow_override, struct amdgpu_sync *sync,
			   uint64_t start, uint64_t last, uint64_t flags,
			   uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
			    struct ttm_resource *new_res, int sign);
void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
		       bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);

struct amdgpu_task_info *
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);

void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);

bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);
void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
			    struct amdgpu_vm_update_params *params);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);

bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number, which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}

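/*
 * Usage sketch (illustrative only): sample the sequence once, then
 * compare it later to detect that a TLB flush is required;
 * "flush_required" is a hypothetical local:
 *
 *	uint64_t seq = amdgpu_vm_tlb_seq(vm);
 *	...
 *	bool flush_required = amdgpu_vm_tlb_seq(vm) != seq;
 */
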
/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}

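/*
 * Usage sketch (illustrative only): the helpers above pair the mutex
 * with memalloc_noreclaim_save()/restore() so no reclaim-FS can happen
 * under the lock:
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	if (!vm->evicting)
 *		... update page tables ...
 *	amdgpu_vm_eviction_unlock(vm);
 */
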
void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
				  unsigned int pasid,
				  uint64_t addr,
				  uint32_t status,
				  unsigned int vmhub);
void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct dma_fence **fence);

void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
			       struct amdgpu_task_info *task_info);

#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
		list_for_each_entry(mapping, &(bo_va)->valids, list)
#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
		list_for_each_entry(mapping, &(bo_va)->invalids, list)

#endif