/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
struct xe_vm_pgtable_update_op;

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#define FORCE_OP_ERROR	BIT(31)

#define FORCE_OP_ERROR_LOCK	0
#define FORCE_OP_ERROR_PREPARE	1
#define FORCE_OP_ERROR_RUN	2
#define FORCE_OP_ERROR_COUNT	3
#endif

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR	(DRM_GPUVA_USERBITS << 9)
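
/*
 * Example (illustrative sketch): the XE_VMA_* flags above extend the
 * driver-private bits of &struct drm_gpuva and are stored in
 * &drm_gpuva.flags, so a minimal test of one of them looks like:
 *
 *	static bool my_vma_is_read_only(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.flags & XE_VMA_READ_ONLY;
 *	}
 *
 * my_vma_is_read_only() is a hypothetical helper name; the driver's real
 * accessors live elsewhere (e.g. xe_vm.h).
 */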

/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into the VM repin list if this is a userptr. */
	struct list_head repin_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation callback)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/** @unmap_mutex: Mutex protecting dma-unmapping */
	struct mutex unmap_mutex;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
	/** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
	bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/* debug knob used to inject userptr invalidations */
	u32 divisor;
#endif
};
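
/*
 * Example (illustrative sketch, hypothetical code): the locking rule for
 * &xe_userptr.mapped above implies that dma-unmapping the sg-table is done
 * under &xe_userptr.unmap_mutex, roughly:
 *
 *	mutex_lock(&userptr->unmap_mutex);
 *	if (userptr->mapped) {
 *		// dma-unmap userptr->sgt here (driver internal)
 *		userptr->mapped = false;
 *	}
 *	mutex_unlock(&userptr->unmap_mutex);
 *
 * Only the locking pattern is shown; the actual unmap call is part of the
 * driver implementation, not this header.
 */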

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @tile_invalidated: Tile mask of bindings invalidated for this VMA */
	u8 tile_invalidated;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Any of these is sufficient
	 * for reading, but if reading is done under the vm->lock only, it
	 * needs to be held in write mode.
	 */
	u8 tile_present;

	/** @tile_staged: bind is staged for this VMA */
	u8 tile_staged;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;
};
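
/*
 * Example (illustrative sketch, hypothetical helper names): the VMA's virtual
 * address and size live in the embedded &struct drm_gpuva rather than being
 * duplicated in &struct xe_vma, so accessors are thin wrappers such as:
 *
 *	static u64 my_vma_start(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.va.addr;
 *	}
 *
 *	static u64 my_vma_size(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.va.range;
 *	}
 *
 * The driver's real accessors are defined elsewhere (e.g. xe_vm.h).
 */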

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
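
/*
 * Example (illustrative sketch, hypothetical helper name): because the
 * &struct xe_vma is embedded as the first member, a userptr VMA can be
 * recovered from a generic VMA pointer with container_of():
 *
 *	static struct xe_userptr_vma *my_to_userptr_vma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 *
 * A real conversion helper would additionally check that the VMA really is a
 * userptr VMA before downcasting.
 */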

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @svm: Shared virtual memory state */
	struct {
		/** @svm.gpusvm: base GPUSVM used to track fault allocations */
		struct drm_gpusvm gpusvm;
		/**
		 * @svm.garbage_collector: Garbage collector which is used to
		 * unmap SVM ranges' GPU bindings and destroy the ranges.
		 */
		struct {
			/** @svm.garbage_collector.lock: Protects the range list */
			spinlock_t lock;
			/**
			 * @svm.garbage_collector.range_list: List of SVM ranges
			 * in the garbage collector.
			 */
			struct list_head range_list;
			/**
			 * @svm.garbage_collector.work: Worker which the
			 * garbage collector runs on.
			 */
			struct work_struct work;
		} garbage_collector;
	} svm;

	/** @xe: Xe device */
	struct xe_device *xe;

	/* exec queue used for (un)binding VMAs */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of the VM's virtual address space */
	u64 size;

	/* page table roots and scratch page tables, per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
#define XE_VM_FLAG_GSC			BIT(8)
	unsigned long flags;

	/** @composite_fence_ctx: context for the composite fence */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects anything attached to this VM
	 */
	struct rw_semaphore lock;
	/**
	 * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
	 * so we can take a snapshot safely from devcoredump.
	 */
	struct mutex snap_mutex;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/**
	 * @destroy_work: worker to destroy the VM, needed since the last put
	 * can happen from dma-fence signaling in an irq context and the
	 * destroy needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @pt_ops: page table operations */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @userptr.notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @userptr.invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires either the
		 * @userptr.notifier_lock in write mode, OR @lock in write
		 * mode.
		 */
		struct list_head invalidated;
	} userptr;

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: allows tracking errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/**
	 * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
	 * Protected by the vm resv.
	 */
	u64 tlb_flush_seqno;
	/**
	 * @validating: The task that is currently making BOs resident for this
	 * VM. Protected by the VM's resv for writing. Opportunistic reading
	 * can be done using READ_ONCE. Note: This is a workaround for the TTM
	 * eviction_valuable() callback not being passed a struct
	 * ttm_operation_ctx. Future work might want to address this.
	 */
	struct task_struct *validating;
	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
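
/*
 * Example (illustrative sketch, hypothetical wrapper name): the dma-resv that
 * the "protected by the vm resv" rules above refer to is the one owned by the
 * embedded &struct drm_gpuvm, normally reached via drm_gpuvm_resv():
 *
 *	static struct dma_resv *my_vm_resv(struct xe_vm *vm)
 *	{
 *		return drm_gpuvm_resv(&vm->gpuvm);
 *	}
 *
 * The driver defines its own equivalent wrapper elsewhere (e.g. xe_vm.h).
 */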

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @immediate: Immediate bind */
	bool immediate;
	/** @read_only: Read only */
	bool read_only;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @is_cpu_addr_mirror: is CPU address mirror binding */
	bool is_cpu_addr_mirror;
	/** @dumpable: whether BO is dumped on GPU hang */
	bool dumpable;
	/** @invalidate_on_bind: invalidate the VMA before bind */
	bool invalidate_on_bind;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** struct xe_vma_op_map_range - VMA map range operation */
struct xe_vma_op_map_range {
	/** @vma: VMA to map (system allocator VMA) */
	struct xe_vma *vma;
	/** @range: SVM range to map */
	struct xe_svm_range *range;
};

/** struct xe_vma_op_unmap_range - VMA unmap range operation */
struct xe_vma_op_unmap_range {
	/** @range: SVM range to unmap */
	struct xe_svm_range *range;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(0),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(1),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(2),
};

/** enum xe_vma_subop - VMA sub-operation */
enum xe_vma_subop {
	/** @XE_VMA_SUBOP_MAP_RANGE: Map range */
	XE_VMA_SUBOP_MAP_RANGE,
	/** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
	XE_VMA_SUBOP_UNMAP_RANGE,
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;
	/** @subop: user defined sub-operation */
	enum xe_vma_subop subop;
	/** @tile_mask: Tile mask for operation */
	u8 tile_mask;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
		/** @map_range: VMA map range operation specific data */
		struct xe_vma_op_map_range map_range;
		/** @unmap_range: VMA unmap range operation specific data */
		struct xe_vma_op_unmap_range unmap_range;
	};
};
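
/*
 * Example (illustrative sketch): which member of the union above is valid
 * follows from the type of the embedded GPUVA base operation, roughly:
 *
 *	switch (op->base.op) {
 *	case DRM_GPUVA_OP_MAP:
 *		// op->map is valid
 *		break;
 *	case DRM_GPUVA_OP_REMAP:
 *		// op->remap is valid
 *		break;
 *	case DRM_GPUVA_OP_PREFETCH:
 *		// op->prefetch is valid
 *		break;
 *	case DRM_GPUVA_OP_DRIVER:
 *		// op->map_range / op->unmap_range, selected by op->subop
 *		break;
 *	default:
 *		break;
 *	}
 *
 * The DRM_GPUVA_OP_* values come from &enum drm_gpuva_op_type in drm_gpuvm.h;
 * the exact mapping of the driver sub-ops is an assumption here.
 */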

/** struct xe_vma_ops - VMA operations */
struct xe_vma_ops {
	/** @list: list of VMA operations */
	struct list_head list;
	/** @vm: VM */
	struct xe_vm *vm;
	/** @q: exec queue for VMA operations */
	struct xe_exec_queue *q;
	/** @syncs: syncs for these operations */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @pt_update_ops: page table update operations */
	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
#ifdef TEST_VM_OPS_ERROR
	/** @inject_error: inject error to test error handling */
	bool inject_error;
#endif
};
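
/*
 * Example (illustrative sketch, hypothetical code): a &struct xe_vma_ops is
 * expected to be zero-initialized and its @list set up before individual
 * &struct xe_vma_op entries are appended via their @link member, roughly:
 *
 *	struct xe_vma_ops vops = { .vm = vm, .q = q };
 *
 *	INIT_LIST_HEAD(&vops.list);
 *	list_add_tail(&op->link, &vops.list);
 *
 * The driver's real initializer also wires up @syncs and @num_syncs; the
 * snippet only shows how the members above relate to each other.
 */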

#endif