/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
#include "xe_userptr.h"

struct xe_bo;
struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
struct xe_vm_pgtable_update_op;

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#define FORCE_OP_ERROR	BIT(31)

#define FORCE_OP_ERROR_LOCK	0
#define FORCE_OP_ERROR_PREPARE	1
#define FORCE_OP_ERROR_RUN	2
#define FORCE_OP_ERROR_COUNT	3
#endif

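/*
 * Illustrative sketch only, not part of this header: with
 * TEST_VM_OPS_ERROR defined, the bind path can be made to fail at a
 * chosen stage. Assuming a debug knob such as
 * xe->vm_inject_error_position selects the stage, a check could look
 * like:
 *
 *	if (vops->inject_error &&
 *	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
 *		return -ENOSPC;
 */
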
#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR	(DRM_GPUVA_USERBITS << 9)
#define XE_VMA_MADV_AUTORESET	(DRM_GPUVA_USERBITS << 10)

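/*
 * These flags live in the user bits of the embedded &drm_gpuva. A
 * minimal sketch of a flag test (helper name is illustrative, not
 * defined here):
 *
 *	static bool vma_is_read_only(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.flags & XE_VMA_READ_ONLY;
 *	}
 */
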
/**
 * struct xe_vma_mem_attr - memory attributes associated with vma
 */
struct xe_vma_mem_attr {
	/** @preferred_loc: preferred memory location */
	struct {
		/** @preferred_loc.migration_policy: Pages migration policy */
		u32 migration_policy;

		/**
		 * @preferred_loc.devmem_fd: used for determining the pagemap_fd
		 * requested by the user. DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM
		 * and DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE mean system memory or
		 * closest device memory respectively.
		 */
		u32 devmem_fd;
	} preferred_loc;

	/**
	 * @atomic_access: The atomic access type for the vma.
	 * See %DRM_XE_VMA_ATOMIC_UNDEFINED, %DRM_XE_VMA_ATOMIC_DEVICE,
	 * %DRM_XE_VMA_ATOMIC_GLOBAL, and %DRM_XE_VMA_ATOMIC_CPU for possible
	 * values. These are defined in uapi/drm/xe_drm.h.
	 */
	u32 atomic_access;

	/**
	 * @default_pat_index: The pat index for the VMA set during the first
	 * bind by the user.
	 */
	u16 default_pat_index;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 * Same as @default_pat_index unless overwritten by madvise.
	 */
	u16 pat_index;
};

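/*
 * Sketch of the intended @pat_index relationship (illustrative, not a
 * helper defined here): madvise may override the PTE encoding, and
 * resetting the attributes restores the bind-time value:
 *
 *	vma->attr.pat_index = vma->attr.default_pat_index;
 */
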
struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/**
	 * @tile_invalidated: Tile mask of bindings that are invalidated for
	 * this VMA. Protected by the BO's resv and, for userptrs,
	 * vm->svm.gpusvm.notifier_lock in write mode for writing, or
	 * vm->svm.gpusvm.notifier_lock in read mode and the vm->resv. For
	 * stable reading, the BO's resv or, for userptrs,
	 * vm->svm.gpusvm.notifier_lock in read mode is required. Can be
	 * opportunistically read with READ_ONCE outside of locks.
	 */
	u8 tile_invalidated;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings that are present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->svm.gpusvm.notifier_lock for writing. Needs either for reading,
	 * but if reading is done under the vm->lock only, it needs to be held
	 * in write mode.
	 */
	u8 tile_present;

	/** @tile_staged: Tile mask of bindings staged for this VMA */
	u8 tile_staged;

	/**
	 * @skip_invalidation: Used in madvise to avoid invalidation
	 * if the memory attributes don't change
	 */
	bool skip_invalidation;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;

	/**
	 * @attr: The attributes of the vma which determine the migration
	 * policy and the encoding of the PTEs for this vma.
	 */
	struct xe_vma_mem_attr attr;
};

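/*
 * A minimal sketch of recovering an xe_vma from its embedded base
 * object (the driver keeps an equivalent helper elsewhere):
 *
 *	#define gpuva_to_vma(va) container_of(va, struct xe_vma, gpuva)
 */
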
/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};

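/*
 * Because @vma is the first member, a userptr VMA can be recovered
 * from a plain &struct xe_vma pointer; a sketch (helper name is
 * illustrative):
 *
 *	static struct xe_userptr_vma *to_uvma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 */
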
struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @svm: Shared virtual memory state */
	struct {
		/** @svm.gpusvm: base GPUSVM used to track fault allocations */
		struct drm_gpusvm gpusvm;
		/**
		 * @svm.garbage_collector: Garbage collector which is used to
		 * unmap SVM ranges' GPU bindings and destroy the ranges.
		 */
		struct {
			/** @svm.garbage_collector.lock: Protects the range list */
			spinlock_t lock;
			/**
			 * @svm.garbage_collector.range_list: List of SVM ranges
			 * in the garbage collector.
			 */
			struct list_head range_list;
			/**
			 * @svm.garbage_collector.work: Worker which the
			 * garbage collector runs on.
			 */
			struct work_struct work;
		} garbage_collector;
	} svm;

	/** @xe: xe device */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs, one per tile */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of this VM's address space */
	u64 size;

	/** @pt_root: page table roots, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/** @scratch_pt: per-level scratch page tables, one set per tile */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
#define XE_VM_FLAG_GSC			BIT(8)
	unsigned long flags;

	/** @composite_fence_ctx: context for composite fences */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fences */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects the objects attached to this VM
	 */
	struct rw_semaphore lock;
	/**
	 * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
	 * so we can take a snapshot safely from devcoredump.
	 */
	struct mutex snap_mutex;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/**
	 * @destroy_work: worker to destroy the VM, needed as the final put
	 * can happen from dma_fence signalling in an irq context and the
	 * destroy needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to the page table
	 * structure. Used to implement conflict tracking between independent
	 * bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @pt_ops: page table operations */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct xe_userptr_vm userptr;

	/** @preempt: preempt state */
	struct {
		/**
		 * @preempt.min_run_period_ms: The minimum run period before
		 * preempting an engine again
		 */
		s64 min_run_period_ms;
		/** @preempt.exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @preempt.num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @preempt.rebind_deactivated: Whether rebind has been
		 * temporarily deactivated due to no work available. Protected
		 * by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @preempt.rebind_work: worker to rebind invalidated userptrs /
		 * evicted BOs
		 */
		struct work_struct rebind_work;
		/**
		 * @preempt.pm_activate_link: Link to list of rebind workers to
		 * be kicked on resume.
		 */
		struct list_head pm_activate_link;
	} preempt;

	/** @usm: unified shared memory state */
	struct {
		/** @usm.asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @usm.last_fault_vma: Last fault VMA, used for fast lookup
		 * when we get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: allows tracking of errors */
	struct {
		/** @error_capture.capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/**
	 * @validation: Validation data only valid with the vm resv held.
	 * Note: This is really task state of the task holding the vm resv,
	 * and moving forward we should come up with a better way of passing
	 * this down the call-chain.
	 */
	struct {
		/**
		 * @validation.validating: The task that is currently making
		 * bos resident for this vm. Protected by the VM's resv for
		 * writing. Opportunistic reading can be done using READ_ONCE.
		 * Note: This is a workaround for the TTM eviction_valuable()
		 * callback not being passed a struct ttm_operation_context().
		 * Future work might want to address this.
		 */
		struct task_struct *validating;
		/**
		 * @validation._exec: The drm_exec context used when locking
		 * the vm resv. Protected by the vm's resv.
		 */
		struct drm_exec *_exec;
	} validation;

	/**
	 * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
	 * Protected by the vm resv.
	 */
	u64 tlb_flush_seqno;
	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};

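/*
 * Illustrative usage of the tile-id flag helpers above: a migration VM
 * records its tile at creation and the id can be read back later
 * (sketch, not driver code):
 *
 *	flags = XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
 *	...
 *	tile_id = XE_VM_FLAG_TILE_ID(vm->flags);
 */
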
/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @vma_flags: VMA flags */
	unsigned int vma_flags;
	/** @immediate: Immediate bind */
	bool immediate;
	/** @invalidate_on_bind: invalidate the VMA before bind */
	bool invalidate_on_bind;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** struct xe_vma_op_map_range - VMA map range operation */
struct xe_vma_op_map_range {
	/** @vma: VMA to map (system allocator VMA) */
	struct xe_vma *vma;
	/** @range: SVM range to map */
	struct xe_svm_range *range;
};

/** struct xe_vma_op_unmap_range - VMA unmap range operation */
struct xe_vma_op_unmap_range {
	/** @range: SVM range to unmap */
	struct xe_svm_range *range;
};

/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
struct xe_vma_op_prefetch_range {
	/** @range: xarray of SVM ranges to prefetch */
	struct xarray range;
	/** @ranges_count: number of SVM ranges to map */
	u32 ranges_count;
	/**
	 * @tile: Pointer to the tile structure containing memory to prefetch.
	 *        NULL if the requested prefetch region is smem.
	 */
	struct xe_tile *tile;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(0),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(1),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(2),
};

/** enum xe_vma_subop - VMA sub-operation */
enum xe_vma_subop {
	/** @XE_VMA_SUBOP_MAP_RANGE: Map range */
	XE_VMA_SUBOP_MAP_RANGE,
	/** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
	XE_VMA_SUBOP_UNMAP_RANGE,
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;
	/** @subop: user defined sub-operation */
	enum xe_vma_subop subop;
	/** @tile_mask: Tile mask for operation */
	u8 tile_mask;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
		/** @map_range: VMA map range operation specific data */
		struct xe_vma_op_map_range map_range;
		/** @unmap_range: VMA unmap range operation specific data */
		struct xe_vma_op_unmap_range unmap_range;
		/** @prefetch_range: VMA prefetch range operation specific data */
		struct xe_vma_op_prefetch_range prefetch_range;
	};
};

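/*
 * A processing loop dispatches on the base GPUVA op type and then picks
 * the matching union member; a minimal sketch (do_map()/do_remap() are
 * placeholders, not driver functions):
 *
 *	switch (op->base.op) {
 *	case DRM_GPUVA_OP_MAP:
 *		do_map(op->map.vma);
 *		break;
 *	case DRM_GPUVA_OP_REMAP:
 *		do_remap(op->remap.prev, op->remap.next);
 *		break;
 *	default:
 *		break;
 *	}
 */
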
/** struct xe_vma_ops - VMA operations */
struct xe_vma_ops {
	/** @list: list of VMA operations */
	struct list_head list;
	/** @vm: VM */
	struct xe_vm *vm;
	/** @q: exec queue for VMA operations */
	struct xe_exec_queue *q;
	/** @syncs: syncs for these operations */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @pt_update_ops: page table update operations */
	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
	/** @flags: flags signifying the properties of these xe_vma_ops */
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH	BIT(0)
#define XE_VMA_OPS_FLAG_MADVISE			BIT(1)
#define XE_VMA_OPS_ARRAY_OF_BINDS		BIT(2)
	u32 flags;
#ifdef TEST_VM_OPS_ERROR
	/** @inject_error: inject error to test error handling */
	bool inject_error;
#endif
};

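/*
 * Operations are queued on @list and consumed in order; a sketch of the
 * traversal (process() is a placeholder):
 *
 *	struct xe_vma_op *op;
 *
 *	list_for_each_entry(op, &vops->list, link)
 *		process(op);
 */
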
#endif