/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
#include "xe_userptr.h"

struct xe_bo;
struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
struct xe_vm_pgtable_update_op;

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#define FORCE_OP_ERROR	BIT(31)

#define FORCE_OP_ERROR_LOCK	0
#define FORCE_OP_ERROR_PREPARE	1
#define FORCE_OP_ERROR_RUN	2
#define FORCE_OP_ERROR_COUNT	3
#endif
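
/*
 * Illustrative sketch (not the driver's actual code path): with the debug
 * defines above, an error-injection request could be carried in a single u32
 * where FORCE_OP_ERROR marks the request and the low bits select the stage to
 * fail. The helper below is hypothetical:
 *
 *	static bool want_op_error(u32 val, unsigned int stage)
 *	{
 *		return (val & FORCE_OP_ERROR) &&
 *		       (val & ~FORCE_OP_ERROR) % FORCE_OP_ERROR_COUNT == stage;
 *	}
 *
 * where @stage is one of FORCE_OP_ERROR_LOCK, FORCE_OP_ERROR_PREPARE or
 * FORCE_OP_ERROR_RUN.
 */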

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR	(DRM_GPUVA_USERBITS << 9)
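
/*
 * Illustrative sketch: these user bits live in the drm_gpuva flags of each
 * VMA, so a binding property can be tested directly, e.g.:
 *
 *	if (vma->gpuva.flags & XE_VMA_READ_ONLY)
 *		pte |= read_only_bits;
 *
 * where @pte and @read_only_bits are hypothetical, and @vma is a
 * struct xe_vma as defined below.
 */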

/**
 * struct xe_vma_mem_attr - memory attributes associated with a vma
 */
struct xe_vma_mem_attr {
	/** @preferred_loc: preferred memory location */
	struct {
		/** @preferred_loc.migration_policy: Page migration policy */
		u32 migration_policy;

		/**
		 * @preferred_loc.devmem_fd: fd used for determining the
		 * pagemap requested by the user.
		 * DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM and
		 * DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE mean system memory or
		 * the closest device memory, respectively.
		 */
		u32 devmem_fd;
	} preferred_loc;

	/**
	 * @atomic_access: The atomic access type for the vma.
	 * See %DRM_XE_VMA_ATOMIC_UNDEFINED, %DRM_XE_VMA_ATOMIC_DEVICE,
	 * %DRM_XE_VMA_ATOMIC_GLOBAL, and %DRM_XE_VMA_ATOMIC_CPU for possible
	 * values. These are defined in uapi/drm/xe_drm.h.
	 */
	u32 atomic_access;

	/**
	 * @default_pat_index: The PAT index for the VMA, set during the first
	 * bind by the user.
	 */
	u16 default_pat_index;

	/**
	 * @pat_index: The PAT index to use when encoding the PTEs for this
	 * VMA. Same as @default_pat_index unless overridden by madvise.
	 */
	u16 pat_index;
};
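
/*
 * Illustrative sketch: @pat_index starts out equal to @default_pat_index and
 * is only changed by madvise, so reverting a madvise override could look like
 * the hypothetical helper below (not part of the driver API):
 *
 *	static void xe_vma_mem_attr_reset_pat(struct xe_vma_mem_attr *attr)
 *	{
 *		attr->pat_index = attr->default_pat_index;
 *	}
 */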

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/**
	 * @tile_invalidated: Tile mask of bindings that are invalidated for
	 * this VMA. Protected by the BO's resv and, for userptrs, the
	 * vm->svm.gpusvm.notifier_lock in write mode for writing, or the
	 * vm->svm.gpusvm.notifier_lock in read mode and the vm->resv. For
	 * stable reading, the BO's resv or, for userptrs, the
	 * vm->svm.gpusvm.notifier_lock in read mode is required. Can be
	 * opportunistically read with READ_ONCE outside of locks.
	 */
	u8 tile_invalidated;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings that are present for this VMA.
	 * Protected by the vm->lock, the vm->resv and, for userptrs, the
	 * vm->svm.gpusvm.notifier_lock for writing. Any one of these is
	 * sufficient for reading, but if reading is done under the vm->lock
	 * only, it needs to be held in write mode.
	 */
	u8 tile_present;

	/** @tile_staged: Tile mask of bindings staged for this VMA */
	u8 tile_staged;

	/**
	 * @skip_invalidation: Used in madvise to avoid invalidation
	 * if the memory attributes don't change
	 */
	bool skip_invalidation;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;

	/**
	 * @attr: The attributes of the vma, which determine the migration
	 * policy and the encoding of the PTEs for this vma.
	 */
	struct xe_vma_mem_attr attr;
};
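
/*
 * Illustrative sketch: as documented above, @tile_invalidated may be read
 * opportunistically outside of locks, e.g. to cheaply check whether a tile's
 * binding might need a rebind:
 *
 *	if (READ_ONCE(vma->tile_invalidated) & BIT(tile->id))
 *		queue_rebind(vma);
 *
 * where queue_rebind() is hypothetical; taking the documented locks is still
 * required before acting on the result.
 */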

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
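
/*
 * Since the base vma is the first member, a struct xe_vma known to be a
 * userptr can be converted back with container_of(), roughly:
 *
 *	struct xe_userptr_vma *uvma =
 *		container_of(vma, struct xe_userptr_vma, vma);
 *
 * (illustrative; the driver may provide a dedicated helper for this).
 */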

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @svm: Shared virtual memory state */
	struct {
		/** @svm.gpusvm: base GPUSVM used to track fault allocations */
		struct drm_gpusvm gpusvm;
		/**
		 * @svm.garbage_collector: Garbage collector which is used to
		 * unmap SVM ranges' GPU bindings and destroy the ranges.
		 */
		struct {
			/** @svm.garbage_collector.lock: Protects the range list */
			spinlock_t lock;
			/**
			 * @svm.garbage_collector.range_list: List of SVM ranges
			 * in the garbage collector.
			 */
			struct list_head range_list;
			/**
			 * @svm.garbage_collector.work: Worker which the
			 * garbage collector runs on.
			 */
			struct work_struct work;
		} garbage_collector;
	} svm;

	/** @xe: Xe device this VM belongs to */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs, one per tile */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of the VM address space */
	u64 size;

	/** @pt_root: page table roots, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/** @scratch_pt: scratch page tables, one per tile and level */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
#define XE_VM_FLAG_GSC			BIT(8)
	unsigned long flags;
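
	/*
	 * Illustrative sketch: a tile id is packed into bits 7:6 of @flags
	 * with XE_VM_FLAG_SET_TILE_ID() at VM creation and read back with
	 * XE_VM_FLAG_TILE_ID(), e.g.:
	 *
	 *	flags |= XE_VM_FLAG_SET_TILE_ID(tile);
	 *	...
	 *	u8 tile_id = XE_VM_FLAG_TILE_ID(vm->flags);
	 */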

	/** @composite_fence_ctx: context for the composite fence */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for the composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects objects of anything attached to this
	 * VM
	 */
	struct rw_semaphore lock;
	/**
	 * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
	 * so we can take a snapshot safely from devcoredump.
	 */
	struct mutex snap_mutex;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/**
	 * @destroy_work: worker to destroy the VM, needed because the last put
	 * can happen from dma_fence signaling in an irq context, while the
	 * destroy needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @pt_ops: page table operations used to encode PTEs for this VM */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct xe_userptr_vm userptr;

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
		/**
		 * @preempt.pm_activate_link: Link to list of rebind workers to be
		 * kicked on resume.
		 */
		struct list_head pm_activate_link;
	} preempt;

	/** @usm: unified memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: allow to track errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/**
	 * @validation: Validation data only valid with the vm resv held.
	 * Note: This is really task state of the task holding the vm resv,
	 * and moving forward we should come up with a better way of passing
	 * this down the call chain.
	 */
	struct {
		/**
		 * @validation.validating: The task that is currently making bos
		 * resident for this vm. Protected by the VM's resv for writing.
		 * Opportunistic reading can be done using READ_ONCE. Note: This
		 * is a workaround for the TTM eviction_valuable() callback not
		 * being passed a struct ttm_operation_ctx. Future work might
		 * want to address this.
		 */
		struct task_struct *validating;
		/**
		 * @validation._exec: The drm_exec context used when locking the
		 * vm resv. Protected by the vm's resv.
		 */
		struct drm_exec *_exec;
	} validation;

	/**
	 * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
	 * Protected by the vm resv.
	 */
	u64 tlb_flush_seqno;
	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
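
/*
 * Illustrative sketch: VMAs on @rebind_list are linked through their
 * combined_links.rebind member, so a walk (holding the vm->lock as documented
 * above) could look like:
 *
 *	struct xe_vma *vma, *next;
 *
 *	list_for_each_entry_safe(vma, next, &vm->rebind_list,
 *				 combined_links.rebind)
 *		rebind_one(vma);
 *
 * where rebind_one() is hypothetical.
 */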

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @immediate: Immediate bind */
	bool immediate;
	/** @read_only: Read only */
	bool read_only;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @is_cpu_addr_mirror: is CPU address mirror binding */
	bool is_cpu_addr_mirror;
	/** @dumpable: whether BO is dumped on GPU hang */
	bool dumpable;
	/** @invalidate_on_bind: invalidate the VMA before bind */
	bool invalidate_on_bind;
	/** @pat_index: The PAT index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** struct xe_vma_op_map_range - VMA map range operation */
struct xe_vma_op_map_range {
	/** @vma: VMA to map (system allocator VMA) */
	struct xe_vma *vma;
	/** @range: SVM range to map */
	struct xe_svm_range *range;
};

/** struct xe_vma_op_unmap_range - VMA unmap range operation */
struct xe_vma_op_unmap_range {
	/** @range: SVM range to unmap */
	struct xe_svm_range *range;
};

/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
struct xe_vma_op_prefetch_range {
	/** @range: xarray for SVM ranges data */
	struct xarray range;
	/** @ranges_count: number of SVM ranges to map */
	u32 ranges_count;
	/**
	 * @tile: Pointer to the tile structure containing memory to prefetch.
	 *        NULL if the requested prefetch region is smem
	 */
	struct xe_tile *tile;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(0),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(1),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(2),
};

/** enum xe_vma_subop - VMA sub-operation */
enum xe_vma_subop {
	/** @XE_VMA_SUBOP_MAP_RANGE: Map range */
	XE_VMA_SUBOP_MAP_RANGE,
	/** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
	XE_VMA_SUBOP_UNMAP_RANGE,
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;
	/** @subop: user defined sub-operation */
	enum xe_vma_subop subop;
	/** @tile_mask: Tile mask for operation */
	u8 tile_mask;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
		/** @map_range: VMA map range operation specific data */
		struct xe_vma_op_map_range map_range;
		/** @unmap_range: VMA unmap range operation specific data */
		struct xe_vma_op_unmap_range unmap_range;
		/** @prefetch_range: VMA prefetch range operation specific data */
		struct xe_vma_op_prefetch_range prefetch_range;
	};
};

/** struct xe_vma_ops - VMA operations */
struct xe_vma_ops {
	/** @list: list of VMA operations */
	struct list_head list;
	/** @vm: VM */
	struct xe_vm *vm;
	/** @q: exec queue for VMA operations */
	struct xe_exec_queue *q;
	/** @syncs: syncs for these operations */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @pt_update_ops: page table update operations */
	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
	/** @flags: flags signifying the properties of these xe_vma_ops */
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
#define XE_VMA_OPS_FLAG_MADVISE          BIT(1)
	u32 flags;
#ifdef TEST_VM_OPS_ERROR
	/** @inject_error: inject error to test error handling */
	bool inject_error;
#endif
};
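
/*
 * Illustrative sketch: operations are chained on @list through xe_vma_op.link,
 * and the active union member follows the base GPUVA op type, e.g.:
 *
 *	struct xe_vma_op *op;
 *
 *	list_for_each_entry(op, &vops->list, link) {
 *		switch (op->base.op) {
 *		case DRM_GPUVA_OP_MAP:
 *			handle_map(&op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			handle_remap(&op->remap);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 * where handle_map() and handle_remap() are hypothetical.
 */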

#endif