/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

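/**
 * xe_vm_get() - Take a reference on a vm
 * @vm: The vm
 *
 * Return: @vm, with an extra reference held on the underlying gpuvm.
 */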
static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

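/**
 * xe_vm_put() - Drop a reference on a vm
 * @vm: The vm
 *
 * Releases a reference on the underlying gpuvm; the vm is freed once the
 * last reference is dropped.
 */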
static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

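/*
 * Typical usage (a minimal sketch): take the vm lock around operations on
 * vm state, passing intr == true for an interruptible wait:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... operate on state protected by the vm's reservation object ...
 *	xe_vm_unlock(vm);
 */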
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Accessors for vma members
 *
 * Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

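/*
 * Note (visible from the accessors above and below): for userptr vmas the
 * gpuva.gem.offset field is reused to store the userptr address, so
 * xe_vma_bo_offset() and xe_vma_userptr() read the same field.
 */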
static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

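/*
 * Taken together, the predicates above partition vmas into three kinds:
 * bo-backed (xe_vma_bo() != NULL), null/sparse (no bo, DRM_GPUVA_SPARSE
 * set) and userptr (no bo and not sparse).
 */
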
/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}
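
/*
 * Mode summary (as implied by the helpers above): a long-running (LR) vm
 * keeps its bindings valid either via page faults (fault mode) or via
 * preempt fences; a vm is in preempt-fence mode iff it is LR and not in
 * fault mode.
 */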

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

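/**
 * xe_vm_queue_rebind_worker() - Queue the rebind worker of a preempt-fence vm
 * @vm: The vm, which must be in preempt-fence mode
 */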
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there
 * was nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
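
/*
 * Snapshot flow (a sketch, as implied by the declarations below):
 * xe_vm_snapshot_capture() records the vm's mappings,
 * xe_vm_snapshot_capture_delayed() later pulls in buffer contents outside
 * the capture path, xe_vm_snapshot_print() emits the snapshot to a
 * drm_printer, and xe_vm_snapshot_free() releases it.
 */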
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#endif /* _XE_VM_H_ */