/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}
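/*
 * Example (illustrative sketch only, not part of the driver API): callers are
 * expected to use the accessors above rather than poking at vma->gpuva
 * directly, so the underlying drm_gpuva layout can change without touching
 * users. The hypothetical helper below shows a range-containment check built
 * purely on xe_vma_start() and xe_vma_end(); note that xe_vma_end() is
 * exclusive (start + size).
 *
 *	static bool example_vma_contains(struct xe_vma *vma, u64 addr)
 *	{
 *		return addr >= xe_vma_start(vma) && addr < xe_vma_end(vma);
 *	}
 */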
static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);

int xe_vm_invalidate_vma(struct xe_vma *vma);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}
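/*
 * Example (illustrative sketch only): the helpers above classify a vma into
 * exactly one of three kinds. A bo-backed vma has a GEM object attached
 * (xe_vma_bo() != NULL), a NULL/sparse vma has no bo and carries
 * DRM_GPUVA_SPARSE, and a userptr vma is the remaining case (no bo, not
 * sparse). The handle_*() functions below are hypothetical and only stand in
 * for caller-specific handling:
 *
 *	if (!xe_vma_has_no_bo(vma))
 *		handle_bo_vma(xe_vma_bo(vma));
 *	else if (xe_vma_is_null(vma))
 *		handle_null_vma(vma);
 *	else
 *		handle_userptr_vma(to_userptr_vma(vma));
 */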
/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker. This function
 * should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

#endif /* _XE_VM_H_ */
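/*
 * Example (illustrative sketch only): per the xe_vm_reactivate_rebind()
 * kernel-doc above, a compute-mode (preempt fence mode) submission path is
 * expected to reactivate the rebind worker once a batch has been queued. The
 * submit_batch_to_vm() helper below is hypothetical and merely marks where
 * the caller's own submission code would go:
 *
 *	err = submit_batch_to_vm(vm, batch);
 *	if (!err)
 *		xe_vm_reactivate_rebind(vm);
 */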