/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;

struct dma_fence;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
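
/*
 * Example (illustrative sketch, not part of this header): holding a vm
 * reference across a locked section. The helper touch_vm_locked() is
 * hypothetical and only stands in for work done while the lock taken by
 * xe_vm_lock() is held.
 *
 *	int foo(struct xe_vm *vm)
 *	{
 *		int err;
 *
 *		xe_vm_get(vm);
 *		err = xe_vm_lock(vm, true);
 *		if (!err) {
 *			touch_vm_locked(vm);
 *			xe_vm_unlock(vm);
 *		}
 *		xe_vm_put(vm);
 *		return err;
 *	}
 */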

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}
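
/*
 * Example (illustrative sketch, not part of this header): classifying a vma
 * with the accessors above. The function name describe_vma() is hypothetical.
 *
 *	static const char *describe_vma(struct xe_vma *vma)
 *	{
 *		if (xe_vma_is_null(vma))
 *			return "NULL/sparse binding";
 *		if (xe_vma_is_cpu_addr_mirror(vma))
 *			return "CPU address mirror";
 *		if (xe_vma_is_userptr(vma))
 *			return "userptr";
 *		return "BO-backed";
 *	}
 *
 * In the final branch xe_vma_bo(vma) is guaranteed to be non-NULL, since a
 * vma without a BO that is neither NULL nor a CPU address mirror is a
 * userptr by definition of xe_vma_is_userptr().
 */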

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);

int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
					  u64 end, u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

int xe_vm_validate_protected(struct xe_vm *vm);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);

/**
 * xe_vm_set_validating() - Register this task as currently making bos resident
 * @vm: Pointer to the vm or NULL.
 * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
 * validating.
 *
 * Register this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, current);
	}
}

/**
 * xe_vm_clear_validating() - Unregister this task as currently making bos resident
 * @vm: Pointer to the vm or NULL
 * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
 *
 * Unregister this task as currently making bos resident for the vm. Intended
 * to avoid eviction by the same task of shared bos bound to the vm.
 * Call with the vm's resv lock held.
 */
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		/* Pairs with READ_ONCE in xe_vm_is_validating() */
		WRITE_ONCE(vm->validating, NULL);
	}
}

/**
 * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
 * by the current task.
 * @vm: Pointer to the vm.
 *
 * If this function returns %true, we should be in a vm resv locked region, since
 * the current process is the same task that called xe_vm_set_validating().
 * The function asserts that this is indeed the case.
 *
 * Return: %true if the task is currently making bos resident, %false otherwise.
 */
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	/* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
	if (READ_ONCE(vm->validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
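
/*
 * Example (illustrative sketch, not part of this header): how the validating
 * helpers above are intended to pair up. The helper make_bos_resident() is
 * hypothetical and stands in for the actual validation work done under the
 * vm's resv lock; eviction paths elsewhere can then consult
 * xe_vm_is_validating() to detect self-eviction by the same task.
 *
 *	int foo(struct xe_vm *vm, bool allow_res_evict)
 *	{
 *		int err;
 *
 *		xe_vm_assert_held(vm);
 *		xe_vm_set_validating(vm, allow_res_evict);
 *		err = make_bos_resident(vm);
 *		xe_vm_clear_validating(vm, allow_res_evict);
 *		return err;
 *	}
 */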

/**
 * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
 * a valid GPU mapping
 * @tile: The tile which the GPU mapping belongs to
 * @tile_present: Tile present mask
 * @tile_invalidated: Tile invalidated mask
 *
 * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
 * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
 * without the notifier lock in userptr or SVM cases, and not reliable without
 * the BO dma-resv lock in the BO case. As such, they should only be used in
 * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
 * invalidation) where it is harmless.
 *
 * Return: True if there are valid GPU pages, False otherwise
 */
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif

#endif