xref: /linux/drivers/gpu/drm/xe/xe_userptr.h (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #ifndef _XE_USERPTR_H_
7 #define _XE_USERPTR_H_
8 
9 #include <linux/list.h>
10 #include <linux/mutex.h>
11 #include <linux/notifier.h>
12 #include <linux/scatterlist.h>
13 #include <linux/spinlock.h>
14 
15 #include <drm/drm_gpusvm.h>
16 
17 #include "xe_tlb_inval_types.h"
18 
19 struct xe_vm;
20 struct xe_vma;
21 struct xe_userptr_vma;
22 
/** struct xe_userptr_vm - User pointer VM level state */
struct xe_userptr_vm {
	/**
	 * @repin_list: list of VMAs which are user pointers,
	 * and needs repinning. Protected by the owning VM's lock.
	 */
	struct list_head repin_list;
	/**
	 * @invalidated_lock: Protects the
	 * @invalidated list.
	 */
	spinlock_t invalidated_lock;
	/**
	 * @invalidated: List of invalidated userptrs, not yet
	 * picked
	 * up for revalidation. Protected from access with the
	 * @invalidated_lock. Removing items from the list
	 * additionally requires the VM's lock in write mode, and adding
	 * items to the list requires either the VM's svm.gpusvm.notifier_lock
	 * in write mode, OR the VM's lock in write mode.
	 */
	struct list_head invalidated;
};
46 
/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the &xe_userptr_vm.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into &xe_userptr_vm.repin_list if repin needed. */
	struct list_head repin_link;
	/**
	 * @pages: gpusvm pages for this user pointer.
	 */
	struct drm_gpusvm_pages pages;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation call back)
	 */
	struct mmu_interval_notifier notifier;
	/**
	 * @finish: MMU notifier finish structure for two-pass invalidation.
	 * Embedded here to avoid allocation in the notifier callback.
	 * Protected by struct xe_vm::svm.gpusvm.notifier_lock in write mode
	 * alternatively by the same lock in read mode *and* the vm resv held.
	 */
	struct mmu_interval_notifier_finish finish;
	/**
	 * @inval_batch: TLB invalidation batch for deferred completion.
	 * Stores an in-flight TLB invalidation submitted during a two-pass
	 * notifier so the wait can be deferred to a subsequent pass, allowing
	 * multiple GPUs to be signalled before any of them are waited on.
	 * Protected using the same locking as @finish.
	 */
	struct xe_tlb_inval_batch inval_batch;
	/**
	 * @finish_inuse: Whether @finish is currently in use by an in-progress
	 * two-pass invalidation.
	 * Protected using the same locking as @finish.
	 */
	bool finish_inuse;
	/**
	 * @tlb_inval_submitted: Whether a TLB invalidation has been submitted
	 * via @inval_batch and is pending completion.  When set, the next pass
	 * must call xe_tlb_inval_batch_wait() before reusing @inval_batch.
	 * Protected using the same locking as @finish.
	 */
	bool tlb_inval_submitted;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->svm.gpusvm.notifier_lock in read mode and vm->resv held.
	 * read: vm->svm.gpusvm.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/**
	 * @divisor: Debug-only invalidation-injection divisor, presumably
	 * used to periodically force userptr invalidation (see
	 * xe_vma_userptr_force_invalidate()); exact semantics are defined at
	 * the use site - confirm there.
	 */
	u32 divisor;
#endif
};
99 
100 #if IS_ENABLED(CONFIG_DRM_GPUSVM)
101 void xe_userptr_remove(struct xe_userptr_vma *uvma);
102 int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
103 		     unsigned long range);
104 void xe_userptr_destroy(struct xe_userptr_vma *uvma);
105 
106 int xe_vm_userptr_pin(struct xe_vm *vm);
107 int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
108 int xe_vm_userptr_check_repin(struct xe_vm *vm);
109 int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
110 int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
111 #else
112 static inline void xe_userptr_remove(struct xe_userptr_vma *uvma) {}
113 
114 static inline int xe_userptr_setup(struct xe_userptr_vma *uvma,
115 				   unsigned long start, unsigned long range)
116 {
117 	return -ENODEV;
118 }
119 
120 static inline void xe_userptr_destroy(struct xe_userptr_vma *uvma) {}
121 
122 static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; }
123 static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; }
124 static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; }
125 static inline int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { return -ENODEV; }
126 static inline int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { return -ENODEV; };
127 #endif
128 
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
/*
 * Debug hook: presumably forces an invalidation of the userptr VMA's range
 * for fault-injection testing - confirm semantics at the definition.
 */
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
/* No-op when invalidation injection is not built in. */
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
136 #endif
137