xref: /linux/drivers/gpu/drm/xe/xe_svm.h (revision 28472374291c380c22f40deec07a90d09bcbffb6)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

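/*
 * VRAM device addresses are tagged with the drm_pagemap driver-private
 * interconnect protocol; XE_INTERCONNECT_VRAM below is simply Xe's alias
 * for it.
 */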
#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into the VM's list of SVM ranges
	 * pending garbage collection. Protected by the VM's garbage collect
	 * lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of the tiles for which a binding is
	 * present for this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of the tiles for which the binding has
	 * been invalidated for this range. Protected by the GPU SVM notifier
	 * lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM. Protected by GPU fault
	 * handler locking.
	 */
	u8 skip_migrate :1;
};
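
/*
 * Note (illustrative, not part of this header's API): since @base is embedded
 * in &struct xe_svm_range, the Xe range can be recovered from the
 * &struct drm_gpusvm_range that the GPU SVM core passes around, e.g. with a
 * hypothetical helper along these lines:
 *
 *	static inline struct xe_svm_range *
 *	to_xe_svm_range(struct drm_gpusvm_range *r)
 *	{
 *		return container_of(r, struct xe_svm_range, base);
 *	}
 */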

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
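
/*
 * Usage sketch (illustrative only; vm, range and consume_range_dma() are
 * placeholders, not symbols defined by this driver): helpers such as
 * xe_svm_range_has_dma_mapping() expect the GPU SVM notifier lock to be
 * held, which the macros above take and release:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		consume_range_dma(range);
 *	xe_svm_notifier_unlock(vm);
 */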

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif
#endif