/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of a binding being present for this range.
	 * Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of a binding being invalidated for this
	 * range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
	 * locking.
	 */
	u8 skip_migrate :1;
};

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

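/**
 * xe_devm_add() - Remap and provide memmap backing for device memory
 * @tile: tile that the memory region belongs to
 * @vr: VRAM memory region to remap
 *
 * Remaps the device memory into the host physical address space and creates
 * struct pages to back it.
 *
 * Return: 0 on success, negative error code on error.
 */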
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

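/**
 * xe_svm_init() - SVM initialize
 * @vm: The VM.
 *
 * Initialize SVM state, which is embedded within the VM.
 *
 * Return: 0 on success, negative error code on error.
 */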
int xe_svm_init(struct xe_vm *vm);

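/**
 * xe_svm_fini() - SVM finalize
 * @vm: The VM.
 *
 * Finalize SVM state, which is embedded within the VM.
 */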
void xe_svm_fini(struct xe_vm *vm);

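/**
 * xe_svm_close() - SVM close
 * @vm: The VM.
 *
 * Close SVM state (i.e., stop and flush all SVM actions).
 */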
void xe_svm_close(struct xe_vm *vm);

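/**
 * xe_svm_handle_pagefault() - SVM handle page fault
 * @vm: The VM.
 * @vma: The CPU address mirror VMA.
 * @gt: The GT upon which the fault occurred.
 * @fault_addr: The GPU fault address.
 * @atomic: The fault atomic access bit.
 *
 * Create GPU bindings for an SVM page fault, optionally migrating the range
 * to VRAM first.
 *
 * Return: 0 on success, negative error code on error.
 */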
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

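/**
 * xe_svm_has_mapping() - SVM has mappings
 * @vm: The VM.
 * @start: Start address.
 * @end: End address.
 *
 * Check if an address range has SVM mappings.
 *
 * Return: True if the address range has an SVM mapping, False otherwise.
 */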
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

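/**
 * xe_svm_bo_evict() - SVM evict BO to system memory
 * @bo: BO to evict
 *
 * Evict the BO's SVM allocation to system memory; device pages are evicted
 * before this returns.
 *
 * Return: 0 on success, negative error code on error.
 */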
int xe_svm_bo_evict(struct xe_bo *bo);

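/**
 * xe_svm_range_debug() - SVM range debug
 * @range: SVM range
 * @operation: Label for the operation being logged
 *
 * Emit debug information for the SVM range.
 */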
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

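/**
 * xe_svm_assert_in_notifier() - Assert that the GPU SVM notifier lock is held
 * @vm__: The VM.
 *
 * Asserts that the caller holds the GPU SVM notifier lock in write mode.
 */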
#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

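/**
 * xe_svm_notifier_lock() - Take the GPU SVM notifier lock
 * @vm__: The VM.
 */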
#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

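/**
 * xe_svm_notifier_unlock() - Release the GPU SVM notifier lock
 * @vm__: The VM.
 */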
#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

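/*
 * Example usage (illustrative sketch only; consume_range() is a hypothetical
 * caller): per the lockdep assert above, range state such as
 * xe_svm_range_has_dma_mapping() must be queried with the notifier lock held:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		consume_range(range);
 *	xe_svm_notifier_unlock(vm);
 */
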
#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
#endif /* IS_ENABLED(CONFIG_DRM_XE_GPUSVM) */
#endif /* _XE_SVM_H_ */