xref: /linux/drivers/gpu/drm/xe/xe_svm.h (revision 85502b2214d50ba0ddf2a5fb454e4d28a160d175)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into the VM's list of SVM ranges
	 * pending garbage collection. Protected by the VM's garbage
	 * collector lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask indicating which tiles have a binding
	 * present for this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask indicating which tiles have had their
	 * binding invalidated for this range. Protected by the GPU SVM
	 * notifier lock.
	 */
	u8 tile_invalidated;
};
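
/*
 * Illustrative sketch (not part of this header): @tile_present and
 * @tile_invalidated are per-tile bitmasks, so a range is usefully bound on a
 * tile when its bit is set in the former and clear in the latter. Indexing
 * by tile id via BIT() and the helper name below are assumptions for
 * illustration only; both masks must be read under the GPU SVM notifier
 * lock.
 *
 *	static bool example_range_valid_on_tile(struct xe_svm_range *range,
 *						u8 tile_id)
 *	{
 *		return (range->tile_present & ~range->tile_invalidated) &
 *			BIT(tile_id);
 *	}
 */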

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}
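
/*
 * Illustrative sketch (not part of the driver): page validity is normally
 * sampled with the GPU SVM notifier lock held, e.g. as a final check before
 * committing a GPU binding, so the answer cannot change underneath the
 * caller. @vm and @range below are hypothetical and @range is assumed to
 * belong to @vm.
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_pages_valid(range)) {
 *		// Pages are still valid; safe to commit the binding.
 *	}
 *	xe_svm_notifier_unlock(vm);
 */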

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);
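
/*
 * Illustrative sketch (not the actual fault handler): a GT page fault
 * handler is expected to route faults on CPU-address-mirror (SVM) VMAs into
 * xe_svm_handle_pagefault(), which faults in the backing pages, migrating to
 * VRAM where applicable, and (re)binds the faulting range. The wrapper below
 * is hypothetical and exists only to show the call shape.
 *
 *	static int example_route_svm_fault(struct xe_vm *vm, struct xe_vma *vma,
 *					   struct xe_gt *gt, u64 fault_addr,
 *					   bool atomic)
 *	{
 *		return xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 *	}
 */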

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
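
/*
 * Illustrative sketch (not part of the driver): xe_svm_notifier_lock() /
 * xe_svm_notifier_unlock() bracket checks of range state from outside the
 * invalidation path, while xe_svm_assert_in_notifier() documents helpers
 * that expect the write side of the notifier lock to already be held, as
 * inside an invalidation notifier. Helper names below are hypothetical and
 * @range is assumed to belong to @vm.
 *
 *	static bool example_range_mapped(struct xe_vm *vm,
 *					 struct xe_svm_range *range)
 *	{
 *		bool mapped;
 *
 *		xe_svm_notifier_lock(vm);
 *		mapped = xe_svm_range_has_dma_mapping(range);
 *		xe_svm_notifier_unlock(vm);
 *
 *		return mapped;
 *	}
 *
 *	static void example_invalidation_helper(struct xe_vm *vm)
 *	{
 *		xe_svm_assert_in_notifier(vm);
 *		// ... update per-range tile_invalidated masks here ...
 *	}
 */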

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}
#endif
#endif