/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Mask of tiles for which a binding is present for
	 * this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Mask of tiles for which the binding has been
	 * invalidated for this range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
	 * locking.
	 */
	u8 skip_migrate :1;
};
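
/*
 * Example (illustrative sketch, not taken from the driver): @tile_present and
 * @tile_invalidated are per-tile bitmasks, so a caller that already holds the
 * GPU SVM notifier lock could test whether a given tile still has a valid
 * binding for a range roughly as below. The helper name is hypothetical and
 * indexing the masks by tile->id is an assumption made for illustration.
 *
 *	static bool example_range_bound_on_tile(struct xe_svm_range *range,
 *						struct xe_tile *tile)
 *	{
 *		u8 tile_bit = BIT(tile->id);
 *
 *		// Bound on this tile and not invalidated since the binding.
 *		return (range->tile_present & tile_bit) &&
 *		       !(range->tile_invalidated & tile_bit);
 *	}
 */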

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}
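
/*
 * Example (illustrative sketch, not taken from the driver):
 * xe_svm_range_pages_valid() is a thin wrapper around
 * drm_gpusvm_range_pages_valid(), so a caller that has looked up a range can
 * use it to decide whether the backing pages still need to be (re)collected
 * before building or reusing a GPU binding:
 *
 *	if (!xe_svm_range_pages_valid(range)) {
 *		// Pages were never populated or have since been invalidated;
 *		// the caller would go back to drm_gpusvm to fault them in
 *		// before mapping the range.
 *	}
 */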

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
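
/*
 * Example (illustrative sketch, not taken from the driver): going by its
 * signature, xe_svm_has_mapping() is an overlap query, so a caller that must
 * not touch an address range already backed by SVM mappings could gate on it.
 * The error code below is a hypothetical choice for illustration only.
 *
 *	if (xe_svm_has_mapping(vm, start, end))
 *		return -EBUSY;
 */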
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
#endif
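
/*
 * Example (illustrative sketch of the call flow implied by the declarations
 * above, not code taken from xe_vm.c): an SVM-capable VM sets up its SVM
 * state once, services GPU faults while it is live, and tears down in two
 * steps; vm, vma, gt, fault_addr and atomic are assumed to come from the
 * surrounding caller.
 *
 *	int err = xe_svm_init(vm);
 *	if (err)
 *		return err;
 *
 *	// On a GPU page fault that lands in an SVM-managed VMA:
 *	err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
 *
 *	// Teardown: stop SVM activity first, then free its state.
 *	xe_svm_close(vm);
 *	xe_svm_fini(vm);
 */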

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
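
/*
 * Example (illustrative sketch, not taken from the driver): the macros above
 * wrap the drm_gpusvm notifier lock embedded in the VM, which is the same
 * lock xe_svm_range_has_dma_mapping() asserts. A reader that wants a stable
 * view of a range's mapping state could bracket the check like this:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range)) {
 *		// Safe to consult the range's DMA mapping state here.
 *	}
 *	xe_svm_notifier_unlock(vm);
 */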

#endif