/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of tiles with a binding present for this
	 * range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of tiles with an invalidated binding
	 * for this range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM, protected by GPU fault
	 * handler locking.
	 */
	u8 skip_migrate :1;
};
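
/*
 * Example (illustrative sketch only, not part of the driver): given the
 * tile masks above and with the GPU SVM notifier lock held, a caller could
 * test whether tile 'id' still has a valid binding for @range. The exact
 * expression is an assumption based on the field descriptions.
 *
 *	bool bound = (range->tile_present & ~range->tile_invalidated) &
 *		     BIT(id);
 */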

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}
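
/*
 * Example (illustrative sketch only): this check is expected to run under
 * the GPU SVM notifier lock (see the DMA-mapping helper below, which
 * asserts that lock). The flow and the 'needs_fault' variable are
 * hypothetical, not taken from the driver.
 *
 *	xe_svm_notifier_lock(vm);
 *	needs_fault = !xe_svm_range_pages_valid(range);
 *	xe_svm_notifier_unlock(vm);
 */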

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
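
/*
 * Example (illustrative sketch only): rough order in which a VM is expected
 * to use this interface. Error handling is elided and the surrounding code
 * is hypothetical, not taken from the driver.
 *
 *	err = xe_svm_init(vm);			// at VM creation
 *	...
 *	err = xe_svm_handle_pagefault(vm, vma, tile, fault_addr, atomic);
 *						// from the GPU fault handler
 *	...
 *	xe_svm_close(vm);			// stop servicing new faults
 *	xe_svm_fini(vm);			// at VM destruction
 */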
#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
#endif

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}
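
/*
 * Example (illustrative sketch only): the lockdep assertion above requires
 * the GPU SVM notifier lock, which the macros below wrap, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	mapped = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 *
 * The 'mapped' variable is hypothetical.
 */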

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
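
/*
 * Example (illustrative sketch only): code running in the notifier path,
 * which already holds the notifier lock in write mode, can document that
 * requirement with xe_svm_assert_in_notifier(). The function below is
 * hypothetical, merely marking all present bindings as invalidated per the
 * field descriptions in struct xe_svm_range.
 *
 *	static void example_invalidate(struct xe_vm *vm,
 *				       struct xe_svm_range *range)
 *	{
 *		xe_svm_assert_in_notifier(vm);
 *		range->tile_invalidated = range->tile_present;
 *	}
 */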

#endif