/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/*
 * Contains declarations that are STUBBED, that is, rendered no-ops, in
 * order to facilitate userland VMA testing.
 */
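
/*
 * Example (illustrative only, not part of any kernel API): with these stubs
 * in place, VMA code built for userland can call kernel hooks without
 * pulling in their real implementations, e.g.:
 *
 *	khugepaged_enter_vma(vma, vm_flags);	// empty body, does nothing
 *	if (mmap_write_lock_killable(mm))	// always "succeeds" (returns 0)
 *		...
 *
 * so the tests exercise only the VMA manipulation logic itself.
 */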

/* Forward declarations. */
struct mm_struct;
struct vm_area_struct;
struct vm_area_desc;
struct pagetable_move_control;
struct mmap_action;
struct file;
struct anon_vma;
struct anon_vma_chain;
struct anon_vma_name;
struct address_space;
struct unmap_desc;
struct list_head;
struct rb_root_cached;

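/* Kernel build annotations that mean nothing when building for userland. */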
#define __bitwise
#define __randomize_layout

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

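/* NUMA memory policies are not modelled; every VMA reports none. */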
#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

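/* KCSAN annotations, reduced to the bare expression (or nothing). */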
#define data_race(expr) (expr)

#define ASSERT_EXCLUSIVE_WRITER(x)

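/* Opaque placeholder types; the stubs never examine their contents. */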
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

static inline void mmap_action_prepare(struct mmap_action *action,
					   struct vm_area_desc *desc)
{
}

static inline int mmap_action_complete(struct mmap_action *action,
					   struct vm_area_struct *vma)
{
	return 0;
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline bool shmem_file(struct file *file)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
		const struct file *file, vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}

static inline int do_munmap(struct mm_struct *mm, unsigned long start,
		size_t len, struct list_head *uf)
{
	return 0;
}

/*
 * Currently stubbed (the no-op definition appears below), but we may later
 * wish to un-stub.
 */
static inline void vm_acct_memory(long pages);

static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

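/* Locking is a no-op throughout; the tests do not exercise concurrency. */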
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
}

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

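/* Pretend the mutex is held so lock assertions in tested code pass. */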
static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(struct file *file)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
				 unsigned long npages)
{
	return true;
}

static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

static inline void vm_acct_memory(long pages)
{
}

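/* Rmap bookkeeping (interval trees, uprobes, dcache flushes) is skipped. */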
static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

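/* The test harness behaves as if it holds every capability. */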
static inline bool capable(int cap)
{
	return true;
}

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
{
}
429