xref: /linux/tools/testing/vma/include/stubs.h (revision 3e4bb2706817710d9461394da8b75be79981586b)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 
3 #pragma once
4 
5 /*
6  * Contains declarations that are STUBBED, that is that are rendered no-ops, in
 * order to facilitate userland VMA testing.
8  */
9 
/*
 * Forward declarations. These kernel types are referenced only through
 * pointers in the stubs below, so incomplete declarations suffice.
 */
struct mm_struct;
struct vm_area_struct;
struct vm_area_desc;
struct pagetable_move_control;
struct mmap_action;
struct file;
struct anon_vma;
struct anon_vma_chain;
struct address_space;
struct unmap_desc;
/*
 * Also declared here so that tags first encountered inside parameter lists
 * or return types further down do not acquire prototype scope (which would
 * otherwise trigger "declared inside parameter list" warnings).
 */
struct list_head;
struct rb_root_cached;
struct anon_vma_name;
21 
/* Kernel/sparse annotations have no meaning in userland; compile them out. */
#define __bitwise
#define __randomize_layout

/* Address-space layout constants; the tests never walk real page tables. */
#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

/* NUMA memory policy is not exercised by these tests. */
#define vma_policy(vma) NULL

/* Lockdep-annotated locking becomes a no-op in the harness. */
#define down_write_nest_lock(sem, nest_lock)

/* KCSAN instrumentation is unavailable in userland; evaluate the expression raw. */
#define data_race(expr) expr

/* KCSAN exclusive-writer assertion: compiled out. */
#define ASSERT_EXCLUSIVE_WRITER(x)
35 
/* Empty placeholder types: the tests only need these to exist, not to work. */
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};
41 
/*
 * Stubbed helpers: each function below keeps its kernel signature but does
 * no work, returning a benign default wherever a value is required.
 */
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

/* Report that zero bytes of page tables were moved. */
static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
}

/* KSM hooks: always succeed / do nothing. */
static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

/* NUMA balancing state is not modelled. */
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

/* Anonymous VMA names are not modelled; duplicating/freeing is a no-op. */
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

/* mmap action hooks: report success without doing anything. */
static inline int mmap_action_prepare(struct vm_area_desc *desc)
{
	return 0;
}

static inline int mmap_action_complete(struct vm_area_struct *vma,
				       struct mmap_action *action)
{
	return 0;
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

/* No file is ever treated as shmem-backed. */
static inline bool shmem_file(struct file *file)
{
	return false;
}

/* Pass the flags through unchanged: KSM never adjusts them here. */
static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
		const struct file *file, vma_flags_t vma_flags)
{
	return vma_flags;
}

/* PFN remapping: prepare does nothing, complete always succeeds. */
static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}
119 
/*
 * Stubbed munmap of [start, start + len): always reports success.
 *
 * The parameters are named (the original left the first three unnamed),
 * since unnamed parameters in a *definition* are only valid from C23 and
 * every other function in this header names its parameters.
 */
static inline int do_munmap(struct mm_struct *mm, unsigned long start,
		size_t len, struct list_head *uf)
{
	return 0;
}
125 
/* Currently stubbed but we may later wish to un-stub. */
/* Forward declaration only; the (empty) definition appears later in this header. */
static inline void vm_acct_memory(long pages);

/* mmap lock assertions are compiled out for userland testing. */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
}
132 
133 
/*
 * Locking, signal, policy and accounting stubs: the harness has no real
 * locks, signals or memory accounting, so these all succeed trivially.
 */
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

/* userfaultfd unmap preparation always succeeds. */
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

/* The killable lock is never interrupted. */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

/* Every range is treated as modifiable. */
static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
}

/* All mempolicies compare equal. */
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
}

/* Every mapping is reported as capable of writeback. */
static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

/* Hugetlb, soft-dirty and userfaultfd-wp are never present. */
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

/* Mutexes do nothing, but always report as held so lock assertions pass. */
static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

/* No signals are ever pending. */
static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(const struct file *file)
{
	return false;
}

/* Memory accounting always grants the request. */
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

/* Address-space limits are never hit. */
static inline bool may_expand_vm(struct mm_struct *mm,
				 const vma_flags_t *vma_flags,
				 unsigned long npages)
{
	return true;
}

/* shmem-backed zero mapping setup always succeeds. */
static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}
253 
254 
/* Definition matching the earlier forward declaration; accounting is ignored. */
static inline void vm_acct_memory(long pages)
{
}
258 
/*
 * Interval-tree, uprobe, perf, file and TLB hooks: all reduced to no-ops or
 * benign defaults for userland testing.
 */
static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

/* No VMA is ever DAX-backed. */
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

/* No architecture gate VMA exists in the harness. */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

/* Every flag combination passes architecture validation. */
static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

/* Mapping a file always reports success. */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

/* The test process is treated as having every capability. */
static inline bool capable(int cap)
{
	return true;
}

/* Anonymous VMA names are not modelled: none exist... */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

/* ...and any two names unconditionally compare equal. */
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

/* File reference counting is not modelled; dropping a reference does nothing. */
static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

/* Returns the file unchanged without taking an extra reference. */
static inline struct file *get_file(struct file *f)
{
	return f;
}

/* Duplicating a VMA's mempolicy always succeeds. */
static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}
428 
/*
 * Stubbed hugetlb VMA split at @addr: a no-op in userland tests.
 *
 * Parameters are named (the original left both unnamed, which is only valid
 * C23 in a definition) and the braces match this header's prevailing style.
 */
static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
{
}
430 
/* mlock() support is never reported for any VMA in the harness. */
static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
{
	return false;
}
435