xref: /linux/tools/testing/vma/include/stubs.h (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/*
 * Contains declarations that are STUBBED, that is that are rendered no-ops, in
 * order to facilitate userland VMA testing.
 */

/* Forward declarations. */
struct mm_struct;
struct vm_area_struct;
struct vm_area_desc;
struct pagetable_move_control;
struct mmap_action;
struct file;
struct anon_vma;
struct anon_vma_chain;
struct address_space;
struct unmap_desc;

/* Kernel annotation attributes carry no meaning in userland; define empty. */
#define __bitwise
#define __randomize_layout

#define FIRST_USER_ADDRESS	0UL
#define USER_PGTABLES_CEILING	0UL

/* Tests never attach a mempolicy, so a VMA's policy is always NULL. */
#define vma_policy(vma) NULL

/* Lockdep nesting annotation - a no-op in userland. */
#define down_write_nest_lock(sem, nest_lock)

/* KCSAN helper - evaluate the expression plainly in userland. */
#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

/* Empty placeholder types; tests only need them to exist. */
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};

/* Stubbed: userfaultfd completion is not modelled in userland tests. */
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

/* Stubbed: no page tables exist, so report zero bytes moved. */
static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
}

/* Stubbed: KSM hooks always report success. */
static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

/* Stubbed: NUMA balancing state is not modelled. */
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

/* Stubbed: anon VMA names are not tracked in userland tests. */
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

/* Stubbed: mmap actions always succeed. */
static inline int mmap_action_prepare(struct vm_area_desc *desc)
{
	return 0;
}

static inline int mmap_action_complete(struct vm_area_struct *vma,
				       struct mmap_action *action,
				       bool is_compat)
{
	return 0;
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

/* Stubbed: no file is ever shmem-backed in these tests. */
static inline bool shmem_file(struct file *file)
{
	return false;
}

ksm_vma_flags(struct mm_struct * mm,const struct file * file,vma_flags_t vma_flags)105 static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
106 		const struct file *file, vma_flags_t vma_flags)
107 {
108 	return vma_flags;
109 }
110 
remap_pfn_range_prepare(struct vm_area_desc * desc,unsigned long pfn)111 static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
112 {
113 }
114 
remap_pfn_range_complete(struct vm_area_struct * vma,unsigned long addr,unsigned long pfn,unsigned long size,pgprot_t pgprot)115 static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
116 		unsigned long pfn, unsigned long size, pgprot_t pgprot)
117 {
118 	return 0;
119 }
120 
do_munmap(struct mm_struct *,unsigned long,size_t,struct list_head * uf)121 static inline int do_munmap(struct mm_struct *, unsigned long, size_t,
122 		struct list_head *uf)
123 {
124 	return 0;
125 }
126 
127 /* Currently stubbed but we may later wish to un-stub. */
128 static inline void vm_acct_memory(long pages);
129 
/* Stubbed: mmap locking is not modelled, so assertions are no-ops. */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

/* Stubbed: userfaultfd preparation always succeeds. */
static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

/* Stubbed: lock acquisition always succeeds (never interrupted). */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

/* Stubbed: mseal() checks always permit modification. */
static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
}

/* Stubbed: all mempolicies compare equal, so policy never blocks a merge. */
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

/* Stubbed: khugepaged is not modelled in userland tests. */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
			  vm_flags_t vm_flags)
{
}

/* Stubbed predicates: fixed answers chosen so tests take the common path. */
static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

/* Stubbed: single-threaded tests need no real mutual exclusion. */
static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

/* Always report "locked" so lock-held assertions pass. */
static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(const struct file *file)
{
	return false;
}

/* Stubbed: memory accounting checks always grant the request. */
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

/* Stubbed: address space limits are never exceeded in tests. */
static inline bool may_expand_vm(struct mm_struct *mm,
				 const vma_flags_t *vma_flags,
				 unsigned long npages)
{
	return true;
}

/* Stubbed: shmem setup of /dev/zero mappings always succeeds. */
static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

/* Stubbed: no memory accounting is performed. */
static inline void vm_acct_memory(long pages)
{
}

/* Stubbed: interval trees are not maintained in userland tests. */
static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

/* Stubbed: uprobes and perf events are not modelled. */
static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

/* Stubbed: DAX is not modelled. */
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

/* Stubbed: there is no gate VMA in userland tests. */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

/* Stubbed: all flag combinations are architecturally valid. */
static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

/* Stubbed: the ->close() hook is a no-op. */
static inline void vma_close(struct vm_area_struct *vma)
{
}

/* Stubbed: the ->mmap() hook always succeeds. */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

/* Stubbed: capability checks always pass. */
static inline bool capable(int cap)
{
	return true;
}

/* Stubbed: VMAs carry no anon name in userland tests. */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

/* Stubbed: userfaultfd contexts never prevent merging. */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

/* Stubbed: anon VMA names always compare equal. */
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

/* Stubbed: scheduling hints mean nothing in userland. */
static inline void might_sleep(void)
{
}

/* Stubbed: reference counting on files and policies is not modelled. */
static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

/* Stubbed: TLB and page-table teardown is not modelled. */
static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

/* Stubbed: no refcount is taken; simply hand back the file. */
static inline struct file *get_file(struct file *f)
{
	return f;
}

/* Stubbed: policy duplication always succeeds. */
static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

/* Stubbed: THP adjustment around a split/merge is not modelled. */
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

/* Stubbed: hugetlb VMA splitting is not modelled. */
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

/* Stubbed: mlock() is never supported in userland tests. */
static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
{
	return false;
}