/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/*
 * Contains declarations that are STUBBED, i.e. rendered no-ops, in order to
 * facilitate userland VMA testing.
 */
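
/*
 * A minimal sketch of the intended usage (the file names below are
 * illustrative assumptions, not part of this header): a userland test
 * translation unit includes this header before pulling in the kernel VMA
 * code, so the stubs here satisfy that code's dependencies:
 *
 *	#include "vma_internal.h"	// this header: no-op stubs
 *	#include "../../../mm/vma.c"	// kernel VMA logic, built in userland
 */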

/* Forward declarations. */
struct mm_struct;
struct vm_area_struct;
struct vm_area_desc;
struct pagetable_move_control;
struct mmap_action;
struct file;
struct anon_vma;
struct anon_vma_chain;
struct address_space;
struct unmap_desc;

#define __bitwise
#define __randomize_layout

#define FIRST_USER_ADDRESS 0UL
#define USER_PGTABLES_CEILING 0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

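/*
 * Zero-size placeholder types: the code under test only needs these to exist
 * and be passable by value or pointer, never to carry state. (Empty structs
 * are a GNU C extension.)
 */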
struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};

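/*
 * Each stub below keeps the real function's signature but does nothing; where
 * a return value is required, the most neutral or permissive one is used so
 * that the code under test proceeds as if the operation succeeded.
 */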
static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

static inline void mmap_action_prepare(struct mmap_action *action,
				       struct vm_area_desc *desc)
{
}

static inline int mmap_action_complete(struct mmap_action *action,
				       struct vm_area_struct *vma)
{
	return 0;
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline bool shmem_file(struct file *file)
{
	return false;
}

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
				       const struct file *file, vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
					   unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}

static inline int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
			    struct list_head *uf)
{
	return 0;
}

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);

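/*
 * Lock and assertion stubs. The userland tests are assumed to run
 * single-threaded, so lock, unlock and lockdep-style assertion calls can
 * safely collapse to nothing.
 */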
static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

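/*
 * mutex_is_locked() reports true so that any "lock must be held" style
 * assertions in the code under test are satisfied.
 */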
static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

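/* No signals are delivered in the test environment, so killable waits never abort. */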
static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(struct file *file)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags,
				 unsigned long npages)
{
	return true;
}

static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

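/*
 * capable() always grants the capability, so permission-gated paths in the
 * code under test are exercised rather than skipped.
 */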
static inline bool capable(int cap)
{
	return true;
}

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

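/*
 * Page-table teardown stubs: there are no real page tables in userland, so
 * the mmu_gather/TLB machinery reduces to no-ops.
 */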
static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}