/* SPDX-License-Identifier: GPL-2.0+ */

#pragma once

/*
 * Contains declarations that are STUBBED, that is, rendered no-ops, in order
 * to facilitate userland VMA testing.
 */

/* Forward declarations. */
struct mm_struct;
struct vm_area_struct;
struct vm_area_desc;
struct pagetable_move_control;
struct mmap_action;
struct file;
struct anon_vma;
struct anon_vma_chain;
struct anon_vma_name;
struct address_space;
struct unmap_desc;
struct list_head;
struct rb_root_cached;

#define __bitwise
#define __randomize_layout

#define FIRST_USER_ADDRESS 0UL
#define USER_PGTABLES_CEILING 0UL

#define vma_policy(vma) NULL

#define down_write_nest_lock(sem, nest_lock)

#define data_race(expr) expr

#define ASSERT_EXCLUSIVE_WRITER(x)

struct vm_userfaultfd_ctx {};
struct mempolicy {};
struct mmu_gather {};
struct mutex {};
struct vm_fault {};

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	return 0;
}

static inline void free_pgd_range(struct mmu_gather *tlb,
				  unsigned long addr, unsigned long end,
				  unsigned long floor, unsigned long ceiling)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
}

static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
}

static inline int mmap_action_prepare(struct vm_area_desc *desc)
{
	return 0;
}

static inline int mmap_action_complete(struct vm_area_struct *vma,
				       struct mmap_action *action,
				       bool is_compat)
{
	return 0;
}

static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
}

static inline bool shmem_file(struct file *file)
{
	return false;
}

static inline vma_flags_t ksm_vma_flags(struct mm_struct *mm,
		const struct file *file, vma_flags_t vma_flags)
{
	return vma_flags;
}

static inline void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
{
}

static inline int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
	return 0;
}

static inline int do_munmap(struct mm_struct *, unsigned long, size_t,
			    struct list_head *uf)
{
	return 0;
}

/* Currently stubbed but we may later wish to un-stub. */
static inline void vm_acct_memory(long pages);

static inline void mmap_assert_locked(struct mm_struct *mm)
{
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct list_head *unmaps)
{
	return 0;
}

static inline void mmap_write_downgrade(struct mm_struct *mm)
{
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return 0;
}

static inline bool can_modify_mm(struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end)
{
	return true;
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start,
			      unsigned long end)
{
}

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return true;
}

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
}

static inline void mutex_lock(struct mutex *lock)
{
}

static inline void mutex_unlock(struct mutex *lock)
{
}

static inline bool mutex_is_locked(struct mutex *lock)
{
	return true;
}

static inline bool signal_pending(void *p)
{
	return false;
}

static inline bool is_file_hugepages(const struct file *file)
{
	return false;
}

static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	return 0;
}

static inline bool may_expand_vm(struct mm_struct *mm,
				 const vma_flags_t *vma_flags,
				 unsigned long npages)
{
	return true;
}

static inline int shmem_zero_setup(struct vm_area_struct *vma)
{
	return 0;
}

static inline void vm_acct_memory(long pages)
{
}

static inline void vma_interval_tree_insert(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void vma_interval_tree_remove(struct vm_area_struct *vma,
					    struct rb_root_cached *rb)
{
}

static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}

static inline void anon_vma_interval_tree_insert(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void anon_vma_interval_tree_remove(struct anon_vma_chain *avc,
						 struct rb_root_cached *rb)
{
}

static inline void uprobe_mmap(struct vm_area_struct *vma)
{
}

static inline void uprobe_munmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
}

static inline void i_mmap_lock_write(struct address_space *mapping)
{
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
}

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
}

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return false;
}

static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

static inline bool arch_validate_flags(vm_flags_t flags)
{
	return true;
}

static inline void vma_close(struct vm_area_struct *vma)
{
}

static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr, unsigned long len)
{
	return 0;
}

static inline bool capable(int cap)
{
	return true;
}

static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

static inline void might_sleep(void)
{
}

static inline void fput(struct file *file)
{
}

static inline void mpol_put(struct mempolicy *pol)
{
}

static inline void lru_add_drain(void)
{
}

static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
}

static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
}

static inline void mapping_unmap_writable(struct address_space *mapping)
{
}

static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb)
{
}

static inline struct file *get_file(struct file *f)
{
	return f;
}

static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}

static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}

static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
{
	return false;
}
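
/*
 * Illustrative sketch only, not taken from the kernel tree: the hypothetical
 * helper below shows how the no-op stubs above let VMA code paths that would
 * normally take mmap_lock and charge memory run directly in a userland test.
 * The function name is an assumption made purely for demonstration and is not
 * used by the test harness.
 */
static inline int vma_stubs_example_usage(struct mm_struct *mm, long pages)
{
	/* The stubbed lock acquisition always "succeeds" (returns 0). */
	if (mmap_write_lock_killable(mm))
		return -1;

	/* Memory accounting is a no-op under the stubs. */
	vm_acct_memory(pages);

	/* Releasing the stubbed lock is likewise a no-op. */
	mmap_write_unlock(mm);
	return 0;
}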