1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include "shared.h"
4
5
/* Test knob: simulate a failed preallocation; reset to false by cleanup_mm(). */
bool fail_prealloc;

/*
 * Stand-ins for the kernel's sysctl-backed tunables consulted by the VMA
 * code under test; initialized to the kernel's compile-time defaults.
 */
unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

/* Placeholder vm_ops so tests can give a VMA a non-NULL ->vm_ops. */
const struct vm_operations_struct vma_dummy_vm_ops;
/* Shared fake anon_vma; its flags are cleared via reset_dummy_anon_vma(). */
struct anon_vma dummy_anon_vma;
/* Fake task returned by the get_current() stub below. */
struct task_struct __current;
14
/*
 * Allocate a detached VMA spanning [start, end) with the given flags and
 * page offset.  Returns NULL if allocation fails.  The result is asserted
 * to be detached - callers attach it explicitly if needed.
 */
struct vm_area_struct *alloc_vma(struct mm_struct *mm,
				 unsigned long start, unsigned long end,
				 pgoff_t pgoff, vm_flags_t vm_flags)
{
	struct vm_area_struct *ret = vm_area_alloc(mm);

	if (!ret)
		return NULL;

	ret->vm_start = start;
	ret->vm_end = end;
	ret->vm_pgoff = pgoff;
	vm_flags_reset(ret, vm_flags);
	/* A freshly allocated VMA must not yet be attached to any tree. */
	vma_assert_detached(ret);

	return ret;
}
32
/*
 * Mark @vma detached and release it.  The detach must precede the free so
 * vm_area_free()'s sanity checks see a detached VMA.
 */
void detach_free_vma(struct vm_area_struct *vma)
{
	vma_mark_detached(vma);
	vm_area_free(vma);
}
38
/*
 * Allocate a VMA over [start, end) and insert it into @mm's VMA tree.
 * Returns the attached VMA, or NULL on allocation/insertion failure (any
 * allocated VMA is freed on the failure path).
 */
struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;

	vma = alloc_vma(mm, start, end, pgoff, vm_flags);
	if (!vma)
		return NULL;

	if (attach_vma(mm, vma)) {
		detach_free_vma(vma);
		return NULL;
	}

	/*
	 * Attaching to the tree bumps the write-tracking sequence count;
	 * park it back at the sentinel so vma_write_started() does not
	 * report a false positive for this VMA.
	 */
	vma->vm_lock_seq = UINT_MAX;

	return vma;
}
62
reset_dummy_anon_vma(void)63 void reset_dummy_anon_vma(void)
64 {
65 dummy_anon_vma.was_cloned = false;
66 dummy_anon_vma.was_unlinked = false;
67 }
68
cleanup_mm(struct mm_struct * mm,struct vma_iterator * vmi)69 int cleanup_mm(struct mm_struct *mm, struct vma_iterator *vmi)
70 {
71 struct vm_area_struct *vma;
72 int count = 0;
73
74 fail_prealloc = false;
75 reset_dummy_anon_vma();
76
77 vma_iter_set(vmi, 0);
78 for_each_vma(*vmi, vma) {
79 detach_free_vma(vma);
80 count++;
81 }
82
83 mtree_destroy(&mm->mm_mt);
84 mm->map_count = 0;
85 return count;
86 }
87
vma_write_started(struct vm_area_struct * vma)88 bool vma_write_started(struct vm_area_struct *vma)
89 {
90 int seq = vma->vm_lock_seq;
91
92 /* We reset after each check. */
93 vma->vm_lock_seq = UINT_MAX;
94
95 /* The vma_start_write() stub simply increments this value. */
96 return seq > -1;
97 }
98
__vma_set_dummy_anon_vma(struct vm_area_struct * vma,struct anon_vma_chain * avc,struct anon_vma * anon_vma)99 void __vma_set_dummy_anon_vma(struct vm_area_struct *vma,
100 struct anon_vma_chain *avc, struct anon_vma *anon_vma)
101 {
102 vma->anon_vma = anon_vma;
103 INIT_LIST_HEAD(&vma->anon_vma_chain);
104 list_add(&avc->same_vma, &vma->anon_vma_chain);
105 avc->anon_vma = vma->anon_vma;
106 }
107
/* Convenience wrapper: link @vma to the shared dummy anon_vma via @avc. */
void vma_set_dummy_anon_vma(struct vm_area_struct *vma,
		struct anon_vma_chain *avc)
{
	__vma_set_dummy_anon_vma(vma, avc, &dummy_anon_vma);
}
113
/* Stub for the kernel's current-task accessor: always the fake __current. */
struct task_struct *get_current(void)
{
	return &__current;
}
118
/*
 * Stub resource-limit lookup: every limit is reported as the maximum
 * unsigned long, i.e. effectively unlimited.  @limit is ignored.
 */
unsigned long rlimit(unsigned int limit)
{
	return ~0UL;
}
123
/*
 * Directly set @vma's address range to [start, end) and its page offset,
 * with no tree or locking side effects.
 */
void vma_set_range(struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   pgoff_t pgoff)
{
	vma->vm_pgoff = pgoff;
	vma->vm_start = start;
	vma->vm_end = end;
}
132