/* arch/mips/mm/mmap.c (revision d257f9bf06129613de539ea71ecea60848b662cd) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
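
/*
 * Note: PAGE_SIZE - 1 only covers caches with no aliasing. On CPUs
 * whose virtually indexed caches can alias (way size larger than
 * PAGE_SIZE), the cache probing code (e.g. c-r4k.c) is expected to
 * raise shm_align_mask so that all shared mappings of a page get
 * the same cache colour.
 */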

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
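
/*
 * For example (a sketch, not part of this file), either of these
 * forces the legacy bottom-up layout for a process:
 *
 *   setarch mips --addr-compat-layout ./prog   # sets ADDR_COMPAT_LAYOUT
 *   ulimit -s unlimited && ./prog              # stack limit RLIM_INFINITY
 *
 * With an unlimited stack there is no bounded gap to place a
 * top-down mmap base under, so bottom-up is the only safe choice.
 */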

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
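
/*
 * Worked example, assuming a 32-bit TASK_SIZE of 2 GiB: with the
 * common 8 MiB stack limit the gap clamps up to MIN_GAP, giving
 * mmap_base = PAGE_ALIGN(2 GiB - 128 MiB - rnd); with a 1.9 GiB
 * stack limit it clamps down to MAX_GAP = 5/6 of TASK_SIZE, so at
 * least a sixth of the address space stays below mmap_base.
 */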

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
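
/*
 * Worked example: with shm_align_mask = 0x7fff (32 KiB colouring),
 * PAGE_SHIFT = 12, addr = 0x10004000 and pgoff = 3:
 *
 *   round up:   (0x10004000 + 0x7fff) & ~0x7fff = 0x10008000
 *   add colour: (3 << 12) & 0x7fff              = 0x00003000
 *   result:                                       0x1000b000
 *
 * 0x1000b000 - (3 << 12) is 32 KiB aligned, so the returned address
 * indexes the cache exactly as if the file were mapped at offset 0.
 */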

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

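	/*
	 * vm_unmapped_area() returns an address satisfying
	 * (addr & align_mask) == (align_offset & align_mask), i.e. the
	 * same cache-colour constraint that COLOUR_ALIGN() encodes.
	 */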
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

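		/*
		 * Failure is a negative errno cast to unsigned long and
		 * thus never page aligned; a page-aligned value here
		 * means success.
		 */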
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (TASK_IS_32BIT_ADDR)
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_COMPAT */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
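
/*
 * Example: with 4 KiB pages and mmap_rnd_bits = 8 (an illustrative
 * value; the real one is a Kconfig/sysctl choice), the mmap base
 * below is randomized over 2^8 pages, i.e. a 1 MiB window.
 */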

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
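
/*
 * Layout sketch: the legacy layout grows mmaps up from
 * TASK_UNMAPPED_BASE towards the stack, while the default layout
 * places mmap_base just below the stack gap and grows mmaps down
 * towards the heap, leaving the most room for large allocations.
 */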

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long();

	rnd = rnd << PAGE_SHIFT;
	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		rnd = rnd & 0x7ffffful;
	else
		rnd = rnd & 0xffffffful;

	return rnd;
}
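
/*
 * Mask arithmetic: the shift clears the low PAGE_SHIFT bits, so the
 * offset stays page aligned; 0x7fffff = 2^23 - 1 then caps it below
 * 8 MiB and 0xfffffff = 2^28 - 1 below 256 MiB, matching the
 * comment above.
 */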

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

	ret = PAGE_ALIGN(base + brk_rnd());

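	/*
	 * If adding the random offset wrapped past the top of the
	 * address space, fall back to the unrandomized brk.
	 */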
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

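/*
 * Backs the virt_addr_valid() test on MIPS: a kernel virtual address
 * is only valid if the page frame it translates to actually exists.
 */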
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);