xref: /linux/arch/x86/kernel/sys_x86_64.c (revision 37744feebc086908fd89760650f458ab19071750)
// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}
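
/*
 * Background (an assumption based on the AMD setup code, not on this
 * file): va_align is filled in at boot, on F15h parts, by
 * bsp_init_amd() in arch/x86/kernel/cpu/amd.c, which derives the mask
 * from the I$ geometry and draws va_align.bits once from a random
 * value. On every other CPU va_align.flags stays -1, so the first test
 * above makes this function return 0 and the alignment is a no-op.
 */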

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to info.align_offset
 * before calling vm_unmapped_area(), or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
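
/*
 * Worked example for the two helpers above (illustrative values only:
 * the mask and bits are assumptions, matching a part whose boot code
 * computed va_align.mask == 0x7000, i.e. bits [12:15), drew
 * va_align.bits == 0x5000, and runs with PF_RANDOMIZE set):
 *
 *	addr                      == 0x7f0000003000
 *	(addr + 0x7000) & ~0x7000 == 0x7f0000008000	round up to 32K
 *	0x7f0000008000 | 0x5000   == 0x7f000000d000	apply boot color
 *
 * The result stays page-aligned; only bits [12:15) change, and they
 * now carry the per-boot random value instead of zeroes.
 */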

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
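
/*
 * Usage sketch for the handler above: it is wired to the
 * "align_va_addr" kernel command-line option, so for example
 *
 *	align_va_addr=on	align 32-bit and 64-bit mmaps
 *	align_va_addr=64	align 64-bit mmaps only
 *	align_va_addr=off	disable the alignment entirely
 *
 * selects which syscall flavors get the F15h treatment (the option is
 * accepted but ignored on other CPU families, where va_align.flags
 * stays below zero).
 */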

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
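
/*
 * Userspace view of the check above (a sketch; assumes a 4K page
 * size): only off >> PAGE_SHIFT survives into ksys_mmap_pgoff(), so a
 * misaligned file offset is rejected rather than silently truncated:
 *
 *	mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 4096);	// ok
 *	mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 123);	// MAP_FAILED,
 *								// errno == EINVAL
 */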

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/*
		 * This is usually needed to map code in the small
		 * model: it has to live in the first 31 bits, so
		 * limit it to that. This means we need to move the
		 * unmapped base down for this case. This can give
		 * conflicts with the heap, but we assume that glibc
		 * malloc knows how to fall back to mmap. Give it 1GB
		 * of playground for now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE)
			*begin = randomize_page(*begin, 0x02000000);
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
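
/*
 * Example for the MAP_32BIT leg above (illustrative): a 64-bit task
 * calling
 *
 *	mmap(NULL, len, prot, MAP_PRIVATE | MAP_32BIT, fd, 0);
 *
 * searches [0x40000000, 0x80000000), with the lower bound pushed up
 * by as much as 32M (0x02000000) when PF_RANDOMIZE is set; every
 * other request searches between get_mmap_base(1) and the task size
 * limit.
 */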

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
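
/*
 * Note on the alignment fields (an assumption about the
 * vm_unmapped_area() contract, stated here for clarity): where
 * align_mask has bits set, the returned address carries the matching
 * bits of align_offset. Seeding align_offset with pgoff << PAGE_SHIFT
 * keeps file mappings at the same cache color as their file offset,
 * and adding get_align_bits() shifts that coloring by the per-boot
 * random value described at the top of this file.
 */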

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes this a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
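	/*
	 * vm_unmapped_area() returns either a page-aligned address or
	 * a negative errno, which is never page-aligned, so clean low
	 * bits mean success here (the VM_BUG_ON below relies on the
	 * only possible error being -ENOMEM).
	 */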
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}