xref: /linux/arch/s390/mm/mmap.c (revision c8b90d40d5bba8e6fba457b8a7c10d3c0d467e37)
// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/elf.h>

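/*
 * Maximum random offset that may be applied on top of the stack, or
 * zero if address space randomization is disabled for this task.
 */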
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

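/*
 * Use the legacy bottom-up layout if the task requested the compat
 * address space layout, if the expected stack growth is unlimited
 * (RLIM_INFINITY), or if the legacy_va_layout sysctl is set.
 */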
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

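/* Page-aligned random offset that gets mixed into the mmap base. */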
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

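/* Bottom-up layout: mappings start at TASK_UNMAPPED_BASE plus the random offset. */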
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

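/*
 * Top-down layout: place the mmap base below the stack, reserving room
 * for stack growth up to the current stack rlimit plus the maximum
 * stack randomization and the guard gap, clamped to the range between
 * 128 MB and 5/6 of STACK_TOP.
 */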
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

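/*
 * Alignment mask for the unmapped-area search. Hugetlb mappings must
 * be aligned to their huge page size. Beyond that, file-backed and
 * shared mappings get the extra MMAP_ALIGN_MASK alignment only when
 * randomization is enabled; presumably this keeps such mappings on
 * boundaries that are friendlier to the TLB and caches.
 */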
static int get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

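/*
 * Bottom-up search for an unmapped area. MAP_FIXED requests and free
 * hinted addresses skip the search; every path ends in
 * check_asce_limit(), which may extend the number of page table levels
 * if the mapping ends above the current address space limit.
 */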
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

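/*
 * Top-down counterpart of arch_get_unmapped_area(): search below
 * mm->mmap_base first and fall back to a bottom-up search on failure
 * (see the comment further down).
 */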
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}

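/*
 * Mapping of VM_READ/WRITE/EXEC/SHARED combinations to page
 * protections. Private writable mappings start out read-only so that
 * the first write triggers copy-on-write; only shared writable
 * mappings get a writable protection directly.
 * DECLARE_VM_GET_PAGE_PROT generates the generic vm_get_page_prot()
 * implementation backed by this table.
 */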
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT