xref: /linux/mm/mprotect.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

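/*
 * Protection changes are applied with a top-down page table walk:
 * change_protection() iterates over the PGD entries of the range,
 * change_pud_range() and change_pmd_range() descend through the PUD and
 * PMD levels, and change_pte_range() finally rewrites the individual
 * PTEs under the page table lock taken by pte_offset_map_lock().
 */
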
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
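			/* ptep_get_and_clear() fetches and clears the pte
			 * atomically, so a dirty/accessed bit set by another
			 * CPU cannot be lost between reading the old pte and
			 * writing the modified one back.
			 */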
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	unsigned int mask;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
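	/*
	 * Example: making a 1 MiB private, previously read-only anonymous
	 * area writable charges 256 pages (with 4 KiB pages) through
	 * security_vm_enough_memory() and tags the vma with VM_ACCOUNT;
	 * the charge is only returned when the area is unmapped.
	 */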
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/* Don't make the VMA automatically writable if it is shared but the
	 * backer wishes to know when pages are first written to. */
	mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		mask &= ~VM_SHARED;

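	/*
	 * protection_map[] is indexed by the low four flag bits
	 * (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED).  Masking out VM_SHARED
	 * above selects the private (write-protected) entry, so the first
	 * write to a shared page faults and ->page_mkwrite() is called
	 * before the pte is made writable.
	 */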
	newprot = protection_map[newflags & mask];

	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, newprot);
	else
		change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
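	/*
	 * PROT_GROWSDOWN extends the change down to the start of a
	 * stack-like (VM_GROWSDOWN) vma; PROT_GROWSUP extends it up to the
	 * end of a VM_GROWSUP vma.  Anything else is rejected.
	 */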
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

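	/*
	 * Walk the vmas covering the requested range, applying
	 * mprotect_fixup() to each piece in turn; a hole in the range
	 * ends the walk with -ENOMEM.
	 */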
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY* into the position of VM_* */
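		/*
		 * e.g. VM_WRITE is 0x2 and VM_MAYWRITE is 0x20: asking for
		 * PROT_WRITE on a vma whose VM_MAYWRITE is clear leaves the
		 * VM_WRITE bit set in (newflags & ~(newflags >> 4)), and
		 * the request fails with -EACCES.
		 */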
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
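
/*
 * Illustrative userspace usage (not part of this file): a minimal sketch,
 * assuming a 4 KiB page size, of dropping write permission on an anonymous
 * mapping; the mprotect() call reaches sys_mprotect() above, which rewrites
 * the ptes of the affected range.
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		strcpy(p, "hello");
 *		if (mprotect(p, 4096, PROT_READ))
 *			return 1;
 *		return 0;
 *	}
 *
 * After the mprotect() call, a store through p faults with SIGSEGV.
 */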