xref: /linux/mm/mprotect.c (revision 91325f31afc1026de28665cf1a7b6e157fa4d39d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  mm/mprotect.c
4  *
5  *  (C) Copyright 1994 Linus Torvalds
6  *  (C) Copyright 2002 Christoph Hellwig
7  *
8  *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
9  *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
10  */
11 
12 #include <linux/pagewalk.h>
13 #include <linux/hugetlb.h>
14 #include <linux/shm.h>
15 #include <linux/mman.h>
16 #include <linux/fs.h>
17 #include <linux/highmem.h>
18 #include <linux/security.h>
19 #include <linux/mempolicy.h>
20 #include <linux/personality.h>
21 #include <linux/syscalls.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/mmu_notifier.h>
25 #include <linux/migrate.h>
26 #include <linux/perf_event.h>
27 #include <linux/pkeys.h>
28 #include <linux/ksm.h>
29 #include <linux/uaccess.h>
30 #include <linux/mm_inline.h>
31 #include <linux/pgtable.h>
32 #include <linux/sched/sysctl.h>
33 #include <linux/userfaultfd_k.h>
34 #include <linux/memory-tiers.h>
35 #include <uapi/linux/mman.h>
36 #include <asm/cacheflush.h>
37 #include <asm/mmu_context.h>
38 #include <asm/tlbflush.h>
39 #include <asm/tlb.h>
40 
41 #include "internal.h"
42 
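/*
 * Checks common to the shared and private cases for whether a pte may be
 * mapped writable without a write fault: the VMA must be writable, the pte
 * must be readable, and neither soft-dirty nor uffd-wp tracking may still
 * require a write fault.
 */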
43 static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
44 {
45 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
46 		return false;
47 
48 	/* Don't touch entries that are not even readable. */
49 	if (pte_protnone(pte))
50 		return false;
51 
52 	/* Do we need write faults for softdirty tracking? */
53 	if (pte_needs_soft_dirty_wp(vma, pte))
54 		return false;
55 
56 	/* Do we need write faults for uffd-wp tracking? */
57 	if (userfaultfd_pte_wp(vma, pte))
58 		return false;
59 
60 	return true;
61 }
62 
63 static bool can_change_private_pte_writable(struct vm_area_struct *vma,
64 					    unsigned long addr, pte_t pte)
65 {
66 	struct page *page;
67 
68 	if (!maybe_change_pte_writable(vma, pte))
69 		return false;
70 
71 	/*
72 	 * Writable MAP_PRIVATE mapping: We can only special-case on
73 	 * exclusive anonymous pages, because we know that our
74 	 * write-fault handler similarly would map them writable without
75 	 * any additional checks while holding the PT lock.
76 	 */
77 	page = vm_normal_page(vma, addr, pte);
78 	return page && PageAnon(page) && PageAnonExclusive(page);
79 }
80 
81 static bool can_change_shared_pte_writable(struct vm_area_struct *vma,
82 					   pte_t pte)
83 {
84 	if (!maybe_change_pte_writable(vma, pte))
85 		return false;
86 
87 	VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte));
88 
89 	/*
90 	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
91 	 * needs a real write-fault for writenotify
92 	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
93 	 * FS was already notified and we can simply mark the PTE writable
94 	 * just like the write-fault handler would do.
95 	 */
96 	return pte_dirty(pte);
97 }
98 
99 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
100 			     pte_t pte)
101 {
102 	if (!(vma->vm_flags & VM_SHARED))
103 		return can_change_private_pte_writable(vma, addr, pte);
104 
105 	return can_change_shared_pte_writable(vma, pte);
106 }
107 
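/*
 * Return the number of consecutive ptes (up to max_nr_ptes) that map
 * consecutive pages of the same large folio; returns 1 when there is no
 * underlying folio or the folio is small.
 */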
108 static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
109 				    pte_t pte, int max_nr_ptes, fpb_t flags)
110 {
111 	/* No underlying folio, so cannot batch */
112 	if (!folio)
113 		return 1;
114 
115 	if (!folio_test_large(folio))
116 		return 1;
117 
118 	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags);
119 }
120 
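/*
 * Return true if this pte should be skipped when applying NUMA hinting
 * protection (MM_CP_PROT_NUMA), e.g. for ptes that are already PROT_NONE,
 * zone-device/KSM folios, pinned or shared COW folios, dirty file folios,
 * or folios already on the target node.
 */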
121 static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
122 			   pte_t oldpte, pte_t *pte, int target_node,
123 			   struct folio *folio)
124 {
125 	bool ret = true;
126 	bool toptier;
127 	int nid;
128 
129 	/* Avoid TLB flush if possible */
130 	if (pte_protnone(oldpte))
131 		goto skip;
132 
133 	if (!folio)
134 		goto skip;
135 
136 	if (folio_is_zone_device(folio) || folio_test_ksm(folio))
137 		goto skip;
138 
139 	/* Also skip shared copy-on-write pages */
140 	if (is_cow_mapping(vma->vm_flags) &&
141 	    (folio_maybe_dma_pinned(folio) || folio_maybe_mapped_shared(folio)))
142 		goto skip;
143 
144 	/*
145 	 * While migration can move some dirty pages,
146 	 * it cannot move them all from MIGRATE_ASYNC
147 	 * context.
148 	 */
149 	if (folio_is_file_lru(folio) && folio_test_dirty(folio))
150 		goto skip;
151 
152 	/*
153 	 * Don't mess with PTEs if the page is already on the node
154 	 * a single-threaded process is running on.
155 	 */
156 	nid = folio_nid(folio);
157 	if (target_node == nid)
158 		goto skip;
159 
160 	toptier = node_is_toptier(nid);
161 
162 	/*
163 	 * Skip scanning the top tier node if normal numa
164 	 * balancing is disabled
165 	 */
166 	if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier)
167 		goto skip;
168 
169 	ret = false;
170 	if (folio_use_access_time(folio))
171 		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
172 
173 skip:
174 	return ret;
175 }
176 
177 /* Set nr_ptes ptes, starting from index idx within the batch */
178 static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr,
179 		pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes,
180 		int idx, bool set_write, struct mmu_gather *tlb)
181 {
182 	/*
183 	 * Advance the position in the batch by idx; note that if idx > 0,
184 	 * then the nr_ptes passed here is <= batch size - idx.
185 	 */
186 	addr += idx * PAGE_SIZE;
187 	ptep += idx;
188 	oldpte = pte_advance_pfn(oldpte, idx);
189 	ptent = pte_advance_pfn(ptent, idx);
190 
191 	if (set_write)
192 		ptent = pte_mkwrite(ptent, vma);
193 
194 	modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes);
195 	if (pte_needs_flush(oldpte, ptent))
196 		tlb_flush_pte_range(tlb, addr, nr_ptes * PAGE_SIZE);
197 }
198 
199 /*
200  * Get max length of consecutive ptes pointing to PageAnonExclusive() pages or
201  * !PageAnonExclusive() pages, starting from start_idx. Caller must enforce
202  * that the ptes point to consecutive pages of the same anon large folio.
203  */
204 static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
205 		struct page *first_page, bool expected_anon_exclusive)
206 {
207 	int idx;
208 
209 	for (idx = start_idx + 1; idx < start_idx + max_len; ++idx) {
210 		if (expected_anon_exclusive != PageAnonExclusive(first_page + idx))
211 			break;
212 	}
213 	return idx - start_idx;
214 }
215 
216 /*
217  * This function is a result of trying our very best to retain the
218  * "avoid the write-fault handler" optimization. In can_change_pte_writable(),
219  * if the vma is a private vma, and we cannot determine whether to change
220  * the pte to writable just from the vma and the pte, we then need to look
221  * at the actual page pointed to by the pte. Unfortunately, if we have a
222  * batch of ptes pointing to consecutive pages of the same anon large folio,
223  * the anon-exclusivity (or the negation) of the first page does not guarantee
224  * the anon-exclusivity (or the negation) of the other pages corresponding to
225  * the pte batch; hence in this case it is incorrect to decide to change or
226  * not change the ptes to writable just by using information from the first
227  * pte of the batch. Therefore, we must individually check all pages and
228  * retrieve sub-batches.
229  */
230 static void commit_anon_folio_batch(struct vm_area_struct *vma,
231 		struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
232 		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
233 {
234 	bool expected_anon_exclusive;
235 	int sub_batch_idx = 0;
236 	int len;
237 
238 	while (nr_ptes) {
239 		expected_anon_exclusive = PageAnonExclusive(first_page + sub_batch_idx);
240 		len = page_anon_exclusive_sub_batch(sub_batch_idx, nr_ptes,
241 					first_page, expected_anon_exclusive);
242 		prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len,
243 				       sub_batch_idx, expected_anon_exclusive, tlb);
244 		sub_batch_idx += len;
245 		nr_ptes -= len;
246 	}
247 }
248 
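/*
 * Decide whether the pte batch can also be mapped writable and commit it.
 * Private anon batches are handed to commit_anon_folio_batch(), as
 * anon-exclusivity must be checked page by page.
 */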
249 static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
250 		struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
251 		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
252 {
253 	bool set_write;
254 
255 	if (vma->vm_flags & VM_SHARED) {
256 		set_write = can_change_shared_pte_writable(vma, ptent);
257 		prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
258 				       /* idx = */ 0, set_write, tlb);
259 		return;
260 	}
261 
262 	set_write = maybe_change_pte_writable(vma, ptent) &&
263 		    (folio && folio_test_anon(folio));
264 	if (!set_write) {
265 		prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes,
266 				       /* idx = */ 0, set_write, tlb);
267 		return;
268 	}
269 	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
270 }
271 
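/*
 * Change the protection of all ptes mapped by one pmd in [addr, end).
 * Returns the number of ptes updated, or -EAGAIN if the pte table could
 * not be mapped and the caller should retry.
 */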
272 static long change_pte_range(struct mmu_gather *tlb,
273 		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
274 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
275 {
276 	pte_t *pte, oldpte;
277 	spinlock_t *ptl;
278 	long pages = 0;
279 	int target_node = NUMA_NO_NODE;
280 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
281 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
282 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
283 	int nr_ptes;
284 
285 	tlb_change_page_size(tlb, PAGE_SIZE);
286 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
287 	if (!pte)
288 		return -EAGAIN;
289 
290 	/* Get target node for single threaded private VMAs */
291 	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
292 	    atomic_read(&vma->vm_mm->mm_users) == 1)
293 		target_node = numa_node_id();
294 
295 	flush_tlb_batched_pending(vma->vm_mm);
296 	arch_enter_lazy_mmu_mode();
297 	do {
298 		nr_ptes = 1;
299 		oldpte = ptep_get(pte);
300 		if (pte_present(oldpte)) {
301 			const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
302 			int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
303 			struct folio *folio = NULL;
304 			struct page *page;
305 			pte_t ptent;
306 
307 			page = vm_normal_page(vma, addr, oldpte);
308 			if (page)
309 				folio = page_folio(page);
310 			/*
311 			 * Avoid trapping faults against the zero or KSM
312 			 * pages. See similar comment in change_huge_pmd.
313 			 */
314 			if (prot_numa) {
315 				int ret = prot_numa_skip(vma, addr, oldpte, pte,
316 							 target_node, folio);
317 				if (ret) {
318 
319 					/* determine batch to skip */
320 					nr_ptes = mprotect_folio_pte_batch(folio,
321 						  pte, oldpte, max_nr_ptes, /* flags = */ 0);
322 					continue;
323 				}
324 			}
325 
326 			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
327 
328 			oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
329 			ptent = pte_modify(oldpte, newprot);
330 
331 			if (uffd_wp)
332 				ptent = pte_mkuffd_wp(ptent);
333 			else if (uffd_wp_resolve)
334 				ptent = pte_clear_uffd_wp(ptent);
335 
336 			/*
337 			 * In some writable, shared mappings, we might want
338 			 * to catch actual write access -- see
339 			 * vma_wants_writenotify().
340 			 *
341 			 * In all writable, private mappings, we have to
342 			 * properly handle COW.
343 			 *
344 			 * In both cases, we can sometimes still change PTEs
345 			 * writable and avoid the write-fault handler, for
346 			 * example, if a PTE is already dirty and no other
347 			 * COW or special handling is required.
348 			 */
349 			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
350 			     !pte_write(ptent))
351 				set_write_prot_commit_flush_ptes(vma, folio, page,
352 				addr, pte, oldpte, ptent, nr_ptes, tlb);
353 			else
354 				prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
355 					nr_ptes, /* idx = */ 0, /* set_write = */ false, tlb);
356 			pages += nr_ptes;
357 		} else if (is_swap_pte(oldpte)) {
358 			swp_entry_t entry = pte_to_swp_entry(oldpte);
359 			pte_t newpte;
360 
361 			if (is_writable_migration_entry(entry)) {
362 				struct folio *folio = pfn_swap_entry_folio(entry);
363 
364 				/*
365 				 * A protection check is difficult so
366 				 * just be safe and disable write
367 				 */
368 				if (folio_test_anon(folio))
369 					entry = make_readable_exclusive_migration_entry(
370 							     swp_offset(entry));
371 				else
372 					entry = make_readable_migration_entry(swp_offset(entry));
373 				newpte = swp_entry_to_pte(entry);
374 				if (pte_swp_soft_dirty(oldpte))
375 					newpte = pte_swp_mksoft_dirty(newpte);
376 			} else if (is_writable_device_private_entry(entry)) {
377 				/*
378 				 * We do not preserve soft-dirtiness. See
379 				 * copy_nonpresent_pte() for explanation.
380 				 */
381 				entry = make_readable_device_private_entry(
382 							swp_offset(entry));
383 				newpte = swp_entry_to_pte(entry);
384 				if (pte_swp_uffd_wp(oldpte))
385 					newpte = pte_swp_mkuffd_wp(newpte);
386 			} else if (is_pte_marker_entry(entry)) {
387 				/*
388 				 * Ignore error swap entries unconditionally,
389 				 * because any access should sigbus/sigsegv
390 				 * anyway.
391 				 */
392 				if (is_poisoned_swp_entry(entry) ||
393 				    is_guard_swp_entry(entry))
394 					continue;
395 				/*
396 				 * If this is a uffd-wp pte marker and we'd like
397 				 * to unprotect it, drop it; the next page
398 				 * fault will trigger without uffd trapping.
399 				 */
400 				if (uffd_wp_resolve) {
401 					pte_clear(vma->vm_mm, addr, pte);
402 					pages++;
403 				}
404 				continue;
405 			} else {
406 				newpte = oldpte;
407 			}
408 
409 			if (uffd_wp)
410 				newpte = pte_swp_mkuffd_wp(newpte);
411 			else if (uffd_wp_resolve)
412 				newpte = pte_swp_clear_uffd_wp(newpte);
413 
414 			if (!pte_same(oldpte, newpte)) {
415 				set_pte_at(vma->vm_mm, addr, pte, newpte);
416 				pages++;
417 			}
418 		} else {
419 			/* It must be a none pte, or what else?.. */
420 			WARN_ON_ONCE(!pte_none(oldpte));
421 
422 			/*
423 			 * Nobody plays with any none ptes besides
424 			 * userfaultfd when applying the protections.
425 			 */
426 			if (likely(!uffd_wp))
427 				continue;
428 
429 			if (userfaultfd_wp_use_markers(vma)) {
430 				/*
431 				 * For file-backed mem, we need to be able to
432 				 * wr-protect a none pte, because even if the
433 				 * pte is none, the page/swap cache could
434 				 * exist.  Do that by installing a marker.
435 				 */
436 				set_pte_at(vma->vm_mm, addr, pte,
437 					   make_pte_marker(PTE_MARKER_UFFD_WP));
438 				pages++;
439 			}
440 		}
441 	} while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
442 	arch_leave_lazy_mmu_mode();
443 	pte_unmap_unlock(pte - 1, ptl);
444 
445 	return pages;
446 }
447 
448 /*
449  * Return true if we want to split THPs into PTE mappings in the change
450  * protection procedure, false otherwise.
451  */
452 static inline bool
453 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
454 {
455 	/*
456 	 * pte markers only reside at the pte level; if we need pte markers,
457 	 * we need to split.  For example, we cannot wr-protect a file thp
458 	 * (e.g. 2M shmem) because, so far, a file thp is handled differently
459 	 * when split: the pmd is simply erased.
460 	 */
461 	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
462 }
463 
464 /*
465  * Return true if we want to populate pgtables in the change protection
466  * procedure, false otherwise.
467  */
468 static inline bool
469 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
470 {
471 	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
472 	if (!(cp_flags & MM_CP_UFFD_WP))
473 		return false;
474 
475 	/* Populate if the userfaultfd mode requires pte markers */
476 	return userfaultfd_wp_use_markers(vma);
477 }
478 
479 /*
480  * Populate the pgtable underneath for whatever reason if requested.
481  * When {pte|pmd|...}_alloc() fails we treat it the same way as pgtable
482  * allocation failures during page faults: kick OOM and return an
483  * error.
484  */
485 #define  change_pmd_prepare(vma, pmd, cp_flags)				\
486 	({								\
487 		long err = 0;						\
488 		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
489 			if (pte_alloc(vma->vm_mm, pmd))			\
490 				err = -ENOMEM;				\
491 		}							\
492 		err;							\
493 	})
494 
495 /*
496  * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need a
497  * separate change_pmd_prepare() because pte_alloc() returns 0 on success,
498  * while {pmd|pud|p4d}_alloc() returns a valid pointer on success.
499  */
500 #define  change_prepare(vma, high, low, addr, cp_flags)			\
501 	  ({								\
502 		long err = 0;						\
503 		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
504 			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
505 			if (p == NULL)					\
506 				err = -ENOMEM;				\
507 		}							\
508 		err;							\
509 	})
510 
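/*
 * Handle one pud's worth of the range: prepare the pmd if pte markers are
 * needed, split or change huge pmds, and fall back to change_pte_range()
 * for pte-mapped ranges.
 */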
511 static inline long change_pmd_range(struct mmu_gather *tlb,
512 		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
513 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
514 {
515 	pmd_t *pmd;
516 	unsigned long next;
517 	long pages = 0;
518 	unsigned long nr_huge_updates = 0;
519 
520 	pmd = pmd_offset(pud, addr);
521 	do {
522 		long ret;
523 		pmd_t _pmd;
524 again:
525 		next = pmd_addr_end(addr, end);
526 
527 		ret = change_pmd_prepare(vma, pmd, cp_flags);
528 		if (ret) {
529 			pages = ret;
530 			break;
531 		}
532 
533 		if (pmd_none(*pmd))
534 			goto next;
535 
536 		_pmd = pmdp_get_lockless(pmd);
537 		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd)) {
538 			if ((next - addr != HPAGE_PMD_SIZE) ||
539 			    pgtable_split_needed(vma, cp_flags)) {
540 				__split_huge_pmd(vma, pmd, addr, false);
541 				/*
542 				 * For file-backed, the pmd could have been
543 				 * cleared; make sure the pmd is populated if
544 				 * necessary, then fall-through to pte level.
545 				 */
546 				ret = change_pmd_prepare(vma, pmd, cp_flags);
547 				if (ret) {
548 					pages = ret;
549 					break;
550 				}
551 			} else {
552 				ret = change_huge_pmd(tlb, vma, pmd,
553 						addr, newprot, cp_flags);
554 				if (ret) {
555 					if (ret == HPAGE_PMD_NR) {
556 						pages += HPAGE_PMD_NR;
557 						nr_huge_updates++;
558 					}
559 
560 					/* huge pmd was handled */
561 					goto next;
562 				}
563 			}
564 			/* fall through, the trans huge pmd was just split */
565 		}
566 
567 		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
568 				       cp_flags);
569 		if (ret < 0)
570 			goto again;
571 		pages += ret;
572 next:
573 		cond_resched();
574 	} while (pmd++, addr = next, addr != end);
575 
576 	if (nr_huge_updates)
577 		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
578 	return pages;
579 }
580 
581 static inline long change_pud_range(struct mmu_gather *tlb,
582 		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
583 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
584 {
585 	struct mmu_notifier_range range;
586 	pud_t *pudp, pud;
587 	unsigned long next;
588 	long pages = 0, ret;
589 
590 	range.start = 0;
591 
592 	pudp = pud_offset(p4d, addr);
593 	do {
594 again:
595 		next = pud_addr_end(addr, end);
596 		ret = change_prepare(vma, pudp, pmd, addr, cp_flags);
597 		if (ret) {
598 			pages = ret;
599 			break;
600 		}
601 
602 		pud = READ_ONCE(*pudp);
603 		if (pud_none(pud))
604 			continue;
605 
606 		if (!range.start) {
607 			mmu_notifier_range_init(&range,
608 						MMU_NOTIFY_PROTECTION_VMA, 0,
609 						vma->vm_mm, addr, end);
610 			mmu_notifier_invalidate_range_start(&range);
611 		}
612 
613 		if (pud_leaf(pud)) {
614 			if ((next - addr != PUD_SIZE) ||
615 			    pgtable_split_needed(vma, cp_flags)) {
616 				__split_huge_pud(vma, pudp, addr);
617 				goto again;
618 			} else {
619 				ret = change_huge_pud(tlb, vma, pudp,
620 						      addr, newprot, cp_flags);
621 				if (ret == 0)
622 					goto again;
623 				/* huge pud was handled */
624 				if (ret == HPAGE_PUD_NR)
625 					pages += HPAGE_PUD_NR;
626 				continue;
627 			}
628 		}
629 
630 		pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot,
631 					  cp_flags);
632 	} while (pudp++, addr = next, addr != end);
633 
634 	if (range.start)
635 		mmu_notifier_invalidate_range_end(&range);
636 
637 	return pages;
638 }
639 
640 static inline long change_p4d_range(struct mmu_gather *tlb,
641 		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
642 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
643 {
644 	p4d_t *p4d;
645 	unsigned long next;
646 	long pages = 0, ret;
647 
648 	p4d = p4d_offset(pgd, addr);
649 	do {
650 		next = p4d_addr_end(addr, end);
651 		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
652 		if (ret)
653 			return ret;
654 		if (p4d_none_or_clear_bad(p4d))
655 			continue;
656 		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
657 					  cp_flags);
658 	} while (p4d++, addr = next, addr != end);
659 
660 	return pages;
661 }
662 
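/*
 * Walk the page tables of one VMA over [addr, end) and apply newprot.
 * Returns the number of ptes updated, or a negative error if page table
 * population failed.
 */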
663 static long change_protection_range(struct mmu_gather *tlb,
664 		struct vm_area_struct *vma, unsigned long addr,
665 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
666 {
667 	struct mm_struct *mm = vma->vm_mm;
668 	pgd_t *pgd;
669 	unsigned long next;
670 	long pages = 0, ret;
671 
672 	BUG_ON(addr >= end);
673 	pgd = pgd_offset(mm, addr);
674 	tlb_start_vma(tlb, vma);
675 	do {
676 		next = pgd_addr_end(addr, end);
677 		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
678 		if (ret) {
679 			pages = ret;
680 			break;
681 		}
682 		if (pgd_none_or_clear_bad(pgd))
683 			continue;
684 		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
685 					  cp_flags);
686 	} while (pgd++, addr = next, addr != end);
687 
688 	tlb_end_vma(tlb, vma);
689 
690 	return pages;
691 }
692 
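/*
 * Entry point shared by mprotect, uffd-wp and NUMA hinting: applies
 * vma->vm_page_prot (or PAGE_NONE for MM_CP_PROT_NUMA) over [start, end)
 * and returns the number of ptes updated.
 */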
693 long change_protection(struct mmu_gather *tlb,
694 		       struct vm_area_struct *vma, unsigned long start,
695 		       unsigned long end, unsigned long cp_flags)
696 {
697 	pgprot_t newprot = vma->vm_page_prot;
698 	long pages;
699 
700 	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
701 
702 #ifdef CONFIG_NUMA_BALANCING
703 	/*
704 	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
705 	 * are expected to reflect their requirements via VMA flags such that
706 	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
707 	 */
708 	if (cp_flags & MM_CP_PROT_NUMA)
709 		newprot = PAGE_NONE;
710 #else
711 	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
712 #endif
713 
714 	if (is_vm_hugetlb_page(vma))
715 		pages = hugetlb_change_protection(vma, start, end, newprot,
716 						  cp_flags);
717 	else
718 		pages = change_protection_range(tlb, vma, start, end, newprot,
719 						cp_flags);
720 
721 	return pages;
722 }
723 
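/*
 * Page-walk callbacks for the PROT_NONE pfn check in mprotect_fixup():
 * fail with -EACCES if any mapped pfn cannot be mapped with the new,
 * access-less protection.
 */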
724 static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
725 			       unsigned long next, struct mm_walk *walk)
726 {
727 	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
728 				  *(pgprot_t *)(walk->private)) ?
729 		0 : -EACCES;
730 }
731 
732 static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
733 				   unsigned long addr, unsigned long next,
734 				   struct mm_walk *walk)
735 {
736 	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
737 				  *(pgprot_t *)(walk->private)) ?
738 		0 : -EACCES;
739 }
740 
741 static int prot_none_test(unsigned long addr, unsigned long next,
742 			  struct mm_walk *walk)
743 {
744 	return 0;
745 }
746 
747 static const struct mm_walk_ops prot_none_walk_ops = {
748 	.pte_entry		= prot_none_pte_entry,
749 	.hugetlb_entry		= prot_none_hugetlb_entry,
750 	.test_walk		= prot_none_test,
751 	.walk_lock		= PGWALK_WRLOCK,
752 };
753 
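/*
 * Apply newflags to the VMA over [start, end): split or merge the VMA as
 * needed, update memory accounting and vma->vm_page_prot, and change the
 * page table protections for the range.
 */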
754 int
755 mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
756 	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
757 	       unsigned long start, unsigned long end, vm_flags_t newflags)
758 {
759 	struct mm_struct *mm = vma->vm_mm;
760 	vm_flags_t oldflags = READ_ONCE(vma->vm_flags);
761 	long nrpages = (end - start) >> PAGE_SHIFT;
762 	unsigned int mm_cp_flags = 0;
763 	unsigned long charged = 0;
764 	int error;
765 
766 	if (vma_is_sealed(vma))
767 		return -EPERM;
768 
769 	if (newflags == oldflags) {
770 		*pprev = vma;
771 		return 0;
772 	}
773 
774 	/*
775 	 * Do PROT_NONE PFN permission checks here when we can still
776 	 * bail out without undoing a lot of state. This is a rather
777 	 * uncommon case, so doesn't need to be very optimized.
778 	 */
779 	if (arch_has_pfn_modify_check() &&
780 	    (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) &&
781 	    (newflags & VM_ACCESS_FLAGS) == 0) {
782 		pgprot_t new_pgprot = vm_get_page_prot(newflags);
783 
784 		error = walk_page_range(current->mm, start, end,
785 				&prot_none_walk_ops, &new_pgprot);
786 		if (error)
787 			return error;
788 	}
789 
790 	/*
791 	 * If we make a private mapping writable we increase our commit;
792 	 * but (without finer accounting) cannot reduce our commit if we
793 	 * make it unwritable again, except in the anonymous case where no
794 	 * anon_vma has yet been assigned.
795 	 *
796  * hugetlb mappings were accounted for even if read-only, so there is
797 	 * no need to account for them here.
798 	 */
799 	if (newflags & VM_WRITE) {
800 		/* Check space limits when area turns into data. */
801 		if (!may_expand_vm(mm, newflags, nrpages) &&
802 				may_expand_vm(mm, oldflags, nrpages))
803 			return -ENOMEM;
804 		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
805 						VM_SHARED|VM_NORESERVE))) {
806 			charged = nrpages;
807 			if (security_vm_enough_memory_mm(mm, charged))
808 				return -ENOMEM;
809 			newflags |= VM_ACCOUNT;
810 		}
811 	} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
812 		   !vma->anon_vma) {
813 		newflags &= ~VM_ACCOUNT;
814 	}
815 
816 	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
817 	if (IS_ERR(vma)) {
818 		error = PTR_ERR(vma);
819 		goto fail;
820 	}
821 
822 	*pprev = vma;
823 
824 	/*
825 	 * vm_flags and vm_page_prot are protected by the mmap_lock
826 	 * held in write mode.
827 	 */
828 	vma_start_write(vma);
829 	vm_flags_reset_once(vma, newflags);
830 	if (vma_wants_manual_pte_write_upgrade(vma))
831 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
832 	vma_set_page_prot(vma);
833 
834 	change_protection(tlb, vma, start, end, mm_cp_flags);
835 
836 	if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
837 		vm_unacct_memory(nrpages);
838 
839 	/*
840 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
841 	 * fault on access.
842 	 */
843 	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
844 			(newflags & VM_WRITE)) {
845 		populate_vma_page_range(vma, start, end, NULL);
846 	}
847 
848 	vm_stat_account(mm, oldflags, -nrpages);
849 	vm_stat_account(mm, newflags, nrpages);
850 	perf_event_mmap(vma);
851 	return 0;
852 
853 fail:
854 	vm_unacct_memory(charged);
855 	return error;
856 }
857 
858 /*
859  * pkey==-1 when doing a legacy mprotect()
860  */
861 static int do_mprotect_pkey(unsigned long start, size_t len,
862 		unsigned long prot, int pkey)
863 {
864 	unsigned long nstart, end, tmp, reqprot;
865 	struct vm_area_struct *vma, *prev;
866 	int error;
867 	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
868 	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
869 				(prot & PROT_READ);
870 	struct mmu_gather tlb;
871 	struct vma_iterator vmi;
872 
873 	start = untagged_addr(start);
874 
875 	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
876 	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
877 		return -EINVAL;
878 
879 	if (start & ~PAGE_MASK)
880 		return -EINVAL;
881 	if (!len)
882 		return 0;
883 	len = PAGE_ALIGN(len);
884 	end = start + len;
885 	if (end <= start)
886 		return -ENOMEM;
887 	if (!arch_validate_prot(prot, start))
888 		return -EINVAL;
889 
890 	reqprot = prot;
891 
892 	if (mmap_write_lock_killable(current->mm))
893 		return -EINTR;
894 
895 	/*
896 	 * If userspace did not allocate the pkey, do not let
897 	 * them use it here.
898 	 */
899 	error = -EINVAL;
900 	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
901 		goto out;
902 
903 	vma_iter_init(&vmi, current->mm, start);
904 	vma = vma_find(&vmi, end);
905 	error = -ENOMEM;
906 	if (!vma)
907 		goto out;
908 
909 	if (unlikely(grows & PROT_GROWSDOWN)) {
910 		if (vma->vm_start >= end)
911 			goto out;
912 		start = vma->vm_start;
913 		error = -EINVAL;
914 		if (!(vma->vm_flags & VM_GROWSDOWN))
915 			goto out;
916 	} else {
917 		if (vma->vm_start > start)
918 			goto out;
919 		if (unlikely(grows & PROT_GROWSUP)) {
920 			end = vma->vm_end;
921 			error = -EINVAL;
922 			if (!(vma->vm_flags & VM_GROWSUP))
923 				goto out;
924 		}
925 	}
926 
927 	prev = vma_prev(&vmi);
928 	if (start > vma->vm_start)
929 		prev = vma;
930 
931 	tlb_gather_mmu(&tlb, current->mm);
932 	nstart = start;
933 	tmp = vma->vm_start;
934 	for_each_vma_range(vmi, vma, end) {
935 		vm_flags_t mask_off_old_flags;
936 		vm_flags_t newflags;
937 		int new_vma_pkey;
938 
939 		if (vma->vm_start != tmp) {
940 			error = -ENOMEM;
941 			break;
942 		}
943 
944 		/* Does the application expect PROT_READ to imply PROT_EXEC? */
945 		if (rier && (vma->vm_flags & VM_MAYEXEC))
946 			prot |= PROT_EXEC;
947 
948 		/*
949 		 * Each mprotect() call explicitly passes r/w/x permissions.
950 		 * If a permission is not passed to mprotect(), it must be
951 		 * cleared from the VMA.
952 		 */
953 		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;
954 
955 		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
956 		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
957 		newflags |= (vma->vm_flags & ~mask_off_old_flags);
958 
959 		/* newflags >> 4 shifts the VM_MAY* bits into the VM_* positions */
960 		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
961 			error = -EACCES;
962 			break;
963 		}
964 
965 		if (map_deny_write_exec(vma->vm_flags, newflags)) {
966 			error = -EACCES;
967 			break;
968 		}
969 
970 		/* Allow architectures to sanity-check the new flags */
971 		if (!arch_validate_flags(newflags)) {
972 			error = -EINVAL;
973 			break;
974 		}
975 
976 		error = security_file_mprotect(vma, reqprot, prot);
977 		if (error)
978 			break;
979 
980 		tmp = vma->vm_end;
981 		if (tmp > end)
982 			tmp = end;
983 
984 		if (vma->vm_ops && vma->vm_ops->mprotect) {
985 			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
986 			if (error)
987 				break;
988 		}
989 
990 		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
991 		if (error)
992 			break;
993 
994 		tmp = vma_iter_end(&vmi);
995 		nstart = tmp;
996 		prot = reqprot;
997 	}
998 	tlb_finish_mmu(&tlb);
999 
1000 	if (!error && tmp < end)
1001 		error = -ENOMEM;
1002 
1003 out:
1004 	mmap_write_unlock(current->mm);
1005 	return error;
1006 }
1007 
1008 SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
1009 		unsigned long, prot)
1010 {
1011 	return do_mprotect_pkey(start, len, prot, -1);
1012 }
1013 
1014 #ifdef CONFIG_ARCH_HAS_PKEYS
1015 
1016 SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
1017 		unsigned long, prot, int, pkey)
1018 {
1019 	return do_mprotect_pkey(start, len, prot, pkey);
1020 }
1021 
1022 SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
1023 {
1024 	int pkey;
1025 	int ret;
1026 
1027 	/* No flags supported yet. */
1028 	if (flags)
1029 		return -EINVAL;
1030 	/* check for unsupported init values */
1031 	if (init_val & ~PKEY_ACCESS_MASK)
1032 		return -EINVAL;
1033 
1034 	mmap_write_lock(current->mm);
1035 	pkey = mm_pkey_alloc(current->mm);
1036 
1037 	ret = -ENOSPC;
1038 	if (pkey == -1)
1039 		goto out;
1040 
1041 	ret = arch_set_user_pkey_access(current, pkey, init_val);
1042 	if (ret) {
1043 		mm_pkey_free(current->mm, pkey);
1044 		goto out;
1045 	}
1046 	ret = pkey;
1047 out:
1048 	mmap_write_unlock(current->mm);
1049 	return ret;
1050 }
1051 
1052 SYSCALL_DEFINE1(pkey_free, int, pkey)
1053 {
1054 	int ret;
1055 
1056 	mmap_write_lock(current->mm);
1057 	ret = mm_pkey_free(current->mm, pkey);
1058 	mmap_write_unlock(current->mm);
1059 
1060 	/*
1061 	 * We could provide warnings or errors if any VMA still
1062 	 * has the pkey set here.
1063 	 */
1064 	return ret;
1065 }
1066 
1067 #endif /* CONFIG_ARCH_HAS_PKEYS */
1068