xref: /linux/mm/mprotect.c (revision 5f7fb89a115d53b4a10bf7ba2733e78df281e98d)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *  mm/mprotect.c
4   *
5   *  (C) Copyright 1994 Linus Torvalds
6   *  (C) Copyright 2002 Christoph Hellwig
7   *
8   *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
9   *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
10   */
11  
12  #include <linux/pagewalk.h>
13  #include <linux/hugetlb.h>
14  #include <linux/shm.h>
15  #include <linux/mman.h>
16  #include <linux/fs.h>
17  #include <linux/highmem.h>
18  #include <linux/security.h>
19  #include <linux/mempolicy.h>
20  #include <linux/personality.h>
21  #include <linux/syscalls.h>
22  #include <linux/swap.h>
23  #include <linux/swapops.h>
24  #include <linux/mmu_notifier.h>
25  #include <linux/migrate.h>
26  #include <linux/perf_event.h>
27  #include <linux/pkeys.h>
28  #include <linux/ksm.h>
29  #include <linux/uaccess.h>
30  #include <linux/mm_inline.h>
31  #include <linux/pgtable.h>
32  #include <linux/sched/sysctl.h>
33  #include <linux/userfaultfd_k.h>
34  #include <linux/memory-tiers.h>
35  #include <uapi/linux/mman.h>
36  #include <asm/cacheflush.h>
37  #include <asm/mmu_context.h>
38  #include <asm/tlbflush.h>
39  #include <asm/tlb.h>
40  
41  #include "internal.h"
42  
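/*
 * Return true if the pte mapping @addr can be made writable right away,
 * without going through the write-fault handler: the VMA must be writable,
 * softdirty and uffd-wp tracking must not require a write fault, and the pte
 * must either map an exclusive anonymous page (private mappings) or already
 * be dirty (shared mappings, i.e. no writenotify pending).
 */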
43  bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
44  			     pte_t pte)
45  {
46  	struct page *page;
47  
48  	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
49  		return false;
50  
51  	/* Don't touch entries that are not even readable. */
52  	if (pte_protnone(pte))
53  		return false;
54  
55  	/* Do we need write faults for softdirty tracking? */
56  	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
57  		return false;
58  
59  	/* Do we need write faults for uffd-wp tracking? */
60  	if (userfaultfd_pte_wp(vma, pte))
61  		return false;
62  
63  	if (!(vma->vm_flags & VM_SHARED)) {
64  		/*
65  		 * Writable MAP_PRIVATE mapping: We can only special-case on
66  		 * exclusive anonymous pages, because we know that our
67  		 * write-fault handler similarly would map them writable without
68  		 * any additional checks while holding the PT lock.
69  		 */
70  		page = vm_normal_page(vma, addr, pte);
71  		return page && PageAnon(page) && PageAnonExclusive(page);
72  	}
73  
74  	/*
75  	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
76  	 * needs a real write-fault for writenotify
77  	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
78  	 * FS was already notified and we can simply mark the PTE writable
79  	 * just like the write-fault handler would do.
80  	 */
81  	return pte_dirty(pte);
82  }
83  
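/*
 * Change protections over the ptes of one pmd range. Returns the number of
 * ptes updated, or -EAGAIN if the page table was unmapped underneath us and
 * the caller should retry.
 */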
84  static long change_pte_range(struct mmu_gather *tlb,
85  		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
86  		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
87  {
88  	pte_t *pte, oldpte;
89  	spinlock_t *ptl;
90  	long pages = 0;
91  	int target_node = NUMA_NO_NODE;
92  	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
93  	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
94  	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
95  
96  	tlb_change_page_size(tlb, PAGE_SIZE);
97  	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
98  	if (!pte)
99  		return -EAGAIN;
100  
101  	/* Get target node for single threaded private VMAs */
102  	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
103  	    atomic_read(&vma->vm_mm->mm_users) == 1)
104  		target_node = numa_node_id();
105  
106  	flush_tlb_batched_pending(vma->vm_mm);
107  	arch_enter_lazy_mmu_mode();
108  	do {
109  		oldpte = ptep_get(pte);
110  		if (pte_present(oldpte)) {
111  			pte_t ptent;
112  
113  			/*
114  			 * Avoid trapping faults against the zero or KSM
115  			 * pages. See similar comment in change_huge_pmd.
116  			 */
117  			if (prot_numa) {
118  				struct folio *folio;
119  				int nid;
120  				bool toptier;
121  
122  				/* Avoid TLB flush if possible */
123  				if (pte_protnone(oldpte))
124  					continue;
125  
126  				folio = vm_normal_folio(vma, addr, oldpte);
127  				if (!folio || folio_is_zone_device(folio) ||
128  				    folio_test_ksm(folio))
129  					continue;
130  
131  				/* Also skip shared copy-on-write pages */
132  				if (is_cow_mapping(vma->vm_flags) &&
133  				    (folio_maybe_dma_pinned(folio) ||
134  				     folio_likely_mapped_shared(folio)))
135  					continue;
136  
137  				/*
138  				 * While migration can move some dirty pages,
139  				 * it cannot move them all from MIGRATE_ASYNC
140  				 * context.
141  				 */
142  				if (folio_is_file_lru(folio) &&
143  				    folio_test_dirty(folio))
144  					continue;
145  
146  				/*
147  				 * Don't mess with PTEs if the page is already on the node
148  				 * a single-threaded process is running on.
149  				 */
150  				nid = folio_nid(folio);
151  				if (target_node == nid)
152  					continue;
153  				toptier = node_is_toptier(nid);
154  
155  				/*
156  				 * Skip scanning the top-tier node if normal NUMA
157  				 * balancing is disabled.
158  				 */
159  				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
160  				    toptier)
161  					continue;
162  				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
163  				    !toptier)
164  					folio_xchg_access_time(folio,
165  						jiffies_to_msecs(jiffies));
166  			}
167  
168  			oldpte = ptep_modify_prot_start(vma, addr, pte);
169  			ptent = pte_modify(oldpte, newprot);
170  
171  			if (uffd_wp)
172  				ptent = pte_mkuffd_wp(ptent);
173  			else if (uffd_wp_resolve)
174  				ptent = pte_clear_uffd_wp(ptent);
175  
176  			/*
177  			 * In some writable, shared mappings, we might want
178  			 * to catch actual write access -- see
179  			 * vma_wants_writenotify().
180  			 *
181  			 * In all writable, private mappings, we have to
182  			 * properly handle COW.
183  			 *
184  			 * In both cases, we can sometimes still change PTEs
185  			 * writable and avoid the write-fault handler, for
186  			 * example, if a PTE is already dirty and no other
187  			 * COW or special handling is required.
188  			 */
189  			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
190  			    !pte_write(ptent) &&
191  			    can_change_pte_writable(vma, addr, ptent))
192  				ptent = pte_mkwrite(ptent, vma);
193  
194  			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
195  			if (pte_needs_flush(oldpte, ptent))
196  				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
197  			pages++;
198  		} else if (is_swap_pte(oldpte)) {
199  			swp_entry_t entry = pte_to_swp_entry(oldpte);
200  			pte_t newpte;
201  
202  			if (is_writable_migration_entry(entry)) {
203  				struct folio *folio = pfn_swap_entry_folio(entry);
204  
205  				/*
206  				 * A protection check is difficult, so just
207  				 * be safe and disable write access.
208  				 */
209  				if (folio_test_anon(folio))
210  					entry = make_readable_exclusive_migration_entry(
211  							     swp_offset(entry));
212  				else
213  					entry = make_readable_migration_entry(swp_offset(entry));
214  				newpte = swp_entry_to_pte(entry);
215  				if (pte_swp_soft_dirty(oldpte))
216  					newpte = pte_swp_mksoft_dirty(newpte);
217  			} else if (is_writable_device_private_entry(entry)) {
218  				/*
219  				 * We do not preserve soft-dirtiness. See
220  				 * copy_nonpresent_pte() for explanation.
221  				 */
222  				entry = make_readable_device_private_entry(
223  							swp_offset(entry));
224  				newpte = swp_entry_to_pte(entry);
225  				if (pte_swp_uffd_wp(oldpte))
226  					newpte = pte_swp_mkuffd_wp(newpte);
227  			} else if (is_writable_device_exclusive_entry(entry)) {
228  				entry = make_readable_device_exclusive_entry(
229  							swp_offset(entry));
230  				newpte = swp_entry_to_pte(entry);
231  				if (pte_swp_soft_dirty(oldpte))
232  					newpte = pte_swp_mksoft_dirty(newpte);
233  				if (pte_swp_uffd_wp(oldpte))
234  					newpte = pte_swp_mkuffd_wp(newpte);
235  			} else if (is_pte_marker_entry(entry)) {
236  				/*
237  				 * Ignore error swap entries unconditionally,
238  				 * because any access should sigbus anyway.
239  				 */
240  				if (is_poisoned_swp_entry(entry))
241  					continue;
242  				/*
243  				 * If this is a uffd-wp pte marker and we'd like
244  				 * to unprotect it, drop it; the next page
245  				 * fault will trigger without uffd trapping.
246  				 */
247  				if (uffd_wp_resolve) {
248  					pte_clear(vma->vm_mm, addr, pte);
249  					pages++;
250  				}
251  				continue;
252  			} else {
253  				newpte = oldpte;
254  			}
255  
256  			if (uffd_wp)
257  				newpte = pte_swp_mkuffd_wp(newpte);
258  			else if (uffd_wp_resolve)
259  				newpte = pte_swp_clear_uffd_wp(newpte);
260  
261  			if (!pte_same(oldpte, newpte)) {
262  				set_pte_at(vma->vm_mm, addr, pte, newpte);
263  				pages++;
264  			}
265  		} else {
266  			/* It must be a none pte; what else could it be? */
267  			WARN_ON_ONCE(!pte_none(oldpte));
268  
269  			/*
270  			 * Nobody plays with any none ptes besides
271  			 * userfaultfd when applying the protections.
272  			 */
273  			if (likely(!uffd_wp))
274  				continue;
275  
276  			if (userfaultfd_wp_use_markers(vma)) {
277  				/*
278  				 * For file-backed mem, we need to be able to
279  				 * wr-protect a none pte, because even if the
280  				 * pte is none, the page/swap cache could
281  				 * exist.  Do that by installing a marker.
282  				 */
283  				set_pte_at(vma->vm_mm, addr, pte,
284  					   make_pte_marker(PTE_MARKER_UFFD_WP));
285  				pages++;
286  			}
287  		}
288  	} while (pte++, addr += PAGE_SIZE, addr != end);
289  	arch_leave_lazy_mmu_mode();
290  	pte_unmap_unlock(pte - 1, ptl);
291  
292  	return pages;
293  }
294  
295  /*
296   * Return true if we want to split THPs into PTE mappings in the change
297   * protection procedure, false otherwise.
298   */
299  static inline bool
300  pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
301  {
302  	/*
303  	 * pte markers only reside at the pte level; if we need pte markers,
304  	 * we need to split.  We cannot wr-protect a shmem thp because, so far,
305  	 * splitting a file thp is handled differently: the pmd is simply erased.
306  	 */
307  	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
308  }
309  
310  /*
311   * Return true if we want to populate pgtables in the change protection
312   * procedure, false otherwise.
313   */
314  static inline bool
315  pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
316  {
317  	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
318  	if (!(cp_flags & MM_CP_UFFD_WP))
319  		return false;
320  
321  	/* Populate if the userfaultfd mode requires pte markers */
322  	return userfaultfd_wp_use_markers(vma);
323  }
324  
325  /*
326   * Populate the pgtable underneath for whatever reason if requested.
327   * When {pte|pmd|...}_alloc() fails, we treat it the same way as a pgtable
328   * allocation failure during a page fault: kick OOM and return an
329   * error.
330   */
331  #define  change_pmd_prepare(vma, pmd, cp_flags)				\
332  	({								\
333  		long err = 0;						\
334  		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
335  			if (pte_alloc(vma->vm_mm, pmd))			\
336  				err = -ENOMEM;				\
337  		}							\
338  		err;							\
339  	})
340  
341  /*
342   * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need a
343   * separate change_pmd_prepare() because pte_alloc() returns 0 on success,
344   * while {pmd|pud|p4d}_alloc() returns a valid pointer on success.
345   */
346  #define  change_prepare(vma, high, low, addr, cp_flags)			\
347  	  ({								\
348  		long err = 0;						\
349  		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
350  			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
351  			if (p == NULL)					\
352  				err = -ENOMEM;				\
353  		}							\
354  		err;							\
355  	})
356  
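/*
 * Walk the pmds in [addr, end): huge pmds are either changed in place via
 * change_huge_pmd() or split first (see pgtable_split_needed()); everything
 * else falls through to change_pte_range(). The mmu notifier is only invoked
 * once a populated pmd is actually encountered.
 */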
357  static inline long change_pmd_range(struct mmu_gather *tlb,
358  		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
359  		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
360  {
361  	pmd_t *pmd;
362  	unsigned long next;
363  	long pages = 0;
364  	unsigned long nr_huge_updates = 0;
365  	struct mmu_notifier_range range;
366  
367  	range.start = 0;
368  
369  	pmd = pmd_offset(pud, addr);
370  	do {
371  		long ret;
372  		pmd_t _pmd;
373  again:
374  		next = pmd_addr_end(addr, end);
375  
376  		ret = change_pmd_prepare(vma, pmd, cp_flags);
377  		if (ret) {
378  			pages = ret;
379  			break;
380  		}
381  
382  		if (pmd_none(*pmd))
383  			goto next;
384  
385  		/* invoke the mmu notifier if the pmd is populated */
386  		if (!range.start) {
387  			mmu_notifier_range_init(&range,
388  				MMU_NOTIFY_PROTECTION_VMA, 0,
389  				vma->vm_mm, addr, end);
390  			mmu_notifier_invalidate_range_start(&range);
391  		}
392  
393  		_pmd = pmdp_get_lockless(pmd);
394  		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
395  			if ((next - addr != HPAGE_PMD_SIZE) ||
396  			    pgtable_split_needed(vma, cp_flags)) {
397  				__split_huge_pmd(vma, pmd, addr, false, NULL);
398  				/*
399  				 * For file-backed mappings, the pmd could have
400  				 * been cleared; make sure the pmd is populated
401  				 * if necessary, then fall through to the pte level.
402  				 */
403  				ret = change_pmd_prepare(vma, pmd, cp_flags);
404  				if (ret) {
405  					pages = ret;
406  					break;
407  				}
408  			} else {
409  				ret = change_huge_pmd(tlb, vma, pmd,
410  						addr, newprot, cp_flags);
411  				if (ret) {
412  					if (ret == HPAGE_PMD_NR) {
413  						pages += HPAGE_PMD_NR;
414  						nr_huge_updates++;
415  					}
416  
417  					/* huge pmd was handled */
418  					goto next;
419  				}
420  			}
421  			/* fall through: the trans huge pmd was just split */
422  		}
423  
424  		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
425  				       cp_flags);
426  		if (ret < 0)
427  			goto again;
428  		pages += ret;
429  next:
430  		cond_resched();
431  	} while (pmd++, addr = next, addr != end);
432  
433  	if (range.start)
434  		mmu_notifier_invalidate_range_end(&range);
435  
436  	if (nr_huge_updates)
437  		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
438  	return pages;
439  }
440  
441  static inline long change_pud_range(struct mmu_gather *tlb,
442  		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
443  		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
444  {
445  	pud_t *pud;
446  	unsigned long next;
447  	long pages = 0, ret;
448  
449  	pud = pud_offset(p4d, addr);
450  	do {
451  		next = pud_addr_end(addr, end);
452  		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
453  		if (ret)
454  			return ret;
455  		if (pud_none_or_clear_bad(pud))
456  			continue;
457  		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
458  					  cp_flags);
459  	} while (pud++, addr = next, addr != end);
460  
461  	return pages;
462  }
463  
464  static inline long change_p4d_range(struct mmu_gather *tlb,
465  		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
466  		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
467  {
468  	p4d_t *p4d;
469  	unsigned long next;
470  	long pages = 0, ret;
471  
472  	p4d = p4d_offset(pgd, addr);
473  	do {
474  		next = p4d_addr_end(addr, end);
475  		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
476  		if (ret)
477  			return ret;
478  		if (p4d_none_or_clear_bad(p4d))
479  			continue;
480  		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
481  					  cp_flags);
482  	} while (p4d++, addr = next, addr != end);
483  
484  	return pages;
485  }
486  
487  static long change_protection_range(struct mmu_gather *tlb,
488  		struct vm_area_struct *vma, unsigned long addr,
489  		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
490  {
491  	struct mm_struct *mm = vma->vm_mm;
492  	pgd_t *pgd;
493  	unsigned long next;
494  	long pages = 0, ret;
495  
496  	BUG_ON(addr >= end);
497  	pgd = pgd_offset(mm, addr);
498  	tlb_start_vma(tlb, vma);
499  	do {
500  		next = pgd_addr_end(addr, end);
501  		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
502  		if (ret) {
503  			pages = ret;
504  			break;
505  		}
506  		if (pgd_none_or_clear_bad(pgd))
507  			continue;
508  		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
509  					  cp_flags);
510  	} while (pgd++, addr = next, addr != end);
511  
512  	tlb_end_vma(tlb, vma);
513  
514  	return pages;
515  }
516  
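/*
 * Change protections over [start, end) of @vma, for mprotect(), userfaultfd
 * write-protection and NUMA hinting. The new protection is vma->vm_page_prot,
 * except for MM_CP_PROT_NUMA which installs PAGE_NONE hinting ptes. Returns
 * the number of ptes updated (a huge pmd counts as HPAGE_PMD_NR), or a
 * negative error.
 */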
517  long change_protection(struct mmu_gather *tlb,
518  		       struct vm_area_struct *vma, unsigned long start,
519  		       unsigned long end, unsigned long cp_flags)
520  {
521  	pgprot_t newprot = vma->vm_page_prot;
522  	long pages;
523  
524  	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
525  
526  #ifdef CONFIG_NUMA_BALANCING
527  	/*
528  	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
529  	 * are expected to reflect their requirements via VMA flags such that
530  	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
531  	 */
532  	if (cp_flags & MM_CP_PROT_NUMA)
533  		newprot = PAGE_NONE;
534  #else
535  	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
536  #endif
537  
538  	if (is_vm_hugetlb_page(vma))
539  		pages = hugetlb_change_protection(vma, start, end, newprot,
540  						  cp_flags);
541  	else
542  		pages = change_protection_range(tlb, vma, start, end, newprot,
543  						cp_flags);
544  
545  	return pages;
546  }
547  
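/*
 * Page-walk callbacks used by mprotect_fixup() to verify, on architectures
 * with arch_has_pfn_modify_check(), that every pfn in a VM_PFNMAP/VM_MIXEDMAP
 * mapping may be switched to the new access-less protection.
 */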
548  static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
549  			       unsigned long next, struct mm_walk *walk)
550  {
551  	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
552  				  *(pgprot_t *)(walk->private)) ?
553  		0 : -EACCES;
554  }
555  
556  static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
557  				   unsigned long addr, unsigned long next,
558  				   struct mm_walk *walk)
559  {
560  	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
561  				  *(pgprot_t *)(walk->private)) ?
562  		0 : -EACCES;
563  }
564  
565  static int prot_none_test(unsigned long addr, unsigned long next,
566  			  struct mm_walk *walk)
567  {
568  	return 0;
569  }
570  
571  static const struct mm_walk_ops prot_none_walk_ops = {
572  	.pte_entry		= prot_none_pte_entry,
573  	.hugetlb_entry		= prot_none_hugetlb_entry,
574  	.test_walk		= prot_none_test,
575  	.walk_lock		= PGWALK_WRLOCK,
576  };
577  
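/*
 * Apply @newflags to [start, end) within @vma: split or merge VMAs as needed,
 * adjust memory accounting, and call change_protection() to update the page
 * tables. On return, *pprev points at the VMA covering the range.
 */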
578  int
579  mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
580  	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
581  	       unsigned long start, unsigned long end, unsigned long newflags)
582  {
583  	struct mm_struct *mm = vma->vm_mm;
584  	unsigned long oldflags = vma->vm_flags;
585  	long nrpages = (end - start) >> PAGE_SHIFT;
586  	unsigned int mm_cp_flags = 0;
587  	unsigned long charged = 0;
588  	int error;
589  
590  	if (newflags == oldflags) {
591  		*pprev = vma;
592  		return 0;
593  	}
594  
595  	/*
596  	 * Do PROT_NONE PFN permission checks here when we can still
597  	 * bail out without undoing a lot of state. This is a rather
598  	 * uncommon case, so it doesn't need to be very optimized.
599  	 */
600  	if (arch_has_pfn_modify_check() &&
601  	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
602  	    (newflags & VM_ACCESS_FLAGS) == 0) {
603  		pgprot_t new_pgprot = vm_get_page_prot(newflags);
604  
605  		error = walk_page_range(current->mm, start, end,
606  				&prot_none_walk_ops, &new_pgprot);
607  		if (error)
608  			return error;
609  	}
610  
611  	/*
612  	 * If we make a private mapping writable we increase our commit;
613  	 * but (without finer accounting) cannot reduce our commit if we
614  	 * make it unwritable again, except in the anonymous case where no
615  	 * anon_vma has yet been assigned.
616  	 *
617  	 * hugetlb mappings were accounted for even if read-only, so there is
618  	 * no need to account for them here.
619  	 */
620  	if (newflags & VM_WRITE) {
621  		/* Check space limits when area turns into data. */
622  		if (!may_expand_vm(mm, newflags, nrpages) &&
623  				may_expand_vm(mm, oldflags, nrpages))
624  			return -ENOMEM;
625  		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
626  						VM_SHARED|VM_NORESERVE))) {
627  			charged = nrpages;
628  			if (security_vm_enough_memory_mm(mm, charged))
629  				return -ENOMEM;
630  			newflags |= VM_ACCOUNT;
631  		}
632  	} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
633  		   !vma->anon_vma) {
634  		newflags &= ~VM_ACCOUNT;
635  	}
636  
637  	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
638  	if (IS_ERR(vma)) {
639  		error = PTR_ERR(vma);
640  		goto fail;
641  	}
642  
643  	*pprev = vma;
644  
645  	/*
646  	 * vm_flags and vm_page_prot are protected by the mmap_lock
647  	 * held in write mode.
648  	 */
649  	vma_start_write(vma);
650  	vm_flags_reset(vma, newflags);
651  	if (vma_wants_manual_pte_write_upgrade(vma))
652  		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
653  	vma_set_page_prot(vma);
654  
655  	change_protection(tlb, vma, start, end, mm_cp_flags);
656  
657  	if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
658  		vm_unacct_memory(nrpages);
659  
660  	/*
661  	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
662  	 * fault on access.
663  	 */
664  	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
665  			(newflags & VM_WRITE)) {
666  		populate_vma_page_range(vma, start, end, NULL);
667  	}
668  
669  	vm_stat_account(mm, oldflags, -nrpages);
670  	vm_stat_account(mm, newflags, nrpages);
671  	perf_event_mmap(vma);
672  	return 0;
673  
674  fail:
675  	vm_unacct_memory(charged);
676  	return error;
677  }
678  
679  /*
680   * pkey==-1 when doing a legacy mprotect()
681   */
682  static int do_mprotect_pkey(unsigned long start, size_t len,
683  		unsigned long prot, int pkey)
684  {
685  	unsigned long nstart, end, tmp, reqprot;
686  	struct vm_area_struct *vma, *prev;
687  	int error;
688  	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
689  	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
690  				(prot & PROT_READ);
691  	struct mmu_gather tlb;
692  	struct vma_iterator vmi;
693  
694  	start = untagged_addr(start);
695  
696  	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
697  	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
698  		return -EINVAL;
699  
700  	if (start & ~PAGE_MASK)
701  		return -EINVAL;
702  	if (!len)
703  		return 0;
704  	len = PAGE_ALIGN(len);
705  	end = start + len;
706  	if (end <= start)
707  		return -ENOMEM;
708  	if (!arch_validate_prot(prot, start))
709  		return -EINVAL;
710  
711  	reqprot = prot;
712  
713  	if (mmap_write_lock_killable(current->mm))
714  		return -EINTR;
715  
716  	/*
717  	 * If userspace did not allocate the pkey, do not let
718  	 * them use it here.
719  	 */
720  	error = -EINVAL;
721  	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
722  		goto out;
723  
724  	vma_iter_init(&vmi, current->mm, start);
725  	vma = vma_find(&vmi, end);
726  	error = -ENOMEM;
727  	if (!vma)
728  		goto out;
729  
730  	if (unlikely(grows & PROT_GROWSDOWN)) {
731  		if (vma->vm_start >= end)
732  			goto out;
733  		start = vma->vm_start;
734  		error = -EINVAL;
735  		if (!(vma->vm_flags & VM_GROWSDOWN))
736  			goto out;
737  	} else {
738  		if (vma->vm_start > start)
739  			goto out;
740  		if (unlikely(grows & PROT_GROWSUP)) {
741  			end = vma->vm_end;
742  			error = -EINVAL;
743  			if (!(vma->vm_flags & VM_GROWSUP))
744  				goto out;
745  		}
746  	}
747  
748  	/*
749  	 * Check if the memory range is sealed.
750  	 * can_modify_mm() assumes we have acquired the mmap lock on the MM.
751  	 */
752  	if (unlikely(!can_modify_mm(current->mm, start, end))) {
753  		error = -EPERM;
754  		goto out;
755  	}
756  
757  	prev = vma_prev(&vmi);
758  	if (start > vma->vm_start)
759  		prev = vma;
760  
761  	tlb_gather_mmu(&tlb, current->mm);
762  	nstart = start;
763  	tmp = vma->vm_start;
764  	for_each_vma_range(vmi, vma, end) {
765  		unsigned long mask_off_old_flags;
766  		unsigned long newflags;
767  		int new_vma_pkey;
768  
769  		if (vma->vm_start != tmp) {
770  			error = -ENOMEM;
771  			break;
772  		}
773  
774  		/* Does the application expect PROT_READ to imply PROT_EXEC? */
775  		if (rier && (vma->vm_flags & VM_MAYEXEC))
776  			prot |= PROT_EXEC;
777  
778  		/*
779  		 * Each mprotect() call explicitly passes r/w/x permissions.
780  		 * If a permission is not passed to mprotect(), it must be
781  		 * cleared from the VMA.
782  		 */
783  		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;
784  
785  		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
786  		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
787  		newflags |= (vma->vm_flags & ~mask_off_old_flags);
788  
789  		/* newflags >> 4 shifts the VM_MAY* bits in place of the VM_* bits */
790  		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
791  			error = -EACCES;
792  			break;
793  		}
794  
795  		if (map_deny_write_exec(vma, newflags)) {
796  			error = -EACCES;
797  			break;
798  		}
799  
800  		/* Allow architectures to sanity-check the new flags */
801  		if (!arch_validate_flags(newflags)) {
802  			error = -EINVAL;
803  			break;
804  		}
805  
806  		error = security_file_mprotect(vma, reqprot, prot);
807  		if (error)
808  			break;
809  
810  		tmp = vma->vm_end;
811  		if (tmp > end)
812  			tmp = end;
813  
814  		if (vma->vm_ops && vma->vm_ops->mprotect) {
815  			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
816  			if (error)
817  				break;
818  		}
819  
820  		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
821  		if (error)
822  			break;
823  
824  		tmp = vma_iter_end(&vmi);
825  		nstart = tmp;
826  		prot = reqprot;
827  	}
828  	tlb_finish_mmu(&tlb);
829  
830  	if (!error && tmp < end)
831  		error = -ENOMEM;
832  
833  out:
834  	mmap_write_unlock(current->mm);
835  	return error;
836  }
837  
838  SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
839  		unsigned long, prot)
840  {
841  	return do_mprotect_pkey(start, len, prot, -1);
842  }
843  
844  #ifdef CONFIG_ARCH_HAS_PKEYS
845  
846  SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
847  		unsigned long, prot, int, pkey)
848  {
849  	return do_mprotect_pkey(start, len, prot, pkey);
850  }
851  
852  SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
853  {
854  	int pkey;
855  	int ret;
856  
857  	/* No flags supported yet. */
858  	if (flags)
859  		return -EINVAL;
860  	/* check for unsupported init values */
861  	if (init_val & ~PKEY_ACCESS_MASK)
862  		return -EINVAL;
863  
864  	mmap_write_lock(current->mm);
865  	pkey = mm_pkey_alloc(current->mm);
866  
867  	ret = -ENOSPC;
868  	if (pkey == -1)
869  		goto out;
870  
871  	ret = arch_set_user_pkey_access(current, pkey, init_val);
872  	if (ret) {
873  		mm_pkey_free(current->mm, pkey);
874  		goto out;
875  	}
876  	ret = pkey;
877  out:
878  	mmap_write_unlock(current->mm);
879  	return ret;
880  }
881  
882  SYSCALL_DEFINE1(pkey_free, int, pkey)
883  {
884  	int ret;
885  
886  	mmap_write_lock(current->mm);
887  	ret = mm_pkey_free(current->mm, pkey);
888  	mmap_write_unlock(current->mm);
889  
890  	/*
891  	 * We could provide warnings or errors if any VMA still
892  	 * has the pkey set here.
893  	 */
894  	return ret;
895  }
896  
897  #endif /* CONFIG_ARCH_HAS_PKEYS */
898