xref: /linux/mm/mprotect.c (revision 100c85421b52e41269ada88f7d71a6b8a06c7a11)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  mm/mprotect.c
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  (C) Copyright 1994 Linus Torvalds
61da177e4SLinus Torvalds  *  (C) Copyright 2002 Christoph Hellwig
71da177e4SLinus Torvalds  *
8046c6884SAlan Cox  *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
91da177e4SLinus Torvalds  *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
101da177e4SLinus Torvalds  */
111da177e4SLinus Torvalds 
12a520110eSChristoph Hellwig #include <linux/pagewalk.h>
131da177e4SLinus Torvalds #include <linux/hugetlb.h>
141da177e4SLinus Torvalds #include <linux/shm.h>
151da177e4SLinus Torvalds #include <linux/mman.h>
161da177e4SLinus Torvalds #include <linux/fs.h>
171da177e4SLinus Torvalds #include <linux/highmem.h>
181da177e4SLinus Torvalds #include <linux/security.h>
191da177e4SLinus Torvalds #include <linux/mempolicy.h>
201da177e4SLinus Torvalds #include <linux/personality.h>
211da177e4SLinus Torvalds #include <linux/syscalls.h>
220697212aSChristoph Lameter #include <linux/swap.h>
230697212aSChristoph Lameter #include <linux/swapops.h>
24cddb8a5cSAndrea Arcangeli #include <linux/mmu_notifier.h>
2564cdd548SKOSAKI Motohiro #include <linux/migrate.h>
26cdd6c482SIngo Molnar #include <linux/perf_event.h>
27e8c24d3aSDave Hansen #include <linux/pkeys.h>
2864a9a34eSMel Gorman #include <linux/ksm.h>
297c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
3009a913a7SMel Gorman #include <linux/mm_inline.h>
31ca5999fdSMike Rapoport #include <linux/pgtable.h>
32a1a3a2fcSHuang Ying #include <linux/sched/sysctl.h>
33fe2567ebSPeter Xu #include <linux/userfaultfd_k.h>
34467b171aSAneesh Kumar K.V #include <linux/memory-tiers.h>
351da177e4SLinus Torvalds #include <asm/cacheflush.h>
36e8c24d3aSDave Hansen #include <asm/mmu_context.h>
371da177e4SLinus Torvalds #include <asm/tlbflush.h>
384a18419fSNadav Amit #include <asm/tlb.h>
391da177e4SLinus Torvalds 
4036f88188SKirill A. Shutemov #include "internal.h"
4136f88188SKirill A. Shutemov 
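/*
 * Decide whether a present pte can be made writable immediately, without
 * going through the write-fault handler first: the VMA must be writable,
 * the pte must not still require a fault for NUMA hinting (protnone),
 * softdirty tracking or uffd-wp, and a private mapping must map an
 * exclusive anonymous page while a shared mapping must have a dirty pte.
 */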
426a56ccbcSDavid Hildenbrand bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
436a56ccbcSDavid Hildenbrand 			     pte_t pte)
4464fe24a3SDavid Hildenbrand {
4564fe24a3SDavid Hildenbrand 	struct page *page;
4664fe24a3SDavid Hildenbrand 
477ea7e333SDavid Hildenbrand 	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
487ea7e333SDavid Hildenbrand 		return false;
4964fe24a3SDavid Hildenbrand 
507ea7e333SDavid Hildenbrand 	/* Don't touch entries that are not even readable. */
51d8488773SNadav Amit 	if (pte_protnone(pte))
5264fe24a3SDavid Hildenbrand 		return false;
5364fe24a3SDavid Hildenbrand 
5464fe24a3SDavid Hildenbrand 	/* Do we need write faults for softdirty tracking? */
5576aefad6SPeter Xu 	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
5664fe24a3SDavid Hildenbrand 		return false;
5764fe24a3SDavid Hildenbrand 
5864fe24a3SDavid Hildenbrand 	/* Do we need write faults for uffd-wp tracking? */
5964fe24a3SDavid Hildenbrand 	if (userfaultfd_pte_wp(vma, pte))
6064fe24a3SDavid Hildenbrand 		return false;
6164fe24a3SDavid Hildenbrand 
6264fe24a3SDavid Hildenbrand 	if (!(vma->vm_flags & VM_SHARED)) {
6364fe24a3SDavid Hildenbrand 		/*
647ea7e333SDavid Hildenbrand 		 * Writable MAP_PRIVATE mapping: We can only special-case on
657ea7e333SDavid Hildenbrand 		 * exclusive anonymous pages, because we know that our
667ea7e333SDavid Hildenbrand 		 * write-fault handler similarly would map them writable without
677ea7e333SDavid Hildenbrand 		 * any additional checks while holding the PT lock.
6864fe24a3SDavid Hildenbrand 		 */
6964fe24a3SDavid Hildenbrand 		page = vm_normal_page(vma, addr, pte);
70d8488773SNadav Amit 		return page && PageAnon(page) && PageAnonExclusive(page);
7164fe24a3SDavid Hildenbrand 	}
7264fe24a3SDavid Hildenbrand 
737ea7e333SDavid Hildenbrand 	/*
747ea7e333SDavid Hildenbrand 	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
757ea7e333SDavid Hildenbrand 	 * needs a real write-fault for writenotify
767ea7e333SDavid Hildenbrand 	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
777ea7e333SDavid Hildenbrand 	 * FS was already notified and we can simply mark the PTE writable
787ea7e333SDavid Hildenbrand 	 * just like the write-fault handler would do.
797ea7e333SDavid Hildenbrand 	 */
80d8488773SNadav Amit 	return pte_dirty(pte);
8164fe24a3SDavid Hildenbrand }
8264fe24a3SDavid Hildenbrand 
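/*
 * Walk the ptes mapped by @pmd within [addr, end) and apply @newprot plus
 * the uffd-wp/NUMA-hinting adjustments requested via @cp_flags. Returns
 * the number of ptes updated, or -EAGAIN if the pte table disappeared
 * under us and the caller should retry.
 */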
83a79390f5SPeter Xu static long change_pte_range(struct mmu_gather *tlb,
844a18419fSNadav Amit 		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
854a18419fSNadav Amit 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
861da177e4SLinus Torvalds {
870697212aSChristoph Lameter 	pte_t *pte, oldpte;
88705e87c0SHugh Dickins 	spinlock_t *ptl;
89a79390f5SPeter Xu 	long pages = 0;
903e321587SAndi Kleen 	int target_node = NUMA_NO_NODE;
9158705444SPeter Xu 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
92292924b2SPeter Xu 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
93292924b2SPeter Xu 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
941da177e4SLinus Torvalds 
954a18419fSNadav Amit 	tlb_change_page_size(tlb, PAGE_SIZE);
96175ad4f1SAndrea Arcangeli 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
97670ddd8cSHugh Dickins 	if (!pte)
98670ddd8cSHugh Dickins 		return -EAGAIN;
991ad9f620SMel Gorman 
1003e321587SAndi Kleen 	/* Get target node for single threaded private VMAs */
1013e321587SAndi Kleen 	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
1023e321587SAndi Kleen 	    atomic_read(&vma->vm_mm->mm_users) == 1)
1033e321587SAndi Kleen 		target_node = numa_node_id();
1043e321587SAndi Kleen 
1053ea27719SMel Gorman 	flush_tlb_batched_pending(vma->vm_mm);
1066606c3e0SZachary Amsden 	arch_enter_lazy_mmu_mode();
1071da177e4SLinus Torvalds 	do {
108c33c7948SRyan Roberts 		oldpte = ptep_get(pte);
1090697212aSChristoph Lameter 		if (pte_present(oldpte)) {
1101da177e4SLinus Torvalds 			pte_t ptent;
1111da177e4SLinus Torvalds 
112e944fd67SMel Gorman 			/*
113e944fd67SMel Gorman 			 * Avoid trapping faults against the zero or KSM
114e944fd67SMel Gorman 			 * pages. See similar comment in change_huge_pmd.
115e944fd67SMel Gorman 			 */
116e944fd67SMel Gorman 			if (prot_numa) {
117ec177880SKefeng Wang 				struct folio *folio;
118a1a3a2fcSHuang Ying 				int nid;
11933024536SHuang Ying 				bool toptier;
120e944fd67SMel Gorman 
121a818f536SHuang Ying 				/* Avoid TLB flush if possible */
122a818f536SHuang Ying 				if (pte_protnone(oldpte))
123a818f536SHuang Ying 					continue;
124a818f536SHuang Ying 
125ec177880SKefeng Wang 				folio = vm_normal_folio(vma, addr, oldpte);
126ec177880SKefeng Wang 				if (!folio || folio_is_zone_device(folio) ||
127ec177880SKefeng Wang 				    folio_test_ksm(folio))
128e944fd67SMel Gorman 					continue;
12910c1045fSMel Gorman 
130859d4adcSHenry Willard 				/* Also skip shared copy-on-write pages */
131859d4adcSHenry Willard 				if (is_cow_mapping(vma->vm_flags) &&
132ec177880SKefeng Wang 				    folio_ref_count(folio) != 1)
133859d4adcSHenry Willard 					continue;
134859d4adcSHenry Willard 
13509a913a7SMel Gorman 				/*
13609a913a7SMel Gorman 				 * While migration can move some dirty pages,
13709a913a7SMel Gorman 				 * it cannot move them all from MIGRATE_ASYNC
13809a913a7SMel Gorman 				 * context.
13909a913a7SMel Gorman 				 */
140ec177880SKefeng Wang 				if (folio_is_file_lru(folio) &&
141ec177880SKefeng Wang 				    folio_test_dirty(folio))
14209a913a7SMel Gorman 					continue;
14309a913a7SMel Gorman 
1443e321587SAndi Kleen 				/*
1453e321587SAndi Kleen 				 * Don't mess with PTEs if the page is already on the
1463e321587SAndi Kleen 				 * node a single-threaded process is running on.
1473e321587SAndi Kleen 				 */
148ec177880SKefeng Wang 				nid = folio_nid(folio);
149a1a3a2fcSHuang Ying 				if (target_node == nid)
150a1a3a2fcSHuang Ying 					continue;
15133024536SHuang Ying 				toptier = node_is_toptier(nid);
152a1a3a2fcSHuang Ying 
153a1a3a2fcSHuang Ying 				/*
154a1a3a2fcSHuang Ying 				 * Skip scanning top tier node if normal numa
155a1a3a2fcSHuang Ying 				 * balancing is disabled
156a1a3a2fcSHuang Ying 				 */
157a1a3a2fcSHuang Ying 				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
15833024536SHuang Ying 				    toptier)
1593e321587SAndi Kleen 					continue;
16033024536SHuang Ying 				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
16133024536SHuang Ying 				    !toptier)
162ec177880SKefeng Wang 					folio_xchg_access_time(folio,
16333024536SHuang Ying 						jiffies_to_msecs(jiffies));
164e944fd67SMel Gorman 			}
165e944fd67SMel Gorman 
16604a86453SAneesh Kumar K.V 			oldpte = ptep_modify_prot_start(vma, addr, pte);
16704a86453SAneesh Kumar K.V 			ptent = pte_modify(oldpte, newprot);
1688a0516edSMel Gorman 
169f1eb1bacSPeter Xu 			if (uffd_wp)
170292924b2SPeter Xu 				ptent = pte_mkuffd_wp(ptent);
171f1eb1bacSPeter Xu 			else if (uffd_wp_resolve)
172292924b2SPeter Xu 				ptent = pte_clear_uffd_wp(ptent);
173292924b2SPeter Xu 
17464fe24a3SDavid Hildenbrand 			/*
17564fe24a3SDavid Hildenbrand 			 * In some writable, shared mappings, we might want
17664fe24a3SDavid Hildenbrand 			 * to catch actual write access -- see
17764fe24a3SDavid Hildenbrand 			 * vma_wants_writenotify().
17864fe24a3SDavid Hildenbrand 			 *
17964fe24a3SDavid Hildenbrand 			 * In all writable, private mappings, we have to
18064fe24a3SDavid Hildenbrand 			 * properly handle COW.
18164fe24a3SDavid Hildenbrand 			 *
18264fe24a3SDavid Hildenbrand 			 * In both cases, we can sometimes still change PTEs
18364fe24a3SDavid Hildenbrand 			 * writable and avoid the write-fault handler, for
18464fe24a3SDavid Hildenbrand 			 * example, if a PTE is already dirty and no other
18564fe24a3SDavid Hildenbrand 			 * COW or special handling is required.
18664fe24a3SDavid Hildenbrand 			 */
18764fe24a3SDavid Hildenbrand 			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
18864fe24a3SDavid Hildenbrand 			    !pte_write(ptent) &&
18964fe24a3SDavid Hildenbrand 			    can_change_pte_writable(vma, addr, ptent))
190161e393cSRick Edgecombe 				ptent = pte_mkwrite(ptent, vma);
19164fe24a3SDavid Hildenbrand 
19204a86453SAneesh Kumar K.V 			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
193c9fe6656SNadav Amit 			if (pte_needs_flush(oldpte, ptent))
1944a18419fSNadav Amit 				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
1954b10e7d5SMel Gorman 			pages++;
196f45ec5ffSPeter Xu 		} else if (is_swap_pte(oldpte)) {
1970697212aSChristoph Lameter 			swp_entry_t entry = pte_to_swp_entry(oldpte);
198f45ec5ffSPeter Xu 			pte_t newpte;
1990697212aSChristoph Lameter 
2004dd845b5SAlistair Popple 			if (is_writable_migration_entry(entry)) {
201*f2d571b0SMatthew Wilcox (Oracle) 				struct folio *folio = pfn_swap_entry_folio(entry);
2023d2f78f0SPeter Xu 
2030697212aSChristoph Lameter 				/*
2040697212aSChristoph Lameter 				 * A protection check is difficult so
2050697212aSChristoph Lameter 				 * just be safe and disable write
2060697212aSChristoph Lameter 				 */
207*f2d571b0SMatthew Wilcox (Oracle) 				if (folio_test_anon(folio))
2086c287605SDavid Hildenbrand 					entry = make_readable_exclusive_migration_entry(
2094dd845b5SAlistair Popple 							     swp_offset(entry));
2106c287605SDavid Hildenbrand 				else
2116c287605SDavid Hildenbrand 					entry = make_readable_migration_entry(swp_offset(entry));
212c3d16e16SCyrill Gorcunov 				newpte = swp_entry_to_pte(entry);
213c3d16e16SCyrill Gorcunov 				if (pte_swp_soft_dirty(oldpte))
214c3d16e16SCyrill Gorcunov 					newpte = pte_swp_mksoft_dirty(newpte);
2154dd845b5SAlistair Popple 			} else if (is_writable_device_private_entry(entry)) {
2165042db43SJérôme Glisse 				/*
2175042db43SJérôme Glisse 				 * We do not preserve soft-dirtiness. See
218eafcb7a9SMiaohe Lin 				 * copy_nonpresent_pte() for explanation.
2195042db43SJérôme Glisse 				 */
2204dd845b5SAlistair Popple 				entry = make_readable_device_private_entry(
2214dd845b5SAlistair Popple 							swp_offset(entry));
2225042db43SJérôme Glisse 				newpte = swp_entry_to_pte(entry);
223f45ec5ffSPeter Xu 				if (pte_swp_uffd_wp(oldpte))
224f45ec5ffSPeter Xu 					newpte = pte_swp_mkuffd_wp(newpte);
225b756a3b5SAlistair Popple 			} else if (is_writable_device_exclusive_entry(entry)) {
226b756a3b5SAlistair Popple 				entry = make_readable_device_exclusive_entry(
227b756a3b5SAlistair Popple 							swp_offset(entry));
228b756a3b5SAlistair Popple 				newpte = swp_entry_to_pte(entry);
229b756a3b5SAlistair Popple 				if (pte_swp_soft_dirty(oldpte))
230b756a3b5SAlistair Popple 					newpte = pte_swp_mksoft_dirty(newpte);
231b756a3b5SAlistair Popple 				if (pte_swp_uffd_wp(oldpte))
232b756a3b5SAlistair Popple 					newpte = pte_swp_mkuffd_wp(newpte);
2337e3ce3f8SPeter Xu 			} else if (is_pte_marker_entry(entry)) {
2347e3ce3f8SPeter Xu 				/*
235af19487fSAxel Rasmussen 				 * Ignore error swap entries unconditionally,
2367e3ce3f8SPeter Xu 				 * because any access should sigbus anyway.
2377e3ce3f8SPeter Xu 				 */
238af19487fSAxel Rasmussen 				if (is_poisoned_swp_entry(entry))
2397e3ce3f8SPeter Xu 					continue;
240fe2567ebSPeter Xu 				/*
241fe2567ebSPeter Xu 				 * If this is a uffd-wp pte marker and we'd like
242fe2567ebSPeter Xu 				 * to unprotect it, drop it; the next page
243fe2567ebSPeter Xu 				 * fault will trigger without uffd trapping.
244fe2567ebSPeter Xu 				 */
245fe2567ebSPeter Xu 				if (uffd_wp_resolve) {
246fe2567ebSPeter Xu 					pte_clear(vma->vm_mm, addr, pte);
247fe2567ebSPeter Xu 					pages++;
248fe2567ebSPeter Xu 				}
2495c041f5dSPeter Xu 				continue;
250f45ec5ffSPeter Xu 			} else {
251f45ec5ffSPeter Xu 				newpte = oldpte;
252f45ec5ffSPeter Xu 			}
2535042db43SJérôme Glisse 
254f45ec5ffSPeter Xu 			if (uffd_wp)
255f45ec5ffSPeter Xu 				newpte = pte_swp_mkuffd_wp(newpte);
256f45ec5ffSPeter Xu 			else if (uffd_wp_resolve)
257f45ec5ffSPeter Xu 				newpte = pte_swp_clear_uffd_wp(newpte);
258f45ec5ffSPeter Xu 
259f45ec5ffSPeter Xu 			if (!pte_same(oldpte, newpte)) {
260f45ec5ffSPeter Xu 				set_pte_at(vma->vm_mm, addr, pte, newpte);
2615042db43SJérôme Glisse 				pages++;
2625042db43SJérôme Glisse 			}
263fe2567ebSPeter Xu 		} else {
264fe2567ebSPeter Xu 			/* It must be a none page, or what else?.. */
265fe2567ebSPeter Xu 			WARN_ON_ONCE(!pte_none(oldpte));
2662bad466cSPeter Xu 
2672bad466cSPeter Xu 			/*
2682bad466cSPeter Xu 			 * Nobody plays with any none ptes besides
2692bad466cSPeter Xu 			 * userfaultfd when applying the protections.
2702bad466cSPeter Xu 			 */
2712bad466cSPeter Xu 			if (likely(!uffd_wp))
2722bad466cSPeter Xu 				continue;
2732bad466cSPeter Xu 
2742bad466cSPeter Xu 			if (userfaultfd_wp_use_markers(vma)) {
275fe2567ebSPeter Xu 				/*
276fe2567ebSPeter Xu 				 * For file-backed mem, we need to be able to
277fe2567ebSPeter Xu 				 * wr-protect a none pte, because even if the
278fe2567ebSPeter Xu 				 * pte is none, the page/swap cache could
279fe2567ebSPeter Xu 				 * exist.  We do that by installing a marker.
280fe2567ebSPeter Xu 				 */
281fe2567ebSPeter Xu 				set_pte_at(vma->vm_mm, addr, pte,
282fe2567ebSPeter Xu 					   make_pte_marker(PTE_MARKER_UFFD_WP));
283fe2567ebSPeter Xu 				pages++;
284fe2567ebSPeter Xu 			}
285e920e14cSMel Gorman 		}
2861da177e4SLinus Torvalds 	} while (pte++, addr += PAGE_SIZE, addr != end);
2876606c3e0SZachary Amsden 	arch_leave_lazy_mmu_mode();
288705e87c0SHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
2897da4d641SPeter Zijlstra 
2907da4d641SPeter Zijlstra 	return pages;
2911da177e4SLinus Torvalds }
2921da177e4SLinus Torvalds 
2938b272b3cSMel Gorman /*
2942bad466cSPeter Xu  * Return true if we want to split THPs into PTE mappings in the change
2952bad466cSPeter Xu  * protection procedure, false otherwise.
2962bad466cSPeter Xu  */
297fe2567ebSPeter Xu static inline bool
2982bad466cSPeter Xu pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
299fe2567ebSPeter Xu {
3002bad466cSPeter Xu 	/*
3012bad466cSPeter Xu 	 * pte markers only reside at the pte level; if we need pte markers,
3022bad466cSPeter Xu 	 * we need to split.  We cannot wr-protect shmem thp because, so far,
3032bad466cSPeter Xu 	 * file thp is split by simply erasing the pmd.
3042bad466cSPeter Xu 	 */
305fe2567ebSPeter Xu 	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
306fe2567ebSPeter Xu }
307fe2567ebSPeter Xu 
308fe2567ebSPeter Xu /*
3092bad466cSPeter Xu  * Return true if we want to populate pgtables in the change protection
3102bad466cSPeter Xu  * procedure, false otherwise.
3112bad466cSPeter Xu  */
3122bad466cSPeter Xu static inline bool
3132bad466cSPeter Xu pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
3142bad466cSPeter Xu {
3152bad466cSPeter Xu 	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
3162bad466cSPeter Xu 	if (!(cp_flags & MM_CP_UFFD_WP))
3172bad466cSPeter Xu 		return false;
3182bad466cSPeter Xu 
3192bad466cSPeter Xu 	/* Populate if the userfaultfd mode requires pte markers */
3202bad466cSPeter Xu 	return userfaultfd_wp_use_markers(vma);
3212bad466cSPeter Xu }
3222bad466cSPeter Xu 
3232bad466cSPeter Xu /*
3242bad466cSPeter Xu  * Populate the pgtable underneath for whatever reason if requested.
3252bad466cSPeter Xu  * When {pte|pmd|...}_alloc() fails, we treat it the same way as pgtable
3262bad466cSPeter Xu  * allocation failures during page faults: kick OOM and return an
3272bad466cSPeter Xu  * error.
328fe2567ebSPeter Xu  */
329fe2567ebSPeter Xu #define  change_pmd_prepare(vma, pmd, cp_flags)				\
330d1751118SPeter Xu 	({								\
331d1751118SPeter Xu 		long err = 0;						\
3322bad466cSPeter Xu 		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
333d1751118SPeter Xu 			if (pte_alloc(vma->vm_mm, pmd))			\
334d1751118SPeter Xu 				err = -ENOMEM;				\
335fe2567ebSPeter Xu 		}							\
336d1751118SPeter Xu 		err;							\
337d1751118SPeter Xu 	})
338d1751118SPeter Xu 
339fe2567ebSPeter Xu /*
340fe2567ebSPeter Xu  * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
341fe2567ebSPeter Xu  * have a separate change_pmd_prepare() because pte_alloc() returns 0 on
342fe2567ebSPeter Xu  * success, while {pmd|pud|p4d}_alloc() returns a valid pointer on success.
343fe2567ebSPeter Xu  */
344fe2567ebSPeter Xu #define  change_prepare(vma, high, low, addr, cp_flags)			\
345d1751118SPeter Xu 	  ({								\
346d1751118SPeter Xu 		long err = 0;						\
3472bad466cSPeter Xu 		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
348fe2567ebSPeter Xu 			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
349d1751118SPeter Xu 			if (p == NULL)					\
350d1751118SPeter Xu 				err = -ENOMEM;				\
351fe2567ebSPeter Xu 		}							\
352d1751118SPeter Xu 		err;							\
353d1751118SPeter Xu 	})
354fe2567ebSPeter Xu 
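/*
 * Walk the pmds mapped by @pud within [addr, end). Transparent huge pages
 * are either changed in place via change_huge_pmd() or split before
 * falling through to change_pte_range(). The MMU notifier range is only
 * started once the first populated pmd is found.
 */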
355a79390f5SPeter Xu static inline long change_pmd_range(struct mmu_gather *tlb,
3564a18419fSNadav Amit 		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
3574a18419fSNadav Amit 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
3581da177e4SLinus Torvalds {
3591da177e4SLinus Torvalds 	pmd_t *pmd;
3601da177e4SLinus Torvalds 	unsigned long next;
361a79390f5SPeter Xu 	long pages = 0;
36272403b4aSMel Gorman 	unsigned long nr_huge_updates = 0;
363ac46d4f3SJérôme Glisse 	struct mmu_notifier_range range;
364ac46d4f3SJérôme Glisse 
365ac46d4f3SJérôme Glisse 	range.start = 0;
3661da177e4SLinus Torvalds 
3671da177e4SLinus Torvalds 	pmd = pmd_offset(pud, addr);
3681da177e4SLinus Torvalds 	do {
369d1751118SPeter Xu 		long ret;
370670ddd8cSHugh Dickins 		pmd_t _pmd;
371670ddd8cSHugh Dickins again:
3721da177e4SLinus Torvalds 		next = pmd_addr_end(addr, end);
3738b272b3cSMel Gorman 
374d1751118SPeter Xu 		ret = change_pmd_prepare(vma, pmd, cp_flags);
375d1751118SPeter Xu 		if (ret) {
376d1751118SPeter Xu 			pages = ret;
377d1751118SPeter Xu 			break;
378d1751118SPeter Xu 		}
379670ddd8cSHugh Dickins 
380670ddd8cSHugh Dickins 		if (pmd_none(*pmd))
3814991c09cSAnshuman Khandual 			goto next;
382a5338093SRik van Riel 
383a5338093SRik van Riel 		/* invoke the mmu notifier if the pmd is populated */
384ac46d4f3SJérôme Glisse 		if (!range.start) {
3857269f999SJérôme Glisse 			mmu_notifier_range_init(&range,
3867269f999SJérôme Glisse 				MMU_NOTIFY_PROTECTION_VMA, 0,
3877d4a8be0SAlistair Popple 				vma->vm_mm, addr, end);
388ac46d4f3SJérôme Glisse 			mmu_notifier_invalidate_range_start(&range);
389a5338093SRik van Riel 		}
390a5338093SRik van Riel 
391670ddd8cSHugh Dickins 		_pmd = pmdp_get_lockless(pmd);
392670ddd8cSHugh Dickins 		if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) {
393019c2d8bSPeter Xu 			if ((next - addr != HPAGE_PMD_SIZE) ||
3942bad466cSPeter Xu 			    pgtable_split_needed(vma, cp_flags)) {
395fd60775aSDavid Rientjes 				__split_huge_pmd(vma, pmd, addr, false, NULL);
396019c2d8bSPeter Xu 				/*
397019c2d8bSPeter Xu 				 * For file-backed, the pmd could have been
398019c2d8bSPeter Xu 				 * cleared; make sure pmd populated if
399019c2d8bSPeter Xu 				 * necessary, then fall-through to pte level.
400019c2d8bSPeter Xu 				 */
401d1751118SPeter Xu 				ret = change_pmd_prepare(vma, pmd, cp_flags);
402d1751118SPeter Xu 				if (ret) {
403d1751118SPeter Xu 					pages = ret;
404d1751118SPeter Xu 					break;
405d1751118SPeter Xu 				}
4066b9116a6SKirill A. Shutemov 			} else {
407670ddd8cSHugh Dickins 				ret = change_huge_pmd(tlb, vma, pmd,
4084a18419fSNadav Amit 						addr, newprot, cp_flags);
409670ddd8cSHugh Dickins 				if (ret) {
410670ddd8cSHugh Dickins 					if (ret == HPAGE_PMD_NR) {
41172403b4aSMel Gorman 						pages += HPAGE_PMD_NR;
41272403b4aSMel Gorman 						nr_huge_updates++;
41372403b4aSMel Gorman 					}
4141ad9f620SMel Gorman 
4151ad9f620SMel Gorman 					/* huge pmd was handled */
4164991c09cSAnshuman Khandual 					goto next;
4177da4d641SPeter Zijlstra 				}
418f123d74aSMel Gorman 			}
41988a9ab6eSRik van Riel 			/* fall through, the trans huge pmd just split */
420cd7548abSJohannes Weiner 		}
421670ddd8cSHugh Dickins 
422670ddd8cSHugh Dickins 		ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
423670ddd8cSHugh Dickins 				       cp_flags);
424670ddd8cSHugh Dickins 		if (ret < 0)
425670ddd8cSHugh Dickins 			goto again;
426670ddd8cSHugh Dickins 		pages += ret;
4274991c09cSAnshuman Khandual next:
4284991c09cSAnshuman Khandual 		cond_resched();
4291da177e4SLinus Torvalds 	} while (pmd++, addr = next, addr != end);
4307da4d641SPeter Zijlstra 
431ac46d4f3SJérôme Glisse 	if (range.start)
432ac46d4f3SJérôme Glisse 		mmu_notifier_invalidate_range_end(&range);
433a5338093SRik van Riel 
43472403b4aSMel Gorman 	if (nr_huge_updates)
43572403b4aSMel Gorman 		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
4367da4d641SPeter Zijlstra 	return pages;
4371da177e4SLinus Torvalds }
4381da177e4SLinus Torvalds 
439a79390f5SPeter Xu static inline long change_pud_range(struct mmu_gather *tlb,
4404a18419fSNadav Amit 		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
4414a18419fSNadav Amit 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
4421da177e4SLinus Torvalds {
4431da177e4SLinus Torvalds 	pud_t *pud;
4441da177e4SLinus Torvalds 	unsigned long next;
445d1751118SPeter Xu 	long pages = 0, ret;
4461da177e4SLinus Torvalds 
447c2febafcSKirill A. Shutemov 	pud = pud_offset(p4d, addr);
4481da177e4SLinus Torvalds 	do {
4491da177e4SLinus Torvalds 		next = pud_addr_end(addr, end);
450d1751118SPeter Xu 		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
451d1751118SPeter Xu 		if (ret)
452d1751118SPeter Xu 			return ret;
4531da177e4SLinus Torvalds 		if (pud_none_or_clear_bad(pud))
4541da177e4SLinus Torvalds 			continue;
4554a18419fSNadav Amit 		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
45658705444SPeter Xu 					  cp_flags);
4571da177e4SLinus Torvalds 	} while (pud++, addr = next, addr != end);
4587da4d641SPeter Zijlstra 
4597da4d641SPeter Zijlstra 	return pages;
4601da177e4SLinus Torvalds }
4611da177e4SLinus Torvalds 
462a79390f5SPeter Xu static inline long change_p4d_range(struct mmu_gather *tlb,
4634a18419fSNadav Amit 		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
4644a18419fSNadav Amit 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
465c2febafcSKirill A. Shutemov {
466c2febafcSKirill A. Shutemov 	p4d_t *p4d;
467c2febafcSKirill A. Shutemov 	unsigned long next;
468d1751118SPeter Xu 	long pages = 0, ret;
469c2febafcSKirill A. Shutemov 
470c2febafcSKirill A. Shutemov 	p4d = p4d_offset(pgd, addr);
471c2febafcSKirill A. Shutemov 	do {
472c2febafcSKirill A. Shutemov 		next = p4d_addr_end(addr, end);
473d1751118SPeter Xu 		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
474d1751118SPeter Xu 		if (ret)
475d1751118SPeter Xu 			return ret;
476c2febafcSKirill A. Shutemov 		if (p4d_none_or_clear_bad(p4d))
477c2febafcSKirill A. Shutemov 			continue;
4784a18419fSNadav Amit 		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
47958705444SPeter Xu 					  cp_flags);
480c2febafcSKirill A. Shutemov 	} while (p4d++, addr = next, addr != end);
481c2febafcSKirill A. Shutemov 
482c2febafcSKirill A. Shutemov 	return pages;
483c2febafcSKirill A. Shutemov }
484c2febafcSKirill A. Shutemov 
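/*
 * Top-level page-table walk for one VMA: iterate the pgd entries covering
 * [addr, end) and descend through p4d/pud/pmd down to the pte level, with
 * TLB batching bracketed by tlb_start_vma()/tlb_end_vma().
 */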
485a79390f5SPeter Xu static long change_protection_range(struct mmu_gather *tlb,
4864a18419fSNadav Amit 		struct vm_area_struct *vma, unsigned long addr,
4874a18419fSNadav Amit 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
4881da177e4SLinus Torvalds {
4891da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
4901da177e4SLinus Torvalds 	pgd_t *pgd;
4911da177e4SLinus Torvalds 	unsigned long next;
492d1751118SPeter Xu 	long pages = 0, ret;
4931da177e4SLinus Torvalds 
4941da177e4SLinus Torvalds 	BUG_ON(addr >= end);
4951da177e4SLinus Torvalds 	pgd = pgd_offset(mm, addr);
4964a18419fSNadav Amit 	tlb_start_vma(tlb, vma);
4971da177e4SLinus Torvalds 	do {
4981da177e4SLinus Torvalds 		next = pgd_addr_end(addr, end);
499d1751118SPeter Xu 		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
500d1751118SPeter Xu 		if (ret) {
501d1751118SPeter Xu 			pages = ret;
502d1751118SPeter Xu 			break;
503d1751118SPeter Xu 		}
5041da177e4SLinus Torvalds 		if (pgd_none_or_clear_bad(pgd))
5051da177e4SLinus Torvalds 			continue;
5064a18419fSNadav Amit 		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
50758705444SPeter Xu 					  cp_flags);
5081da177e4SLinus Torvalds 	} while (pgd++, addr = next, addr != end);
5097da4d641SPeter Zijlstra 
5104a18419fSNadav Amit 	tlb_end_vma(tlb, vma);
5117da4d641SPeter Zijlstra 
5127da4d641SPeter Zijlstra 	return pages;
5137da4d641SPeter Zijlstra }
5147da4d641SPeter Zijlstra 
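/*
 * Apply the VMA's current vm_page_prot (or PAGE_NONE for NUMA-hinting
 * faults) to [start, end), dispatching to hugetlb_change_protection() for
 * hugetlb VMAs. Returns the number of pages updated, or a negative error
 * if pgtable allocation failed.
 */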
515a79390f5SPeter Xu long change_protection(struct mmu_gather *tlb,
5164a18419fSNadav Amit 		       struct vm_area_struct *vma, unsigned long start,
5171ef488edSDavid Hildenbrand 		       unsigned long end, unsigned long cp_flags)
5187da4d641SPeter Zijlstra {
5191ef488edSDavid Hildenbrand 	pgprot_t newprot = vma->vm_page_prot;
520a79390f5SPeter Xu 	long pages;
5217da4d641SPeter Zijlstra 
522292924b2SPeter Xu 	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
523292924b2SPeter Xu 
5241ef488edSDavid Hildenbrand #ifdef CONFIG_NUMA_BALANCING
5251ef488edSDavid Hildenbrand 	/*
5261ef488edSDavid Hildenbrand 	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
5271ef488edSDavid Hildenbrand 	 * are expected to reflect their requirements via VMA flags such that
5281ef488edSDavid Hildenbrand 	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
5291ef488edSDavid Hildenbrand 	 */
5301ef488edSDavid Hildenbrand 	if (cp_flags & MM_CP_PROT_NUMA)
5311ef488edSDavid Hildenbrand 		newprot = PAGE_NONE;
5321ef488edSDavid Hildenbrand #else
5331ef488edSDavid Hildenbrand 	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
5341ef488edSDavid Hildenbrand #endif
5351ef488edSDavid Hildenbrand 
5367da4d641SPeter Zijlstra 	if (is_vm_hugetlb_page(vma))
5375a90d5a1SPeter Xu 		pages = hugetlb_change_protection(vma, start, end, newprot,
5385a90d5a1SPeter Xu 						  cp_flags);
5397da4d641SPeter Zijlstra 	else
5404a18419fSNadav Amit 		pages = change_protection_range(tlb, vma, start, end, newprot,
54158705444SPeter Xu 						cp_flags);
5427da4d641SPeter Zijlstra 
5437da4d641SPeter Zijlstra 	return pages;
5441da177e4SLinus Torvalds }
5451da177e4SLinus Torvalds 
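/*
 * Page-walk callbacks used by mprotect_fixup() to check, via
 * pfn_modify_allowed(), that every pfn in a VM_PFNMAP/VM_MIXEDMAP mapping
 * may be switched to the new (inaccessible) protection before any state
 * is actually changed.
 */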
54642e4089cSAndi Kleen static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
54742e4089cSAndi Kleen 			       unsigned long next, struct mm_walk *walk)
54842e4089cSAndi Kleen {
549c33c7948SRyan Roberts 	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
550c33c7948SRyan Roberts 				  *(pgprot_t *)(walk->private)) ?
55142e4089cSAndi Kleen 		0 : -EACCES;
55242e4089cSAndi Kleen }
55342e4089cSAndi Kleen 
55442e4089cSAndi Kleen static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
55542e4089cSAndi Kleen 				   unsigned long addr, unsigned long next,
55642e4089cSAndi Kleen 				   struct mm_walk *walk)
55742e4089cSAndi Kleen {
558c33c7948SRyan Roberts 	return pfn_modify_allowed(pte_pfn(ptep_get(pte)),
559c33c7948SRyan Roberts 				  *(pgprot_t *)(walk->private)) ?
56042e4089cSAndi Kleen 		0 : -EACCES;
56142e4089cSAndi Kleen }
56242e4089cSAndi Kleen 
56342e4089cSAndi Kleen static int prot_none_test(unsigned long addr, unsigned long next,
56442e4089cSAndi Kleen 			  struct mm_walk *walk)
56542e4089cSAndi Kleen {
56642e4089cSAndi Kleen 	return 0;
56742e4089cSAndi Kleen }
56842e4089cSAndi Kleen 
5697b86ac33SChristoph Hellwig static const struct mm_walk_ops prot_none_walk_ops = {
57042e4089cSAndi Kleen 	.pte_entry		= prot_none_pte_entry,
57142e4089cSAndi Kleen 	.hugetlb_entry		= prot_none_hugetlb_entry,
57242e4089cSAndi Kleen 	.test_walk		= prot_none_test,
57349b06385SSuren Baghdasaryan 	.walk_lock		= PGWALK_WRLOCK,
57442e4089cSAndi Kleen };
57542e4089cSAndi Kleen 
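/*
 * Apply @newflags to the range [start, end) of @vma: verify PFN-remap
 * restrictions, fix up memory accounting, split or merge the VMA via
 * vma_modify_flags(), and finally rewrite the page tables through
 * change_protection().
 */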
576b6a2fea3SOllie Wild int
5772286a691SLiam R. Howlett mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
5782286a691SLiam R. Howlett 	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
5792286a691SLiam R. Howlett 	       unsigned long start, unsigned long end, unsigned long newflags)
5801da177e4SLinus Torvalds {
5811da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
5821da177e4SLinus Torvalds 	unsigned long oldflags = vma->vm_flags;
5831da177e4SLinus Torvalds 	long nrpages = (end - start) >> PAGE_SHIFT;
584eb309ec8SDavid Hildenbrand 	unsigned int mm_cp_flags = 0;
5851da177e4SLinus Torvalds 	unsigned long charged = 0;
5861da177e4SLinus Torvalds 	int error;
5871da177e4SLinus Torvalds 
5881da177e4SLinus Torvalds 	if (newflags == oldflags) {
5891da177e4SLinus Torvalds 		*pprev = vma;
5901da177e4SLinus Torvalds 		return 0;
5911da177e4SLinus Torvalds 	}
5921da177e4SLinus Torvalds 
5931da177e4SLinus Torvalds 	/*
59442e4089cSAndi Kleen 	 * Do PROT_NONE PFN permission checks here when we can still
59542e4089cSAndi Kleen 	 * bail out without undoing a lot of state. This is a rather
59642e4089cSAndi Kleen 	 * uncommon case, so doesn't need to be very optimized.
59742e4089cSAndi Kleen 	 */
59842e4089cSAndi Kleen 	if (arch_has_pfn_modify_check() &&
59942e4089cSAndi Kleen 	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
6006cb4d9a2SAnshuman Khandual 	    (newflags & VM_ACCESS_FLAGS) == 0) {
6017b86ac33SChristoph Hellwig 		pgprot_t new_pgprot = vm_get_page_prot(newflags);
6027b86ac33SChristoph Hellwig 
6037b86ac33SChristoph Hellwig 		error = walk_page_range(current->mm, start, end,
6047b86ac33SChristoph Hellwig 				&prot_none_walk_ops, &new_pgprot);
60542e4089cSAndi Kleen 		if (error)
60642e4089cSAndi Kleen 			return error;
60742e4089cSAndi Kleen 	}
60842e4089cSAndi Kleen 
60942e4089cSAndi Kleen 	/*
6101da177e4SLinus Torvalds 	 * If we make a private mapping writable we increase our commit;
6111da177e4SLinus Torvalds 	 * but (without finer accounting) cannot reduce our commit if we
6129b914329SLorenzo Stoakes 	 * make it unwritable again except in the anonymous case where no
6139b914329SLorenzo Stoakes 	 * anon_vma has yet been assigned.
6149b914329SLorenzo Stoakes 	 *
6159b914329SLorenzo Stoakes 	 * hugetlb mappings were accounted for even if read-only, so there is
6169b914329SLorenzo Stoakes 	 * no need to account for them here.
6171da177e4SLinus Torvalds 	 */
6181da177e4SLinus Torvalds 	if (newflags & VM_WRITE) {
61984638335SKonstantin Khlebnikov 		/* Check space limits when area turns into data. */
62084638335SKonstantin Khlebnikov 		if (!may_expand_vm(mm, newflags, nrpages) &&
62184638335SKonstantin Khlebnikov 				may_expand_vm(mm, oldflags, nrpages))
62284638335SKonstantin Khlebnikov 			return -ENOMEM;
6235a6fe125SMel Gorman 		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
624cdfd4325SAndy Whitcroft 						VM_SHARED|VM_NORESERVE))) {
6251da177e4SLinus Torvalds 			charged = nrpages;
626191c5424SAl Viro 			if (security_vm_enough_memory_mm(mm, charged))
6271da177e4SLinus Torvalds 				return -ENOMEM;
6281da177e4SLinus Torvalds 			newflags |= VM_ACCOUNT;
6291da177e4SLinus Torvalds 		}
6309b914329SLorenzo Stoakes 	} else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) &&
6319b914329SLorenzo Stoakes 		   !vma->anon_vma) {
6329b914329SLorenzo Stoakes 		newflags &= ~VM_ACCOUNT;
6331da177e4SLinus Torvalds 	}
6341da177e4SLinus Torvalds 
63594d7d923SLorenzo Stoakes 	vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags);
63694d7d923SLorenzo Stoakes 	if (IS_ERR(vma)) {
63794d7d923SLorenzo Stoakes 		error = PTR_ERR(vma);
63894d7d923SLorenzo Stoakes 		goto fail;
6391da177e4SLinus Torvalds 	}
6401da177e4SLinus Torvalds 
6411da177e4SLinus Torvalds 	*pprev = vma;
6421da177e4SLinus Torvalds 
6431da177e4SLinus Torvalds 	/*
644c1e8d7c6SMichel Lespinasse 	 * vm_flags and vm_page_prot are protected by the mmap_lock
6451da177e4SLinus Torvalds 	 * held in write mode.
6461da177e4SLinus Torvalds 	 */
64760081bf1SSuren Baghdasaryan 	vma_start_write(vma);
6481c71222eSSuren Baghdasaryan 	vm_flags_reset(vma, newflags);
649eb309ec8SDavid Hildenbrand 	if (vma_wants_manual_pte_write_upgrade(vma))
650eb309ec8SDavid Hildenbrand 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
65164e45507SPeter Feiner 	vma_set_page_prot(vma);
652d08b3851SPeter Zijlstra 
6531ef488edSDavid Hildenbrand 	change_protection(tlb, vma, start, end, mm_cp_flags);
6547da4d641SPeter Zijlstra 
6559b914329SLorenzo Stoakes 	if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT))
6569b914329SLorenzo Stoakes 		vm_unacct_memory(nrpages);
6579b914329SLorenzo Stoakes 
65836f88188SKirill A. Shutemov 	/*
65936f88188SKirill A. Shutemov 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
66036f88188SKirill A. Shutemov 	 * fault on access.
66136f88188SKirill A. Shutemov 	 */
66236f88188SKirill A. Shutemov 	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
66336f88188SKirill A. Shutemov 			(newflags & VM_WRITE)) {
66436f88188SKirill A. Shutemov 		populate_vma_page_range(vma, start, end, NULL);
66536f88188SKirill A. Shutemov 	}
66636f88188SKirill A. Shutemov 
66784638335SKonstantin Khlebnikov 	vm_stat_account(mm, oldflags, -nrpages);
66884638335SKonstantin Khlebnikov 	vm_stat_account(mm, newflags, nrpages);
66963bfd738SPekka Enberg 	perf_event_mmap(vma);
6701da177e4SLinus Torvalds 	return 0;
6711da177e4SLinus Torvalds 
6721da177e4SLinus Torvalds fail:
6731da177e4SLinus Torvalds 	vm_unacct_memory(charged);
6741da177e4SLinus Torvalds 	return error;
6751da177e4SLinus Torvalds }
6761da177e4SLinus Torvalds 
6777d06d9c9SDave Hansen /*
6787d06d9c9SDave Hansen  * pkey==-1 when doing a legacy mprotect()
6797d06d9c9SDave Hansen  */
6807d06d9c9SDave Hansen static int do_mprotect_pkey(unsigned long start, size_t len,
6817d06d9c9SDave Hansen 		unsigned long prot, int pkey)
6821da177e4SLinus Torvalds {
68362b5f7d0SDave Hansen 	unsigned long nstart, end, tmp, reqprot;
6841da177e4SLinus Torvalds 	struct vm_area_struct *vma, *prev;
68548725bbcSXiu Jianfeng 	int error;
6861da177e4SLinus Torvalds 	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
687f138556dSPiotr Kwapulinski 	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
688f138556dSPiotr Kwapulinski 				(prot & PROT_READ);
6894a18419fSNadav Amit 	struct mmu_gather tlb;
6902286a691SLiam R. Howlett 	struct vma_iterator vmi;
691f138556dSPiotr Kwapulinski 
692057d3389SAndrey Konovalov 	start = untagged_addr(start);
693057d3389SAndrey Konovalov 
6941da177e4SLinus Torvalds 	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
6951da177e4SLinus Torvalds 	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
6961da177e4SLinus Torvalds 		return -EINVAL;
6971da177e4SLinus Torvalds 
6981da177e4SLinus Torvalds 	if (start & ~PAGE_MASK)
6991da177e4SLinus Torvalds 		return -EINVAL;
7001da177e4SLinus Torvalds 	if (!len)
7011da177e4SLinus Torvalds 		return 0;
7021da177e4SLinus Torvalds 	len = PAGE_ALIGN(len);
7031da177e4SLinus Torvalds 	end = start + len;
7041da177e4SLinus Torvalds 	if (end <= start)
7051da177e4SLinus Torvalds 		return -ENOMEM;
7069035cf9aSKhalid Aziz 	if (!arch_validate_prot(prot, start))
7071da177e4SLinus Torvalds 		return -EINVAL;
7081da177e4SLinus Torvalds 
7091da177e4SLinus Torvalds 	reqprot = prot;
7101da177e4SLinus Torvalds 
711d8ed45c5SMichel Lespinasse 	if (mmap_write_lock_killable(current->mm))
712dc0ef0dfSMichal Hocko 		return -EINTR;
7131da177e4SLinus Torvalds 
714e8c24d3aSDave Hansen 	/*
715e8c24d3aSDave Hansen 	 * If userspace did not allocate the pkey, do not let
716e8c24d3aSDave Hansen 	 * them use it here.
717e8c24d3aSDave Hansen 	 */
718e8c24d3aSDave Hansen 	error = -EINVAL;
719e8c24d3aSDave Hansen 	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
720e8c24d3aSDave Hansen 		goto out;
721e8c24d3aSDave Hansen 
7222286a691SLiam R. Howlett 	vma_iter_init(&vmi, current->mm, start);
7232286a691SLiam R. Howlett 	vma = vma_find(&vmi, end);
7241da177e4SLinus Torvalds 	error = -ENOMEM;
7251da177e4SLinus Torvalds 	if (!vma)
7261da177e4SLinus Torvalds 		goto out;
7276af5fa0dSLiu Song 
7281da177e4SLinus Torvalds 	if (unlikely(grows & PROT_GROWSDOWN)) {
7291da177e4SLinus Torvalds 		if (vma->vm_start >= end)
7301da177e4SLinus Torvalds 			goto out;
7311da177e4SLinus Torvalds 		start = vma->vm_start;
7321da177e4SLinus Torvalds 		error = -EINVAL;
7331da177e4SLinus Torvalds 		if (!(vma->vm_flags & VM_GROWSDOWN))
7341da177e4SLinus Torvalds 			goto out;
7357d12efaeSAndrew Morton 	} else {
7361da177e4SLinus Torvalds 		if (vma->vm_start > start)
7371da177e4SLinus Torvalds 			goto out;
7381da177e4SLinus Torvalds 		if (unlikely(grows & PROT_GROWSUP)) {
7391da177e4SLinus Torvalds 			end = vma->vm_end;
7401da177e4SLinus Torvalds 			error = -EINVAL;
7411da177e4SLinus Torvalds 			if (!(vma->vm_flags & VM_GROWSUP))
7421da177e4SLinus Torvalds 				goto out;
7431da177e4SLinus Torvalds 		}
7441da177e4SLinus Torvalds 	}
7456af5fa0dSLiu Song 
7462286a691SLiam R. Howlett 	prev = vma_prev(&vmi);
7471da177e4SLinus Torvalds 	if (start > vma->vm_start)
7481da177e4SLinus Torvalds 		prev = vma;
7491da177e4SLinus Torvalds 
7504a18419fSNadav Amit 	tlb_gather_mmu(&tlb, current->mm);
7512286a691SLiam R. Howlett 	nstart = start;
7522286a691SLiam R. Howlett 	tmp = vma->vm_start;
7532286a691SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
754a8502b67SDave Hansen 		unsigned long mask_off_old_flags;
7551da177e4SLinus Torvalds 		unsigned long newflags;
7567d06d9c9SDave Hansen 		int new_vma_pkey;
7571da177e4SLinus Torvalds 
7582286a691SLiam R. Howlett 		if (vma->vm_start != tmp) {
7592286a691SLiam R. Howlett 			error = -ENOMEM;
7602286a691SLiam R. Howlett 			break;
7612286a691SLiam R. Howlett 		}
7621da177e4SLinus Torvalds 
763f138556dSPiotr Kwapulinski 		/* Does the application expect PROT_READ to imply PROT_EXEC? */
764f138556dSPiotr Kwapulinski 		if (rier && (vma->vm_flags & VM_MAYEXEC))
765f138556dSPiotr Kwapulinski 			prot |= PROT_EXEC;
766f138556dSPiotr Kwapulinski 
767a8502b67SDave Hansen 		/*
768a8502b67SDave Hansen 		 * Each mprotect() call explicitly passes r/w/x permissions.
769a8502b67SDave Hansen 		 * If a permission is not passed to mprotect(), it must be
770a8502b67SDave Hansen 		 * cleared from the VMA.
771a8502b67SDave Hansen 		 */
772e39ee675SKefeng Wang 		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;
773a8502b67SDave Hansen 
7747d06d9c9SDave Hansen 		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
7757d06d9c9SDave Hansen 		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
776a8502b67SDave Hansen 		newflags |= (vma->vm_flags & ~mask_off_old_flags);
7771da177e4SLinus Torvalds 
7787e2cff42SPaolo 'Blaisorblade' Giarrusso 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
7796cb4d9a2SAnshuman Khandual 		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
7801da177e4SLinus Torvalds 			error = -EACCES;
7814a18419fSNadav Amit 			break;
7821da177e4SLinus Torvalds 		}
7831da177e4SLinus Torvalds 
784b507808eSJoey Gouly 		if (map_deny_write_exec(vma, newflags)) {
785b507808eSJoey Gouly 			error = -EACCES;
7863d27a95bSJoey Gouly 			break;
787b507808eSJoey Gouly 		}
788b507808eSJoey Gouly 
789c462ac28SCatalin Marinas 		/* Allow architectures to sanity-check the new flags */
790c462ac28SCatalin Marinas 		if (!arch_validate_flags(newflags)) {
791c462ac28SCatalin Marinas 			error = -EINVAL;
7924a18419fSNadav Amit 			break;
793c462ac28SCatalin Marinas 		}
794c462ac28SCatalin Marinas 
7951da177e4SLinus Torvalds 		error = security_file_mprotect(vma, reqprot, prot);
7961da177e4SLinus Torvalds 		if (error)
7974a18419fSNadav Amit 			break;
7981da177e4SLinus Torvalds 
7991da177e4SLinus Torvalds 		tmp = vma->vm_end;
8001da177e4SLinus Torvalds 		if (tmp > end)
8011da177e4SLinus Torvalds 			tmp = end;
80295bb7c42SSean Christopherson 
803dbf53f75STianjia Zhang 		if (vma->vm_ops && vma->vm_ops->mprotect) {
80495bb7c42SSean Christopherson 			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
80595bb7c42SSean Christopherson 			if (error)
8064a18419fSNadav Amit 				break;
807dbf53f75STianjia Zhang 		}
80895bb7c42SSean Christopherson 
8092286a691SLiam R. Howlett 		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
8101da177e4SLinus Torvalds 		if (error)
8114a18419fSNadav Amit 			break;
81295bb7c42SSean Christopherson 
8132fcd07b7SLiam R. Howlett 		tmp = vma_iter_end(&vmi);
8141da177e4SLinus Torvalds 		nstart = tmp;
815f138556dSPiotr Kwapulinski 		prot = reqprot;
8161da177e4SLinus Torvalds 	}
8174a18419fSNadav Amit 	tlb_finish_mmu(&tlb);
8182286a691SLiam R. Howlett 
81977795f90SLiam R. Howlett 	if (!error && tmp < end)
8202286a691SLiam R. Howlett 		error = -ENOMEM;
8212286a691SLiam R. Howlett 
8221da177e4SLinus Torvalds out:
823d8ed45c5SMichel Lespinasse 	mmap_write_unlock(current->mm);
8241da177e4SLinus Torvalds 	return error;
8251da177e4SLinus Torvalds }
8267d06d9c9SDave Hansen 
8277d06d9c9SDave Hansen SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
8287d06d9c9SDave Hansen 		unsigned long, prot)
8297d06d9c9SDave Hansen {
8307d06d9c9SDave Hansen 	return do_mprotect_pkey(start, len, prot, -1);
8317d06d9c9SDave Hansen }
8327d06d9c9SDave Hansen 
833c7142aeaSHeiko Carstens #ifdef CONFIG_ARCH_HAS_PKEYS
834c7142aeaSHeiko Carstens 
8357d06d9c9SDave Hansen SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
8367d06d9c9SDave Hansen 		unsigned long, prot, int, pkey)
8377d06d9c9SDave Hansen {
8387d06d9c9SDave Hansen 	return do_mprotect_pkey(start, len, prot, pkey);
8397d06d9c9SDave Hansen }
840e8c24d3aSDave Hansen 
841e8c24d3aSDave Hansen SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
842e8c24d3aSDave Hansen {
843e8c24d3aSDave Hansen 	int pkey;
844e8c24d3aSDave Hansen 	int ret;
845e8c24d3aSDave Hansen 
846e8c24d3aSDave Hansen 	/* No flags supported yet. */
847e8c24d3aSDave Hansen 	if (flags)
848e8c24d3aSDave Hansen 		return -EINVAL;
849e8c24d3aSDave Hansen 	/* check for unsupported init values */
850e8c24d3aSDave Hansen 	if (init_val & ~PKEY_ACCESS_MASK)
851e8c24d3aSDave Hansen 		return -EINVAL;
852e8c24d3aSDave Hansen 
853d8ed45c5SMichel Lespinasse 	mmap_write_lock(current->mm);
854e8c24d3aSDave Hansen 	pkey = mm_pkey_alloc(current->mm);
855e8c24d3aSDave Hansen 
856e8c24d3aSDave Hansen 	ret = -ENOSPC;
857e8c24d3aSDave Hansen 	if (pkey == -1)
858e8c24d3aSDave Hansen 		goto out;
859e8c24d3aSDave Hansen 
860e8c24d3aSDave Hansen 	ret = arch_set_user_pkey_access(current, pkey, init_val);
861e8c24d3aSDave Hansen 	if (ret) {
862e8c24d3aSDave Hansen 		mm_pkey_free(current->mm, pkey);
863e8c24d3aSDave Hansen 		goto out;
864e8c24d3aSDave Hansen 	}
865e8c24d3aSDave Hansen 	ret = pkey;
866e8c24d3aSDave Hansen out:
867d8ed45c5SMichel Lespinasse 	mmap_write_unlock(current->mm);
868e8c24d3aSDave Hansen 	return ret;
869e8c24d3aSDave Hansen }
870e8c24d3aSDave Hansen 
871e8c24d3aSDave Hansen SYSCALL_DEFINE1(pkey_free, int, pkey)
872e8c24d3aSDave Hansen {
873e8c24d3aSDave Hansen 	int ret;
874e8c24d3aSDave Hansen 
875d8ed45c5SMichel Lespinasse 	mmap_write_lock(current->mm);
876e8c24d3aSDave Hansen 	ret = mm_pkey_free(current->mm, pkey);
877d8ed45c5SMichel Lespinasse 	mmap_write_unlock(current->mm);
878e8c24d3aSDave Hansen 
879e8c24d3aSDave Hansen 	/*
880f0953a1bSIngo Molnar 	 * We could provide warnings or errors if any VMA still
881e8c24d3aSDave Hansen 	 * has the pkey set here.
882e8c24d3aSDave Hansen 	 */
883e8c24d3aSDave Hansen 	return ret;
884e8c24d3aSDave Hansen }
885c7142aeaSHeiko Carstens 
886c7142aeaSHeiko Carstens #endif /* CONFIG_ARCH_HAS_PKEYS */
887