// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;

	/*
	 * Can be called with only the mmap_sem for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_sem is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_mapcount(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_cache(page) && PageDirty(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				if (target_node == page_to_nid(page))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(vma->vm_mm, addr, pte, newpte);

				pages++;
			}

			if (is_write_device_private_entry(entry)) {
				pte_t newpte;

				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				make_device_private_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_pte_at(vma->vm_mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		p4d_t *p4d, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(vma, p4d, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

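/*
 * Walk the page tables for [addr, end) and apply @newprot to every
 * present entry, descending pgd -> p4d -> pud -> pmd -> pte. Returns
 * the number of entries actually updated, so the TLB flush below can
 * be skipped when nothing changed.
 */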
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	inc_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

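/*
 * The walk callbacks below implement the PROT_NONE PFN check used by
 * mprotect_fixup(): on architectures where arch_has_pfn_modify_check()
 * is true, every pte in a PFN mapping is validated with
 * pfn_modify_allowed() before any state is changed.
 */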
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/*
		 * newflags >> 4 shifts VM_MAY% in place of VM_%, so this
		 * rejects any requested permission whose corresponding
		 * VM_MAY* bit is clear in the VMA.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */
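
/*
 * Illustrative userspace sketch (not part of the kernel build): one way
 * the syscalls implemented above are typically exercised. pkey_alloc()
 * and pkey_mprotect() here are the glibc (>= 2.27) wrappers; error
 * handling is elided.
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	mprotect(buf, 4096, PROT_READ);
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(buf, 4096, PROT_READ | PROT_WRITE, pkey);
 *
 * pkey_mprotect() with pkey == -1 behaves like plain mprotect(), which
 * matches the legacy path through do_mprotect_pkey() above.
 */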