/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

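/*
 * mprotect_fixup() applies the new protection to the range
 * [start, end) within @vma: it charges any additional commit needed
 * when a private mapping becomes writable, tries to merge with the
 * neighbouring vmas, splits @vma where the range covers only part of
 * it, and finally rewrites the page table entries through
 * change_protection() above (or hugetlb_change_protection() for
 * hugetlb mappings).
 */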
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, newprot);
	else
		change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

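/*
 * sys_mprotect() validates the user-supplied range and protection
 * bits, then walks the vmas covering [start, start+len), applying
 * mprotect_fixup() to each in turn.  The VM_MAY* bits sit four bit
 * positions above the corresponding VM_* bits, which is why the
 * "newflags & ~(newflags >> 4)" test below rejects any requested
 * permission that the vma does not allow.
 */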
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP))	/* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
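
/*
 * Illustrative userspace sketch (not part of this file or of the
 * kernel build): the path above is what a call sequence such as
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mprotect(p, 4096, PROT_READ) < 0)
 *		perror("mprotect");
 *
 * ends up exercising: the C library's mprotect() wrapper enters
 * sys_mprotect(), which splits or merges the backing vma as needed
 * and rewrites its page table entries read-only.
 */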