/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <asm/pgalloc.h>

/*
 * Maximum number of bytes the stack start may be shifted by
 * randomization.  Returns 0 when randomization is disabled, either
 * globally (PF_RANDOMIZE clear) or by the task's personality
 * (ADDR_NO_RANDOMIZE).
 */
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

/*
 * Decide whether to use the legacy bottom-up mmap layout: forced by
 * the ADDR_COMPAT_LAYOUT personality bit, required when the stack
 * rlimit is unlimited (no fixed gap exists to size a top-down area
 * from), or selected system-wide via the legacy_va_layout sysctl.
 */
static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

/*
 * Random page-aligned offset applied to mmap_base, or 0 when
 * randomization is disabled for this task.
 */
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

/*
 * Compute the top-down mmap base: STACK_TOP minus the (clamped) stack
 * gap, the maximum stack randomization, and the mmap randomization.
 * The gap is taken from the stack rlimit and clamped to
 * [MIN_GAP, MAX_GAP] so the stack always has room and the mmap area
 * never shrinks below a sixth of the address space.
 */
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#else

/*
 * Upgrade the page table to 4 levels before a fixed mapping that
 * reaches beyond the current address-space limit.  Returns 0 when no
 * upgrade is needed (compat task, or TASK_SIZE already covers
 * 1UL << 53) or on success; otherwise the error from
 * crst_table_upgrade().  Non-MAP_FIXED requests are treated as
 * addr = 0, i.e. they never trigger an upgrade here (the retry logic
 * in the get_unmapped_area wrappers below handles that case).
 * NOTE(review): presumably called from the arch mmap checking hook —
 * confirm against the caller.
 */
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	int rc;

	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE) {
		rc = crst_table_upgrade(current->mm, 1UL << 53);
		if (rc)
			return rc;
		update_mm(current->mm, current);
	}
	return 0;
}

/*
 * Bottom-up get_unmapped_area wrapper: on -ENOMEM, upgrade the page
 * table to 4 levels (when possible) and retry once.  A page-aligned
 * return value is a successful address; anything else is a negative
 * error code.
 */
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

/*
 * Top-down counterpart of s390_get_unmapped_area(): same
 * upgrade-and-retry logic around arch_get_unmapped_area_topdown().
 */
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		update_mm(mm, current);
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

#endif