/*-
 * Copyright (c) 2004 Tim J. Robbins
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 2000 Marcel Moolenaar
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/capsicum.h>
#include <sys/file.h>
#include <sys/imgact.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>

#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_persona.h>
#include <compat/linux/linux_util.h>

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

#if defined(__amd64__)
static void linux_fixup_prot(struct thread *td, int *prot);
#endif

static int
linux_mmap_check_fp(struct file *fp, int flags, int prot, int maxprot)
{

        /* Linux mmap() just fails for O_WRONLY files */
        if ((fp->f_flag & FREAD) == 0)
                return (EACCES);

        return (0);
}

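/*
 * Translate Linux mmap(2) arguments (protection, flags, MAP_GROWSDOWN stack
 * handling) into their FreeBSD equivalents and perform the mapping via
 * kern_mmap_req().
 */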
int
linux_mmap_common(struct thread *td, uintptr_t addr, size_t len, int prot,
    int flags, int fd, off_t pos)
{
        struct mmap_req mr, mr_fixed;
        struct proc *p = td->td_proc;
        struct vmspace *vms = td->td_proc->p_vmspace;
        int bsd_flags, error;
        struct file *fp;

        LINUX_CTR6(mmap2, "0x%lx, %ld, %ld, 0x%08lx, %ld, 0x%lx",
            addr, len, prot, flags, fd, pos);

        error = 0;
        bsd_flags = 0;
        fp = NULL;

        /*
         * Linux mmap(2):
         * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
         */
        if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
                return (EINVAL);

        if (flags & LINUX_MAP_SHARED)
                bsd_flags |= MAP_SHARED;
        if (flags & LINUX_MAP_PRIVATE)
                bsd_flags |= MAP_PRIVATE;
        if (flags & LINUX_MAP_FIXED)
                bsd_flags |= MAP_FIXED;
        if (flags & LINUX_MAP_ANON) {
                /* Enforce pos to be on page boundary, then ignore. */
                if ((pos & PAGE_MASK) != 0)
                        return (EINVAL);
                pos = 0;
                bsd_flags |= MAP_ANON;
        } else
                bsd_flags |= MAP_NOSYNC;
        if (flags & LINUX_MAP_GROWSDOWN)
                bsd_flags |= MAP_STACK;

#if defined(__amd64__)
        /*
         * According to the Linux mmap(2) man page, "MAP_32BIT flag
         * is ignored when MAP_FIXED is set."
         */
        if ((flags & LINUX_MAP_32BIT) && (flags & LINUX_MAP_FIXED) == 0)
                bsd_flags |= MAP_32BIT;

        /*
         * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
         * on Linux/i386 if the binary requires an executable stack.
         * We do this only for IA32 emulation as on native i386 this does not
         * make sense without PAE.
         *
         * XXX. Linux checks that the file system is not mounted with noexec.
         */
        linux_fixup_prot(td, &prot);
#endif

        /* Linux does not check the file descriptor when MAP_ANONYMOUS is set. */
        fd = (bsd_flags & MAP_ANON) ? -1 : fd;
        if (flags & LINUX_MAP_GROWSDOWN) {
                /*
                 * The Linux MAP_GROWSDOWN option does not limit auto
                 * growth of the region.  Linux mmap with this option
                 * takes as addr the initial BOS, and as len, the initial
                 * region size.  It can then grow down from addr without
                 * limit.  However, Linux threads have an implicit internal
                 * limit to stack size of STACK_SIZE.  It is just not
                 * enforced explicitly in Linux.  But, here we impose
                 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
                 * region, since we can do this with our mmap.
                 *
                 * Our mmap with MAP_STACK takes addr as the maximum
                 * downsize limit on BOS, and as len the max size of
                 * the region.  It then maps the top SGROWSIZ bytes,
                 * and auto grows the region down, up to the limit
                 * in addr.
                 *
                 * If we don't use the MAP_STACK option, the effect
                 * of this code is to allocate a stack region of a
                 * fixed size of (STACK_SIZE - GUARD_SIZE).
                 */

                if ((caddr_t)addr + len > vms->vm_maxsaddr) {
                        /*
                         * Some Linux apps will attempt to mmap
                         * thread stacks near the top of their
                         * address space.  If their TOS is greater
                         * than vm_maxsaddr, vm_map_growstack()
                         * will confuse the thread stack with the
                         * process stack and deliver a SEGV if they
                         * attempt to grow the thread stack past their
                         * current stacksize rlimit.  To avoid this,
                         * adjust vm_maxsaddr upwards to reflect
                         * the current stacksize rlimit rather
                         * than the maximum possible stacksize.
                         * It would be better to adjust the
                         * mmap'ed region, but some apps do not check
                         * mmap's return value.
                         */
                        PROC_LOCK(p);
                        vms->vm_maxsaddr = (char *)p->p_sysent->sv_usrstack -
                            lim_cur_proc(p, RLIMIT_STACK);
                        PROC_UNLOCK(p);
                }

                /*
                 * This gives us our maximum stack size and a new BOS.
                 * If we're using VM_STACK, then mmap will just map
                 * the top SGROWSIZ bytes, and let the stack grow down
                 * to the limit at BOS.  If we're not using VM_STACK
                 * we map the full stack, since we don't have a way
                 * to autogrow it.
                 */
                if (len <= STACK_SIZE - GUARD_SIZE) {
                        addr = addr - (STACK_SIZE - GUARD_SIZE - len);
                        len = STACK_SIZE - GUARD_SIZE;
                }
        }

        /*
         * FreeBSD is free to ignore the address hint if MAP_FIXED wasn't
         * passed.  However, some Linux applications, like the ART runtime,
         * depend on the hint.  If MAP_FIXED wasn't passed, but the
         * address is not zero, try with MAP_FIXED and MAP_EXCL first,
         * and fall back to the normal behaviour if that fails.
         */
        mr = (struct mmap_req) {
                .mr_hint = addr,
                .mr_len = len,
                .mr_prot = prot,
                .mr_flags = bsd_flags,
                .mr_fd = fd,
                .mr_pos = pos,
                .mr_check_fp_fn = linux_mmap_check_fp,
        };
        if (addr != 0 && (bsd_flags & MAP_FIXED) == 0 &&
            (bsd_flags & MAP_EXCL) == 0) {
                mr_fixed = mr;
                mr_fixed.mr_flags |= MAP_FIXED | MAP_EXCL;
                error = kern_mmap_req(td, &mr_fixed);
                if (error == 0)
                        goto out;
        }

        error = kern_mmap_req(td, &mr);
out:
        LINUX_CTR2(mmap2, "return: %d (%p)", error, td->td_retval[0]);

        return (error);
}

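/*
 * Strip the Linux-specific PROT_GROWSDOWN/PROT_GROWSUP bits and apply the
 * remaining protection via kern_mprotect().
 */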
int
linux_mprotect_common(struct thread *td, uintptr_t addr, size_t len, int prot)
{

        /* XXX Ignore PROT_GROWSDOWN and PROT_GROWSUP for now. */
        prot &= ~(LINUX_PROT_GROWSDOWN | LINUX_PROT_GROWSUP);
        if ((prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) != 0)
                return (EINVAL);

#if defined(__amd64__)
        linux_fixup_prot(td, &prot);
#endif
        return (kern_mprotect(td, addr, len, prot));
}

/*
 * Implement Linux madvise(MADV_DONTNEED), which has unusual semantics: for
 * anonymous memory, pages in the range are immediately discarded.
 */
static int
linux_madvise_dontneed(struct thread *td, vm_offset_t start, vm_offset_t end)
{
        vm_map_t map;
        vm_map_entry_t entry;
        vm_object_t backing_object, object;
        vm_offset_t estart, eend;
        vm_pindex_t pstart, pend;
        int error;

        map = &td->td_proc->p_vmspace->vm_map;

        if (!vm_map_range_valid(map, start, end))
                return (EINVAL);
        start = trunc_page(start);
        end = round_page(end);

        error = 0;
        vm_map_lock_read(map);
        if (!vm_map_lookup_entry(map, start, &entry))
                entry = vm_map_entry_succ(entry);
        for (; entry->start < end; entry = vm_map_entry_succ(entry)) {
                if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
                        continue;

                if (entry->wired_count != 0) {
                        error = EINVAL;
                        break;
                }

                object = entry->object.vm_object;
                if (object == NULL)
                        continue;

                pstart = OFF_TO_IDX(entry->offset);
                if (start > entry->start) {
                        pstart += atop(start - entry->start);
                        estart = start;
                } else {
                        estart = entry->start;
                }
                pend = OFF_TO_IDX(entry->offset) +
                    atop(entry->end - entry->start);
                if (entry->end > end) {
                        pend -= atop(entry->end - end);
                        eend = end;
                } else {
                        eend = entry->end;
                }

                if ((object->flags & (OBJ_ANON | OBJ_ONEMAPPING)) ==
                    (OBJ_ANON | OBJ_ONEMAPPING)) {
                        /*
                         * Singly-mapped anonymous memory is discarded.  This
                         * does not match Linux's semantics when the object
                         * belongs to a shadow chain of length > 1, since
                         * subsequent faults may retrieve pages from an
                         * intermediate anonymous object.  However, handling
                         * this case correctly introduces a fair bit of
                         * complexity.
                         */
                        VM_OBJECT_WLOCK(object);
                        if ((object->flags & OBJ_ONEMAPPING) != 0) {
                                vm_object_collapse(object);
                                vm_object_page_remove(object, pstart, pend, 0);
                                backing_object = object->backing_object;
                                if (backing_object != NULL &&
                                    (backing_object->flags & OBJ_ANON) != 0)
                                        linux_msg(td,
                                            "possibly incorrect MADV_DONTNEED");
                                VM_OBJECT_WUNLOCK(object);
                                continue;
                        }
                        VM_OBJECT_WUNLOCK(object);
                }

                /*
                 * Handle shared mappings.  Remove them outright instead of
                 * calling pmap_advise(), for consistency with Linux.
                 */
                pmap_remove(map->pmap, estart, eend);
                vm_object_madvise(object, pstart, pend, MADV_DONTNEED);
        }
        vm_map_unlock_read(map);

        return (error);
}

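/*
 * Dispatch Linux madvise(2) behaviors to their closest FreeBSD counterparts;
 * advice values with no reasonable equivalent are reported and rejected with
 * EINVAL.
 */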
int
linux_madvise_common(struct thread *td, uintptr_t addr, size_t len, int behav)
{

        switch (behav) {
        case LINUX_MADV_NORMAL:
                return (kern_madvise(td, addr, len, MADV_NORMAL));
        case LINUX_MADV_RANDOM:
                return (kern_madvise(td, addr, len, MADV_RANDOM));
        case LINUX_MADV_SEQUENTIAL:
                return (kern_madvise(td, addr, len, MADV_SEQUENTIAL));
        case LINUX_MADV_WILLNEED:
                return (kern_madvise(td, addr, len, MADV_WILLNEED));
        case LINUX_MADV_DONTNEED:
                return (linux_madvise_dontneed(td, addr, addr + len));
        case LINUX_MADV_FREE:
                return (kern_madvise(td, addr, len, MADV_FREE));
        case LINUX_MADV_REMOVE:
                linux_msg(curthread, "unsupported madvise MADV_REMOVE");
                return (EINVAL);
        case LINUX_MADV_DONTFORK:
                return (kern_minherit(td, addr, len, INHERIT_NONE));
        case LINUX_MADV_DOFORK:
                return (kern_minherit(td, addr, len, INHERIT_COPY));
        case LINUX_MADV_MERGEABLE:
                linux_msg(curthread, "unsupported madvise MADV_MERGEABLE");
                return (EINVAL);
        case LINUX_MADV_UNMERGEABLE:
                /* We don't merge anyway. */
                return (0);
        case LINUX_MADV_HUGEPAGE:
                /* Ignored; on FreeBSD huge pages are always on. */
                return (0);
        case LINUX_MADV_NOHUGEPAGE:
                linux_msg(curthread, "unsupported madvise MADV_NOHUGEPAGE");
                return (EINVAL);
        case LINUX_MADV_DONTDUMP:
                return (kern_madvise(td, addr, len, MADV_NOCORE));
        case LINUX_MADV_DODUMP:
                return (kern_madvise(td, addr, len, MADV_CORE));
        case LINUX_MADV_WIPEONFORK:
                return (kern_minherit(td, addr, len, INHERIT_ZERO));
        case LINUX_MADV_KEEPONFORK:
                return (kern_minherit(td, addr, len, INHERIT_COPY));
        case LINUX_MADV_HWPOISON:
                linux_msg(curthread, "unsupported madvise MADV_HWPOISON");
                return (EINVAL);
        case LINUX_MADV_SOFT_OFFLINE:
                linux_msg(curthread, "unsupported madvise MADV_SOFT_OFFLINE");
                return (EINVAL);
        default:
                linux_msg(curthread, "unsupported madvise behav %d", behav);
                return (EINVAL);
        }
}

#if defined(__amd64__)
static void
linux_fixup_prot(struct thread *td, int *prot)
{
        struct linux_pemuldata *pem;

        if (SV_PROC_FLAG(td->td_proc, SV_ILP32) && *prot & PROT_READ) {
                pem = pem_find(td->td_proc);
                if (pem->persona & LINUX_READ_IMPLIES_EXEC)
                        *prot |= PROT_EXEC;
        }
}
#endif