/*-
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_unix.c 1.1 89/11/07$
 *
 *	@(#)vm_unix.c	8.1 (Berkeley) 6/11/93
 */

#include "opt_compat.h"

/*
 * Traditional sbrk/grow interface to VM
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#ifndef _SYS_SYSPROTO_H_
struct obreak_args {
        char *nsize;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_obreak(td, uap)
        struct thread *td;
        struct obreak_args *uap;
{
        struct vmspace *vm = td->td_proc->p_vmspace;
        vm_map_t map = &vm->vm_map;
        vm_offset_t new, old, base;
        rlim_t datalim, lmemlim, vmemlim;
        int prot, rv;
        int error = 0;
        boolean_t do_map_wirefuture;

        PROC_LOCK(td->td_proc);
        datalim = lim_cur(td->td_proc, RLIMIT_DATA);
        lmemlim = lim_cur(td->td_proc, RLIMIT_MEMLOCK);
        vmemlim = lim_cur(td->td_proc, RLIMIT_VMEM);
        PROC_UNLOCK(td->td_proc);

        do_map_wirefuture = FALSE;
        new = round_page((vm_offset_t)uap->nsize);
        vm_map_lock(map);

        base = round_page((vm_offset_t) vm->vm_daddr);
        old = base + ctob(vm->vm_dsize);
        if (new > base) {
                /*
                 * Check the resource limit, but allow a process to reduce
                 * its usage, even if it remains over the limit.
                 */
                if (new - base > datalim && new > old) {
                        error = ENOMEM;
                        goto done;
                }
                if (new > vm_map_max(map)) {
                        error = ENOMEM;
                        goto done;
                }
        } else if (new < base) {
                /*
                 * This is simply an invalid value.  If someone wants to
                 * do fancy address space manipulations, mmap and munmap
                 * can do most of what the user would want.
                 */
                error = EINVAL;
                goto done;
        }
        if (new > old) {
                if (!old_mlock && map->flags & MAP_WIREFUTURE) {
                        if (ptoa(pmap_wired_count(map->pmap)) +
                            (new - old) > lmemlim) {
                                error = ENOMEM;
                                goto done;
                        }
                }
                if (map->size + (new - old) > vmemlim) {
                        error = ENOMEM;
                        goto done;
                }
#ifdef RACCT
                PROC_LOCK(td->td_proc);
                error = racct_set(td->td_proc, RACCT_DATA, new - base);
                if (error != 0) {
                        PROC_UNLOCK(td->td_proc);
                        error = ENOMEM;
                        goto done;
                }
                error = racct_set(td->td_proc, RACCT_VMEM,
                    map->size + (new - old));
                if (error != 0) {
                        racct_set_force(td->td_proc, RACCT_DATA, old - base);
                        PROC_UNLOCK(td->td_proc);
                        error = ENOMEM;
                        goto done;
                }
                if (!old_mlock && map->flags & MAP_WIREFUTURE) {
                        error = racct_set(td->td_proc, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)) + (new - old));
                        if (error != 0) {
                                racct_set_force(td->td_proc, RACCT_DATA,
                                    old - base);
                                racct_set_force(td->td_proc, RACCT_VMEM,
                                    map->size);
                                PROC_UNLOCK(td->td_proc);
                                error = ENOMEM;
                                goto done;
                        }
                }
                PROC_UNLOCK(td->td_proc);
#endif
                prot = VM_PROT_RW;
#ifdef COMPAT_FREEBSD32
#if defined(__amd64__)
                if (i386_read_exec && SV_PROC_FLAG(td->td_proc, SV_ILP32))
                        prot |= VM_PROT_EXECUTE;
#endif
#endif
                rv = vm_map_insert(map, NULL, 0, old, new, prot, VM_PROT_ALL, 0);
                if (rv != KERN_SUCCESS) {
#ifdef RACCT
                        PROC_LOCK(td->td_proc);
                        racct_set_force(td->td_proc, RACCT_DATA, old - base);
                        racct_set_force(td->td_proc, RACCT_VMEM, map->size);
                        if (!old_mlock && map->flags & MAP_WIREFUTURE) {
                                racct_set_force(td->td_proc, RACCT_MEMLOCK,
                                    ptoa(pmap_wired_count(map->pmap)));
                        }
                        PROC_UNLOCK(td->td_proc);
#endif
                        error = ENOMEM;
                        goto done;
                }
                vm->vm_dsize += btoc(new - old);
                /*
                 * Handle the MAP_WIREFUTURE case for legacy applications,
                 * by marking the newly mapped range of pages as wired.
                 * We are not required to perform a corresponding
                 * vm_map_unwire() before vm_map_delete() below, as
                 * it will forcibly unwire the pages in the range.
                 *
                 * XXX If the pages cannot be wired, no error is returned.
                 */
                if ((map->flags & MAP_WIREFUTURE) == MAP_WIREFUTURE) {
                        if (bootverbose)
                                printf("obreak: MAP_WIREFUTURE set\n");
                        do_map_wirefuture = TRUE;
                }
        } else if (new < old) {
                rv = vm_map_delete(map, new, old);
                if (rv != KERN_SUCCESS) {
                        error = ENOMEM;
                        goto done;
                }
                vm->vm_dsize -= btoc(old - new);
#ifdef RACCT
                PROC_LOCK(td->td_proc);
                racct_set_force(td->td_proc, RACCT_DATA, new - base);
                racct_set_force(td->td_proc, RACCT_VMEM, map->size);
                if (!old_mlock && map->flags & MAP_WIREFUTURE) {
                        racct_set_force(td->td_proc, RACCT_MEMLOCK,
                            ptoa(pmap_wired_count(map->pmap)));
                }
                PROC_UNLOCK(td->td_proc);
#endif
        }
done:
        vm_map_unlock(map);

        if (do_map_wirefuture)
                (void) vm_map_wire(map, old, new,
                    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ovadvise_args {
        int anom;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sys_ovadvise(td, uap)
        struct thread *td;
        struct ovadvise_args *uap;
{
        /* START_GIANT_OPTIONAL */
        /* END_GIANT_OPTIONAL */
        return (EINVAL);
}
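
/*
 * Illustrative userland sketch (kept inside a comment; it is not part of
 * the kernel build and not from the original file): the traditional break
 * interface serviced by sys_obreak() above is normally reached through the
 * libc brk(2)/sbrk(2) wrappers.  The minimal, hypothetical program below
 * grows and then restores the data segment; the 64 KB increment is an
 * arbitrary choice, and the grow step can fail with ENOMEM when it would
 * exceed RLIMIT_DATA or RLIMIT_VMEM, exactly the checks performed above.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		void *oldbrk, *p;
 *
 *		oldbrk = sbrk(0);		// current break address
 *		p = sbrk(64 * 1024);		// ask sys_obreak() to grow by 64 KB
 *		if (p == (void *)-1) {
 *			perror("sbrk");		// e.g. ENOMEM from the limit checks
 *			return (1);
 *		}
 *		printf("break moved from %p to %p\n", oldbrk, sbrk(0));
 *		(void)brk(oldbrk);		// shrink back; takes the vm_map_delete() path
 *		return (0);
 *	}
 */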