/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*-
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003-2005 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project in part by McAfee
 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
 * program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/mac.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#ifdef MAC_DEBUG
#define MPRINTF(a)	printf a
#else
#define MPRINTF(a)
#endif

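/*
 * Locking: the System V shared memory subsystem is still Giant-locked.
 * Each syscall entry point below acquires Giant (directly or via its
 * kern_*() backend), and the internal helpers assert that with
 * GIANT_REQUIRED.
 */
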
static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

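/*
 * Per-segment state bits, kept in shm_perm.mode next to the access
 * permission bits:
 *
 *	SHMSEG_FREE		slot in shmsegs[] is unused.
 *	SHMSEG_ALLOCATED	slot holds a live segment.
 *	SHMSEG_REMOVED		segment has been IPC_RMID'd but still has
 *				attachments; it is reclaimed on last detach.
 *	SHMSEG_WANTED		another thread sleeps in shmget_existing()
 *				waiting for this slot's setup to finish.
 */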
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_kernel *shmsegs;

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static struct shmid_kernel *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Maximum shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Minimum shared memory segment size");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
    "Number of shared memory identifiers");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
    "Number of segments per process");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Maximum number of pages available for shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "",
    "Current number of shared memory segments allocated");

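/*
 * The read-write knobs above may be changed at runtime with sysctl(8);
 * shmmni and shmseg are CTLFLAG_RDTUN and can only be set as loader
 * tunables.  For example (values are illustrative only):
 *
 *	sysctl kern.ipc.shmmax=67108864
 *	sysctl kern.ipc.shm_use_phys=1
 *	kern.ipc.shmmni=256		(in /boot/loader.conf)
 */
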
Peron "Current number of shared memory segments allocated"); 1983d903220SDoug Rabson 1993d903220SDoug Rabson static int 2003d903220SDoug Rabson shm_find_segment_by_key(key) 2013d903220SDoug Rabson key_t key; 2023d903220SDoug Rabson { 2033d903220SDoug Rabson int i; 2043d903220SDoug Rabson 205255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 206921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 207921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 208b618bb96SAlfred Perlstein return (i); 209b618bb96SAlfred Perlstein return (-1); 2103d903220SDoug Rabson } 2113d903220SDoug Rabson 212921d05b9SRobert Watson static struct shmid_kernel * 2132332251cSMax Khon shm_find_segment_by_shmid(int shmid) 2143d903220SDoug Rabson { 2153d903220SDoug Rabson int segnum; 216921d05b9SRobert Watson struct shmid_kernel *shmseg; 2173d903220SDoug Rabson 2183d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 219255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 220b618bb96SAlfred Perlstein return (NULL); 2213d903220SDoug Rabson shmseg = &shmsegs[segnum]; 222921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2232332251cSMax Khon (!shm_allow_removed && 224921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 225921d05b9SRobert Watson shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid)) 226b618bb96SAlfred Perlstein return (NULL); 227b618bb96SAlfred Perlstein return (shmseg); 2283d903220SDoug Rabson } 2293d903220SDoug Rabson 230921d05b9SRobert Watson static struct shmid_kernel * 2312332251cSMax Khon shm_find_segment_by_shmidx(int segnum) 232491dec93SMichael Reifenberger { 233921d05b9SRobert Watson struct shmid_kernel *shmseg; 234491dec93SMichael Reifenberger 235491dec93SMichael Reifenberger if (segnum < 0 || segnum >= shmalloced) 236b618bb96SAlfred Perlstein return (NULL); 237491dec93SMichael Reifenberger shmseg = &shmsegs[segnum]; 238921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2392332251cSMax Khon (!shm_allow_removed && 240921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0)) 241b618bb96SAlfred Perlstein return (NULL); 242b618bb96SAlfred Perlstein return (shmseg); 243491dec93SMichael Reifenberger } 244491dec93SMichael Reifenberger 2453d903220SDoug Rabson static void 2463d903220SDoug Rabson shm_deallocate_segment(shmseg) 247921d05b9SRobert Watson struct shmid_kernel *shmseg; 2483d903220SDoug Rabson { 2493d903220SDoug Rabson size_t size; 2503d903220SDoug Rabson 2510cddd8f0SMatthew Dillon GIANT_REQUIRED; 2520cddd8f0SMatthew Dillon 253921d05b9SRobert Watson vm_object_deallocate(shmseg->u.shm_internal); 254921d05b9SRobert Watson shmseg->u.shm_internal = NULL; 255921d05b9SRobert Watson size = round_page(shmseg->u.shm_segsz); 2563d903220SDoug Rabson shm_committed -= btoc(size); 2573d903220SDoug Rabson shm_nused--; 258921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 25914cedfc8SRobert Watson #ifdef MAC 26014cedfc8SRobert Watson mac_cleanup_sysv_shm(shmseg); 26114cedfc8SRobert Watson #endif 2623d903220SDoug Rabson } 2633d903220SDoug Rabson 2643d903220SDoug Rabson static int 2653db161e0SMatthew Dillon shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 2663d903220SDoug Rabson { 267921d05b9SRobert Watson struct shmid_kernel *shmseg; 2683d903220SDoug Rabson int segnum, result; 2693d903220SDoug Rabson size_t size; 2703d903220SDoug Rabson 2710cddd8f0SMatthew Dillon GIANT_REQUIRED; 272028f979dSDima Dorfman 2733d903220SDoug Rabson segnum = 
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->u.shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
#ifdef MAC
	struct shmid_kernel *shmsegptr;
#endif
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	error = mac_check_sysv_shmdt(td->td_ucred, shmsegptr);
	if (error != 0) {
		MPRINTF(("mac_check_sysv_shmdt returned %d\n", error));
		goto done2;
	}
#endif
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

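/*
 * Illustrative userland use of the interface implemented in this file
 * (example code, not part of the kernel):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 'x';
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */
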
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
#ifdef MAC
	error = mac_check_sysv_shmat(td->td_ucred, shmseg, shmflg);
	if (error != 0) {
		MPRINTF(("mac_check_sysv_shmat returned %d\n", error));
		goto done2;
	}
#endif
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->u.shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	vm_object_reference(shmseg->u.shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal,
	    0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->u.shm_internal);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

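/*
 * Old (COMPAT_43) form of shmctl() and the shmid_ds layout it operates
 * on.  Only IPC_STAT is handled here; every other command falls
 * through to the native shmctl().
 */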
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
#ifdef MAC
		error = mac_check_sysv_shmctl(td->td_ucred, shmseg, uap->cmd);
		if (error != 0) {
			MPRINTF(("mac_check_sysv_shmctl returned %d\n",
			    error));
			goto done2;
		}
#endif
		outbuf.shm_perm = shmseg->u.shm_perm;
		outbuf.shm_segsz = shmseg->u.shm_segsz;
		outbuf.shm_cpid = shmseg->u.shm_cpid;
		outbuf.shm_lpid = shmseg->u.shm_lpid;
		outbuf.shm_nattch = shmseg->u.shm_nattch;
		outbuf.shm_atime = shmseg->u.shm_atime;
		outbuf.shm_dtime = shmseg->u.shm_dtime;
		outbuf.shm_ctime = shmseg->u.shm_ctime;
		outbuf.shm_handle = shmseg->u.shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		error = shmctl(td, (struct shmctl_args *)uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

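/*
 * kern_shmctl() works on a kernel-space buffer so that compatibility
 * layers can share it: the caller performs any copyin()/copyout() of
 * "buf", and *bufsz is set to the number of bytes the command produced.
 */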
/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_kernel *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	error = mac_check_sysv_shmctl(td->td_ucred, shmseg, cmd);
	if (error != 0) {
		MPRINTF(("mac_check_sysv_shmctl returned %d\n", error));
		goto done2;
	}
#endif
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

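/*
 * System call wrapper around kern_shmctl(): copies the user buffer in
 * for IPC_SET and copies the result back out for the *_STAT and *_INFO
 * commands.
 */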
int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

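/*
 * A segment with the requested key already exists; check permissions
 * and the requested size.  If the segment is still being set up by
 * another thread, sleep on it (SHMSEG_WANTED) and return EAGAIN so the
 * caller retries the lookup.
 */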
static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_kernel *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->u.shm_perm, mode);
#ifdef MAC
	error = mac_check_sysv_shmget(td->td_ucred, shmseg, uap->shmflg);
	if (error != 0)
		MPRINTF(("mac_check_sysv_shmget returned %d\n", error));
#endif
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->u.shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return (0);
}

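/*
 * Create a new segment: find a free slot in shmsegs[], mark it
 * ALLOCATED|REMOVED while the backing VM object is created (so that a
 * concurrent shmget() on the same key waits instead of reusing the
 * slot), and back it with a physical (OBJT_PHYS) or swap (OBJT_SWAP)
 * pager object depending on kern.ipc.shm_use_phys.
 */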
static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->u.shm_perm.key = uap->key;
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->u.shm_internal = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_segsz = uap->size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#ifdef MAC
	mac_create_sysv_shm(cred, shmseg);
#endif
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

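/*
 * shmget() proper: IPC_PRIVATE always allocates a fresh segment; any
 * other key is looked up first, and the lookup is retried if an
 * in-progress allocation we slept on failed or was freed.
 */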
/*
 * MPSAFE
 */
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

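/*
 * The fork and exit hooks below are published through the shmfork_hook
 * and shmexit_hook pointers (set in shminit(), cleared in shmunload())
 * so that the rest of the kernel never references this optional module
 * directly.
 */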
static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	mtx_lock(&Giant);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
	mtx_unlock(&Giant);
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		mtx_lock(&Giant);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		mtx_unlock(&Giant);
		free(base, M_SHM);
	}
}

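/*
 * Grow shmsegs[] up to shminfo.shmmni slots: copy the existing entries
 * into a larger array and initialize the new slots as free.
 */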
static void
shmrealloc(void)
{
	int i;
	struct shmid_kernel *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		newsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_init_sysv_shm(&newsegs[i]);
#endif
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_init_sysv_shm(&shmsegs[i]);
#endif
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

static int
shmunload()
{
#ifdef MAC
	int i;
#endif

	if (shm_nused > 0)
		return (EBUSY);

#ifdef MAC
	for (i = 0; i < shmalloced; i++)
		mac_destroy_sysv_shm(&shmsegs[i]);
#endif
	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

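/*
 * Module glue: sysvshm may be loaded and unloaded at runtime, but
 * shmunload() refuses to proceed while any segment is still in use.
 */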
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);