/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*-
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003-2005 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project in part by McAfee
 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
 * program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
4814cedfc8SRobert Watson * 4914cedfc8SRobert Watson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 5014cedfc8SRobert Watson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 5114cedfc8SRobert Watson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 5214cedfc8SRobert Watson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 5314cedfc8SRobert Watson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 5414cedfc8SRobert Watson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 5514cedfc8SRobert Watson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 5614cedfc8SRobert Watson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 5714cedfc8SRobert Watson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 5814cedfc8SRobert Watson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 5914cedfc8SRobert Watson * SUCH DAMAGE. 6014cedfc8SRobert Watson */ 613d903220SDoug Rabson 62677b542eSDavid E. O'Brien #include <sys/cdefs.h> 63677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 64677b542eSDavid E. O'Brien 655591b823SEivind Eklund #include "opt_compat.h" 66255108f3SPeter Wemm #include "opt_sysvipc.h" 67511b67b7SGarrett Wollman 683d903220SDoug Rabson #include <sys/param.h> 69725db531SBruce Evans #include <sys/systm.h> 703d903220SDoug Rabson #include <sys/kernel.h> 71fb919e4dSMark Murray #include <sys/lock.h> 72255108f3SPeter Wemm #include <sys/sysctl.h> 733d903220SDoug Rabson #include <sys/shm.h> 743d903220SDoug Rabson #include <sys/proc.h> 753d903220SDoug Rabson #include <sys/malloc.h> 763d903220SDoug Rabson #include <sys/mman.h> 7777409fe1SPoul-Henning Kamp #include <sys/module.h> 789dceb26bSJohn Baldwin #include <sys/mutex.h> 7968ba7a1dSTim J. 
Robbins #include <sys/resourcevar.h> 803d903220SDoug Rabson #include <sys/stat.h> 8178525ce3SAlfred Perlstein #include <sys/syscall.h> 82f130dcf2SMartin Blapp #include <sys/syscallsubr.h> 83725db531SBruce Evans #include <sys/sysent.h> 84fb919e4dSMark Murray #include <sys/sysproto.h> 85cb1f0db9SRobert Watson #include <sys/jail.h> 86aed55708SRobert Watson 87aed55708SRobert Watson #include <security/mac/mac_framework.h> 883d903220SDoug Rabson 893d903220SDoug Rabson #include <vm/vm.h> 90efeaf95aSDavid Greenman #include <vm/vm_param.h> 91efeaf95aSDavid Greenman #include <vm/pmap.h> 92a51f7119SJohn Dyson #include <vm/vm_object.h> 933d903220SDoug Rabson #include <vm/vm_map.h> 941c7c3c6aSMatthew Dillon #include <vm/vm_page.h> 95ae9b8c3aSJohn Dyson #include <vm/vm_pager.h> 963d903220SDoug Rabson 97a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 9855166637SPoul-Henning Kamp 99fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 100725db531SBruce Evans struct oshmctl_args; 1014d77a549SAlfred Perlstein static int oshmctl(struct thread *td, struct oshmctl_args *uap); 102fbb273bcSPaul Saab #endif 103255108f3SPeter Wemm 1044d77a549SAlfred Perlstein static int shmget_allocate_segment(struct thread *td, 1054d77a549SAlfred Perlstein struct shmget_args *uap, int mode); 1064d77a549SAlfred Perlstein static int shmget_existing(struct thread *td, struct shmget_args *uap, 1074d77a549SAlfred Perlstein int mode, int segnum); 108725db531SBruce Evans 109fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 110725db531SBruce Evans /* XXX casting to (sy_call_t *) is bogus, as usual. 
*/ 111303b270bSEivind Eklund static sy_call_t *shmcalls[] = { 112725db531SBruce Evans (sy_call_t *)shmat, (sy_call_t *)oshmctl, 113725db531SBruce Evans (sy_call_t *)shmdt, (sy_call_t *)shmget, 114725db531SBruce Evans (sy_call_t *)shmctl 115725db531SBruce Evans }; 116fbb273bcSPaul Saab #endif 1173d903220SDoug Rabson 1183d903220SDoug Rabson #define SHMSEG_FREE 0x0200 1193d903220SDoug Rabson #define SHMSEG_REMOVED 0x0400 1203d903220SDoug Rabson #define SHMSEG_ALLOCATED 0x0800 1213d903220SDoug Rabson #define SHMSEG_WANTED 0x1000 1223d903220SDoug Rabson 12365067cc8SKonstantin Belousov static int shm_last_free, shm_nused, shmalloced; 12445329b60SKonstantin Belousov vm_size_t shm_committed; 125921d05b9SRobert Watson static struct shmid_kernel *shmsegs; 1263d903220SDoug Rabson 1273d903220SDoug Rabson struct shmmap_state { 1283d903220SDoug Rabson vm_offset_t va; 1293d903220SDoug Rabson int shmid; 1303d903220SDoug Rabson }; 1313d903220SDoug Rabson 132921d05b9SRobert Watson static void shm_deallocate_segment(struct shmid_kernel *); 1334d77a549SAlfred Perlstein static int shm_find_segment_by_key(key_t); 134921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmid(int); 135921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmidx(int); 1363db161e0SMatthew Dillon static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *); 1374d77a549SAlfred Perlstein static void shmrealloc(void); 1384d77a549SAlfred Perlstein static void shminit(void); 1394d77a549SAlfred Perlstein static int sysvshm_modload(struct module *, int, void *); 1404d77a549SAlfred Perlstein static int shmunload(void); 1413db161e0SMatthew Dillon static void shmexit_myhook(struct vmspace *vm); 1424d77a549SAlfred Perlstein static void shmfork_myhook(struct proc *p1, struct proc *p2); 1434d77a549SAlfred Perlstein static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS); 144255108f3SPeter Wemm 145255108f3SPeter Wemm /* 146028f979dSDima Dorfman * Tuneable values. 
147255108f3SPeter Wemm */ 148255108f3SPeter Wemm #ifndef SHMMAXPGS 149028f979dSDima Dorfman #define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. */ 150255108f3SPeter Wemm #endif 151255108f3SPeter Wemm #ifndef SHMMAX 152255108f3SPeter Wemm #define SHMMAX (SHMMAXPGS*PAGE_SIZE) 153255108f3SPeter Wemm #endif 154255108f3SPeter Wemm #ifndef SHMMIN 155255108f3SPeter Wemm #define SHMMIN 1 156255108f3SPeter Wemm #endif 157255108f3SPeter Wemm #ifndef SHMMNI 1581766b2e5SMatthew Dillon #define SHMMNI 192 159255108f3SPeter Wemm #endif 160255108f3SPeter Wemm #ifndef SHMSEG 1611766b2e5SMatthew Dillon #define SHMSEG 128 162255108f3SPeter Wemm #endif 163255108f3SPeter Wemm #ifndef SHMALL 164255108f3SPeter Wemm #define SHMALL (SHMMAXPGS) 165255108f3SPeter Wemm #endif 166255108f3SPeter Wemm 167255108f3SPeter Wemm struct shminfo shminfo = { 168255108f3SPeter Wemm SHMMAX, 169255108f3SPeter Wemm SHMMIN, 170255108f3SPeter Wemm SHMMNI, 171255108f3SPeter Wemm SHMSEG, 172255108f3SPeter Wemm SHMALL 173255108f3SPeter Wemm }; 174255108f3SPeter Wemm 1758b03c8edSMatthew Dillon static int shm_use_phys; 1762332251cSMax Khon static int shm_allow_removed; 1778b03c8edSMatthew Dillon 1789baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, 17984f85aedSChristian S.J. Peron "Maximum shared memory segment size"); 1809baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, 18184f85aedSChristian S.J. Peron "Minimum shared memory segment size"); 1829baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, 18384f85aedSChristian S.J. Peron "Number of shared memory identifiers"); 1849baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, 18584f85aedSChristian S.J. Peron "Number of segments per process"); 1869baea4b4SChristian S.J. 
Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, 18784f85aedSChristian S.J. Peron "Maximum number of pages available for shared memory"); 188028f979dSDima Dorfman SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, 18984f85aedSChristian S.J. Peron &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core"); 1902332251cSMax Khon SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW, 19184f85aedSChristian S.J. Peron &shm_allow_removed, 0, 19284f85aedSChristian S.J. Peron "Enable/Disable attachment to attached segments marked for removal"); 193a723c4e1SDima Dorfman SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD, 19484f85aedSChristian S.J. Peron NULL, 0, sysctl_shmsegs, "", 19584f85aedSChristian S.J. Peron "Current number of shared memory segments allocated"); 1963d903220SDoug Rabson 1973d903220SDoug Rabson static int 1983d903220SDoug Rabson shm_find_segment_by_key(key) 1993d903220SDoug Rabson key_t key; 2003d903220SDoug Rabson { 2013d903220SDoug Rabson int i; 2023d903220SDoug Rabson 203255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 204921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 205921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 206b618bb96SAlfred Perlstein return (i); 207b618bb96SAlfred Perlstein return (-1); 2083d903220SDoug Rabson } 2093d903220SDoug Rabson 210921d05b9SRobert Watson static struct shmid_kernel * 2112332251cSMax Khon shm_find_segment_by_shmid(int shmid) 2123d903220SDoug Rabson { 2133d903220SDoug Rabson int segnum; 214921d05b9SRobert Watson struct shmid_kernel *shmseg; 2153d903220SDoug Rabson 2163d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 217255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 218b618bb96SAlfred Perlstein return (NULL); 2193d903220SDoug Rabson shmseg = &shmsegs[segnum]; 220921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2212332251cSMax Khon (!shm_allow_removed && 
222921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 223921d05b9SRobert Watson shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid)) 224b618bb96SAlfred Perlstein return (NULL); 225b618bb96SAlfred Perlstein return (shmseg); 2263d903220SDoug Rabson } 2273d903220SDoug Rabson 228921d05b9SRobert Watson static struct shmid_kernel * 2292332251cSMax Khon shm_find_segment_by_shmidx(int segnum) 230491dec93SMichael Reifenberger { 231921d05b9SRobert Watson struct shmid_kernel *shmseg; 232491dec93SMichael Reifenberger 233491dec93SMichael Reifenberger if (segnum < 0 || segnum >= shmalloced) 234b618bb96SAlfred Perlstein return (NULL); 235491dec93SMichael Reifenberger shmseg = &shmsegs[segnum]; 236921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2372332251cSMax Khon (!shm_allow_removed && 238921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0)) 239b618bb96SAlfred Perlstein return (NULL); 240b618bb96SAlfred Perlstein return (shmseg); 241491dec93SMichael Reifenberger } 242491dec93SMichael Reifenberger 2433d903220SDoug Rabson static void 2443d903220SDoug Rabson shm_deallocate_segment(shmseg) 245921d05b9SRobert Watson struct shmid_kernel *shmseg; 2463d903220SDoug Rabson { 24745329b60SKonstantin Belousov vm_size_t size; 2483d903220SDoug Rabson 2490cddd8f0SMatthew Dillon GIANT_REQUIRED; 2500cddd8f0SMatthew Dillon 251921d05b9SRobert Watson vm_object_deallocate(shmseg->u.shm_internal); 252921d05b9SRobert Watson shmseg->u.shm_internal = NULL; 25365067cc8SKonstantin Belousov size = round_page(shmseg->shm_bsegsz); 2543d903220SDoug Rabson shm_committed -= btoc(size); 2553d903220SDoug Rabson shm_nused--; 256921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 25714cedfc8SRobert Watson #ifdef MAC 25830d239bcSRobert Watson mac_sysvshm_cleanup(shmseg); 25914cedfc8SRobert Watson #endif 2603d903220SDoug Rabson } 2613d903220SDoug Rabson 2623d903220SDoug Rabson static int 2633db161e0SMatthew Dillon shm_delete_mapping(struct 
vmspace *vm, struct shmmap_state *shmmap_s) 2643d903220SDoug Rabson { 265921d05b9SRobert Watson struct shmid_kernel *shmseg; 2663d903220SDoug Rabson int segnum, result; 26745329b60SKonstantin Belousov vm_size_t size; 2683d903220SDoug Rabson 2690cddd8f0SMatthew Dillon GIANT_REQUIRED; 270028f979dSDima Dorfman 2713d903220SDoug Rabson segnum = IPCID_TO_IX(shmmap_s->shmid); 2723d903220SDoug Rabson shmseg = &shmsegs[segnum]; 27365067cc8SKonstantin Belousov size = round_page(shmseg->shm_bsegsz); 2743db161e0SMatthew Dillon result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size); 2753d903220SDoug Rabson if (result != KERN_SUCCESS) 276b618bb96SAlfred Perlstein return (EINVAL); 2773d903220SDoug Rabson shmmap_s->shmid = -1; 278921d05b9SRobert Watson shmseg->u.shm_dtime = time_second; 279921d05b9SRobert Watson if ((--shmseg->u.shm_nattch <= 0) && 280921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) { 2813d903220SDoug Rabson shm_deallocate_segment(shmseg); 2823d903220SDoug Rabson shm_last_free = segnum; 2833d903220SDoug Rabson } 284b618bb96SAlfred Perlstein return (0); 2853d903220SDoug Rabson } 2863d903220SDoug Rabson 287b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 2883d903220SDoug Rabson struct shmdt_args { 289e1d7d0bbSAlfred Perlstein const void *shmaddr; 2903d903220SDoug Rabson }; 291b5d5c0c9SPeter Wemm #endif 2923d903220SDoug Rabson int 293b40ce416SJulian Elischer shmdt(td, uap) 294b40ce416SJulian Elischer struct thread *td; 2953d903220SDoug Rabson struct shmdt_args *uap; 2963d903220SDoug Rabson { 297b40ce416SJulian Elischer struct proc *p = td->td_proc; 2983d903220SDoug Rabson struct shmmap_state *shmmap_s; 29914cedfc8SRobert Watson #ifdef MAC 30014cedfc8SRobert Watson struct shmid_kernel *shmsegptr; 30114cedfc8SRobert Watson #endif 3023d903220SDoug Rabson int i; 303b6a4b4f9SMatthew Dillon int error = 0; 3043d903220SDoug Rabson 3050304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 306c6f55f33SJohn Baldwin return 
(ENOSYS); 307b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 3088209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 309b6a4b4f9SMatthew Dillon if (shmmap_s == NULL) { 310b6a4b4f9SMatthew Dillon error = EINVAL; 311b6a4b4f9SMatthew Dillon goto done2; 312b6a4b4f9SMatthew Dillon } 313b6a4b4f9SMatthew Dillon for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 3143d903220SDoug Rabson if (shmmap_s->shmid != -1 && 315b6a4b4f9SMatthew Dillon shmmap_s->va == (vm_offset_t)uap->shmaddr) { 3163d903220SDoug Rabson break; 317b6a4b4f9SMatthew Dillon } 318b6a4b4f9SMatthew Dillon } 319b6a4b4f9SMatthew Dillon if (i == shminfo.shmseg) { 320b6a4b4f9SMatthew Dillon error = EINVAL; 321b6a4b4f9SMatthew Dillon goto done2; 322b6a4b4f9SMatthew Dillon } 32314cedfc8SRobert Watson #ifdef MAC 32414cedfc8SRobert Watson shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)]; 32530d239bcSRobert Watson error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr); 326f50c4fd8SRobert Watson if (error != 0) 32714cedfc8SRobert Watson goto done2; 32814cedfc8SRobert Watson #endif 3293db161e0SMatthew Dillon error = shm_delete_mapping(p->p_vmspace, shmmap_s); 330b6a4b4f9SMatthew Dillon done2: 331b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 332b6a4b4f9SMatthew Dillon return (error); 3333d903220SDoug Rabson } 3343d903220SDoug Rabson 335b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 3363d903220SDoug Rabson struct shmat_args { 3373d903220SDoug Rabson int shmid; 338e1d7d0bbSAlfred Perlstein const void *shmaddr; 3393d903220SDoug Rabson int shmflg; 3403d903220SDoug Rabson }; 341b5d5c0c9SPeter Wemm #endif 3423d903220SDoug Rabson int 3432332251cSMax Khon kern_shmat(td, shmid, shmaddr, shmflg) 344b40ce416SJulian Elischer struct thread *td; 345f130dcf2SMartin Blapp int shmid; 346f130dcf2SMartin Blapp const void *shmaddr; 347f130dcf2SMartin Blapp int shmflg; 3483d903220SDoug Rabson { 349b40ce416SJulian Elischer struct proc *p = td->td_proc; 350b6a4b4f9SMatthew Dillon int i, flags; 351921d05b9SRobert Watson struct shmid_kernel 
*shmseg; 3523d903220SDoug Rabson struct shmmap_state *shmmap_s = NULL; 3533d903220SDoug Rabson vm_offset_t attach_va; 3543d903220SDoug Rabson vm_prot_t prot; 3553d903220SDoug Rabson vm_size_t size; 356a51f7119SJohn Dyson int rv; 357b6a4b4f9SMatthew Dillon int error = 0; 3583d903220SDoug Rabson 3590304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 360c6f55f33SJohn Baldwin return (ENOSYS); 361b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 3628209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 3633d903220SDoug Rabson if (shmmap_s == NULL) { 36445329b60SKonstantin Belousov shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state), 36545329b60SKonstantin Belousov M_SHM, M_WAITOK); 3663d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) 3673d903220SDoug Rabson shmmap_s[i].shmid = -1; 3682cc593fdSAlfred Perlstein p->p_vmspace->vm_shm = shmmap_s; 3693d903220SDoug Rabson } 3702332251cSMax Khon shmseg = shm_find_segment_by_shmid(shmid); 371b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 372b6a4b4f9SMatthew Dillon error = EINVAL; 373b6a4b4f9SMatthew Dillon goto done2; 374b6a4b4f9SMatthew Dillon } 375921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, 376f130dcf2SMartin Blapp (shmflg & SHM_RDONLY) ? 
IPC_R : IPC_R|IPC_W); 377797f2d22SPoul-Henning Kamp if (error) 378b6a4b4f9SMatthew Dillon goto done2; 37914cedfc8SRobert Watson #ifdef MAC 38030d239bcSRobert Watson error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg); 381f50c4fd8SRobert Watson if (error != 0) 38214cedfc8SRobert Watson goto done2; 38314cedfc8SRobert Watson #endif 3843d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) { 3853d903220SDoug Rabson if (shmmap_s->shmid == -1) 3863d903220SDoug Rabson break; 3873d903220SDoug Rabson shmmap_s++; 3883d903220SDoug Rabson } 389b6a4b4f9SMatthew Dillon if (i >= shminfo.shmseg) { 390b6a4b4f9SMatthew Dillon error = EMFILE; 391b6a4b4f9SMatthew Dillon goto done2; 392b6a4b4f9SMatthew Dillon } 39365067cc8SKonstantin Belousov size = round_page(shmseg->shm_bsegsz); 3943d903220SDoug Rabson prot = VM_PROT_READ; 395f130dcf2SMartin Blapp if ((shmflg & SHM_RDONLY) == 0) 3963d903220SDoug Rabson prot |= VM_PROT_WRITE; 3973d903220SDoug Rabson flags = MAP_ANON | MAP_SHARED; 398f130dcf2SMartin Blapp if (shmaddr) { 3993d903220SDoug Rabson flags |= MAP_FIXED; 400f130dcf2SMartin Blapp if (shmflg & SHM_RND) { 401f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1); 402f130dcf2SMartin Blapp } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) { 403f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr; 404b6a4b4f9SMatthew Dillon } else { 405b6a4b4f9SMatthew Dillon error = EINVAL; 406b6a4b4f9SMatthew Dillon goto done2; 407b6a4b4f9SMatthew Dillon } 4083d903220SDoug Rabson } else { 409028f979dSDima Dorfman /* 410028f979dSDima Dorfman * This is just a hint to vm_map_find() about where to 411028f979dSDima Dorfman * put it. 412028f979dSDima Dorfman */ 41368ba7a1dSTim J. Robbins PROC_LOCK(p); 41468ba7a1dSTim J. Robbins attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr + 41568ba7a1dSTim J. Robbins lim_max(p, RLIMIT_DATA)); 41668ba7a1dSTim J. 
Robbins PROC_UNLOCK(p); 4173d903220SDoug Rabson } 418a51f7119SJohn Dyson 419921d05b9SRobert Watson vm_object_reference(shmseg->u.shm_internal); 420921d05b9SRobert Watson rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal, 42165067cc8SKonstantin Belousov 0, &attach_va, size, (flags & MAP_FIXED) ? VMFS_NO_SPACE : 42265067cc8SKonstantin Belousov VMFS_ANY_SPACE, prot, prot, 0); 423a51f7119SJohn Dyson if (rv != KERN_SUCCESS) { 424921d05b9SRobert Watson vm_object_deallocate(shmseg->u.shm_internal); 425b6a4b4f9SMatthew Dillon error = ENOMEM; 426b6a4b4f9SMatthew Dillon goto done2; 427a51f7119SJohn Dyson } 4280463028cSJohn Dyson vm_map_inherit(&p->p_vmspace->vm_map, 4290463028cSJohn Dyson attach_va, attach_va + size, VM_INHERIT_SHARE); 4300463028cSJohn Dyson 4313d903220SDoug Rabson shmmap_s->va = attach_va; 432f130dcf2SMartin Blapp shmmap_s->shmid = shmid; 433921d05b9SRobert Watson shmseg->u.shm_lpid = p->p_pid; 434921d05b9SRobert Watson shmseg->u.shm_atime = time_second; 435921d05b9SRobert Watson shmseg->u.shm_nattch++; 436b40ce416SJulian Elischer td->td_retval[0] = attach_va; 437b6a4b4f9SMatthew Dillon done2: 438b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 439b6a4b4f9SMatthew Dillon return (error); 4403d903220SDoug Rabson } 4413d903220SDoug Rabson 442f130dcf2SMartin Blapp int 443f130dcf2SMartin Blapp shmat(td, uap) 444f130dcf2SMartin Blapp struct thread *td; 445f130dcf2SMartin Blapp struct shmat_args *uap; 446f130dcf2SMartin Blapp { 4472332251cSMax Khon return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg); 448f130dcf2SMartin Blapp } 449f130dcf2SMartin Blapp 450fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 4518bec0921SDoug Rabson struct oshmid_ds { 4528bec0921SDoug Rabson struct ipc_perm shm_perm; /* operation perms */ 4538bec0921SDoug Rabson int shm_segsz; /* size of segment (bytes) */ 4548b149b51SJohn Baldwin u_short shm_cpid; /* pid, creator */ 4558b149b51SJohn Baldwin u_short shm_lpid; /* pid, last 
operation */ 4568bec0921SDoug Rabson short shm_nattch; /* no. of current attaches */ 4578bec0921SDoug Rabson time_t shm_atime; /* last attach time */ 4588bec0921SDoug Rabson time_t shm_dtime; /* last detach time */ 4598bec0921SDoug Rabson time_t shm_ctime; /* last change time */ 4608bec0921SDoug Rabson void *shm_handle; /* internal handle for shm segment */ 4618bec0921SDoug Rabson }; 4628bec0921SDoug Rabson 4638bec0921SDoug Rabson struct oshmctl_args { 4648bec0921SDoug Rabson int shmid; 4658bec0921SDoug Rabson int cmd; 4668bec0921SDoug Rabson struct oshmid_ds *ubuf; 4678bec0921SDoug Rabson }; 46887b6de2bSPoul-Henning Kamp static int 469b40ce416SJulian Elischer oshmctl(td, uap) 470b40ce416SJulian Elischer struct thread *td; 4718bec0921SDoug Rabson struct oshmctl_args *uap; 4728bec0921SDoug Rabson { 4738bec0921SDoug Rabson #ifdef COMPAT_43 474b6a4b4f9SMatthew Dillon int error = 0; 475921d05b9SRobert Watson struct shmid_kernel *shmseg; 4768bec0921SDoug Rabson struct oshmid_ds outbuf; 4778bec0921SDoug Rabson 4780304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 479c6f55f33SJohn Baldwin return (ENOSYS); 480b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 4812332251cSMax Khon shmseg = shm_find_segment_by_shmid(uap->shmid); 482b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 483b6a4b4f9SMatthew Dillon error = EINVAL; 484b6a4b4f9SMatthew Dillon goto done2; 485b6a4b4f9SMatthew Dillon } 4868bec0921SDoug Rabson switch (uap->cmd) { 4878bec0921SDoug Rabson case IPC_STAT: 488921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 489797f2d22SPoul-Henning Kamp if (error) 490b6a4b4f9SMatthew Dillon goto done2; 49114cedfc8SRobert Watson #ifdef MAC 49230d239bcSRobert Watson error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd); 493f50c4fd8SRobert Watson if (error != 0) 49414cedfc8SRobert Watson goto done2; 49514cedfc8SRobert Watson #endif 496921d05b9SRobert Watson outbuf.shm_perm = shmseg->u.shm_perm; 497921d05b9SRobert Watson outbuf.shm_segsz 
= shmseg->u.shm_segsz; 498921d05b9SRobert Watson outbuf.shm_cpid = shmseg->u.shm_cpid; 499921d05b9SRobert Watson outbuf.shm_lpid = shmseg->u.shm_lpid; 500921d05b9SRobert Watson outbuf.shm_nattch = shmseg->u.shm_nattch; 501921d05b9SRobert Watson outbuf.shm_atime = shmseg->u.shm_atime; 502921d05b9SRobert Watson outbuf.shm_dtime = shmseg->u.shm_dtime; 503921d05b9SRobert Watson outbuf.shm_ctime = shmseg->u.shm_ctime; 504921d05b9SRobert Watson outbuf.shm_handle = shmseg->u.shm_internal; 5052cc593fdSAlfred Perlstein error = copyout(&outbuf, uap->ubuf, sizeof(outbuf)); 506797f2d22SPoul-Henning Kamp if (error) 507b6a4b4f9SMatthew Dillon goto done2; 5088bec0921SDoug Rabson break; 5098bec0921SDoug Rabson default: 51000fbcda8SAlexander Kabaev error = shmctl(td, (struct shmctl_args *)uap); 511b6a4b4f9SMatthew Dillon break; 5128bec0921SDoug Rabson } 513b6a4b4f9SMatthew Dillon done2: 514b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 515b6a4b4f9SMatthew Dillon return (error); 5168bec0921SDoug Rabson #else 517b618bb96SAlfred Perlstein return (EINVAL); 5188bec0921SDoug Rabson #endif 5198bec0921SDoug Rabson } 520fbb273bcSPaul Saab #endif 5218bec0921SDoug Rabson 5223d903220SDoug Rabson int 5232332251cSMax Khon kern_shmctl(td, shmid, cmd, buf, bufsz) 524b40ce416SJulian Elischer struct thread *td; 525f130dcf2SMartin Blapp int shmid; 526f130dcf2SMartin Blapp int cmd; 527f130dcf2SMartin Blapp void *buf; 528f130dcf2SMartin Blapp size_t *bufsz; 5293d903220SDoug Rabson { 530b6a4b4f9SMatthew Dillon int error = 0; 531921d05b9SRobert Watson struct shmid_kernel *shmseg; 5323d903220SDoug Rabson 5330304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 534c6f55f33SJohn Baldwin return (ENOSYS); 535f130dcf2SMartin Blapp 536b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 537f130dcf2SMartin Blapp switch (cmd) { 5384f18813fSChristian S.J. Peron /* 5394f18813fSChristian S.J. Peron * It is possible that kern_shmctl is being called from the Linux ABI 5404f18813fSChristian S.J. 
Peron * layer, in which case, we will need to implement IPC_INFO. It should 5414f18813fSChristian S.J. Peron * be noted that other shmctl calls will be funneled through here for 5424f18813fSChristian S.J. Peron * Linix binaries as well. 5434f18813fSChristian S.J. Peron * 5444f18813fSChristian S.J. Peron * NB: The Linux ABI layer will convert this data to structure(s) more 5454f18813fSChristian S.J. Peron * consistent with the Linux ABI. 5464f18813fSChristian S.J. Peron */ 547491dec93SMichael Reifenberger case IPC_INFO: 548f130dcf2SMartin Blapp memcpy(buf, &shminfo, sizeof(shminfo)); 549f130dcf2SMartin Blapp if (bufsz) 550f130dcf2SMartin Blapp *bufsz = sizeof(shminfo); 551491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 552491dec93SMichael Reifenberger goto done2; 553491dec93SMichael Reifenberger case SHM_INFO: { 554491dec93SMichael Reifenberger struct shm_info shm_info; 555491dec93SMichael Reifenberger shm_info.used_ids = shm_nused; 556491dec93SMichael Reifenberger shm_info.shm_rss = 0; /*XXX where to get from ? */ 557491dec93SMichael Reifenberger shm_info.shm_tot = 0; /*XXX where to get from ? */ 558491dec93SMichael Reifenberger shm_info.shm_swp = 0; /*XXX where to get from ? */ 559491dec93SMichael Reifenberger shm_info.swap_attempts = 0; /*XXX where to get from ? */ 560491dec93SMichael Reifenberger shm_info.swap_successes = 0; /*XXX where to get from ? 
*/ 561f130dcf2SMartin Blapp memcpy(buf, &shm_info, sizeof(shm_info)); 562f130dcf2SMartin Blapp if (bufsz) 563f130dcf2SMartin Blapp *bufsz = sizeof(shm_info); 564491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 565491dec93SMichael Reifenberger goto done2; 566491dec93SMichael Reifenberger } 567491dec93SMichael Reifenberger } 568f130dcf2SMartin Blapp if (cmd == SHM_STAT) 5692332251cSMax Khon shmseg = shm_find_segment_by_shmidx(shmid); 570491dec93SMichael Reifenberger else 5712332251cSMax Khon shmseg = shm_find_segment_by_shmid(shmid); 572b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 573b6a4b4f9SMatthew Dillon error = EINVAL; 574b6a4b4f9SMatthew Dillon goto done2; 575b6a4b4f9SMatthew Dillon } 57614cedfc8SRobert Watson #ifdef MAC 57730d239bcSRobert Watson error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd); 578f50c4fd8SRobert Watson if (error != 0) 57914cedfc8SRobert Watson goto done2; 58014cedfc8SRobert Watson #endif 581f130dcf2SMartin Blapp switch (cmd) { 582491dec93SMichael Reifenberger case SHM_STAT: 5833d903220SDoug Rabson case IPC_STAT: 584921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 585797f2d22SPoul-Henning Kamp if (error) 586b6a4b4f9SMatthew Dillon goto done2; 587921d05b9SRobert Watson memcpy(buf, &shmseg->u, sizeof(struct shmid_ds)); 588f130dcf2SMartin Blapp if (bufsz) 589f130dcf2SMartin Blapp *bufsz = sizeof(struct shmid_ds); 590f130dcf2SMartin Blapp if (cmd == SHM_STAT) 591921d05b9SRobert Watson td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm); 5923d903220SDoug Rabson break; 593f130dcf2SMartin Blapp case IPC_SET: { 594f130dcf2SMartin Blapp struct shmid_ds *shmid; 595f130dcf2SMartin Blapp 596f130dcf2SMartin Blapp shmid = (struct shmid_ds *)buf; 597921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 598797f2d22SPoul-Henning Kamp if (error) 599b6a4b4f9SMatthew Dillon goto done2; 600921d05b9SRobert Watson shmseg->u.shm_perm.uid = shmid->shm_perm.uid; 601921d05b9SRobert Watson 
shmseg->u.shm_perm.gid = shmid->shm_perm.gid; 602921d05b9SRobert Watson shmseg->u.shm_perm.mode = 603921d05b9SRobert Watson (shmseg->u.shm_perm.mode & ~ACCESSPERMS) | 604f130dcf2SMartin Blapp (shmid->shm_perm.mode & ACCESSPERMS); 605921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 6063d903220SDoug Rabson break; 607f130dcf2SMartin Blapp } 6083d903220SDoug Rabson case IPC_RMID: 609921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 610797f2d22SPoul-Henning Kamp if (error) 611b6a4b4f9SMatthew Dillon goto done2; 612921d05b9SRobert Watson shmseg->u.shm_perm.key = IPC_PRIVATE; 613921d05b9SRobert Watson shmseg->u.shm_perm.mode |= SHMSEG_REMOVED; 614921d05b9SRobert Watson if (shmseg->u.shm_nattch <= 0) { 6153d903220SDoug Rabson shm_deallocate_segment(shmseg); 616f130dcf2SMartin Blapp shm_last_free = IPCID_TO_IX(shmid); 6173d903220SDoug Rabson } 6183d903220SDoug Rabson break; 6193d903220SDoug Rabson #if 0 6203d903220SDoug Rabson case SHM_LOCK: 6213d903220SDoug Rabson case SHM_UNLOCK: 6223d903220SDoug Rabson #endif 6233d903220SDoug Rabson default: 624b6a4b4f9SMatthew Dillon error = EINVAL; 625b6a4b4f9SMatthew Dillon break; 6263d903220SDoug Rabson } 627b6a4b4f9SMatthew Dillon done2: 628b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 629b6a4b4f9SMatthew Dillon return (error); 6303d903220SDoug Rabson } 6313d903220SDoug Rabson 63271361470SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 63371361470SJohn Baldwin struct shmctl_args { 63471361470SJohn Baldwin int shmid; 63571361470SJohn Baldwin int cmd; 63671361470SJohn Baldwin struct shmid_ds *buf; 63771361470SJohn Baldwin }; 63871361470SJohn Baldwin #endif 639f130dcf2SMartin Blapp int 640f130dcf2SMartin Blapp shmctl(td, uap) 641f130dcf2SMartin Blapp struct thread *td; 642f130dcf2SMartin Blapp struct shmctl_args *uap; 643f130dcf2SMartin Blapp { 644f130dcf2SMartin Blapp int error = 0; 645f130dcf2SMartin Blapp struct shmid_ds buf; 646f130dcf2SMartin Blapp size_t bufsz; 647f130dcf2SMartin Blapp 6484f18813fSChristian 
S.J. Peron /* 6494f18813fSChristian S.J. Peron * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support 6504f18813fSChristian S.J. Peron * Linux binaries. If we see the call come through the FreeBSD ABI, 6514f18813fSChristian S.J. Peron * return an error back to the user since we do not to support this. 6524f18813fSChristian S.J. Peron */ 6534f18813fSChristian S.J. Peron if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO || 6544f18813fSChristian S.J. Peron uap->cmd == SHM_STAT) 6554f18813fSChristian S.J. Peron return (EINVAL); 6564f18813fSChristian S.J. Peron 657f130dcf2SMartin Blapp /* IPC_SET needs to copyin the buffer before calling kern_shmctl */ 658f130dcf2SMartin Blapp if (uap->cmd == IPC_SET) { 659f130dcf2SMartin Blapp if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds)))) 660f130dcf2SMartin Blapp goto done; 661f130dcf2SMartin Blapp } 662f130dcf2SMartin Blapp 6632332251cSMax Khon error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz); 664f130dcf2SMartin Blapp if (error) 665f130dcf2SMartin Blapp goto done; 666f130dcf2SMartin Blapp 667f130dcf2SMartin Blapp /* Cases in which we need to copyout */ 668f130dcf2SMartin Blapp switch (uap->cmd) { 669f130dcf2SMartin Blapp case IPC_STAT: 670f130dcf2SMartin Blapp error = copyout(&buf, uap->buf, bufsz); 671f130dcf2SMartin Blapp break; 672f130dcf2SMartin Blapp } 673f130dcf2SMartin Blapp 674f130dcf2SMartin Blapp done: 675f130dcf2SMartin Blapp if (error) { 676f130dcf2SMartin Blapp /* Invalidate the return value */ 677f130dcf2SMartin Blapp td->td_retval[0] = -1; 678f130dcf2SMartin Blapp } 679f130dcf2SMartin Blapp return (error); 680f130dcf2SMartin Blapp } 681f130dcf2SMartin Blapp 682f130dcf2SMartin Blapp 6833d903220SDoug Rabson static int 684b40ce416SJulian Elischer shmget_existing(td, uap, mode, segnum) 685b40ce416SJulian Elischer struct thread *td; 6863d903220SDoug Rabson struct shmget_args *uap; 6873d903220SDoug Rabson int mode; 6883d903220SDoug Rabson int segnum; 6893d903220SDoug Rabson { 
690921d05b9SRobert Watson struct shmid_kernel *shmseg; 6913d903220SDoug Rabson int error; 6923d903220SDoug Rabson 6933d903220SDoug Rabson shmseg = &shmsegs[segnum]; 694921d05b9SRobert Watson if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) { 6953d903220SDoug Rabson /* 6963d903220SDoug Rabson * This segment is in the process of being allocated. Wait 6973d903220SDoug Rabson * until it's done, and look the key up again (in case the 6983d903220SDoug Rabson * allocation failed or it was freed). 6993d903220SDoug Rabson */ 700921d05b9SRobert Watson shmseg->u.shm_perm.mode |= SHMSEG_WANTED; 7012cc593fdSAlfred Perlstein error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0); 702797f2d22SPoul-Henning Kamp if (error) 703b618bb96SAlfred Perlstein return (error); 704b618bb96SAlfred Perlstein return (EAGAIN); 7053d903220SDoug Rabson } 706dc92aa57SAlan Cox if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 707b618bb96SAlfred Perlstein return (EEXIST); 70814cedfc8SRobert Watson #ifdef MAC 70930d239bcSRobert Watson error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg); 710f50c4fd8SRobert Watson if (error != 0) 7117723d5edSRobert Watson return (error); 71214cedfc8SRobert Watson #endif 71345329b60SKonstantin Belousov if (uap->size != 0 && uap->size > shmseg->shm_bsegsz) 714b618bb96SAlfred Perlstein return (EINVAL); 715921d05b9SRobert Watson td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 716b618bb96SAlfred Perlstein return (0); 7173d903220SDoug Rabson } 7183d903220SDoug Rabson 7193d903220SDoug Rabson static int 720b40ce416SJulian Elischer shmget_allocate_segment(td, uap, mode) 721b40ce416SJulian Elischer struct thread *td; 7223d903220SDoug Rabson struct shmget_args *uap; 7233d903220SDoug Rabson int mode; 7243d903220SDoug Rabson { 72565067cc8SKonstantin Belousov int i, segnum, shmid; 72665067cc8SKonstantin Belousov size_t size; 727a854ed98SJohn Baldwin struct ucred *cred = td->td_ucred; 728921d05b9SRobert Watson struct shmid_kernel *shmseg; 
7290049f8b2SAlan Cox vm_object_t shm_object; 7303d903220SDoug Rabson 7310cddd8f0SMatthew Dillon GIANT_REQUIRED; 7320cddd8f0SMatthew Dillon 7333d903220SDoug Rabson if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 734b618bb96SAlfred Perlstein return (EINVAL); 735028f979dSDima Dorfman if (shm_nused >= shminfo.shmmni) /* Any shmids left? */ 736b618bb96SAlfred Perlstein return (ENOSPC); 7379e609ddeSJoerg Wunsch size = round_page(uap->size); 7383d903220SDoug Rabson if (shm_committed + btoc(size) > shminfo.shmall) 739b618bb96SAlfred Perlstein return (ENOMEM); 7403d903220SDoug Rabson if (shm_last_free < 0) { 741028f979dSDima Dorfman shmrealloc(); /* Maybe expand the shmsegs[] array. */ 742255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 743921d05b9SRobert Watson if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) 7443d903220SDoug Rabson break; 745255108f3SPeter Wemm if (i == shmalloced) 746b618bb96SAlfred Perlstein return (ENOSPC); 7473d903220SDoug Rabson segnum = i; 7483d903220SDoug Rabson } else { 7493d903220SDoug Rabson segnum = shm_last_free; 7503d903220SDoug Rabson shm_last_free = -1; 7513d903220SDoug Rabson } 7523d903220SDoug Rabson shmseg = &shmsegs[segnum]; 7533d903220SDoug Rabson /* 7543d903220SDoug Rabson * In case we sleep in malloc(), mark the segment present but deleted 7553d903220SDoug Rabson * so that noone else tries to create the same key. 7563d903220SDoug Rabson */ 757921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 758921d05b9SRobert Watson shmseg->u.shm_perm.key = uap->key; 759921d05b9SRobert Watson shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff; 760921d05b9SRobert Watson shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 761a51f7119SJohn Dyson 762ae9b8c3aSJohn Dyson /* 763ae9b8c3aSJohn Dyson * We make sure that we have allocated a pager before we need 764ae9b8c3aSJohn Dyson * to. 765ae9b8c3aSJohn Dyson */ 7663364c323SKonstantin Belousov shm_object = vm_pager_allocate(shm_use_phys ? 
OBJT_PHYS : OBJT_SWAP, 7673364c323SKonstantin Belousov 0, size, VM_PROT_DEFAULT, 0, cred); 7683364c323SKonstantin Belousov if (shm_object == NULL) 7693364c323SKonstantin Belousov return (ENOMEM); 7700049f8b2SAlan Cox VM_OBJECT_LOCK(shm_object); 7710049f8b2SAlan Cox vm_object_clear_flag(shm_object, OBJ_ONEMAPPING); 7720049f8b2SAlan Cox vm_object_set_flag(shm_object, OBJ_NOSPLIT); 7730049f8b2SAlan Cox VM_OBJECT_UNLOCK(shm_object); 774cbd8ec09SJohn Dyson 775921d05b9SRobert Watson shmseg->u.shm_internal = shm_object; 776921d05b9SRobert Watson shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid; 777921d05b9SRobert Watson shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid; 778921d05b9SRobert Watson shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) | 7793d903220SDoug Rabson (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 780921d05b9SRobert Watson shmseg->u.shm_segsz = uap->size; 78165067cc8SKonstantin Belousov shmseg->shm_bsegsz = uap->size; 782921d05b9SRobert Watson shmseg->u.shm_cpid = td->td_proc->p_pid; 783921d05b9SRobert Watson shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0; 784921d05b9SRobert Watson shmseg->u.shm_atime = shmseg->u.shm_dtime = 0; 78514cedfc8SRobert Watson #ifdef MAC 78630d239bcSRobert Watson mac_sysvshm_create(cred, shmseg); 78714cedfc8SRobert Watson #endif 788921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 7893d903220SDoug Rabson shm_committed += btoc(size); 7903d903220SDoug Rabson shm_nused++; 791921d05b9SRobert Watson if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) { 7923d903220SDoug Rabson /* 7933d903220SDoug Rabson * Somebody else wanted this key while we were asleep. Wake 7943d903220SDoug Rabson * them up now. 
7953d903220SDoug Rabson */ 796921d05b9SRobert Watson shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED; 7972cc593fdSAlfred Perlstein wakeup(shmseg); 7983d903220SDoug Rabson } 799b40ce416SJulian Elischer td->td_retval[0] = shmid; 800b618bb96SAlfred Perlstein return (0); 8013d903220SDoug Rabson } 8023d903220SDoug Rabson 80371361470SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 80471361470SJohn Baldwin struct shmget_args { 80571361470SJohn Baldwin key_t key; 80671361470SJohn Baldwin size_t size; 80771361470SJohn Baldwin int shmflg; 80871361470SJohn Baldwin }; 80971361470SJohn Baldwin #endif 8103d903220SDoug Rabson int 811b40ce416SJulian Elischer shmget(td, uap) 812b40ce416SJulian Elischer struct thread *td; 8133d903220SDoug Rabson struct shmget_args *uap; 8143d903220SDoug Rabson { 815b6a4b4f9SMatthew Dillon int segnum, mode; 816b6a4b4f9SMatthew Dillon int error; 8173d903220SDoug Rabson 8180304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 819c6f55f33SJohn Baldwin return (ENOSYS); 820b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 8213d903220SDoug Rabson mode = uap->shmflg & ACCESSPERMS; 8223d903220SDoug Rabson if (uap->key != IPC_PRIVATE) { 8233d903220SDoug Rabson again: 8243d903220SDoug Rabson segnum = shm_find_segment_by_key(uap->key); 8253d903220SDoug Rabson if (segnum >= 0) { 826b40ce416SJulian Elischer error = shmget_existing(td, uap, mode, segnum); 8273d903220SDoug Rabson if (error == EAGAIN) 8283d903220SDoug Rabson goto again; 829b6a4b4f9SMatthew Dillon goto done2; 8303d903220SDoug Rabson } 831b6a4b4f9SMatthew Dillon if ((uap->shmflg & IPC_CREAT) == 0) { 832b6a4b4f9SMatthew Dillon error = ENOENT; 833b6a4b4f9SMatthew Dillon goto done2; 8343d903220SDoug Rabson } 835b6a4b4f9SMatthew Dillon } 836b40ce416SJulian Elischer error = shmget_allocate_segment(td, uap, mode); 837b6a4b4f9SMatthew Dillon done2: 838b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 839b6a4b4f9SMatthew Dillon return (error); 8403d903220SDoug Rabson } 8413d903220SDoug Rabson 8423d903220SDoug Rabson 
int 843b40ce416SJulian Elischer shmsys(td, uap) 844b40ce416SJulian Elischer struct thread *td; 845725db531SBruce Evans /* XXX actually varargs. */ 846725db531SBruce Evans struct shmsys_args /* { 84701b9dc96SJacques Vidrine int which; 848725db531SBruce Evans int a2; 849725db531SBruce Evans int a3; 850725db531SBruce Evans int a4; 851725db531SBruce Evans } */ *uap; 8523d903220SDoug Rabson { 853fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 854b6a4b4f9SMatthew Dillon int error; 8553d903220SDoug Rabson 8560304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 857c6f55f33SJohn Baldwin return (ENOSYS); 85801b9dc96SJacques Vidrine if (uap->which < 0 || 85901b9dc96SJacques Vidrine uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) 860c6f55f33SJohn Baldwin return (EINVAL); 861b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 862b40ce416SJulian Elischer error = (*shmcalls[uap->which])(td, &uap->a2); 863b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 864b6a4b4f9SMatthew Dillon return (error); 865fbb273bcSPaul Saab #else 866fbb273bcSPaul Saab return (nosys(td, NULL)); 867fbb273bcSPaul Saab #endif 8683d903220SDoug Rabson } 8693d903220SDoug Rabson 87078525ce3SAlfred Perlstein static void 87178525ce3SAlfred Perlstein shmfork_myhook(p1, p2) 8723d903220SDoug Rabson struct proc *p1, *p2; 8733d903220SDoug Rabson { 8743d903220SDoug Rabson struct shmmap_state *shmmap_s; 8753d903220SDoug Rabson size_t size; 8763d903220SDoug Rabson int i; 8773d903220SDoug Rabson 87894ddc707SAlan Cox mtx_lock(&Giant); 8793d903220SDoug Rabson size = shminfo.shmseg * sizeof(struct shmmap_state); 880a163d034SWarner Losh shmmap_s = malloc(size, M_SHM, M_WAITOK); 8812cc593fdSAlfred Perlstein bcopy(p1->p_vmspace->vm_shm, shmmap_s, size); 8822cc593fdSAlfred Perlstein p2->p_vmspace->vm_shm = shmmap_s; 8833d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 8843d903220SDoug Rabson if (shmmap_s->shmid != -1) 885921d05b9SRobert Watson 
shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++; 88694ddc707SAlan Cox mtx_unlock(&Giant); 8873d903220SDoug Rabson } 8883d903220SDoug Rabson 88978525ce3SAlfred Perlstein static void 8903db161e0SMatthew Dillon shmexit_myhook(struct vmspace *vm) 8913d903220SDoug Rabson { 8923db161e0SMatthew Dillon struct shmmap_state *base, *shm; 8933d903220SDoug Rabson int i; 8943d903220SDoug Rabson 8953db161e0SMatthew Dillon if ((base = vm->vm_shm) != NULL) { 8963db161e0SMatthew Dillon vm->vm_shm = NULL; 8971a276a3fSAlan Cox mtx_lock(&Giant); 8983db161e0SMatthew Dillon for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) { 8993db161e0SMatthew Dillon if (shm->shmid != -1) 9003db161e0SMatthew Dillon shm_delete_mapping(vm, shm); 9013db161e0SMatthew Dillon } 9021a276a3fSAlan Cox mtx_unlock(&Giant); 9033db161e0SMatthew Dillon free(base, M_SHM); 9043db161e0SMatthew Dillon } 9053d903220SDoug Rabson } 9063d903220SDoug Rabson 907255108f3SPeter Wemm static void 908255108f3SPeter Wemm shmrealloc(void) 909255108f3SPeter Wemm { 910255108f3SPeter Wemm int i; 911921d05b9SRobert Watson struct shmid_kernel *newsegs; 912255108f3SPeter Wemm 913255108f3SPeter Wemm if (shmalloced >= shminfo.shmmni) 914255108f3SPeter Wemm return; 915255108f3SPeter Wemm 916a163d034SWarner Losh newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 917255108f3SPeter Wemm if (newsegs == NULL) 918255108f3SPeter Wemm return; 919255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 920255108f3SPeter Wemm bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 921255108f3SPeter Wemm for (; i < shminfo.shmmni; i++) { 922921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 923921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 92414cedfc8SRobert Watson #ifdef MAC 92530d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 92614cedfc8SRobert Watson #endif 927255108f3SPeter Wemm } 928255108f3SPeter Wemm free(shmsegs, M_SHM); 929255108f3SPeter Wemm shmsegs = newsegs; 930255108f3SPeter Wemm shmalloced = 
shminfo.shmmni; 931255108f3SPeter Wemm } 932255108f3SPeter Wemm 933255108f3SPeter Wemm static void 93478525ce3SAlfred Perlstein shminit() 9353d903220SDoug Rabson { 9363d903220SDoug Rabson int i; 937255108f3SPeter Wemm 9389baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall); 9399d4156aeSAlfred Perlstein for (i = PAGE_SIZE; i > 0; i--) { 940a4c24c66SJohn Baldwin shminfo.shmmax = shminfo.shmall * i; 9415015c68aSAlfred Perlstein if (shminfo.shmmax >= shminfo.shmall) 9425015c68aSAlfred Perlstein break; 9435015c68aSAlfred Perlstein } 9449baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmmin", &shminfo.shmmin); 9459baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmmni", &shminfo.shmmni); 9469baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmseg", &shminfo.shmseg); 947b3a4bc42SMichael Reifenberger TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys); 948b3a4bc42SMichael Reifenberger 949255108f3SPeter Wemm shmalloced = shminfo.shmmni; 950a163d034SWarner Losh shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 951255108f3SPeter Wemm if (shmsegs == NULL) 952255108f3SPeter Wemm panic("cannot allocate initial memory for sysvshm"); 953255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) { 954921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 955921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 95614cedfc8SRobert Watson #ifdef MAC 95730d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 95814cedfc8SRobert Watson #endif 9593d903220SDoug Rabson } 9603d903220SDoug Rabson shm_last_free = 0; 9613d903220SDoug Rabson shm_nused = 0; 9623d903220SDoug Rabson shm_committed = 0; 96378525ce3SAlfred Perlstein shmexit_hook = &shmexit_myhook; 96478525ce3SAlfred Perlstein shmfork_hook = &shmfork_myhook; 9653d903220SDoug Rabson } 96678525ce3SAlfred Perlstein 96778525ce3SAlfred Perlstein static int 96878525ce3SAlfred Perlstein shmunload() 96978525ce3SAlfred Perlstein { 97014cedfc8SRobert 
Watson #ifdef MAC 97114cedfc8SRobert Watson int i; 97214cedfc8SRobert Watson #endif 97378525ce3SAlfred Perlstein 97478525ce3SAlfred Perlstein if (shm_nused > 0) 97578525ce3SAlfred Perlstein return (EBUSY); 97678525ce3SAlfred Perlstein 97714cedfc8SRobert Watson #ifdef MAC 97814cedfc8SRobert Watson for (i = 0; i < shmalloced; i++) 97930d239bcSRobert Watson mac_sysvshm_destroy(&shmsegs[i]); 98014cedfc8SRobert Watson #endif 98178525ce3SAlfred Perlstein free(shmsegs, M_SHM); 98278525ce3SAlfred Perlstein shmexit_hook = NULL; 98378525ce3SAlfred Perlstein shmfork_hook = NULL; 98478525ce3SAlfred Perlstein return (0); 98578525ce3SAlfred Perlstein } 98678525ce3SAlfred Perlstein 98778525ce3SAlfred Perlstein static int 988a723c4e1SDima Dorfman sysctl_shmsegs(SYSCTL_HANDLER_ARGS) 989a723c4e1SDima Dorfman { 990a723c4e1SDima Dorfman 991a723c4e1SDima Dorfman return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0]))); 992a723c4e1SDima Dorfman } 993a723c4e1SDima Dorfman 994a723c4e1SDima Dorfman static int 99578525ce3SAlfred Perlstein sysvshm_modload(struct module *module, int cmd, void *arg) 99678525ce3SAlfred Perlstein { 99778525ce3SAlfred Perlstein int error = 0; 99878525ce3SAlfred Perlstein 99978525ce3SAlfred Perlstein switch (cmd) { 100078525ce3SAlfred Perlstein case MOD_LOAD: 100178525ce3SAlfred Perlstein shminit(); 100278525ce3SAlfred Perlstein break; 100378525ce3SAlfred Perlstein case MOD_UNLOAD: 100478525ce3SAlfred Perlstein error = shmunload(); 100578525ce3SAlfred Perlstein break; 100678525ce3SAlfred Perlstein case MOD_SHUTDOWN: 100778525ce3SAlfred Perlstein break; 100878525ce3SAlfred Perlstein default: 100978525ce3SAlfred Perlstein error = EINVAL; 101078525ce3SAlfred Perlstein break; 101178525ce3SAlfred Perlstein } 101278525ce3SAlfred Perlstein return (error); 101378525ce3SAlfred Perlstein } 101478525ce3SAlfred Perlstein 1015faa784b7SDag-Erling Smørgrav static moduledata_t sysvshm_mod = { 1016faa784b7SDag-Erling Smørgrav "sysvshm", 101778525ce3SAlfred Perlstein 
&sysvshm_modload, 101878525ce3SAlfred Perlstein NULL 101978525ce3SAlfred Perlstein }; 102078525ce3SAlfred Perlstein 102121d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(shmsys); 102221d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(shmat); 102321d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(shmctl); 102421d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(shmdt); 102521d56e9cSAlfred Perlstein SYSCALL_MODULE_HELPER(shmget); 102678525ce3SAlfred Perlstein 102771361470SJohn Baldwin DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST); 1028faa784b7SDag-Erling Smørgrav MODULE_VERSION(sysvshm, 1); 1029