13d903220SDoug Rabson /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 29454b2d8SWarner Losh /*- 33d903220SDoug Rabson * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 43d903220SDoug Rabson * 53d903220SDoug Rabson * Redistribution and use in source and binary forms, with or without 63d903220SDoug Rabson * modification, are permitted provided that the following conditions 73d903220SDoug Rabson * are met: 83d903220SDoug Rabson * 1. Redistributions of source code must retain the above copyright 93d903220SDoug Rabson * notice, this list of conditions and the following disclaimer. 103d903220SDoug Rabson * 2. Redistributions in binary form must reproduce the above copyright 113d903220SDoug Rabson * notice, this list of conditions and the following disclaimer in the 123d903220SDoug Rabson * documentation and/or other materials provided with the distribution. 133d903220SDoug Rabson * 3. All advertising materials mentioning features or use of this software 143d903220SDoug Rabson * must display the following acknowledgement: 153d903220SDoug Rabson * This product includes software developed by Adam Glass and Charles 163d903220SDoug Rabson * Hannum. 173d903220SDoug Rabson * 4. The names of the authors may not be used to endorse or promote products 183d903220SDoug Rabson * derived from this software without specific prior written permission. 193d903220SDoug Rabson * 203d903220SDoug Rabson * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 213d903220SDoug Rabson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 223d903220SDoug Rabson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 233d903220SDoug Rabson * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 243d903220SDoug Rabson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 253d903220SDoug Rabson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 263d903220SDoug Rabson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 273d903220SDoug Rabson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 283d903220SDoug Rabson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 293d903220SDoug Rabson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 303d903220SDoug Rabson */ 3114cedfc8SRobert Watson /*- 3214cedfc8SRobert Watson * Copyright (c) 2003-2005 McAfee, Inc. 3314cedfc8SRobert Watson * All rights reserved. 3414cedfc8SRobert Watson * 3514cedfc8SRobert Watson * This software was developed for the FreeBSD Project in part by McAfee 3614cedfc8SRobert Watson * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR 3714cedfc8SRobert Watson * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research 3814cedfc8SRobert Watson * program. 3914cedfc8SRobert Watson * 4014cedfc8SRobert Watson * Redistribution and use in source and binary forms, with or without 4114cedfc8SRobert Watson * modification, are permitted provided that the following conditions 4214cedfc8SRobert Watson * are met: 4314cedfc8SRobert Watson * 1. Redistributions of source code must retain the above copyright 4414cedfc8SRobert Watson * notice, this list of conditions and the following disclaimer. 4514cedfc8SRobert Watson * 2. 
Redistributions in binary form must reproduce the above copyright 4614cedfc8SRobert Watson * notice, this list of conditions and the following disclaimer in the 4714cedfc8SRobert Watson * documentation and/or other materials provided with the distribution. 4814cedfc8SRobert Watson * 4914cedfc8SRobert Watson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 5014cedfc8SRobert Watson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 5114cedfc8SRobert Watson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 5214cedfc8SRobert Watson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 5314cedfc8SRobert Watson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 5414cedfc8SRobert Watson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 5514cedfc8SRobert Watson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 5614cedfc8SRobert Watson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 5714cedfc8SRobert Watson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 5814cedfc8SRobert Watson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 5914cedfc8SRobert Watson * SUCH DAMAGE. 6014cedfc8SRobert Watson */ 613d903220SDoug Rabson 62677b542eSDavid E. O'Brien #include <sys/cdefs.h> 63677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 64677b542eSDavid E. O'Brien 655591b823SEivind Eklund #include "opt_compat.h" 66255108f3SPeter Wemm #include "opt_sysvipc.h" 67511b67b7SGarrett Wollman 683d903220SDoug Rabson #include <sys/param.h> 69725db531SBruce Evans #include <sys/systm.h> 703d903220SDoug Rabson #include <sys/kernel.h> 71b648d480SJohn Baldwin #include <sys/limits.h> 72fb919e4dSMark Murray #include <sys/lock.h> 73255108f3SPeter Wemm #include <sys/sysctl.h> 743d903220SDoug Rabson #include <sys/shm.h> 753d903220SDoug Rabson #include <sys/proc.h> 763d903220SDoug Rabson #include <sys/malloc.h> 773d903220SDoug Rabson #include <sys/mman.h> 7877409fe1SPoul-Henning Kamp #include <sys/module.h> 799dceb26bSJohn Baldwin #include <sys/mutex.h> 803bcf7445SEdward Tomasz Napierala #include <sys/racct.h> 8168ba7a1dSTim J. 
Robbins #include <sys/resourcevar.h> 8289f6b863SAttilio Rao #include <sys/rwlock.h> 833d903220SDoug Rabson #include <sys/stat.h> 8478525ce3SAlfred Perlstein #include <sys/syscall.h> 85f130dcf2SMartin Blapp #include <sys/syscallsubr.h> 86725db531SBruce Evans #include <sys/sysent.h> 87fb919e4dSMark Murray #include <sys/sysproto.h> 88cb1f0db9SRobert Watson #include <sys/jail.h> 89aed55708SRobert Watson 90aed55708SRobert Watson #include <security/mac/mac_framework.h> 913d903220SDoug Rabson 923d903220SDoug Rabson #include <vm/vm.h> 93efeaf95aSDavid Greenman #include <vm/vm_param.h> 94efeaf95aSDavid Greenman #include <vm/pmap.h> 95a51f7119SJohn Dyson #include <vm/vm_object.h> 963d903220SDoug Rabson #include <vm/vm_map.h> 971c7c3c6aSMatthew Dillon #include <vm/vm_page.h> 98ae9b8c3aSJohn Dyson #include <vm/vm_pager.h> 993d903220SDoug Rabson 100de5b1952SAlexander Leidinger FEATURE(sysv_shm, "System V shared memory segments support"); 101de5b1952SAlexander Leidinger 102a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 10355166637SPoul-Henning Kamp 1044d77a549SAlfred Perlstein static int shmget_allocate_segment(struct thread *td, 1054d77a549SAlfred Perlstein struct shmget_args *uap, int mode); 1064d77a549SAlfred Perlstein static int shmget_existing(struct thread *td, struct shmget_args *uap, 1074d77a549SAlfred Perlstein int mode, int segnum); 108725db531SBruce Evans 1093d903220SDoug Rabson #define SHMSEG_FREE 0x0200 1103d903220SDoug Rabson #define SHMSEG_REMOVED 0x0400 1113d903220SDoug Rabson #define SHMSEG_ALLOCATED 0x0800 1123d903220SDoug Rabson #define SHMSEG_WANTED 0x1000 1133d903220SDoug Rabson 11465067cc8SKonstantin Belousov static int shm_last_free, shm_nused, shmalloced; 11545329b60SKonstantin Belousov vm_size_t shm_committed; 116921d05b9SRobert Watson static struct shmid_kernel *shmsegs; 1173d903220SDoug Rabson 1183d903220SDoug Rabson struct shmmap_state { 1193d903220SDoug Rabson vm_offset_t va; 1203d903220SDoug Rabson int shmid; 1213d903220SDoug Rabson }; 1223d903220SDoug Rabson 123921d05b9SRobert Watson static void shm_deallocate_segment(struct shmid_kernel *); 1244d77a549SAlfred Perlstein static int shm_find_segment_by_key(key_t); 125921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmid(int); 126921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmidx(int); 1273db161e0SMatthew Dillon static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *); 1284d77a549SAlfred Perlstein static void shmrealloc(void); 12975d633cbSKonstantin Belousov static int shminit(void); 1304d77a549SAlfred Perlstein static int sysvshm_modload(struct module *, int, void *); 1314d77a549SAlfred Perlstein static int shmunload(void); 1323db161e0SMatthew Dillon static void shmexit_myhook(struct vmspace *vm); 1334d77a549SAlfred Perlstein static void shmfork_myhook(struct proc *p1, struct proc *p2); 1344d77a549SAlfred Perlstein static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS); 135255108f3SPeter Wemm 136255108f3SPeter Wemm /* 137028f979dSDima Dorfman * Tuneable values. 138255108f3SPeter Wemm */ 139255108f3SPeter Wemm #ifndef SHMMAXPGS 140c1e34abfSIvan Voras #define SHMMAXPGS 131072 /* Note: sysv shared memory is swap backed. 
*/ 141255108f3SPeter Wemm #endif 142255108f3SPeter Wemm #ifndef SHMMAX 143255108f3SPeter Wemm #define SHMMAX (SHMMAXPGS*PAGE_SIZE) 144255108f3SPeter Wemm #endif 145255108f3SPeter Wemm #ifndef SHMMIN 146255108f3SPeter Wemm #define SHMMIN 1 147255108f3SPeter Wemm #endif 148255108f3SPeter Wemm #ifndef SHMMNI 1491766b2e5SMatthew Dillon #define SHMMNI 192 150255108f3SPeter Wemm #endif 151255108f3SPeter Wemm #ifndef SHMSEG 1521766b2e5SMatthew Dillon #define SHMSEG 128 153255108f3SPeter Wemm #endif 154255108f3SPeter Wemm #ifndef SHMALL 155255108f3SPeter Wemm #define SHMALL (SHMMAXPGS) 156255108f3SPeter Wemm #endif 157255108f3SPeter Wemm 158255108f3SPeter Wemm struct shminfo shminfo = { 159*af3b2549SHans Petter Selasky .shmmax = SHMMAX, 160*af3b2549SHans Petter Selasky .shmmin = SHMMIN, 161*af3b2549SHans Petter Selasky .shmmni = SHMMNI, 162*af3b2549SHans Petter Selasky .shmseg = SHMSEG, 163*af3b2549SHans Petter Selasky .shmall = SHMALL 164255108f3SPeter Wemm }; 165255108f3SPeter Wemm 1668b03c8edSMatthew Dillon static int shm_use_phys; 1672332251cSMax Khon static int shm_allow_removed; 1688b03c8edSMatthew Dillon 169*af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RWTUN, &shminfo.shmmax, 0, 17084f85aedSChristian S.J. Peron "Maximum shared memory segment size"); 171*af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RWTUN, &shminfo.shmmin, 0, 17284f85aedSChristian S.J. Peron "Minimum shared memory segment size"); 1739baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, 17484f85aedSChristian S.J. Peron "Number of shared memory identifiers"); 1759baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, 17684f85aedSChristian S.J. Peron "Number of segments per process"); 177*af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RWTUN, &shminfo.shmall, 0, 17884f85aedSChristian S.J. Peron "Maximum number of pages available for shared memory"); 179*af3b2549SHans Petter Selasky SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RWTUN, 18084f85aedSChristian S.J. Peron &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core"); 181*af3b2549SHans Petter Selasky SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RWTUN, 18284f85aedSChristian S.J. Peron &shm_allow_removed, 0, 18384f85aedSChristian S.J. Peron "Enable/Disable attachment to attached segments marked for removal"); 1842fee06f0SMatthew D Fleming SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD, 18584f85aedSChristian S.J. Peron NULL, 0, sysctl_shmsegs, "", 18684f85aedSChristian S.J. 
Peron "Current number of shared memory segments allocated"); 1873d903220SDoug Rabson 1883d903220SDoug Rabson static int 1893d903220SDoug Rabson shm_find_segment_by_key(key) 1903d903220SDoug Rabson key_t key; 1913d903220SDoug Rabson { 1923d903220SDoug Rabson int i; 1933d903220SDoug Rabson 194255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 195921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 196921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 197b618bb96SAlfred Perlstein return (i); 198b618bb96SAlfred Perlstein return (-1); 1993d903220SDoug Rabson } 2003d903220SDoug Rabson 201921d05b9SRobert Watson static struct shmid_kernel * 2022332251cSMax Khon shm_find_segment_by_shmid(int shmid) 2033d903220SDoug Rabson { 2043d903220SDoug Rabson int segnum; 205921d05b9SRobert Watson struct shmid_kernel *shmseg; 2063d903220SDoug Rabson 2073d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 208255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 209b618bb96SAlfred Perlstein return (NULL); 2103d903220SDoug Rabson shmseg = &shmsegs[segnum]; 211921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2122332251cSMax Khon (!shm_allow_removed && 213921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 214921d05b9SRobert Watson shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid)) 215b618bb96SAlfred Perlstein return (NULL); 216b618bb96SAlfred Perlstein return (shmseg); 2173d903220SDoug Rabson } 2183d903220SDoug Rabson 219921d05b9SRobert Watson static struct shmid_kernel * 2202332251cSMax Khon shm_find_segment_by_shmidx(int segnum) 221491dec93SMichael Reifenberger { 222921d05b9SRobert Watson struct shmid_kernel *shmseg; 223491dec93SMichael Reifenberger 224491dec93SMichael Reifenberger if (segnum < 0 || segnum >= shmalloced) 225b618bb96SAlfred Perlstein return (NULL); 226491dec93SMichael Reifenberger shmseg = &shmsegs[segnum]; 227921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2282332251cSMax Khon (!shm_allow_removed && 229921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0)) 230b618bb96SAlfred Perlstein return (NULL); 231b618bb96SAlfred Perlstein return (shmseg); 232491dec93SMichael Reifenberger } 233491dec93SMichael Reifenberger 2343d903220SDoug Rabson static void 2353d903220SDoug Rabson shm_deallocate_segment(shmseg) 236921d05b9SRobert Watson struct shmid_kernel *shmseg; 2373d903220SDoug Rabson { 23845329b60SKonstantin Belousov vm_size_t size; 2393d903220SDoug Rabson 2400cddd8f0SMatthew Dillon GIANT_REQUIRED; 2410cddd8f0SMatthew Dillon 242b648d480SJohn Baldwin vm_object_deallocate(shmseg->object); 243b648d480SJohn Baldwin shmseg->object = NULL; 244b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 2453d903220SDoug Rabson shm_committed -= btoc(size); 2463d903220SDoug Rabson shm_nused--; 247921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 24814cedfc8SRobert Watson #ifdef MAC 24930d239bcSRobert Watson mac_sysvshm_cleanup(shmseg); 25014cedfc8SRobert Watson #endif 2513bcf7445SEdward Tomasz Napierala racct_sub_cred(shmseg->cred, RACCT_NSHM, 1); 2523bcf7445SEdward Tomasz Napierala racct_sub_cred(shmseg->cred, RACCT_SHMSIZE, size); 2538caddd81SEdward Tomasz Napierala crfree(shmseg->cred); 2548caddd81SEdward Tomasz Napierala shmseg->cred = NULL; 2553d903220SDoug Rabson } 2563d903220SDoug Rabson 2573d903220SDoug Rabson static int 2583db161e0SMatthew Dillon shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 2593d903220SDoug Rabson { 
260921d05b9SRobert Watson struct shmid_kernel *shmseg; 2613d903220SDoug Rabson int segnum, result; 26245329b60SKonstantin Belousov vm_size_t size; 2633d903220SDoug Rabson 2640cddd8f0SMatthew Dillon GIANT_REQUIRED; 265028f979dSDima Dorfman 2663d903220SDoug Rabson segnum = IPCID_TO_IX(shmmap_s->shmid); 2673d903220SDoug Rabson shmseg = &shmsegs[segnum]; 268b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 2693db161e0SMatthew Dillon result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size); 2703d903220SDoug Rabson if (result != KERN_SUCCESS) 271b618bb96SAlfred Perlstein return (EINVAL); 2723d903220SDoug Rabson shmmap_s->shmid = -1; 273921d05b9SRobert Watson shmseg->u.shm_dtime = time_second; 274921d05b9SRobert Watson if ((--shmseg->u.shm_nattch <= 0) && 275921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) { 2763d903220SDoug Rabson shm_deallocate_segment(shmseg); 2773d903220SDoug Rabson shm_last_free = segnum; 2783d903220SDoug Rabson } 279b618bb96SAlfred Perlstein return (0); 2803d903220SDoug Rabson } 2813d903220SDoug Rabson 282b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 2833d903220SDoug Rabson struct shmdt_args { 284e1d7d0bbSAlfred Perlstein const void *shmaddr; 2853d903220SDoug Rabson }; 286b5d5c0c9SPeter Wemm #endif 2873d903220SDoug Rabson int 2888451d0ddSKip Macy sys_shmdt(td, uap) 289b40ce416SJulian Elischer struct thread *td; 2903d903220SDoug Rabson struct shmdt_args *uap; 2913d903220SDoug Rabson { 292b40ce416SJulian Elischer struct proc *p = td->td_proc; 2933d903220SDoug Rabson struct shmmap_state *shmmap_s; 29414cedfc8SRobert Watson #ifdef MAC 29514cedfc8SRobert Watson struct shmid_kernel *shmsegptr; 29614cedfc8SRobert Watson #endif 2973d903220SDoug Rabson int i; 298b6a4b4f9SMatthew Dillon int error = 0; 2993d903220SDoug Rabson 3000304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 301c6f55f33SJohn Baldwin return (ENOSYS); 302b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 3038209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 304b6a4b4f9SMatthew Dillon if (shmmap_s == NULL) { 305b6a4b4f9SMatthew Dillon error = EINVAL; 306b6a4b4f9SMatthew Dillon goto done2; 307b6a4b4f9SMatthew Dillon } 308b6a4b4f9SMatthew Dillon for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 3093d903220SDoug Rabson if (shmmap_s->shmid != -1 && 310b6a4b4f9SMatthew Dillon shmmap_s->va == (vm_offset_t)uap->shmaddr) { 3113d903220SDoug Rabson break; 312b6a4b4f9SMatthew Dillon } 313b6a4b4f9SMatthew Dillon } 314b6a4b4f9SMatthew Dillon if (i == shminfo.shmseg) { 315b6a4b4f9SMatthew Dillon error = EINVAL; 316b6a4b4f9SMatthew Dillon goto done2; 317b6a4b4f9SMatthew Dillon } 31814cedfc8SRobert Watson #ifdef MAC 31914cedfc8SRobert Watson shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)]; 32030d239bcSRobert Watson error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr); 321f50c4fd8SRobert Watson if (error != 0) 32214cedfc8SRobert Watson goto done2; 32314cedfc8SRobert Watson #endif 3243db161e0SMatthew Dillon error = shm_delete_mapping(p->p_vmspace, shmmap_s); 325b6a4b4f9SMatthew Dillon done2: 326b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 327b6a4b4f9SMatthew Dillon return (error); 3283d903220SDoug Rabson } 3293d903220SDoug Rabson 330b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 3313d903220SDoug Rabson struct shmat_args { 3323d903220SDoug Rabson int shmid; 333e1d7d0bbSAlfred Perlstein const void *shmaddr; 3343d903220SDoug Rabson int shmflg; 3353d903220SDoug Rabson }; 336b5d5c0c9SPeter Wemm #endif 3373d903220SDoug Rabson int 3382332251cSMax Khon kern_shmat(td, shmid, 
shmaddr, shmflg) 339b40ce416SJulian Elischer struct thread *td; 340f130dcf2SMartin Blapp int shmid; 341f130dcf2SMartin Blapp const void *shmaddr; 342f130dcf2SMartin Blapp int shmflg; 3433d903220SDoug Rabson { 344b40ce416SJulian Elischer struct proc *p = td->td_proc; 34563b9ae94SAlan Cox int i; 346921d05b9SRobert Watson struct shmid_kernel *shmseg; 3473d903220SDoug Rabson struct shmmap_state *shmmap_s = NULL; 3483d903220SDoug Rabson vm_offset_t attach_va; 3493d903220SDoug Rabson vm_prot_t prot; 3503d903220SDoug Rabson vm_size_t size; 351a51f7119SJohn Dyson int rv; 352b6a4b4f9SMatthew Dillon int error = 0; 3533d903220SDoug Rabson 3540304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 355c6f55f33SJohn Baldwin return (ENOSYS); 356b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 3578209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 3583d903220SDoug Rabson if (shmmap_s == NULL) { 35945329b60SKonstantin Belousov shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state), 36045329b60SKonstantin Belousov M_SHM, M_WAITOK); 3613d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) 3623d903220SDoug Rabson shmmap_s[i].shmid = -1; 3632cc593fdSAlfred Perlstein p->p_vmspace->vm_shm = shmmap_s; 3643d903220SDoug Rabson } 3652332251cSMax Khon shmseg = shm_find_segment_by_shmid(shmid); 366b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 367b6a4b4f9SMatthew Dillon error = EINVAL; 368b6a4b4f9SMatthew Dillon goto done2; 369b6a4b4f9SMatthew Dillon } 370921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, 371f130dcf2SMartin Blapp (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); 372797f2d22SPoul-Henning Kamp if (error) 373b6a4b4f9SMatthew Dillon goto done2; 37414cedfc8SRobert Watson #ifdef MAC 37530d239bcSRobert Watson error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg); 376f50c4fd8SRobert Watson if (error != 0) 37714cedfc8SRobert Watson goto done2; 37814cedfc8SRobert Watson #endif 3793d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) { 3803d903220SDoug Rabson if (shmmap_s->shmid == -1) 3813d903220SDoug Rabson break; 3823d903220SDoug Rabson shmmap_s++; 3833d903220SDoug Rabson } 384b6a4b4f9SMatthew Dillon if (i >= shminfo.shmseg) { 385b6a4b4f9SMatthew Dillon error = EMFILE; 386b6a4b4f9SMatthew Dillon goto done2; 387b6a4b4f9SMatthew Dillon } 388b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 3893d903220SDoug Rabson prot = VM_PROT_READ; 390f130dcf2SMartin Blapp if ((shmflg & SHM_RDONLY) == 0) 3913d903220SDoug Rabson prot |= VM_PROT_WRITE; 392f130dcf2SMartin Blapp if (shmaddr) { 393f130dcf2SMartin Blapp if (shmflg & SHM_RND) { 394f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1); 395f130dcf2SMartin Blapp } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) { 396f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr; 397b6a4b4f9SMatthew Dillon } else { 398b6a4b4f9SMatthew Dillon error = EINVAL; 399b6a4b4f9SMatthew Dillon goto done2; 400b6a4b4f9SMatthew Dillon } 4013d903220SDoug Rabson } else { 402028f979dSDima Dorfman /* 403028f979dSDima Dorfman * This is just a hint to vm_map_find() about where to 404028f979dSDima Dorfman * put it. 405028f979dSDima Dorfman */ 40668ba7a1dSTim J. Robbins PROC_LOCK(p); 40768ba7a1dSTim J. Robbins attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr + 40868ba7a1dSTim J. Robbins lim_max(p, RLIMIT_DATA)); 40968ba7a1dSTim J. 
Robbins PROC_UNLOCK(p); 4103d903220SDoug Rabson } 411a51f7119SJohn Dyson 412b648d480SJohn Baldwin vm_object_reference(shmseg->object); 413b648d480SJohn Baldwin rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->object, 41463b9ae94SAlan Cox 0, &attach_va, size, 0, shmaddr != NULL ? VMFS_NO_SPACE : 415ff74a3faSJohn Baldwin VMFS_OPTIMAL_SPACE, prot, prot, MAP_INHERIT_SHARE); 416a51f7119SJohn Dyson if (rv != KERN_SUCCESS) { 417b648d480SJohn Baldwin vm_object_deallocate(shmseg->object); 418b6a4b4f9SMatthew Dillon error = ENOMEM; 419b6a4b4f9SMatthew Dillon goto done2; 420a51f7119SJohn Dyson } 4210463028cSJohn Dyson 4223d903220SDoug Rabson shmmap_s->va = attach_va; 423f130dcf2SMartin Blapp shmmap_s->shmid = shmid; 424921d05b9SRobert Watson shmseg->u.shm_lpid = p->p_pid; 425921d05b9SRobert Watson shmseg->u.shm_atime = time_second; 426921d05b9SRobert Watson shmseg->u.shm_nattch++; 427b40ce416SJulian Elischer td->td_retval[0] = attach_va; 428b6a4b4f9SMatthew Dillon done2: 429b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 430b6a4b4f9SMatthew Dillon return (error); 4313d903220SDoug Rabson } 4323d903220SDoug Rabson 433f130dcf2SMartin Blapp int 4348451d0ddSKip Macy sys_shmat(td, uap) 435f130dcf2SMartin Blapp struct thread *td; 436f130dcf2SMartin Blapp struct shmat_args *uap; 437f130dcf2SMartin Blapp { 4382332251cSMax Khon return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg); 439f130dcf2SMartin Blapp } 440f130dcf2SMartin Blapp 4413d903220SDoug Rabson int 4422332251cSMax Khon kern_shmctl(td, shmid, cmd, buf, bufsz) 443b40ce416SJulian Elischer struct thread *td; 444f130dcf2SMartin Blapp int shmid; 445f130dcf2SMartin Blapp int cmd; 446f130dcf2SMartin Blapp void *buf; 447f130dcf2SMartin Blapp size_t *bufsz; 4483d903220SDoug Rabson { 449b6a4b4f9SMatthew Dillon int error = 0; 450921d05b9SRobert Watson struct shmid_kernel *shmseg; 4513d903220SDoug Rabson 4520304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 453c6f55f33SJohn Baldwin return (ENOSYS); 454f130dcf2SMartin Blapp 455b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 456f130dcf2SMartin Blapp switch (cmd) { 4574f18813fSChristian S.J. Peron /* 4584f18813fSChristian S.J. Peron * It is possible that kern_shmctl is being called from the Linux ABI 4594f18813fSChristian S.J. Peron * layer, in which case, we will need to implement IPC_INFO. It should 4604f18813fSChristian S.J. Peron * be noted that other shmctl calls will be funneled through here for 4614f18813fSChristian S.J. Peron * Linix binaries as well. 4624f18813fSChristian S.J. Peron * 4634f18813fSChristian S.J. Peron * NB: The Linux ABI layer will convert this data to structure(s) more 4644f18813fSChristian S.J. Peron * consistent with the Linux ABI. 4654f18813fSChristian S.J. Peron */ 466491dec93SMichael Reifenberger case IPC_INFO: 467f130dcf2SMartin Blapp memcpy(buf, &shminfo, sizeof(shminfo)); 468f130dcf2SMartin Blapp if (bufsz) 469f130dcf2SMartin Blapp *bufsz = sizeof(shminfo); 470491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 471491dec93SMichael Reifenberger goto done2; 472491dec93SMichael Reifenberger case SHM_INFO: { 473491dec93SMichael Reifenberger struct shm_info shm_info; 474491dec93SMichael Reifenberger shm_info.used_ids = shm_nused; 475491dec93SMichael Reifenberger shm_info.shm_rss = 0; /*XXX where to get from ? */ 476491dec93SMichael Reifenberger shm_info.shm_tot = 0; /*XXX where to get from ? */ 477491dec93SMichael Reifenberger shm_info.shm_swp = 0; /*XXX where to get from ? 
*/ 478491dec93SMichael Reifenberger shm_info.swap_attempts = 0; /*XXX where to get from ? */ 479491dec93SMichael Reifenberger shm_info.swap_successes = 0; /*XXX where to get from ? */ 480f130dcf2SMartin Blapp memcpy(buf, &shm_info, sizeof(shm_info)); 481f130dcf2SMartin Blapp if (bufsz) 482f130dcf2SMartin Blapp *bufsz = sizeof(shm_info); 483491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 484491dec93SMichael Reifenberger goto done2; 485491dec93SMichael Reifenberger } 486491dec93SMichael Reifenberger } 487f130dcf2SMartin Blapp if (cmd == SHM_STAT) 4882332251cSMax Khon shmseg = shm_find_segment_by_shmidx(shmid); 489491dec93SMichael Reifenberger else 4902332251cSMax Khon shmseg = shm_find_segment_by_shmid(shmid); 491b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 492b6a4b4f9SMatthew Dillon error = EINVAL; 493b6a4b4f9SMatthew Dillon goto done2; 494b6a4b4f9SMatthew Dillon } 49514cedfc8SRobert Watson #ifdef MAC 49630d239bcSRobert Watson error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd); 497f50c4fd8SRobert Watson if (error != 0) 49814cedfc8SRobert Watson goto done2; 49914cedfc8SRobert Watson #endif 500f130dcf2SMartin Blapp switch (cmd) { 501491dec93SMichael Reifenberger case SHM_STAT: 5023d903220SDoug Rabson case IPC_STAT: 503921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 504797f2d22SPoul-Henning Kamp if (error) 505b6a4b4f9SMatthew Dillon goto done2; 506921d05b9SRobert Watson memcpy(buf, &shmseg->u, sizeof(struct shmid_ds)); 507f130dcf2SMartin Blapp if (bufsz) 508f130dcf2SMartin Blapp *bufsz = sizeof(struct shmid_ds); 509f130dcf2SMartin Blapp if (cmd == SHM_STAT) 510921d05b9SRobert Watson td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm); 5113d903220SDoug Rabson break; 512f130dcf2SMartin Blapp case IPC_SET: { 513f130dcf2SMartin Blapp struct shmid_ds *shmid; 514f130dcf2SMartin Blapp 515f130dcf2SMartin Blapp shmid = (struct shmid_ds *)buf; 516921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 517797f2d22SPoul-Henning Kamp if (error) 518b6a4b4f9SMatthew Dillon goto done2; 519921d05b9SRobert Watson shmseg->u.shm_perm.uid = shmid->shm_perm.uid; 520921d05b9SRobert Watson shmseg->u.shm_perm.gid = shmid->shm_perm.gid; 521921d05b9SRobert Watson shmseg->u.shm_perm.mode = 522921d05b9SRobert Watson (shmseg->u.shm_perm.mode & ~ACCESSPERMS) | 523f130dcf2SMartin Blapp (shmid->shm_perm.mode & ACCESSPERMS); 524921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 5253d903220SDoug Rabson break; 526f130dcf2SMartin Blapp } 5273d903220SDoug Rabson case IPC_RMID: 528921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 529797f2d22SPoul-Henning Kamp if (error) 530b6a4b4f9SMatthew Dillon goto done2; 531921d05b9SRobert Watson shmseg->u.shm_perm.key = IPC_PRIVATE; 532921d05b9SRobert Watson shmseg->u.shm_perm.mode |= SHMSEG_REMOVED; 533921d05b9SRobert Watson if (shmseg->u.shm_nattch <= 0) { 5343d903220SDoug Rabson shm_deallocate_segment(shmseg); 535f130dcf2SMartin Blapp shm_last_free = IPCID_TO_IX(shmid); 5363d903220SDoug Rabson } 5373d903220SDoug Rabson break; 5383d903220SDoug Rabson #if 0 5393d903220SDoug Rabson case SHM_LOCK: 5403d903220SDoug Rabson case SHM_UNLOCK: 5413d903220SDoug Rabson #endif 5423d903220SDoug Rabson default: 543b6a4b4f9SMatthew Dillon error = EINVAL; 544b6a4b4f9SMatthew Dillon break; 5453d903220SDoug Rabson } 546b6a4b4f9SMatthew Dillon done2: 547b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 548b6a4b4f9SMatthew Dillon return (error); 5493d903220SDoug Rabson } 5503d903220SDoug Rabson 55171361470SJohn 
Baldwin #ifndef _SYS_SYSPROTO_H_ 55271361470SJohn Baldwin struct shmctl_args { 55371361470SJohn Baldwin int shmid; 55471361470SJohn Baldwin int cmd; 55571361470SJohn Baldwin struct shmid_ds *buf; 55671361470SJohn Baldwin }; 55771361470SJohn Baldwin #endif 558f130dcf2SMartin Blapp int 5598451d0ddSKip Macy sys_shmctl(td, uap) 560f130dcf2SMartin Blapp struct thread *td; 561f130dcf2SMartin Blapp struct shmctl_args *uap; 562f130dcf2SMartin Blapp { 563f130dcf2SMartin Blapp int error = 0; 564f130dcf2SMartin Blapp struct shmid_ds buf; 565f130dcf2SMartin Blapp size_t bufsz; 566f130dcf2SMartin Blapp 5674f18813fSChristian S.J. Peron /* 5684f18813fSChristian S.J. Peron * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support 5694f18813fSChristian S.J. Peron * Linux binaries. If we see the call come through the FreeBSD ABI, 5704f18813fSChristian S.J. Peron * return an error back to the user since we do not to support this. 5714f18813fSChristian S.J. Peron */ 5724f18813fSChristian S.J. Peron if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO || 5734f18813fSChristian S.J. Peron uap->cmd == SHM_STAT) 5744f18813fSChristian S.J. Peron return (EINVAL); 5754f18813fSChristian S.J. Peron 576f130dcf2SMartin Blapp /* IPC_SET needs to copyin the buffer before calling kern_shmctl */ 577f130dcf2SMartin Blapp if (uap->cmd == IPC_SET) { 578f130dcf2SMartin Blapp if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds)))) 579f130dcf2SMartin Blapp goto done; 580f130dcf2SMartin Blapp } 581f130dcf2SMartin Blapp 5822332251cSMax Khon error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz); 583f130dcf2SMartin Blapp if (error) 584f130dcf2SMartin Blapp goto done; 585f130dcf2SMartin Blapp 586f130dcf2SMartin Blapp /* Cases in which we need to copyout */ 587f130dcf2SMartin Blapp switch (uap->cmd) { 588f130dcf2SMartin Blapp case IPC_STAT: 589f130dcf2SMartin Blapp error = copyout(&buf, uap->buf, bufsz); 590f130dcf2SMartin Blapp break; 591f130dcf2SMartin Blapp } 592f130dcf2SMartin Blapp 593f130dcf2SMartin Blapp done: 594f130dcf2SMartin Blapp if (error) { 595f130dcf2SMartin Blapp /* Invalidate the return value */ 596f130dcf2SMartin Blapp td->td_retval[0] = -1; 597f130dcf2SMartin Blapp } 598f130dcf2SMartin Blapp return (error); 599f130dcf2SMartin Blapp } 600f130dcf2SMartin Blapp 601f130dcf2SMartin Blapp 6023d903220SDoug Rabson static int 603b40ce416SJulian Elischer shmget_existing(td, uap, mode, segnum) 604b40ce416SJulian Elischer struct thread *td; 6053d903220SDoug Rabson struct shmget_args *uap; 6063d903220SDoug Rabson int mode; 6073d903220SDoug Rabson int segnum; 6083d903220SDoug Rabson { 609921d05b9SRobert Watson struct shmid_kernel *shmseg; 6103d903220SDoug Rabson int error; 6113d903220SDoug Rabson 6123d903220SDoug Rabson shmseg = &shmsegs[segnum]; 613921d05b9SRobert Watson if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) { 6143d903220SDoug Rabson /* 6153d903220SDoug Rabson * This segment is in the process of being allocated. Wait 6163d903220SDoug Rabson * until it's done, and look the key up again (in case the 6173d903220SDoug Rabson * allocation failed or it was freed). 
6183d903220SDoug Rabson */ 619921d05b9SRobert Watson shmseg->u.shm_perm.mode |= SHMSEG_WANTED; 6202cc593fdSAlfred Perlstein error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0); 621797f2d22SPoul-Henning Kamp if (error) 622b618bb96SAlfred Perlstein return (error); 623b618bb96SAlfred Perlstein return (EAGAIN); 6243d903220SDoug Rabson } 625dc92aa57SAlan Cox if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 626b618bb96SAlfred Perlstein return (EEXIST); 62714cedfc8SRobert Watson #ifdef MAC 62830d239bcSRobert Watson error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg); 629f50c4fd8SRobert Watson if (error != 0) 6307723d5edSRobert Watson return (error); 63114cedfc8SRobert Watson #endif 632b648d480SJohn Baldwin if (uap->size != 0 && uap->size > shmseg->u.shm_segsz) 633b618bb96SAlfred Perlstein return (EINVAL); 634921d05b9SRobert Watson td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 635b618bb96SAlfred Perlstein return (0); 6363d903220SDoug Rabson } 6373d903220SDoug Rabson 6383d903220SDoug Rabson static int 639b40ce416SJulian Elischer shmget_allocate_segment(td, uap, mode) 640b40ce416SJulian Elischer struct thread *td; 6413d903220SDoug Rabson struct shmget_args *uap; 6423d903220SDoug Rabson int mode; 6433d903220SDoug Rabson { 64465067cc8SKonstantin Belousov int i, segnum, shmid; 64565067cc8SKonstantin Belousov size_t size; 646a854ed98SJohn Baldwin struct ucred *cred = td->td_ucred; 647921d05b9SRobert Watson struct shmid_kernel *shmseg; 6480049f8b2SAlan Cox vm_object_t shm_object; 6493d903220SDoug Rabson 6500cddd8f0SMatthew Dillon GIANT_REQUIRED; 6510cddd8f0SMatthew Dillon 6523d903220SDoug Rabson if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 653b618bb96SAlfred Perlstein return (EINVAL); 654028f979dSDima Dorfman if (shm_nused >= shminfo.shmmni) /* Any shmids left? */ 655b618bb96SAlfred Perlstein return (ENOSPC); 6569e609ddeSJoerg Wunsch size = round_page(uap->size); 6573d903220SDoug Rabson if (shm_committed + btoc(size) > shminfo.shmall) 658b618bb96SAlfred Perlstein return (ENOMEM); 6593d903220SDoug Rabson if (shm_last_free < 0) { 660028f979dSDima Dorfman shmrealloc(); /* Maybe expand the shmsegs[] array. 
*/ 661255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 662921d05b9SRobert Watson if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) 6633d903220SDoug Rabson break; 664255108f3SPeter Wemm if (i == shmalloced) 665b618bb96SAlfred Perlstein return (ENOSPC); 6663d903220SDoug Rabson segnum = i; 6673d903220SDoug Rabson } else { 6683d903220SDoug Rabson segnum = shm_last_free; 6693d903220SDoug Rabson shm_last_free = -1; 6703d903220SDoug Rabson } 6713d903220SDoug Rabson shmseg = &shmsegs[segnum]; 672afcc55f3SEdward Tomasz Napierala #ifdef RACCT 6733bcf7445SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 6743bcf7445SEdward Tomasz Napierala if (racct_add(td->td_proc, RACCT_NSHM, 1)) { 6753bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 6763bcf7445SEdward Tomasz Napierala return (ENOSPC); 6773bcf7445SEdward Tomasz Napierala } 6783bcf7445SEdward Tomasz Napierala if (racct_add(td->td_proc, RACCT_SHMSIZE, size)) { 6793bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_NSHM, 1); 6803bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 6813bcf7445SEdward Tomasz Napierala return (ENOMEM); 6823bcf7445SEdward Tomasz Napierala } 6833bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 684afcc55f3SEdward Tomasz Napierala #endif 6853d903220SDoug Rabson /* 6863d903220SDoug Rabson * In case we sleep in malloc(), mark the segment present but deleted 6873d903220SDoug Rabson * so that noone else tries to create the same key. 6883d903220SDoug Rabson */ 689921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 690921d05b9SRobert Watson shmseg->u.shm_perm.key = uap->key; 691921d05b9SRobert Watson shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff; 692921d05b9SRobert Watson shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 693a51f7119SJohn Dyson 694ae9b8c3aSJohn Dyson /* 695ae9b8c3aSJohn Dyson * We make sure that we have allocated a pager before we need 696ae9b8c3aSJohn Dyson * to. 697ae9b8c3aSJohn Dyson */ 6983364c323SKonstantin Belousov shm_object = vm_pager_allocate(shm_use_phys ? 
OBJT_PHYS : OBJT_SWAP, 6993364c323SKonstantin Belousov 0, size, VM_PROT_DEFAULT, 0, cred); 7003bcf7445SEdward Tomasz Napierala if (shm_object == NULL) { 701afcc55f3SEdward Tomasz Napierala #ifdef RACCT 7023bcf7445SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 7033bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_NSHM, 1); 7043bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_SHMSIZE, size); 7053bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 706afcc55f3SEdward Tomasz Napierala #endif 7073364c323SKonstantin Belousov return (ENOMEM); 7083bcf7445SEdward Tomasz Napierala } 70989f6b863SAttilio Rao VM_OBJECT_WLOCK(shm_object); 7100049f8b2SAlan Cox vm_object_clear_flag(shm_object, OBJ_ONEMAPPING); 7110049f8b2SAlan Cox vm_object_set_flag(shm_object, OBJ_NOSPLIT); 71289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(shm_object); 713cbd8ec09SJohn Dyson 714b648d480SJohn Baldwin shmseg->object = shm_object; 715921d05b9SRobert Watson shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid; 716921d05b9SRobert Watson shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid; 717921d05b9SRobert Watson shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) | 7183d903220SDoug Rabson (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 719b1fb5f9cSEdward Tomasz Napierala shmseg->cred = crhold(cred); 720921d05b9SRobert Watson shmseg->u.shm_segsz = uap->size; 721921d05b9SRobert Watson shmseg->u.shm_cpid = td->td_proc->p_pid; 722921d05b9SRobert Watson shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0; 723921d05b9SRobert Watson shmseg->u.shm_atime = shmseg->u.shm_dtime = 0; 72414cedfc8SRobert Watson #ifdef MAC 72530d239bcSRobert Watson mac_sysvshm_create(cred, shmseg); 72614cedfc8SRobert Watson #endif 727921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 7283d903220SDoug Rabson shm_committed += btoc(size); 7293d903220SDoug Rabson shm_nused++; 730921d05b9SRobert Watson if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) { 7313d903220SDoug Rabson /* 7323d903220SDoug Rabson * Somebody else wanted this key while we were asleep. Wake 7333d903220SDoug Rabson * them up now. 
7343d903220SDoug Rabson */ 735921d05b9SRobert Watson shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED; 7362cc593fdSAlfred Perlstein wakeup(shmseg); 7373d903220SDoug Rabson } 738b40ce416SJulian Elischer td->td_retval[0] = shmid; 739b618bb96SAlfred Perlstein return (0); 7403d903220SDoug Rabson } 7413d903220SDoug Rabson 74271361470SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 74371361470SJohn Baldwin struct shmget_args { 74471361470SJohn Baldwin key_t key; 74571361470SJohn Baldwin size_t size; 74671361470SJohn Baldwin int shmflg; 74771361470SJohn Baldwin }; 74871361470SJohn Baldwin #endif 7493d903220SDoug Rabson int 7508451d0ddSKip Macy sys_shmget(td, uap) 751b40ce416SJulian Elischer struct thread *td; 7523d903220SDoug Rabson struct shmget_args *uap; 7533d903220SDoug Rabson { 754b6a4b4f9SMatthew Dillon int segnum, mode; 755b6a4b4f9SMatthew Dillon int error; 7563d903220SDoug Rabson 7570304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 758c6f55f33SJohn Baldwin return (ENOSYS); 759b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 7603d903220SDoug Rabson mode = uap->shmflg & ACCESSPERMS; 7613d903220SDoug Rabson if (uap->key != IPC_PRIVATE) { 7623d903220SDoug Rabson again: 7633d903220SDoug Rabson segnum = shm_find_segment_by_key(uap->key); 7643d903220SDoug Rabson if (segnum >= 0) { 765b40ce416SJulian Elischer error = shmget_existing(td, uap, mode, segnum); 7663d903220SDoug Rabson if (error == EAGAIN) 7673d903220SDoug Rabson goto again; 768b6a4b4f9SMatthew Dillon goto done2; 7693d903220SDoug Rabson } 770b6a4b4f9SMatthew Dillon if ((uap->shmflg & IPC_CREAT) == 0) { 771b6a4b4f9SMatthew Dillon error = ENOENT; 772b6a4b4f9SMatthew Dillon goto done2; 7733d903220SDoug Rabson } 774b6a4b4f9SMatthew Dillon } 775b40ce416SJulian Elischer error = shmget_allocate_segment(td, uap, mode); 776b6a4b4f9SMatthew Dillon done2: 777b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 778b6a4b4f9SMatthew Dillon return (error); 7793d903220SDoug Rabson } 7803d903220SDoug Rabson 78178525ce3SAlfred Perlstein static void 78278525ce3SAlfred Perlstein shmfork_myhook(p1, p2) 7833d903220SDoug Rabson struct proc *p1, *p2; 7843d903220SDoug Rabson { 7853d903220SDoug Rabson struct shmmap_state *shmmap_s; 7863d903220SDoug Rabson size_t size; 7873d903220SDoug Rabson int i; 7883d903220SDoug Rabson 78994ddc707SAlan Cox mtx_lock(&Giant); 7903d903220SDoug Rabson size = shminfo.shmseg * sizeof(struct shmmap_state); 791a163d034SWarner Losh shmmap_s = malloc(size, M_SHM, M_WAITOK); 7922cc593fdSAlfred Perlstein bcopy(p1->p_vmspace->vm_shm, shmmap_s, size); 7932cc593fdSAlfred Perlstein p2->p_vmspace->vm_shm = shmmap_s; 7943d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 7953d903220SDoug Rabson if (shmmap_s->shmid != -1) 796921d05b9SRobert Watson shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++; 79794ddc707SAlan Cox mtx_unlock(&Giant); 7983d903220SDoug Rabson } 7993d903220SDoug Rabson 80078525ce3SAlfred Perlstein static void 8013db161e0SMatthew Dillon shmexit_myhook(struct vmspace *vm) 8023d903220SDoug Rabson { 8033db161e0SMatthew Dillon struct shmmap_state *base, *shm; 8043d903220SDoug Rabson int i; 8053d903220SDoug Rabson 8063db161e0SMatthew Dillon if ((base = vm->vm_shm) != NULL) { 8073db161e0SMatthew Dillon vm->vm_shm = NULL; 8081a276a3fSAlan Cox mtx_lock(&Giant); 8093db161e0SMatthew Dillon for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) { 8103db161e0SMatthew Dillon if (shm->shmid != -1) 8113db161e0SMatthew Dillon shm_delete_mapping(vm, shm); 8123db161e0SMatthew Dillon } 8131a276a3fSAlan Cox mtx_unlock(&Giant); 
8143db161e0SMatthew Dillon free(base, M_SHM); 8153db161e0SMatthew Dillon } 8163d903220SDoug Rabson } 8173d903220SDoug Rabson 818255108f3SPeter Wemm static void 819255108f3SPeter Wemm shmrealloc(void) 820255108f3SPeter Wemm { 821255108f3SPeter Wemm int i; 822921d05b9SRobert Watson struct shmid_kernel *newsegs; 823255108f3SPeter Wemm 824255108f3SPeter Wemm if (shmalloced >= shminfo.shmmni) 825255108f3SPeter Wemm return; 826255108f3SPeter Wemm 827a163d034SWarner Losh newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 828255108f3SPeter Wemm if (newsegs == NULL) 829255108f3SPeter Wemm return; 830255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 831255108f3SPeter Wemm bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 832255108f3SPeter Wemm for (; i < shminfo.shmmni; i++) { 833921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 834921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 83514cedfc8SRobert Watson #ifdef MAC 83630d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 83714cedfc8SRobert Watson #endif 838255108f3SPeter Wemm } 839255108f3SPeter Wemm free(shmsegs, M_SHM); 840255108f3SPeter Wemm shmsegs = newsegs; 841255108f3SPeter Wemm shmalloced = shminfo.shmmni; 842255108f3SPeter Wemm } 843255108f3SPeter Wemm 84475d633cbSKonstantin Belousov static struct syscall_helper_data shm_syscalls[] = { 84575d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmat), 84675d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmctl), 84775d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmdt), 84875d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmget), 84975d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 85075d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 8518451d0ddSKip Macy SYSCALL_INIT_HELPER_COMPAT(freebsd7_shmctl), 85275d633cbSKonstantin Belousov #endif 85375d633cbSKonstantin Belousov #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 85475d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmsys), 85575d633cbSKonstantin Belousov #endif 85675d633cbSKonstantin Belousov SYSCALL_INIT_LAST 85775d633cbSKonstantin Belousov }; 85875d633cbSKonstantin Belousov 85975d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 86075d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32.h> 86175d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_ipc.h> 86275d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_proto.h> 86375d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_signal.h> 86475d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_syscall.h> 86575d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_util.h> 86675d633cbSKonstantin Belousov 86775d633cbSKonstantin Belousov static struct syscall_helper_data shm32_syscalls[] = { 8688451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmat), 8698451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmdt), 8708451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmget), 87175d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmsys), 87275d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmctl), 87375d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 87475d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 87575d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd7_freebsd32_shmctl), 87675d633cbSKonstantin Belousov #endif 87775d633cbSKonstantin Belousov SYSCALL_INIT_LAST 87875d633cbSKonstantin Belousov }; 
87975d633cbSKonstantin Belousov #endif 88075d633cbSKonstantin Belousov 88175d633cbSKonstantin Belousov static int 88278525ce3SAlfred Perlstein shminit() 8833d903220SDoug Rabson { 88475d633cbSKonstantin Belousov int i, error; 885255108f3SPeter Wemm 8864d9d1e82SRuslan Ermilov #ifndef BURN_BRIDGES 8874d9d1e82SRuslan Ermilov if (TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall) != 0) 8884d9d1e82SRuslan Ermilov printf("kern.ipc.shmmaxpgs is now called kern.ipc.shmall!\n"); 8894d9d1e82SRuslan Ermilov #endif 890*af3b2549SHans Petter Selasky if (shminfo.shmmax == SHMMAX) { 8914d9d1e82SRuslan Ermilov /* Initialize shmmax dealing with possible overflow. */ 892*af3b2549SHans Petter Selasky for (i = PAGE_SIZE; i != 0; i--) { 893a4c24c66SJohn Baldwin shminfo.shmmax = shminfo.shmall * i; 894*af3b2549SHans Petter Selasky if ((shminfo.shmmax / shminfo.shmall) == (u_long)i) 8955015c68aSAlfred Perlstein break; 8965015c68aSAlfred Perlstein } 89712075c09SPawel Jakub Dawidek } 898255108f3SPeter Wemm shmalloced = shminfo.shmmni; 899a163d034SWarner Losh shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 900255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) { 901921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 902921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 90314cedfc8SRobert Watson #ifdef MAC 90430d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 90514cedfc8SRobert Watson #endif 9063d903220SDoug Rabson } 9073d903220SDoug Rabson shm_last_free = 0; 9083d903220SDoug Rabson shm_nused = 0; 9093d903220SDoug Rabson shm_committed = 0; 91078525ce3SAlfred Perlstein shmexit_hook = &shmexit_myhook; 91178525ce3SAlfred Perlstein shmfork_hook = &shmfork_myhook; 91275d633cbSKonstantin Belousov 91375d633cbSKonstantin Belousov error = syscall_helper_register(shm_syscalls); 91475d633cbSKonstantin Belousov if (error != 0) 91575d633cbSKonstantin Belousov return (error); 91675d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 91775d633cbSKonstantin Belousov error = syscall32_helper_register(shm32_syscalls); 91875d633cbSKonstantin Belousov if (error != 0) 91975d633cbSKonstantin Belousov return (error); 92075d633cbSKonstantin Belousov #endif 92175d633cbSKonstantin Belousov return (0); 9223d903220SDoug Rabson } 92378525ce3SAlfred Perlstein 92478525ce3SAlfred Perlstein static int 92578525ce3SAlfred Perlstein shmunload() 92678525ce3SAlfred Perlstein { 92714cedfc8SRobert Watson int i; 92878525ce3SAlfred Perlstein 92978525ce3SAlfred Perlstein if (shm_nused > 0) 93078525ce3SAlfred Perlstein return (EBUSY); 93178525ce3SAlfred Perlstein 93275d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 93375d633cbSKonstantin Belousov syscall32_helper_unregister(shm32_syscalls); 93475d633cbSKonstantin Belousov #endif 93575d633cbSKonstantin Belousov syscall_helper_unregister(shm_syscalls); 93675d633cbSKonstantin Belousov 9370d9d996dSKonstantin Belousov for (i = 0; i < shmalloced; i++) { 93814cedfc8SRobert Watson #ifdef MAC 93930d239bcSRobert Watson mac_sysvshm_destroy(&shmsegs[i]); 94014cedfc8SRobert Watson #endif 9410d9d996dSKonstantin Belousov /* 9420d9d996dSKonstantin Belousov * Objects might be still mapped into the processes 9430d9d996dSKonstantin Belousov * address spaces. Actual free would happen on the 9440d9d996dSKonstantin Belousov * last mapping destruction. 
9450d9d996dSKonstantin Belousov */ 9460d9d996dSKonstantin Belousov if (shmsegs[i].u.shm_perm.mode != SHMSEG_FREE) 9470d9d996dSKonstantin Belousov vm_object_deallocate(shmsegs[i].object); 9480d9d996dSKonstantin Belousov } 94978525ce3SAlfred Perlstein free(shmsegs, M_SHM); 95078525ce3SAlfred Perlstein shmexit_hook = NULL; 95178525ce3SAlfred Perlstein shmfork_hook = NULL; 95278525ce3SAlfred Perlstein return (0); 95378525ce3SAlfred Perlstein } 95478525ce3SAlfred Perlstein 95578525ce3SAlfred Perlstein static int 956a723c4e1SDima Dorfman sysctl_shmsegs(SYSCTL_HANDLER_ARGS) 957a723c4e1SDima Dorfman { 958a723c4e1SDima Dorfman 959a723c4e1SDima Dorfman return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0]))); 960a723c4e1SDima Dorfman } 961a723c4e1SDima Dorfman 96245f48220SJohn Baldwin #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 96345f48220SJohn Baldwin struct oshmid_ds { 96445f48220SJohn Baldwin struct ipc_perm_old shm_perm; /* operation perms */ 96545f48220SJohn Baldwin int shm_segsz; /* size of segment (bytes) */ 96645f48220SJohn Baldwin u_short shm_cpid; /* pid, creator */ 96745f48220SJohn Baldwin u_short shm_lpid; /* pid, last operation */ 96845f48220SJohn Baldwin short shm_nattch; /* no. of current attaches */ 96945f48220SJohn Baldwin time_t shm_atime; /* last attach time */ 97045f48220SJohn Baldwin time_t shm_dtime; /* last detach time */ 97145f48220SJohn Baldwin time_t shm_ctime; /* last change time */ 97245f48220SJohn Baldwin void *shm_handle; /* internal handle for shm segment */ 97345f48220SJohn Baldwin }; 97445f48220SJohn Baldwin 97545f48220SJohn Baldwin struct oshmctl_args { 97645f48220SJohn Baldwin int shmid; 97745f48220SJohn Baldwin int cmd; 97845f48220SJohn Baldwin struct oshmid_ds *ubuf; 97945f48220SJohn Baldwin }; 98045f48220SJohn Baldwin 98145f48220SJohn Baldwin static int 982ca998284SJohn Baldwin oshmctl(struct thread *td, struct oshmctl_args *uap) 98345f48220SJohn Baldwin { 98445f48220SJohn Baldwin #ifdef COMPAT_43 98545f48220SJohn Baldwin int error = 0; 98645f48220SJohn Baldwin struct shmid_kernel *shmseg; 98745f48220SJohn Baldwin struct oshmid_ds outbuf; 98845f48220SJohn Baldwin 98945f48220SJohn Baldwin if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 99045f48220SJohn Baldwin return (ENOSYS); 99145f48220SJohn Baldwin mtx_lock(&Giant); 99245f48220SJohn Baldwin shmseg = shm_find_segment_by_shmid(uap->shmid); 99345f48220SJohn Baldwin if (shmseg == NULL) { 99445f48220SJohn Baldwin error = EINVAL; 99545f48220SJohn Baldwin goto done2; 99645f48220SJohn Baldwin } 99745f48220SJohn Baldwin switch (uap->cmd) { 99845f48220SJohn Baldwin case IPC_STAT: 99945f48220SJohn Baldwin error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 100045f48220SJohn Baldwin if (error) 100145f48220SJohn Baldwin goto done2; 100245f48220SJohn Baldwin #ifdef MAC 100345f48220SJohn Baldwin error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd); 100445f48220SJohn Baldwin if (error != 0) 100545f48220SJohn Baldwin goto done2; 100645f48220SJohn Baldwin #endif 100745f48220SJohn Baldwin ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm); 100845f48220SJohn Baldwin outbuf.shm_segsz = shmseg->u.shm_segsz; 100945f48220SJohn Baldwin outbuf.shm_cpid = shmseg->u.shm_cpid; 101045f48220SJohn Baldwin outbuf.shm_lpid = shmseg->u.shm_lpid; 101145f48220SJohn Baldwin outbuf.shm_nattch = shmseg->u.shm_nattch; 101245f48220SJohn Baldwin outbuf.shm_atime = shmseg->u.shm_atime; 101345f48220SJohn Baldwin outbuf.shm_dtime = shmseg->u.shm_dtime; 101445f48220SJohn Baldwin outbuf.shm_ctime = 
shmseg->u.shm_ctime; 101545f48220SJohn Baldwin outbuf.shm_handle = shmseg->object; 101645f48220SJohn Baldwin error = copyout(&outbuf, uap->ubuf, sizeof(outbuf)); 101745f48220SJohn Baldwin if (error) 101845f48220SJohn Baldwin goto done2; 101945f48220SJohn Baldwin break; 102045f48220SJohn Baldwin default: 1021af88b2c2SJohn Baldwin error = freebsd7_shmctl(td, (struct freebsd7_shmctl_args *)uap); 102245f48220SJohn Baldwin break; 102345f48220SJohn Baldwin } 102445f48220SJohn Baldwin done2: 102545f48220SJohn Baldwin mtx_unlock(&Giant); 102645f48220SJohn Baldwin return (error); 102745f48220SJohn Baldwin #else 102845f48220SJohn Baldwin return (EINVAL); 102945f48220SJohn Baldwin #endif 103045f48220SJohn Baldwin } 103145f48220SJohn Baldwin 103245f48220SJohn Baldwin /* XXX casting to (sy_call_t *) is bogus, as usual. */ 103345f48220SJohn Baldwin static sy_call_t *shmcalls[] = { 10348451d0ddSKip Macy (sy_call_t *)sys_shmat, (sy_call_t *)oshmctl, 10358451d0ddSKip Macy (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget, 1036b648d480SJohn Baldwin (sy_call_t *)freebsd7_shmctl 103745f48220SJohn Baldwin }; 103845f48220SJohn Baldwin 103945f48220SJohn Baldwin int 10408451d0ddSKip Macy sys_shmsys(td, uap) 104145f48220SJohn Baldwin struct thread *td; 104245f48220SJohn Baldwin /* XXX actually varargs. */ 104345f48220SJohn Baldwin struct shmsys_args /* { 104445f48220SJohn Baldwin int which; 104545f48220SJohn Baldwin int a2; 104645f48220SJohn Baldwin int a3; 104745f48220SJohn Baldwin int a4; 104845f48220SJohn Baldwin } */ *uap; 104945f48220SJohn Baldwin { 105045f48220SJohn Baldwin int error; 105145f48220SJohn Baldwin 105245f48220SJohn Baldwin if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 105345f48220SJohn Baldwin return (ENOSYS); 105445f48220SJohn Baldwin if (uap->which < 0 || 105545f48220SJohn Baldwin uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) 105645f48220SJohn Baldwin return (EINVAL); 105745f48220SJohn Baldwin mtx_lock(&Giant); 105845f48220SJohn Baldwin error = (*shmcalls[uap->which])(td, &uap->a2); 105945f48220SJohn Baldwin mtx_unlock(&Giant); 106045f48220SJohn Baldwin return (error); 106145f48220SJohn Baldwin } 106245f48220SJohn Baldwin 106345f48220SJohn Baldwin #endif /* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */ 106445f48220SJohn Baldwin 106575d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 106675d633cbSKonstantin Belousov 106775d633cbSKonstantin Belousov int 106875d633cbSKonstantin Belousov freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap) 106975d633cbSKonstantin Belousov { 107075d633cbSKonstantin Belousov 107175d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 107275d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 107375d633cbSKonstantin Belousov switch (uap->which) { 107475d633cbSKonstantin Belousov case 0: { /* shmat */ 107575d633cbSKonstantin Belousov struct shmat_args ap; 107675d633cbSKonstantin Belousov 107775d633cbSKonstantin Belousov ap.shmid = uap->a2; 107875d633cbSKonstantin Belousov ap.shmaddr = PTRIN(uap->a3); 107975d633cbSKonstantin Belousov ap.shmflg = uap->a4; 108075d633cbSKonstantin Belousov return (sysent[SYS_shmat].sy_call(td, &ap)); 108175d633cbSKonstantin Belousov } 108275d633cbSKonstantin Belousov case 2: { /* shmdt */ 108375d633cbSKonstantin Belousov struct shmdt_args ap; 108475d633cbSKonstantin Belousov 108575d633cbSKonstantin Belousov ap.shmaddr = PTRIN(uap->a2); 108675d633cbSKonstantin Belousov return (sysent[SYS_shmdt].sy_call(td, &ap)); 108775d633cbSKonstantin Belousov } 
108875d633cbSKonstantin Belousov case 3: { /* shmget */ 108975d633cbSKonstantin Belousov struct shmget_args ap; 109075d633cbSKonstantin Belousov 109175d633cbSKonstantin Belousov ap.key = uap->a2; 109275d633cbSKonstantin Belousov ap.size = uap->a3; 109375d633cbSKonstantin Belousov ap.shmflg = uap->a4; 109475d633cbSKonstantin Belousov return (sysent[SYS_shmget].sy_call(td, &ap)); 109575d633cbSKonstantin Belousov } 109675d633cbSKonstantin Belousov case 4: { /* shmctl */ 109775d633cbSKonstantin Belousov struct freebsd7_freebsd32_shmctl_args ap; 109875d633cbSKonstantin Belousov 109975d633cbSKonstantin Belousov ap.shmid = uap->a2; 110075d633cbSKonstantin Belousov ap.cmd = uap->a3; 110175d633cbSKonstantin Belousov ap.buf = PTRIN(uap->a4); 110275d633cbSKonstantin Belousov return (freebsd7_freebsd32_shmctl(td, &ap)); 110375d633cbSKonstantin Belousov } 110475d633cbSKonstantin Belousov case 1: /* oshmctl */ 110575d633cbSKonstantin Belousov default: 110675d633cbSKonstantin Belousov return (EINVAL); 110775d633cbSKonstantin Belousov } 110875d633cbSKonstantin Belousov #else 110975d633cbSKonstantin Belousov return (nosys(td, NULL)); 111075d633cbSKonstantin Belousov #endif 111175d633cbSKonstantin Belousov } 111275d633cbSKonstantin Belousov 111375d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 111475d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 111575d633cbSKonstantin Belousov int 111675d633cbSKonstantin Belousov freebsd7_freebsd32_shmctl(struct thread *td, 111775d633cbSKonstantin Belousov struct freebsd7_freebsd32_shmctl_args *uap) 111875d633cbSKonstantin Belousov { 111975d633cbSKonstantin Belousov int error = 0; 112075d633cbSKonstantin Belousov union { 112175d633cbSKonstantin Belousov struct shmid_ds shmid_ds; 112275d633cbSKonstantin Belousov struct shm_info shm_info; 112375d633cbSKonstantin Belousov struct shminfo shminfo; 112475d633cbSKonstantin Belousov } u; 112575d633cbSKonstantin Belousov union { 112675d633cbSKonstantin Belousov struct shmid_ds32_old shmid_ds32; 112775d633cbSKonstantin Belousov struct shm_info32 shm_info32; 112875d633cbSKonstantin Belousov struct shminfo32 shminfo32; 112975d633cbSKonstantin Belousov } u32; 113075d633cbSKonstantin Belousov size_t sz; 113175d633cbSKonstantin Belousov 113275d633cbSKonstantin Belousov if (uap->cmd == IPC_SET) { 113375d633cbSKonstantin Belousov if ((error = copyin(uap->buf, &u32.shmid_ds32, 113475d633cbSKonstantin Belousov sizeof(u32.shmid_ds32)))) 113575d633cbSKonstantin Belousov goto done; 113675d633cbSKonstantin Belousov freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm, 113775d633cbSKonstantin Belousov &u.shmid_ds.shm_perm); 113875d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_segsz); 113975d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_lpid); 114075d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_cpid); 114175d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_nattch); 114275d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_atime); 114375d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_dtime); 114475d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_ctime); 114575d633cbSKonstantin Belousov } 114675d633cbSKonstantin Belousov 114775d633cbSKonstantin Belousov error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz); 114875d633cbSKonstantin Belousov if (error) 114975d633cbSKonstantin Belousov goto done; 115075d633cbSKonstantin Belousov 115175d633cbSKonstantin Belousov /* Cases in which 
int
freebsd7_freebsd32_shmctl(struct thread *td,
    struct freebsd7_freebsd32_shmctl_args *uap)
{
	int error = 0;
	union {
		struct shmid_ds shmid_ds;
		struct shm_info shm_info;
		struct shminfo shminfo;
	} u;
	union {
		struct shmid_ds32_old shmid_ds32;
		struct shm_info32 shm_info32;
		struct shminfo32 shminfo32;
	} u32;
	size_t sz;

	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &u32.shmid_ds32,
		    sizeof(u32.shmid_ds32))))
			goto done;
		freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm,
		    &u.shmid_ds.shm_perm);
		CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
		CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
		CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
		CP(u.shminfo, u32.shminfo32, shmmax);
		CP(u.shminfo, u32.shminfo32, shmmin);
		CP(u.shminfo, u32.shminfo32, shmmni);
		CP(u.shminfo, u32.shminfo32, shmseg);
		CP(u.shminfo, u32.shminfo32, shmall);
		error = copyout(&u32.shminfo32, uap->buf,
		    sizeof(u32.shminfo32));
		break;
	case SHM_INFO:
		CP(u.shm_info, u32.shm_info32, used_ids);
		CP(u.shm_info, u32.shm_info32, shm_rss);
		CP(u.shm_info, u32.shm_info32, shm_tot);
		CP(u.shm_info, u32.shm_info32, shm_swp);
		CP(u.shm_info, u32.shm_info32, swap_attempts);
		CP(u.shm_info, u32.shm_info32, swap_successes);
		error = copyout(&u32.shm_info32, uap->buf,
		    sizeof(u32.shm_info32));
		break;
	case SHM_STAT:
	case IPC_STAT:
		freebsd32_ipcperm_old_out(&u.shmid_ds.shm_perm,
		    &u32.shmid_ds32.shm_perm);
		if (u.shmid_ds.shm_segsz > INT32_MAX)
			u32.shmid_ds32.shm_segsz = INT32_MAX;
		else
			CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
		CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
		CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
		u32.shmid_ds32.shm_internal = 0;
		error = copyout(&u32.shmid_ds32, uap->buf,
		    sizeof(u32.shmid_ds32));
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
#endif

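/*
 * shmctl(2) for 32-bit binaries using the current ABI: the same
 * conversion scheme as above, but against the current 32-bit shmid_ds
 * layout (shmid_ds32) and ipcperm conversion routines.
 */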
int
freebsd32_shmctl(struct thread *td, struct freebsd32_shmctl_args *uap)
{
	int error = 0;
	union {
		struct shmid_ds shmid_ds;
		struct shm_info shm_info;
		struct shminfo shminfo;
	} u;
	union {
		struct shmid_ds32 shmid_ds32;
		struct shm_info32 shm_info32;
		struct shminfo32 shminfo32;
	} u32;
	size_t sz;

	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &u32.shmid_ds32,
		    sizeof(u32.shmid_ds32))))
			goto done;
		freebsd32_ipcperm_in(&u32.shmid_ds32.shm_perm,
		    &u.shmid_ds.shm_perm);
		CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
		CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
		CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
		CP(u.shminfo, u32.shminfo32, shmmax);
		CP(u.shminfo, u32.shminfo32, shmmin);
		CP(u.shminfo, u32.shminfo32, shmmni);
		CP(u.shminfo, u32.shminfo32, shmseg);
		CP(u.shminfo, u32.shminfo32, shmall);
		error = copyout(&u32.shminfo32, uap->buf,
		    sizeof(u32.shminfo32));
		break;
	case SHM_INFO:
		CP(u.shm_info, u32.shm_info32, used_ids);
		CP(u.shm_info, u32.shm_info32, shm_rss);
		CP(u.shm_info, u32.shm_info32, shm_tot);
		CP(u.shm_info, u32.shm_info32, shm_swp);
		CP(u.shm_info, u32.shm_info32, swap_attempts);
		CP(u.shm_info, u32.shm_info32, swap_successes);
		error = copyout(&u32.shm_info32, uap->buf,
		    sizeof(u32.shm_info32));
		break;
	case SHM_STAT:
	case IPC_STAT:
		freebsd32_ipcperm_out(&u.shmid_ds.shm_perm,
		    &u32.shmid_ds32.shm_perm);
		if (u.shmid_ds.shm_segsz > INT32_MAX)
			u32.shmid_ds32.shm_segsz = INT32_MAX;
		else
			CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
		CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
		CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
		CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
		CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
		error = copyout(&u32.shmid_ds32, uap->buf,
		    sizeof(u32.shmid_ds32));
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)

#ifndef CP
#define CP(src, dst, fld) do { (dst).fld = (src).fld; } while (0)
#endif

#ifndef _SYS_SYSPROTO_H_
struct freebsd7_shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds_old *buf;
};
#endif
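/*
 * shmctl(2) for native binaries built against the FreeBSD 4-7 ABI:
 * convert between the old and current shmid_ds layouts around
 * kern_shmctl(), clamping shm_segsz and shm_nattch to the ranges the
 * old structure can represent.
 */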
int
freebsd7_shmctl(td, uap)
	struct thread *td;
	struct freebsd7_shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds_old old;
	struct shmid_ds buf;
	size_t bufsz;

	/*
	 * The only reason IPC_INFO, SHM_INFO and SHM_STAT exist is to
	 * support Linux binaries.  If we see the call come through the
	 * FreeBSD ABI, return an error back to the user since we do not
	 * support this.
	 */
	if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
	    uap->cmd == SHM_STAT)
		return (EINVAL);

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &old, sizeof(old))))
			goto done;
		ipcperm_old2new(&old.shm_perm, &buf.shm_perm);
		CP(old, buf, shm_segsz);
		CP(old, buf, shm_lpid);
		CP(old, buf, shm_cpid);
		CP(old, buf, shm_nattch);
		CP(old, buf, shm_atime);
		CP(old, buf, shm_dtime);
		CP(old, buf, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_STAT:
		ipcperm_new2old(&buf.shm_perm, &old.shm_perm);
		if (buf.shm_segsz > INT_MAX)
			old.shm_segsz = INT_MAX;
		else
			CP(buf, old, shm_segsz);
		CP(buf, old, shm_lpid);
		CP(buf, old, shm_cpid);
		if (buf.shm_nattch > SHRT_MAX)
			old.shm_nattch = SHRT_MAX;
		else
			CP(buf, old, shm_nattch);
		CP(buf, old, shm_atime);
		CP(buf, old, shm_dtime);
		CP(buf, old, shm_ctime);
		old.shm_internal = NULL;
		error = copyout(&old, uap->buf, sizeof(old));
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#endif /* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 ||
	  COMPAT_FREEBSD7 */

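/*
 * Module event handler: set up the System V shared memory subsystem on
 * load (undoing the partial setup if initialization fails) and tear it
 * down on unload.
 */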
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		error = shminit();
		if (error != 0)
			shmunload();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);