13d903220SDoug Rabson /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 29454b2d8SWarner Losh /*- 33d903220SDoug Rabson * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 43d903220SDoug Rabson * 53d903220SDoug Rabson * Redistribution and use in source and binary forms, with or without 63d903220SDoug Rabson * modification, are permitted provided that the following conditions 73d903220SDoug Rabson * are met: 83d903220SDoug Rabson * 1. Redistributions of source code must retain the above copyright 93d903220SDoug Rabson * notice, this list of conditions and the following disclaimer. 103d903220SDoug Rabson * 2. Redistributions in binary form must reproduce the above copyright 113d903220SDoug Rabson * notice, this list of conditions and the following disclaimer in the 123d903220SDoug Rabson * documentation and/or other materials provided with the distribution. 133d903220SDoug Rabson * 3. All advertising materials mentioning features or use of this software 143d903220SDoug Rabson * must display the following acknowledgement: 153d903220SDoug Rabson * This product includes software developed by Adam Glass and Charles 163d903220SDoug Rabson * Hannum. 173d903220SDoug Rabson * 4. The names of the authors may not be used to endorse or promote products 183d903220SDoug Rabson * derived from this software without specific prior written permission. 193d903220SDoug Rabson * 203d903220SDoug Rabson * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 213d903220SDoug Rabson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 223d903220SDoug Rabson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 233d903220SDoug Rabson * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 243d903220SDoug Rabson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 253d903220SDoug Rabson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 263d903220SDoug Rabson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 273d903220SDoug Rabson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 283d903220SDoug Rabson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 293d903220SDoug Rabson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 303d903220SDoug Rabson */ 3114cedfc8SRobert Watson /*- 3214cedfc8SRobert Watson * Copyright (c) 2003-2005 McAfee, Inc. 3314cedfc8SRobert Watson * All rights reserved. 3414cedfc8SRobert Watson * 3514cedfc8SRobert Watson * This software was developed for the FreeBSD Project in part by McAfee 3614cedfc8SRobert Watson * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR 3714cedfc8SRobert Watson * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research 3814cedfc8SRobert Watson * program. 3914cedfc8SRobert Watson * 4014cedfc8SRobert Watson * Redistribution and use in source and binary forms, with or without 4114cedfc8SRobert Watson * modification, are permitted provided that the following conditions 4214cedfc8SRobert Watson * are met: 4314cedfc8SRobert Watson * 1. Redistributions of source code must retain the above copyright 4414cedfc8SRobert Watson * notice, this list of conditions and the following disclaimer. 4514cedfc8SRobert Watson * 2. 
Redistributions in binary form must reproduce the above copyright 4614cedfc8SRobert Watson * notice, this list of conditions and the following disclaimer in the 4714cedfc8SRobert Watson * documentation and/or other materials provided with the distribution. 4814cedfc8SRobert Watson * 4914cedfc8SRobert Watson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 5014cedfc8SRobert Watson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 5114cedfc8SRobert Watson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 5214cedfc8SRobert Watson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 5314cedfc8SRobert Watson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 5414cedfc8SRobert Watson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 5514cedfc8SRobert Watson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 5614cedfc8SRobert Watson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 5714cedfc8SRobert Watson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 5814cedfc8SRobert Watson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 5914cedfc8SRobert Watson * SUCH DAMAGE. 6014cedfc8SRobert Watson */ 613d903220SDoug Rabson 62677b542eSDavid E. O'Brien #include <sys/cdefs.h> 63677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 64677b542eSDavid E. O'Brien 655591b823SEivind Eklund #include "opt_compat.h" 66255108f3SPeter Wemm #include "opt_sysvipc.h" 67511b67b7SGarrett Wollman 683d903220SDoug Rabson #include <sys/param.h> 69725db531SBruce Evans #include <sys/systm.h> 703d903220SDoug Rabson #include <sys/kernel.h> 71b648d480SJohn Baldwin #include <sys/limits.h> 72fb919e4dSMark Murray #include <sys/lock.h> 73255108f3SPeter Wemm #include <sys/sysctl.h> 743d903220SDoug Rabson #include <sys/shm.h> 753d903220SDoug Rabson #include <sys/proc.h> 763d903220SDoug Rabson #include <sys/malloc.h> 773d903220SDoug Rabson #include <sys/mman.h> 7877409fe1SPoul-Henning Kamp #include <sys/module.h> 799dceb26bSJohn Baldwin #include <sys/mutex.h> 803bcf7445SEdward Tomasz Napierala #include <sys/racct.h> 8168ba7a1dSTim J. 
Robbins #include <sys/resourcevar.h> 8289f6b863SAttilio Rao #include <sys/rwlock.h> 833d903220SDoug Rabson #include <sys/stat.h> 8478525ce3SAlfred Perlstein #include <sys/syscall.h> 85f130dcf2SMartin Blapp #include <sys/syscallsubr.h> 86725db531SBruce Evans #include <sys/sysent.h> 87fb919e4dSMark Murray #include <sys/sysproto.h> 88cb1f0db9SRobert Watson #include <sys/jail.h> 89aed55708SRobert Watson 90aed55708SRobert Watson #include <security/mac/mac_framework.h> 913d903220SDoug Rabson 923d903220SDoug Rabson #include <vm/vm.h> 93efeaf95aSDavid Greenman #include <vm/vm_param.h> 94efeaf95aSDavid Greenman #include <vm/pmap.h> 95a51f7119SJohn Dyson #include <vm/vm_object.h> 963d903220SDoug Rabson #include <vm/vm_map.h> 971c7c3c6aSMatthew Dillon #include <vm/vm_page.h> 98ae9b8c3aSJohn Dyson #include <vm/vm_pager.h> 993d903220SDoug Rabson 100de5b1952SAlexander Leidinger FEATURE(sysv_shm, "System V shared memory segments support"); 101de5b1952SAlexander Leidinger 102a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 10355166637SPoul-Henning Kamp 1044d77a549SAlfred Perlstein static int shmget_allocate_segment(struct thread *td, 1054d77a549SAlfred Perlstein struct shmget_args *uap, int mode); 1064d77a549SAlfred Perlstein static int shmget_existing(struct thread *td, struct shmget_args *uap, 1074d77a549SAlfred Perlstein int mode, int segnum); 108725db531SBruce Evans 1093d903220SDoug Rabson #define SHMSEG_FREE 0x0200 1103d903220SDoug Rabson #define SHMSEG_REMOVED 0x0400 1113d903220SDoug Rabson #define SHMSEG_ALLOCATED 0x0800 1123d903220SDoug Rabson 11365067cc8SKonstantin Belousov static int shm_last_free, shm_nused, shmalloced; 11445329b60SKonstantin Belousov vm_size_t shm_committed; 115921d05b9SRobert Watson static struct shmid_kernel *shmsegs; 1163d903220SDoug Rabson 1173d903220SDoug Rabson struct shmmap_state { 1183d903220SDoug Rabson vm_offset_t va; 1193d903220SDoug Rabson int shmid; 1203d903220SDoug Rabson }; 1213d903220SDoug Rabson 122921d05b9SRobert Watson static void shm_deallocate_segment(struct shmid_kernel *); 1234d77a549SAlfred Perlstein static int shm_find_segment_by_key(key_t); 1240555fb35SKonstantin Belousov static struct shmid_kernel *shm_find_segment(int, bool); 1253db161e0SMatthew Dillon static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *); 1264d77a549SAlfred Perlstein static void shmrealloc(void); 12775d633cbSKonstantin Belousov static int shminit(void); 1284d77a549SAlfred Perlstein static int sysvshm_modload(struct module *, int, void *); 1294d77a549SAlfred Perlstein static int shmunload(void); 1303db161e0SMatthew Dillon static void shmexit_myhook(struct vmspace *vm); 1314d77a549SAlfred Perlstein static void shmfork_myhook(struct proc *p1, struct proc *p2); 1324d77a549SAlfred Perlstein static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS); 133255108f3SPeter Wemm 134255108f3SPeter Wemm /* 135028f979dSDima Dorfman * Tuneable values. 136255108f3SPeter Wemm */ 137255108f3SPeter Wemm #ifndef SHMMAXPGS 138c1e34abfSIvan Voras #define SHMMAXPGS 131072 /* Note: sysv shared memory is swap backed. 
*/ 139255108f3SPeter Wemm #endif 140255108f3SPeter Wemm #ifndef SHMMAX 141255108f3SPeter Wemm #define SHMMAX (SHMMAXPGS*PAGE_SIZE) 142255108f3SPeter Wemm #endif 143255108f3SPeter Wemm #ifndef SHMMIN 144255108f3SPeter Wemm #define SHMMIN 1 145255108f3SPeter Wemm #endif 146255108f3SPeter Wemm #ifndef SHMMNI 1471766b2e5SMatthew Dillon #define SHMMNI 192 148255108f3SPeter Wemm #endif 149255108f3SPeter Wemm #ifndef SHMSEG 1501766b2e5SMatthew Dillon #define SHMSEG 128 151255108f3SPeter Wemm #endif 152255108f3SPeter Wemm #ifndef SHMALL 153255108f3SPeter Wemm #define SHMALL (SHMMAXPGS) 154255108f3SPeter Wemm #endif 155255108f3SPeter Wemm 156255108f3SPeter Wemm struct shminfo shminfo = { 157af3b2549SHans Petter Selasky .shmmax = SHMMAX, 158af3b2549SHans Petter Selasky .shmmin = SHMMIN, 159af3b2549SHans Petter Selasky .shmmni = SHMMNI, 160af3b2549SHans Petter Selasky .shmseg = SHMSEG, 161af3b2549SHans Petter Selasky .shmall = SHMALL 162255108f3SPeter Wemm }; 163255108f3SPeter Wemm 1648b03c8edSMatthew Dillon static int shm_use_phys; 1652332251cSMax Khon static int shm_allow_removed; 1668b03c8edSMatthew Dillon 167af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RWTUN, &shminfo.shmmax, 0, 16884f85aedSChristian S.J. Peron "Maximum shared memory segment size"); 169af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RWTUN, &shminfo.shmmin, 0, 17084f85aedSChristian S.J. Peron "Minimum shared memory segment size"); 1719baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, 17284f85aedSChristian S.J. Peron "Number of shared memory identifiers"); 1739baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, 17484f85aedSChristian S.J. Peron "Number of segments per process"); 175af3b2549SHans Petter Selasky SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RWTUN, &shminfo.shmall, 0, 17684f85aedSChristian S.J. Peron "Maximum number of pages available for shared memory"); 177af3b2549SHans Petter Selasky SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RWTUN, 17884f85aedSChristian S.J. Peron &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core"); 179af3b2549SHans Petter Selasky SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RWTUN, 18084f85aedSChristian S.J. Peron &shm_allow_removed, 0, 18184f85aedSChristian S.J. Peron "Enable/Disable attachment to attached segments marked for removal"); 1820555fb35SKonstantin Belousov SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD | 1830555fb35SKonstantin Belousov CTLFLAG_MPSAFE, NULL, 0, sysctl_shmsegs, "", 18484f85aedSChristian S.J. 
Peron "Current number of shared memory segments allocated"); 1853d903220SDoug Rabson 1860555fb35SKonstantin Belousov static struct sx sysvshmsx; 1870555fb35SKonstantin Belousov #define SYSVSHM_LOCK() sx_xlock(&sysvshmsx) 1880555fb35SKonstantin Belousov #define SYSVSHM_UNLOCK() sx_xunlock(&sysvshmsx) 1890555fb35SKonstantin Belousov #define SYSVSHM_ASSERT_LOCKED() sx_assert(&sysvshmsx, SA_XLOCKED) 1900555fb35SKonstantin Belousov 1913d903220SDoug Rabson static int 1920555fb35SKonstantin Belousov shm_find_segment_by_key(key_t key) 1933d903220SDoug Rabson { 1943d903220SDoug Rabson int i; 1953d903220SDoug Rabson 196255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 197921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 198921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 199b618bb96SAlfred Perlstein return (i); 200b618bb96SAlfred Perlstein return (-1); 2013d903220SDoug Rabson } 2023d903220SDoug Rabson 2030555fb35SKonstantin Belousov /* 2040555fb35SKonstantin Belousov * Finds segment either by shmid if is_shmid is true, or by segnum if 2050555fb35SKonstantin Belousov * is_shmid is false. 2060555fb35SKonstantin Belousov */ 207921d05b9SRobert Watson static struct shmid_kernel * 2080555fb35SKonstantin Belousov shm_find_segment(int arg, bool is_shmid) 2093d903220SDoug Rabson { 210921d05b9SRobert Watson struct shmid_kernel *shmseg; 2110555fb35SKonstantin Belousov int segnum; 2123d903220SDoug Rabson 2130555fb35SKonstantin Belousov segnum = is_shmid ? IPCID_TO_IX(arg) : arg; 214255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 215b618bb96SAlfred Perlstein return (NULL); 2163d903220SDoug Rabson shmseg = &shmsegs[segnum]; 217921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2182332251cSMax Khon (!shm_allow_removed && 219921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 2200555fb35SKonstantin Belousov (is_shmid && shmseg->u.shm_perm.seq != IPCID_TO_SEQ(arg))) 221b618bb96SAlfred Perlstein return (NULL); 222b618bb96SAlfred Perlstein return (shmseg); 223491dec93SMichael Reifenberger } 224491dec93SMichael Reifenberger 2253d903220SDoug Rabson static void 2260555fb35SKonstantin Belousov shm_deallocate_segment(struct shmid_kernel *shmseg) 2273d903220SDoug Rabson { 22845329b60SKonstantin Belousov vm_size_t size; 2293d903220SDoug Rabson 2300555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 2310cddd8f0SMatthew Dillon 232b648d480SJohn Baldwin vm_object_deallocate(shmseg->object); 233b648d480SJohn Baldwin shmseg->object = NULL; 234b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 2353d903220SDoug Rabson shm_committed -= btoc(size); 2363d903220SDoug Rabson shm_nused--; 237921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 23814cedfc8SRobert Watson #ifdef MAC 23930d239bcSRobert Watson mac_sysvshm_cleanup(shmseg); 24014cedfc8SRobert Watson #endif 2413bcf7445SEdward Tomasz Napierala racct_sub_cred(shmseg->cred, RACCT_NSHM, 1); 2423bcf7445SEdward Tomasz Napierala racct_sub_cred(shmseg->cred, RACCT_SHMSIZE, size); 2438caddd81SEdward Tomasz Napierala crfree(shmseg->cred); 2448caddd81SEdward Tomasz Napierala shmseg->cred = NULL; 2453d903220SDoug Rabson } 2463d903220SDoug Rabson 2473d903220SDoug Rabson static int 2483db161e0SMatthew Dillon shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 2493d903220SDoug Rabson { 250921d05b9SRobert Watson struct shmid_kernel *shmseg; 2513d903220SDoug Rabson int segnum, result; 25245329b60SKonstantin Belousov vm_size_t size; 2533d903220SDoug Rabson 
2540555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 2553d903220SDoug Rabson segnum = IPCID_TO_IX(shmmap_s->shmid); 2560555fb35SKonstantin Belousov KASSERT(segnum >= 0 && segnum < shmalloced, 2570555fb35SKonstantin Belousov ("segnum %d shmalloced %d", segnum, shmalloced)); 2580555fb35SKonstantin Belousov 2593d903220SDoug Rabson shmseg = &shmsegs[segnum]; 260b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 2613db161e0SMatthew Dillon result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size); 2623d903220SDoug Rabson if (result != KERN_SUCCESS) 263b618bb96SAlfred Perlstein return (EINVAL); 2643d903220SDoug Rabson shmmap_s->shmid = -1; 265921d05b9SRobert Watson shmseg->u.shm_dtime = time_second; 266921d05b9SRobert Watson if ((--shmseg->u.shm_nattch <= 0) && 267921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) { 2683d903220SDoug Rabson shm_deallocate_segment(shmseg); 2693d903220SDoug Rabson shm_last_free = segnum; 2703d903220SDoug Rabson } 271b618bb96SAlfred Perlstein return (0); 2723d903220SDoug Rabson } 2733d903220SDoug Rabson 2740555fb35SKonstantin Belousov static int 2750555fb35SKonstantin Belousov kern_shmdt_locked(struct thread *td, const void *shmaddr) 2763d903220SDoug Rabson { 277b40ce416SJulian Elischer struct proc *p = td->td_proc; 2783d903220SDoug Rabson struct shmmap_state *shmmap_s; 27914cedfc8SRobert Watson #ifdef MAC 28014cedfc8SRobert Watson struct shmid_kernel *shmsegptr; 28114cedfc8SRobert Watson #endif 2820555fb35SKonstantin Belousov int error, i; 2833d903220SDoug Rabson 2840555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 2850304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 286c6f55f33SJohn Baldwin return (ENOSYS); 2878209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 2880555fb35SKonstantin Belousov if (shmmap_s == NULL) 2890555fb35SKonstantin Belousov return (EINVAL); 290b6a4b4f9SMatthew Dillon for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 2913d903220SDoug Rabson if (shmmap_s->shmid != -1 && 2920555fb35SKonstantin Belousov shmmap_s->va == (vm_offset_t)shmaddr) { 2933d903220SDoug Rabson break; 294b6a4b4f9SMatthew Dillon } 295b6a4b4f9SMatthew Dillon } 2960555fb35SKonstantin Belousov if (i == shminfo.shmseg) 2970555fb35SKonstantin Belousov return (EINVAL); 29814cedfc8SRobert Watson #ifdef MAC 29914cedfc8SRobert Watson shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)]; 30030d239bcSRobert Watson error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr); 301f50c4fd8SRobert Watson if (error != 0) 3020555fb35SKonstantin Belousov return (error); 30314cedfc8SRobert Watson #endif 3043db161e0SMatthew Dillon error = shm_delete_mapping(p->p_vmspace, shmmap_s); 305b6a4b4f9SMatthew Dillon return (error); 3063d903220SDoug Rabson } 3073d903220SDoug Rabson 308b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 3090555fb35SKonstantin Belousov struct shmdt_args { 310e1d7d0bbSAlfred Perlstein const void *shmaddr; 3113d903220SDoug Rabson }; 312b5d5c0c9SPeter Wemm #endif 3133d903220SDoug Rabson int 3140555fb35SKonstantin Belousov sys_shmdt(struct thread *td, struct shmdt_args *uap) 3150555fb35SKonstantin Belousov { 3160555fb35SKonstantin Belousov int error; 3170555fb35SKonstantin Belousov 3180555fb35SKonstantin Belousov SYSVSHM_LOCK(); 3190555fb35SKonstantin Belousov error = kern_shmdt_locked(td, uap->shmaddr); 3200555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 3210555fb35SKonstantin Belousov return (error); 3220555fb35SKonstantin Belousov } 3230555fb35SKonstantin Belousov 3240555fb35SKonstantin Belousov static int 
3250555fb35SKonstantin Belousov kern_shmat_locked(struct thread *td, int shmid, const void *shmaddr, 3260555fb35SKonstantin Belousov int shmflg) 3273d903220SDoug Rabson { 328b40ce416SJulian Elischer struct proc *p = td->td_proc; 329921d05b9SRobert Watson struct shmid_kernel *shmseg; 3300122d251SKonstantin Belousov struct shmmap_state *shmmap_s; 3313d903220SDoug Rabson vm_offset_t attach_va; 3323d903220SDoug Rabson vm_prot_t prot; 3333d903220SDoug Rabson vm_size_t size; 3340555fb35SKonstantin Belousov int error, i, rv; 3353d903220SDoug Rabson 3360555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 3370304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 338c6f55f33SJohn Baldwin return (ENOSYS); 3398209f090SAlfred Perlstein shmmap_s = p->p_vmspace->vm_shm; 3403d903220SDoug Rabson if (shmmap_s == NULL) { 34145329b60SKonstantin Belousov shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state), 34245329b60SKonstantin Belousov M_SHM, M_WAITOK); 3433d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) 3443d903220SDoug Rabson shmmap_s[i].shmid = -1; 3450555fb35SKonstantin Belousov KASSERT(p->p_vmspace->vm_shm == NULL, ("raced")); 3462cc593fdSAlfred Perlstein p->p_vmspace->vm_shm = shmmap_s; 3473d903220SDoug Rabson } 3480555fb35SKonstantin Belousov shmseg = shm_find_segment(shmid, true); 3490555fb35SKonstantin Belousov if (shmseg == NULL) 3500555fb35SKonstantin Belousov return (EINVAL); 351921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, 352f130dcf2SMartin Blapp (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); 3530555fb35SKonstantin Belousov if (error != 0) 3540555fb35SKonstantin Belousov return (error); 35514cedfc8SRobert Watson #ifdef MAC 35630d239bcSRobert Watson error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg); 357f50c4fd8SRobert Watson if (error != 0) 3580555fb35SKonstantin Belousov return (error); 35914cedfc8SRobert Watson #endif 3603d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) { 3613d903220SDoug Rabson if (shmmap_s->shmid == -1) 3623d903220SDoug Rabson break; 3633d903220SDoug Rabson shmmap_s++; 3643d903220SDoug Rabson } 3650555fb35SKonstantin Belousov if (i >= shminfo.shmseg) 3660555fb35SKonstantin Belousov return (EMFILE); 367b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 3683d903220SDoug Rabson prot = VM_PROT_READ; 369f130dcf2SMartin Blapp if ((shmflg & SHM_RDONLY) == 0) 3703d903220SDoug Rabson prot |= VM_PROT_WRITE; 3710555fb35SKonstantin Belousov if (shmaddr != NULL) { 3720555fb35SKonstantin Belousov if ((shmflg & SHM_RND) != 0) 373f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1); 3740555fb35SKonstantin Belousov else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) 375f130dcf2SMartin Blapp attach_va = (vm_offset_t)shmaddr; 3760555fb35SKonstantin Belousov else 3770555fb35SKonstantin Belousov return (EINVAL); 3783d903220SDoug Rabson } else { 379028f979dSDima Dorfman /* 380028f979dSDima Dorfman * This is just a hint to vm_map_find() about where to 381028f979dSDima Dorfman * put it. 382028f979dSDima Dorfman */ 38368ba7a1dSTim J. Robbins PROC_LOCK(p); 38468ba7a1dSTim J. Robbins attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr + 38568ba7a1dSTim J. Robbins lim_max(p, RLIMIT_DATA)); 38668ba7a1dSTim J. Robbins PROC_UNLOCK(p); 3873d903220SDoug Rabson } 388a51f7119SJohn Dyson 389b648d480SJohn Baldwin vm_object_reference(shmseg->object); 39001a8fb7dSAlan Cox rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->object, 0, &attach_va, 39101a8fb7dSAlan Cox size, 0, shmaddr != NULL ? 
VMFS_NO_SPACE : VMFS_OPTIMAL_SPACE, 39201a8fb7dSAlan Cox prot, prot, MAP_INHERIT_SHARE | MAP_PREFAULT_PARTIAL); 393a51f7119SJohn Dyson if (rv != KERN_SUCCESS) { 394b648d480SJohn Baldwin vm_object_deallocate(shmseg->object); 3950555fb35SKonstantin Belousov return (ENOMEM); 396a51f7119SJohn Dyson } 3970463028cSJohn Dyson 3983d903220SDoug Rabson shmmap_s->va = attach_va; 399f130dcf2SMartin Blapp shmmap_s->shmid = shmid; 400921d05b9SRobert Watson shmseg->u.shm_lpid = p->p_pid; 401921d05b9SRobert Watson shmseg->u.shm_atime = time_second; 402921d05b9SRobert Watson shmseg->u.shm_nattch++; 403b40ce416SJulian Elischer td->td_retval[0] = attach_va; 404b6a4b4f9SMatthew Dillon return (error); 4053d903220SDoug Rabson } 4063d903220SDoug Rabson 407f130dcf2SMartin Blapp int 4080555fb35SKonstantin Belousov kern_shmat(struct thread *td, int shmid, const void *shmaddr, int shmflg) 409f130dcf2SMartin Blapp { 4100555fb35SKonstantin Belousov int error; 4110555fb35SKonstantin Belousov 4120555fb35SKonstantin Belousov SYSVSHM_LOCK(); 4130555fb35SKonstantin Belousov error = kern_shmat_locked(td, shmid, shmaddr, shmflg); 4140555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 4150555fb35SKonstantin Belousov return (error); 416f130dcf2SMartin Blapp } 417f130dcf2SMartin Blapp 4180555fb35SKonstantin Belousov #ifndef _SYS_SYSPROTO_H_ 4190555fb35SKonstantin Belousov struct shmat_args { 420f130dcf2SMartin Blapp int shmid; 4210555fb35SKonstantin Belousov const void *shmaddr; 4220555fb35SKonstantin Belousov int shmflg; 4230555fb35SKonstantin Belousov }; 4240555fb35SKonstantin Belousov #endif 4250555fb35SKonstantin Belousov int 4260555fb35SKonstantin Belousov sys_shmat(struct thread *td, struct shmat_args *uap) 4273d903220SDoug Rabson { 4280555fb35SKonstantin Belousov 4290555fb35SKonstantin Belousov return (kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg)); 4300555fb35SKonstantin Belousov } 4310555fb35SKonstantin Belousov 4320555fb35SKonstantin Belousov static int 4330555fb35SKonstantin Belousov kern_shmctl_locked(struct thread *td, int shmid, int cmd, void *buf, 4340555fb35SKonstantin Belousov size_t *bufsz) 4350555fb35SKonstantin Belousov { 436921d05b9SRobert Watson struct shmid_kernel *shmseg; 4370555fb35SKonstantin Belousov struct shmid_ds *shmidp; 4380555fb35SKonstantin Belousov struct shm_info shm_info; 4390555fb35SKonstantin Belousov int error; 4400555fb35SKonstantin Belousov 4410555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 4423d903220SDoug Rabson 4430304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 444c6f55f33SJohn Baldwin return (ENOSYS); 445f130dcf2SMartin Blapp 4460555fb35SKonstantin Belousov error = 0; 447f130dcf2SMartin Blapp switch (cmd) { 4484f18813fSChristian S.J. Peron /* 4494f18813fSChristian S.J. Peron * It is possible that kern_shmctl is being called from the Linux ABI 4504f18813fSChristian S.J. Peron * layer, in which case, we will need to implement IPC_INFO. It should 4514f18813fSChristian S.J. Peron * be noted that other shmctl calls will be funneled through here for 4524f18813fSChristian S.J. Peron * Linux binaries as well. 4534f18813fSChristian S.J. Peron * 4544f18813fSChristian S.J. Peron * NB: The Linux ABI layer will convert this data to structure(s) more 4554f18813fSChristian S.J. Peron * consistent with the Linux ABI. 4564f18813fSChristian S.J.
Peron */ 457491dec93SMichael Reifenberger case IPC_INFO: 458f130dcf2SMartin Blapp memcpy(buf, &shminfo, sizeof(shminfo)); 459f130dcf2SMartin Blapp if (bufsz) 460f130dcf2SMartin Blapp *bufsz = sizeof(shminfo); 461491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 4620555fb35SKonstantin Belousov return (0); 463491dec93SMichael Reifenberger case SHM_INFO: { 464491dec93SMichael Reifenberger shm_info.used_ids = shm_nused; 465491dec93SMichael Reifenberger shm_info.shm_rss = 0; /*XXX where to get from ? */ 466491dec93SMichael Reifenberger shm_info.shm_tot = 0; /*XXX where to get from ? */ 467491dec93SMichael Reifenberger shm_info.shm_swp = 0; /*XXX where to get from ? */ 468491dec93SMichael Reifenberger shm_info.swap_attempts = 0; /*XXX where to get from ? */ 469491dec93SMichael Reifenberger shm_info.swap_successes = 0; /*XXX where to get from ? */ 470f130dcf2SMartin Blapp memcpy(buf, &shm_info, sizeof(shm_info)); 4710555fb35SKonstantin Belousov if (bufsz != NULL) 472f130dcf2SMartin Blapp *bufsz = sizeof(shm_info); 473491dec93SMichael Reifenberger td->td_retval[0] = shmalloced; 4740555fb35SKonstantin Belousov return (0); 475491dec93SMichael Reifenberger } 476491dec93SMichael Reifenberger } 4770555fb35SKonstantin Belousov shmseg = shm_find_segment(shmid, cmd != SHM_STAT); 4780555fb35SKonstantin Belousov if (shmseg == NULL) 4790555fb35SKonstantin Belousov return (EINVAL); 48014cedfc8SRobert Watson #ifdef MAC 48130d239bcSRobert Watson error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd); 482f50c4fd8SRobert Watson if (error != 0) 4830555fb35SKonstantin Belousov return (error); 48414cedfc8SRobert Watson #endif 485f130dcf2SMartin Blapp switch (cmd) { 486491dec93SMichael Reifenberger case SHM_STAT: 4873d903220SDoug Rabson case IPC_STAT: 488921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 4890555fb35SKonstantin Belousov if (error != 0) 4900555fb35SKonstantin Belousov return (error); 491921d05b9SRobert Watson memcpy(buf, &shmseg->u, sizeof(struct shmid_ds)); 4920555fb35SKonstantin Belousov if (bufsz != NULL) 493f130dcf2SMartin Blapp *bufsz = sizeof(struct shmid_ds); 4940555fb35SKonstantin Belousov if (cmd == SHM_STAT) { 4950555fb35SKonstantin Belousov td->td_retval[0] = IXSEQ_TO_IPCID(shmid, 4960555fb35SKonstantin Belousov shmseg->u.shm_perm); 4970555fb35SKonstantin Belousov } 4983d903220SDoug Rabson break; 4990555fb35SKonstantin Belousov case IPC_SET: 5000555fb35SKonstantin Belousov shmidp = (struct shmid_ds *)buf; 501921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 5020555fb35SKonstantin Belousov if (error != 0) 5030555fb35SKonstantin Belousov return (error); 5040555fb35SKonstantin Belousov shmseg->u.shm_perm.uid = shmidp->shm_perm.uid; 5050555fb35SKonstantin Belousov shmseg->u.shm_perm.gid = shmidp->shm_perm.gid; 506921d05b9SRobert Watson shmseg->u.shm_perm.mode = 507921d05b9SRobert Watson (shmseg->u.shm_perm.mode & ~ACCESSPERMS) | 5080555fb35SKonstantin Belousov (shmidp->shm_perm.mode & ACCESSPERMS); 509921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 5103d903220SDoug Rabson break; 5113d903220SDoug Rabson case IPC_RMID: 512921d05b9SRobert Watson error = ipcperm(td, &shmseg->u.shm_perm, IPC_M); 5130555fb35SKonstantin Belousov if (error != 0) 5140555fb35SKonstantin Belousov return (error); 515921d05b9SRobert Watson shmseg->u.shm_perm.key = IPC_PRIVATE; 516921d05b9SRobert Watson shmseg->u.shm_perm.mode |= SHMSEG_REMOVED; 517921d05b9SRobert Watson if (shmseg->u.shm_nattch <= 0) { 5183d903220SDoug Rabson shm_deallocate_segment(shmseg); 
519f130dcf2SMartin Blapp shm_last_free = IPCID_TO_IX(shmid); 5203d903220SDoug Rabson } 5213d903220SDoug Rabson break; 5223d903220SDoug Rabson #if 0 5233d903220SDoug Rabson case SHM_LOCK: 5243d903220SDoug Rabson case SHM_UNLOCK: 5253d903220SDoug Rabson #endif 5263d903220SDoug Rabson default: 527b6a4b4f9SMatthew Dillon error = EINVAL; 528b6a4b4f9SMatthew Dillon break; 5293d903220SDoug Rabson } 530b6a4b4f9SMatthew Dillon return (error); 5313d903220SDoug Rabson } 5323d903220SDoug Rabson 5330555fb35SKonstantin Belousov int 5340555fb35SKonstantin Belousov kern_shmctl(struct thread *td, int shmid, int cmd, void *buf, size_t *bufsz) 5350555fb35SKonstantin Belousov { 5360555fb35SKonstantin Belousov int error; 5370555fb35SKonstantin Belousov 5380555fb35SKonstantin Belousov SYSVSHM_LOCK(); 5390555fb35SKonstantin Belousov error = kern_shmctl_locked(td, shmid, cmd, buf, bufsz); 5400555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 5410555fb35SKonstantin Belousov return (error); 5420555fb35SKonstantin Belousov } 5430555fb35SKonstantin Belousov 5440555fb35SKonstantin Belousov 54571361470SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 54671361470SJohn Baldwin struct shmctl_args { 54771361470SJohn Baldwin int shmid; 54871361470SJohn Baldwin int cmd; 54971361470SJohn Baldwin struct shmid_ds *buf; 55071361470SJohn Baldwin }; 55171361470SJohn Baldwin #endif 552f130dcf2SMartin Blapp int 5530555fb35SKonstantin Belousov sys_shmctl(struct thread *td, struct shmctl_args *uap) 554f130dcf2SMartin Blapp { 555f130dcf2SMartin Blapp int error = 0; 556f130dcf2SMartin Blapp struct shmid_ds buf; 557f130dcf2SMartin Blapp size_t bufsz; 558f130dcf2SMartin Blapp 5594f18813fSChristian S.J. Peron /* 5604f18813fSChristian S.J. Peron * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support 5614f18813fSChristian S.J. Peron * Linux binaries. If we see the call come through the FreeBSD ABI, 5624f18813fSChristian S.J. Peron * return an error back to the user since we do not support this. 5634f18813fSChristian S.J. Peron */ 5644f18813fSChristian S.J. Peron if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO || 5654f18813fSChristian S.J. Peron uap->cmd == SHM_STAT) 5664f18813fSChristian S.J. Peron return (EINVAL); 5674f18813fSChristian S.J.
Peron 568f130dcf2SMartin Blapp /* IPC_SET needs to copyin the buffer before calling kern_shmctl */ 569f130dcf2SMartin Blapp if (uap->cmd == IPC_SET) { 570f130dcf2SMartin Blapp if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds)))) 571f130dcf2SMartin Blapp goto done; 572f130dcf2SMartin Blapp } 573f130dcf2SMartin Blapp 5742332251cSMax Khon error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz); 575f130dcf2SMartin Blapp if (error) 576f130dcf2SMartin Blapp goto done; 577f130dcf2SMartin Blapp 578f130dcf2SMartin Blapp /* Cases in which we need to copyout */ 579f130dcf2SMartin Blapp switch (uap->cmd) { 580f130dcf2SMartin Blapp case IPC_STAT: 581f130dcf2SMartin Blapp error = copyout(&buf, uap->buf, bufsz); 582f130dcf2SMartin Blapp break; 583f130dcf2SMartin Blapp } 584f130dcf2SMartin Blapp 585f130dcf2SMartin Blapp done: 586f130dcf2SMartin Blapp if (error) { 587f130dcf2SMartin Blapp /* Invalidate the return value */ 588f130dcf2SMartin Blapp td->td_retval[0] = -1; 589f130dcf2SMartin Blapp } 590f130dcf2SMartin Blapp return (error); 591f130dcf2SMartin Blapp } 592f130dcf2SMartin Blapp 593f130dcf2SMartin Blapp 5943d903220SDoug Rabson static int 5950555fb35SKonstantin Belousov shmget_existing(struct thread *td, struct shmget_args *uap, int mode, 5960555fb35SKonstantin Belousov int segnum) 5973d903220SDoug Rabson { 598921d05b9SRobert Watson struct shmid_kernel *shmseg; 599d8d2f476SOlivier Houchard #ifdef MAC 6003d903220SDoug Rabson int error; 601d8d2f476SOlivier Houchard #endif 6023d903220SDoug Rabson 6030555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 6040555fb35SKonstantin Belousov KASSERT(segnum >= 0 && segnum < shmalloced, 6050555fb35SKonstantin Belousov ("segnum %d shmalloced %d", segnum, shmalloced)); 6063d903220SDoug Rabson shmseg = &shmsegs[segnum]; 607dc92aa57SAlan Cox if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 608b618bb96SAlfred Perlstein return (EEXIST); 60914cedfc8SRobert Watson #ifdef MAC 61030d239bcSRobert Watson error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg); 611f50c4fd8SRobert Watson if (error != 0) 6127723d5edSRobert Watson return (error); 61314cedfc8SRobert Watson #endif 614b648d480SJohn Baldwin if (uap->size != 0 && uap->size > shmseg->u.shm_segsz) 615b618bb96SAlfred Perlstein return (EINVAL); 616921d05b9SRobert Watson td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 617b618bb96SAlfred Perlstein return (0); 6183d903220SDoug Rabson } 6193d903220SDoug Rabson 6203d903220SDoug Rabson static int 6210555fb35SKonstantin Belousov shmget_allocate_segment(struct thread *td, struct shmget_args *uap, int mode) 6223d903220SDoug Rabson { 623a854ed98SJohn Baldwin struct ucred *cred = td->td_ucred; 624921d05b9SRobert Watson struct shmid_kernel *shmseg; 6250049f8b2SAlan Cox vm_object_t shm_object; 6260555fb35SKonstantin Belousov int i, segnum; 6270555fb35SKonstantin Belousov size_t size; 6283d903220SDoug Rabson 6290555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 6300cddd8f0SMatthew Dillon 6313d903220SDoug Rabson if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 632b618bb96SAlfred Perlstein return (EINVAL); 633028f979dSDima Dorfman if (shm_nused >= shminfo.shmmni) /* Any shmids left? 
*/ 634b618bb96SAlfred Perlstein return (ENOSPC); 6359e609ddeSJoerg Wunsch size = round_page(uap->size); 6363d903220SDoug Rabson if (shm_committed + btoc(size) > shminfo.shmall) 637b618bb96SAlfred Perlstein return (ENOMEM); 6383d903220SDoug Rabson if (shm_last_free < 0) { 639028f979dSDima Dorfman shmrealloc(); /* Maybe expand the shmsegs[] array. */ 640255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 641921d05b9SRobert Watson if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) 6423d903220SDoug Rabson break; 643255108f3SPeter Wemm if (i == shmalloced) 644b618bb96SAlfred Perlstein return (ENOSPC); 6453d903220SDoug Rabson segnum = i; 6463d903220SDoug Rabson } else { 6473d903220SDoug Rabson segnum = shm_last_free; 6483d903220SDoug Rabson shm_last_free = -1; 6493d903220SDoug Rabson } 6500555fb35SKonstantin Belousov KASSERT(segnum >= 0 && segnum < shmalloced, 6510555fb35SKonstantin Belousov ("segnum %d shmalloced %d", segnum, shmalloced)); 6523d903220SDoug Rabson shmseg = &shmsegs[segnum]; 653afcc55f3SEdward Tomasz Napierala #ifdef RACCT 6543bcf7445SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 6553bcf7445SEdward Tomasz Napierala if (racct_add(td->td_proc, RACCT_NSHM, 1)) { 6563bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 6573bcf7445SEdward Tomasz Napierala return (ENOSPC); 6583bcf7445SEdward Tomasz Napierala } 6593bcf7445SEdward Tomasz Napierala if (racct_add(td->td_proc, RACCT_SHMSIZE, size)) { 6603bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_NSHM, 1); 6613bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 6623bcf7445SEdward Tomasz Napierala return (ENOMEM); 6633bcf7445SEdward Tomasz Napierala } 6643bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 665afcc55f3SEdward Tomasz Napierala #endif 666a51f7119SJohn Dyson 667ae9b8c3aSJohn Dyson /* 668ae9b8c3aSJohn Dyson * We make sure that we have allocated a pager before we need 669ae9b8c3aSJohn Dyson * to. 670ae9b8c3aSJohn Dyson */ 6713364c323SKonstantin Belousov shm_object = vm_pager_allocate(shm_use_phys ? 
OBJT_PHYS : OBJT_SWAP, 6723364c323SKonstantin Belousov 0, size, VM_PROT_DEFAULT, 0, cred); 6733bcf7445SEdward Tomasz Napierala if (shm_object == NULL) { 674afcc55f3SEdward Tomasz Napierala #ifdef RACCT 6753bcf7445SEdward Tomasz Napierala PROC_LOCK(td->td_proc); 6763bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_NSHM, 1); 6773bcf7445SEdward Tomasz Napierala racct_sub(td->td_proc, RACCT_SHMSIZE, size); 6783bcf7445SEdward Tomasz Napierala PROC_UNLOCK(td->td_proc); 679afcc55f3SEdward Tomasz Napierala #endif 6803364c323SKonstantin Belousov return (ENOMEM); 6813bcf7445SEdward Tomasz Napierala } 682c2d5d3eeSAlan Cox shm_object->pg_color = 0; 68389f6b863SAttilio Rao VM_OBJECT_WLOCK(shm_object); 6840049f8b2SAlan Cox vm_object_clear_flag(shm_object, OBJ_ONEMAPPING); 685c2d5d3eeSAlan Cox vm_object_set_flag(shm_object, OBJ_COLORED | OBJ_NOSPLIT); 68689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(shm_object); 687cbd8ec09SJohn Dyson 688b648d480SJohn Baldwin shmseg->object = shm_object; 689921d05b9SRobert Watson shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid; 690921d05b9SRobert Watson shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid; 6910555fb35SKonstantin Belousov shmseg->u.shm_perm.mode = (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 6920555fb35SKonstantin Belousov shmseg->u.shm_perm.key = uap->key; 6930555fb35SKonstantin Belousov shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff; 694b1fb5f9cSEdward Tomasz Napierala shmseg->cred = crhold(cred); 695921d05b9SRobert Watson shmseg->u.shm_segsz = uap->size; 696921d05b9SRobert Watson shmseg->u.shm_cpid = td->td_proc->p_pid; 697921d05b9SRobert Watson shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0; 698921d05b9SRobert Watson shmseg->u.shm_atime = shmseg->u.shm_dtime = 0; 69914cedfc8SRobert Watson #ifdef MAC 70030d239bcSRobert Watson mac_sysvshm_create(cred, shmseg); 70114cedfc8SRobert Watson #endif 702921d05b9SRobert Watson shmseg->u.shm_ctime = time_second; 7033d903220SDoug Rabson shm_committed += btoc(size); 7043d903220SDoug Rabson shm_nused++; 7050555fb35SKonstantin Belousov td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); 7060555fb35SKonstantin Belousov 707b618bb96SAlfred Perlstein return (0); 7083d903220SDoug Rabson } 7093d903220SDoug Rabson 71071361470SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 71171361470SJohn Baldwin struct shmget_args { 71271361470SJohn Baldwin key_t key; 71371361470SJohn Baldwin size_t size; 71471361470SJohn Baldwin int shmflg; 71571361470SJohn Baldwin }; 71671361470SJohn Baldwin #endif 7173d903220SDoug Rabson int 7180555fb35SKonstantin Belousov sys_shmget(struct thread *td, struct shmget_args *uap) 7193d903220SDoug Rabson { 720b6a4b4f9SMatthew Dillon int segnum, mode; 721b6a4b4f9SMatthew Dillon int error; 7223d903220SDoug Rabson 7230304c731SJamie Gritton if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 724c6f55f33SJohn Baldwin return (ENOSYS); 7253d903220SDoug Rabson mode = uap->shmflg & ACCESSPERMS; 7260555fb35SKonstantin Belousov SYSVSHM_LOCK(); 7270555fb35SKonstantin Belousov if (uap->key == IPC_PRIVATE) { 728b40ce416SJulian Elischer error = shmget_allocate_segment(td, uap, mode); 7290555fb35SKonstantin Belousov } else { 7300555fb35SKonstantin Belousov segnum = shm_find_segment_by_key(uap->key); 7310555fb35SKonstantin Belousov if (segnum >= 0) 7320555fb35SKonstantin Belousov error = shmget_existing(td, uap, mode, segnum); 7330555fb35SKonstantin Belousov else if ((uap->shmflg & IPC_CREAT) == 0) 7340555fb35SKonstantin Belousov error = ENOENT; 7350555fb35SKonstantin Belousov 
else 7360555fb35SKonstantin Belousov error = shmget_allocate_segment(td, uap, mode); 7370555fb35SKonstantin Belousov } 7380555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 739b6a4b4f9SMatthew Dillon return (error); 7403d903220SDoug Rabson } 7413d903220SDoug Rabson 74278525ce3SAlfred Perlstein static void 7430555fb35SKonstantin Belousov shmfork_myhook(struct proc *p1, struct proc *p2) 7443d903220SDoug Rabson { 7453d903220SDoug Rabson struct shmmap_state *shmmap_s; 7463d903220SDoug Rabson size_t size; 7473d903220SDoug Rabson int i; 7483d903220SDoug Rabson 7490555fb35SKonstantin Belousov SYSVSHM_LOCK(); 7503d903220SDoug Rabson size = shminfo.shmseg * sizeof(struct shmmap_state); 751a163d034SWarner Losh shmmap_s = malloc(size, M_SHM, M_WAITOK); 7522cc593fdSAlfred Perlstein bcopy(p1->p_vmspace->vm_shm, shmmap_s, size); 7532cc593fdSAlfred Perlstein p2->p_vmspace->vm_shm = shmmap_s; 7540555fb35SKonstantin Belousov for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 7550555fb35SKonstantin Belousov if (shmmap_s->shmid != -1) { 7560555fb35SKonstantin Belousov KASSERT(IPCID_TO_IX(shmmap_s->shmid) >= 0 && 7570555fb35SKonstantin Belousov IPCID_TO_IX(shmmap_s->shmid) < shmalloced, 7580555fb35SKonstantin Belousov ("segnum %d shmalloced %d", 7590555fb35SKonstantin Belousov IPCID_TO_IX(shmmap_s->shmid), shmalloced)); 760921d05b9SRobert Watson shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++; 7610555fb35SKonstantin Belousov } 7620555fb35SKonstantin Belousov } 7630555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 7643d903220SDoug Rabson } 7653d903220SDoug Rabson 76678525ce3SAlfred Perlstein static void 7673db161e0SMatthew Dillon shmexit_myhook(struct vmspace *vm) 7683d903220SDoug Rabson { 7693db161e0SMatthew Dillon struct shmmap_state *base, *shm; 7703d903220SDoug Rabson int i; 7713d903220SDoug Rabson 7720555fb35SKonstantin Belousov base = vm->vm_shm; 7730555fb35SKonstantin Belousov if (base != NULL) { 7743db161e0SMatthew Dillon vm->vm_shm = NULL; 7750555fb35SKonstantin Belousov SYSVSHM_LOCK(); 7763db161e0SMatthew Dillon for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) { 7773db161e0SMatthew Dillon if (shm->shmid != -1) 7783db161e0SMatthew Dillon shm_delete_mapping(vm, shm); 7793db161e0SMatthew Dillon } 7800555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 7813db161e0SMatthew Dillon free(base, M_SHM); 7823db161e0SMatthew Dillon } 7833d903220SDoug Rabson } 7843d903220SDoug Rabson 785255108f3SPeter Wemm static void 786255108f3SPeter Wemm shmrealloc(void) 787255108f3SPeter Wemm { 788921d05b9SRobert Watson struct shmid_kernel *newsegs; 7890555fb35SKonstantin Belousov int i; 7900555fb35SKonstantin Belousov 7910555fb35SKonstantin Belousov SYSVSHM_ASSERT_LOCKED(); 792255108f3SPeter Wemm 793255108f3SPeter Wemm if (shmalloced >= shminfo.shmmni) 794255108f3SPeter Wemm return; 795255108f3SPeter Wemm 796a163d034SWarner Losh newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 797255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 798255108f3SPeter Wemm bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 799255108f3SPeter Wemm for (; i < shminfo.shmmni; i++) { 800921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 801921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 80214cedfc8SRobert Watson #ifdef MAC 80330d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 80414cedfc8SRobert Watson #endif 805255108f3SPeter Wemm } 806255108f3SPeter Wemm free(shmsegs, M_SHM); 807255108f3SPeter Wemm shmsegs = newsegs; 808255108f3SPeter Wemm shmalloced = shminfo.shmmni; 809255108f3SPeter Wemm } 
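/*
 * Illustrative userland sketch (an editorial example, not part of this file
 * or of the kernel build): it shows the shmget()/shmat()/shmdt()/shmctl()
 * sequence that the sys_* entry points in this file service.  The function
 * name shm_demo() is hypothetical; the block is kept under #if 0 so it is
 * never compiled here.
 */
#if 0
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <string.h>

static int
shm_demo(void)
{
	char *p;
	int shmid;

	/* IPC_PRIVATE keys are served by shmget_allocate_segment(). */
	shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid == -1)
		return (-1);

	/* With shmaddr == NULL, kern_shmat_locked() chooses the address. */
	p = shmat(shmid, NULL, 0);
	if (p == (char *)-1)
		return (-1);
	strcpy(p, "hello");

	/* Detach, then remove; with no attaches left, IPC_RMID frees it. */
	(void)shmdt(p);
	(void)shmctl(shmid, IPC_RMID, NULL);
	return (0);
}
#endif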
810255108f3SPeter Wemm 81175d633cbSKonstantin Belousov static struct syscall_helper_data shm_syscalls[] = { 81275d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmat), 81375d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmctl), 81475d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmdt), 81575d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmget), 81675d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 81775d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 8188451d0ddSKip Macy SYSCALL_INIT_HELPER_COMPAT(freebsd7_shmctl), 81975d633cbSKonstantin Belousov #endif 82075d633cbSKonstantin Belousov #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 82175d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmsys), 82275d633cbSKonstantin Belousov #endif 82375d633cbSKonstantin Belousov SYSCALL_INIT_LAST 82475d633cbSKonstantin Belousov }; 82575d633cbSKonstantin Belousov 82675d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 82775d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32.h> 82875d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_ipc.h> 82975d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_proto.h> 83075d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_signal.h> 83175d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_syscall.h> 83275d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_util.h> 83375d633cbSKonstantin Belousov 83475d633cbSKonstantin Belousov static struct syscall_helper_data shm32_syscalls[] = { 8358451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmat), 8368451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmdt), 8378451d0ddSKip Macy SYSCALL32_INIT_HELPER_COMPAT(shmget), 83875d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmsys), 83975d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmctl), 84075d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 84175d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 84275d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd7_freebsd32_shmctl), 84375d633cbSKonstantin Belousov #endif 84475d633cbSKonstantin Belousov SYSCALL_INIT_LAST 84575d633cbSKonstantin Belousov }; 84675d633cbSKonstantin Belousov #endif 84775d633cbSKonstantin Belousov 84875d633cbSKonstantin Belousov static int 8490555fb35SKonstantin Belousov shminit(void) 8503d903220SDoug Rabson { 85175d633cbSKonstantin Belousov int i, error; 852255108f3SPeter Wemm 8534d9d1e82SRuslan Ermilov #ifndef BURN_BRIDGES 8544d9d1e82SRuslan Ermilov if (TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall) != 0) 8554d9d1e82SRuslan Ermilov printf("kern.ipc.shmmaxpgs is now called kern.ipc.shmall!\n"); 8564d9d1e82SRuslan Ermilov #endif 857af3b2549SHans Petter Selasky if (shminfo.shmmax == SHMMAX) { 8584d9d1e82SRuslan Ermilov /* Initialize shmmax dealing with possible overflow. 
*/ 859af3b2549SHans Petter Selasky for (i = PAGE_SIZE; i != 0; i--) { 860a4c24c66SJohn Baldwin shminfo.shmmax = shminfo.shmall * i; 861af3b2549SHans Petter Selasky if ((shminfo.shmmax / shminfo.shmall) == (u_long)i) 8625015c68aSAlfred Perlstein break; 8635015c68aSAlfred Perlstein } 86412075c09SPawel Jakub Dawidek } 865255108f3SPeter Wemm shmalloced = shminfo.shmmni; 866a163d034SWarner Losh shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 867255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) { 868921d05b9SRobert Watson shmsegs[i].u.shm_perm.mode = SHMSEG_FREE; 869921d05b9SRobert Watson shmsegs[i].u.shm_perm.seq = 0; 87014cedfc8SRobert Watson #ifdef MAC 87130d239bcSRobert Watson mac_sysvshm_init(&shmsegs[i]); 87214cedfc8SRobert Watson #endif 8733d903220SDoug Rabson } 8743d903220SDoug Rabson shm_last_free = 0; 8753d903220SDoug Rabson shm_nused = 0; 8763d903220SDoug Rabson shm_committed = 0; 8770555fb35SKonstantin Belousov sx_init(&sysvshmsx, "sysvshmsx"); 87878525ce3SAlfred Perlstein shmexit_hook = &shmexit_myhook; 87978525ce3SAlfred Perlstein shmfork_hook = &shmfork_myhook; 88075d633cbSKonstantin Belousov 881e015b1abSMateusz Guzik error = syscall_helper_register(shm_syscalls, SY_THR_STATIC_KLD); 88275d633cbSKonstantin Belousov if (error != 0) 88375d633cbSKonstantin Belousov return (error); 88475d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 885e015b1abSMateusz Guzik error = syscall32_helper_register(shm32_syscalls, SY_THR_STATIC_KLD); 88675d633cbSKonstantin Belousov if (error != 0) 88775d633cbSKonstantin Belousov return (error); 88875d633cbSKonstantin Belousov #endif 88975d633cbSKonstantin Belousov return (0); 8903d903220SDoug Rabson } 89178525ce3SAlfred Perlstein 89278525ce3SAlfred Perlstein static int 8930555fb35SKonstantin Belousov shmunload(void) 89478525ce3SAlfred Perlstein { 89514cedfc8SRobert Watson int i; 89678525ce3SAlfred Perlstein 89778525ce3SAlfred Perlstein if (shm_nused > 0) 89878525ce3SAlfred Perlstein return (EBUSY); 89978525ce3SAlfred Perlstein 90075d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 90175d633cbSKonstantin Belousov syscall32_helper_unregister(shm32_syscalls); 90275d633cbSKonstantin Belousov #endif 90375d633cbSKonstantin Belousov syscall_helper_unregister(shm_syscalls); 90475d633cbSKonstantin Belousov 9050d9d996dSKonstantin Belousov for (i = 0; i < shmalloced; i++) { 90614cedfc8SRobert Watson #ifdef MAC 90730d239bcSRobert Watson mac_sysvshm_destroy(&shmsegs[i]); 90814cedfc8SRobert Watson #endif 9090d9d996dSKonstantin Belousov /* 9100d9d996dSKonstantin Belousov * Objects might be still mapped into the processes 9110d9d996dSKonstantin Belousov * address spaces. Actual free would happen on the 9120d9d996dSKonstantin Belousov * last mapping destruction. 
9130d9d996dSKonstantin Belousov */ 9140d9d996dSKonstantin Belousov if (shmsegs[i].u.shm_perm.mode != SHMSEG_FREE) 9150d9d996dSKonstantin Belousov vm_object_deallocate(shmsegs[i].object); 9160d9d996dSKonstantin Belousov } 91778525ce3SAlfred Perlstein free(shmsegs, M_SHM); 91878525ce3SAlfred Perlstein shmexit_hook = NULL; 91978525ce3SAlfred Perlstein shmfork_hook = NULL; 9200555fb35SKonstantin Belousov sx_destroy(&sysvshmsx); 92178525ce3SAlfred Perlstein return (0); 92278525ce3SAlfred Perlstein } 92378525ce3SAlfred Perlstein 92478525ce3SAlfred Perlstein static int 925a723c4e1SDima Dorfman sysctl_shmsegs(SYSCTL_HANDLER_ARGS) 926a723c4e1SDima Dorfman { 9270555fb35SKonstantin Belousov int error; 928a723c4e1SDima Dorfman 9290555fb35SKonstantin Belousov SYSVSHM_LOCK(); 9300555fb35SKonstantin Belousov error = SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])); 9310555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 9320555fb35SKonstantin Belousov return (error); 933a723c4e1SDima Dorfman } 934a723c4e1SDima Dorfman 93545f48220SJohn Baldwin #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 93645f48220SJohn Baldwin struct oshmid_ds { 93745f48220SJohn Baldwin struct ipc_perm_old shm_perm; /* operation perms */ 93845f48220SJohn Baldwin int shm_segsz; /* size of segment (bytes) */ 93945f48220SJohn Baldwin u_short shm_cpid; /* pid, creator */ 94045f48220SJohn Baldwin u_short shm_lpid; /* pid, last operation */ 94145f48220SJohn Baldwin short shm_nattch; /* no. of current attaches */ 94245f48220SJohn Baldwin time_t shm_atime; /* last attach time */ 94345f48220SJohn Baldwin time_t shm_dtime; /* last detach time */ 94445f48220SJohn Baldwin time_t shm_ctime; /* last change time */ 94545f48220SJohn Baldwin void *shm_handle; /* internal handle for shm segment */ 94645f48220SJohn Baldwin }; 94745f48220SJohn Baldwin 94845f48220SJohn Baldwin struct oshmctl_args { 94945f48220SJohn Baldwin int shmid; 95045f48220SJohn Baldwin int cmd; 95145f48220SJohn Baldwin struct oshmid_ds *ubuf; 95245f48220SJohn Baldwin }; 95345f48220SJohn Baldwin 95445f48220SJohn Baldwin static int 955ca998284SJohn Baldwin oshmctl(struct thread *td, struct oshmctl_args *uap) 95645f48220SJohn Baldwin { 95745f48220SJohn Baldwin #ifdef COMPAT_43 95845f48220SJohn Baldwin int error = 0; 95945f48220SJohn Baldwin struct shmid_kernel *shmseg; 96045f48220SJohn Baldwin struct oshmid_ds outbuf; 96145f48220SJohn Baldwin 96245f48220SJohn Baldwin if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 96345f48220SJohn Baldwin return (ENOSYS); 964*f16f8610SKonstantin Belousov if (uap->cmd != IPC_STAT) { 965*f16f8610SKonstantin Belousov return (freebsd7_shmctl(td, 966*f16f8610SKonstantin Belousov (struct freebsd7_shmctl_args *)uap)); 967*f16f8610SKonstantin Belousov } 9680555fb35SKonstantin Belousov SYSVSHM_LOCK(); 9690555fb35SKonstantin Belousov shmseg = shm_find_segment(uap->shmid, true); 97045f48220SJohn Baldwin if (shmseg == NULL) { 9710555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 9724cfc037cSKonstantin Belousov return (EINVAL); 97345f48220SJohn Baldwin } 97445f48220SJohn Baldwin error = ipcperm(td, &shmseg->u.shm_perm, IPC_R); 975*f16f8610SKonstantin Belousov if (error != 0) { 976*f16f8610SKonstantin Belousov SYSVSHM_UNLOCK(); 977*f16f8610SKonstantin Belousov return (error); 978*f16f8610SKonstantin Belousov } 9790555fb35SKonstantin Belousov #ifdef MAC 980*f16f8610SKonstantin Belousov error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd); 981*f16f8610SKonstantin Belousov if (error != 0) { 982*f16f8610SKonstantin Belousov 
SYSVSHM_UNLOCK(); 983*f16f8610SKonstantin Belousov return (error); 984*f16f8610SKonstantin Belousov } 98545f48220SJohn Baldwin #endif 98645f48220SJohn Baldwin ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm); 98745f48220SJohn Baldwin outbuf.shm_segsz = shmseg->u.shm_segsz; 98845f48220SJohn Baldwin outbuf.shm_cpid = shmseg->u.shm_cpid; 98945f48220SJohn Baldwin outbuf.shm_lpid = shmseg->u.shm_lpid; 99045f48220SJohn Baldwin outbuf.shm_nattch = shmseg->u.shm_nattch; 99145f48220SJohn Baldwin outbuf.shm_atime = shmseg->u.shm_atime; 99245f48220SJohn Baldwin outbuf.shm_dtime = shmseg->u.shm_dtime; 99345f48220SJohn Baldwin outbuf.shm_ctime = shmseg->u.shm_ctime; 99445f48220SJohn Baldwin outbuf.shm_handle = shmseg->object; 9950555fb35SKonstantin Belousov SYSVSHM_UNLOCK(); 996*f16f8610SKonstantin Belousov error = copyout(&outbuf, uap->ubuf, sizeof(outbuf)); 99745f48220SJohn Baldwin return (error); 99845f48220SJohn Baldwin #else 99945f48220SJohn Baldwin return (EINVAL); 100045f48220SJohn Baldwin #endif 100145f48220SJohn Baldwin } 100245f48220SJohn Baldwin 100345f48220SJohn Baldwin /* XXX casting to (sy_call_t *) is bogus, as usual. */ 100445f48220SJohn Baldwin static sy_call_t *shmcalls[] = { 10058451d0ddSKip Macy (sy_call_t *)sys_shmat, (sy_call_t *)oshmctl, 10068451d0ddSKip Macy (sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget, 1007b648d480SJohn Baldwin (sy_call_t *)freebsd7_shmctl 100845f48220SJohn Baldwin }; 100945f48220SJohn Baldwin 10100555fb35SKonstantin Belousov #ifndef _SYS_SYSPROTO_H_ 101145f48220SJohn Baldwin /* XXX actually varargs. */ 10120555fb35SKonstantin Belousov struct shmsys_args { 101345f48220SJohn Baldwin int which; 101445f48220SJohn Baldwin int a2; 101545f48220SJohn Baldwin int a3; 101645f48220SJohn Baldwin int a4; 10170555fb35SKonstantin Belousov }; 10180555fb35SKonstantin Belousov #endif 10190555fb35SKonstantin Belousov int 10200555fb35SKonstantin Belousov sys_shmsys(struct thread *td, struct shmsys_args *uap) 102145f48220SJohn Baldwin { 102245f48220SJohn Baldwin int error; 102345f48220SJohn Baldwin 102445f48220SJohn Baldwin if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC)) 102545f48220SJohn Baldwin return (ENOSYS); 10260555fb35SKonstantin Belousov if (uap->which < 0 || uap->which >= nitems(shmcalls)) 102745f48220SJohn Baldwin return (EINVAL); 102845f48220SJohn Baldwin error = (*shmcalls[uap->which])(td, &uap->a2); 102945f48220SJohn Baldwin return (error); 103045f48220SJohn Baldwin } 103145f48220SJohn Baldwin 103245f48220SJohn Baldwin #endif /* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */ 103345f48220SJohn Baldwin 103475d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 103575d633cbSKonstantin Belousov 103675d633cbSKonstantin Belousov int 103775d633cbSKonstantin Belousov freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap) 103875d633cbSKonstantin Belousov { 103975d633cbSKonstantin Belousov 104075d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 104175d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 104275d633cbSKonstantin Belousov switch (uap->which) { 104375d633cbSKonstantin Belousov case 0: { /* shmat */ 104475d633cbSKonstantin Belousov struct shmat_args ap; 104575d633cbSKonstantin Belousov 104675d633cbSKonstantin Belousov ap.shmid = uap->a2; 104775d633cbSKonstantin Belousov ap.shmaddr = PTRIN(uap->a3); 104875d633cbSKonstantin Belousov ap.shmflg = uap->a4; 104975d633cbSKonstantin Belousov return (sysent[SYS_shmat].sy_call(td, &ap)); 105075d633cbSKonstantin Belousov } 
105175d633cbSKonstantin Belousov case 2: { /* shmdt */ 105275d633cbSKonstantin Belousov struct shmdt_args ap; 105375d633cbSKonstantin Belousov 105475d633cbSKonstantin Belousov ap.shmaddr = PTRIN(uap->a2); 105575d633cbSKonstantin Belousov return (sysent[SYS_shmdt].sy_call(td, &ap)); 105675d633cbSKonstantin Belousov } 105775d633cbSKonstantin Belousov case 3: { /* shmget */ 105875d633cbSKonstantin Belousov struct shmget_args ap; 105975d633cbSKonstantin Belousov 106075d633cbSKonstantin Belousov ap.key = uap->a2; 106175d633cbSKonstantin Belousov ap.size = uap->a3; 106275d633cbSKonstantin Belousov ap.shmflg = uap->a4; 106375d633cbSKonstantin Belousov return (sysent[SYS_shmget].sy_call(td, &ap)); 106475d633cbSKonstantin Belousov } 106575d633cbSKonstantin Belousov case 4: { /* shmctl */ 106675d633cbSKonstantin Belousov struct freebsd7_freebsd32_shmctl_args ap; 106775d633cbSKonstantin Belousov 106875d633cbSKonstantin Belousov ap.shmid = uap->a2; 106975d633cbSKonstantin Belousov ap.cmd = uap->a3; 107075d633cbSKonstantin Belousov ap.buf = PTRIN(uap->a4); 107175d633cbSKonstantin Belousov return (freebsd7_freebsd32_shmctl(td, &ap)); 107275d633cbSKonstantin Belousov } 107375d633cbSKonstantin Belousov case 1: /* oshmctl */ 107475d633cbSKonstantin Belousov default: 107575d633cbSKonstantin Belousov return (EINVAL); 107675d633cbSKonstantin Belousov } 107775d633cbSKonstantin Belousov #else 107875d633cbSKonstantin Belousov return (nosys(td, NULL)); 107975d633cbSKonstantin Belousov #endif 108075d633cbSKonstantin Belousov } 108175d633cbSKonstantin Belousov 108275d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 108375d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 108475d633cbSKonstantin Belousov int 108575d633cbSKonstantin Belousov freebsd7_freebsd32_shmctl(struct thread *td, 108675d633cbSKonstantin Belousov struct freebsd7_freebsd32_shmctl_args *uap) 108775d633cbSKonstantin Belousov { 108875d633cbSKonstantin Belousov int error = 0; 108975d633cbSKonstantin Belousov union { 109075d633cbSKonstantin Belousov struct shmid_ds shmid_ds; 109175d633cbSKonstantin Belousov struct shm_info shm_info; 109275d633cbSKonstantin Belousov struct shminfo shminfo; 109375d633cbSKonstantin Belousov } u; 109475d633cbSKonstantin Belousov union { 109575d633cbSKonstantin Belousov struct shmid_ds32_old shmid_ds32; 109675d633cbSKonstantin Belousov struct shm_info32 shm_info32; 109775d633cbSKonstantin Belousov struct shminfo32 shminfo32; 109875d633cbSKonstantin Belousov } u32; 109975d633cbSKonstantin Belousov size_t sz; 110075d633cbSKonstantin Belousov 110175d633cbSKonstantin Belousov if (uap->cmd == IPC_SET) { 110275d633cbSKonstantin Belousov if ((error = copyin(uap->buf, &u32.shmid_ds32, 110375d633cbSKonstantin Belousov sizeof(u32.shmid_ds32)))) 110475d633cbSKonstantin Belousov goto done; 110575d633cbSKonstantin Belousov freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm, 110675d633cbSKonstantin Belousov &u.shmid_ds.shm_perm); 110775d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_segsz); 110875d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_lpid); 110975d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_cpid); 111075d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_nattch); 111175d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_atime); 111275d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_dtime); 111375d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_ctime); 
111475d633cbSKonstantin Belousov } 111575d633cbSKonstantin Belousov 111675d633cbSKonstantin Belousov error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz); 111775d633cbSKonstantin Belousov if (error) 111875d633cbSKonstantin Belousov goto done; 111975d633cbSKonstantin Belousov 112075d633cbSKonstantin Belousov /* Cases in which we need to copyout */ 112175d633cbSKonstantin Belousov switch (uap->cmd) { 112275d633cbSKonstantin Belousov case IPC_INFO: 112375d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmax); 112475d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmin); 112575d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmni); 112675d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmseg); 112775d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmall); 112875d633cbSKonstantin Belousov error = copyout(&u32.shminfo32, uap->buf, 112975d633cbSKonstantin Belousov sizeof(u32.shminfo32)); 113075d633cbSKonstantin Belousov break; 113175d633cbSKonstantin Belousov case SHM_INFO: 113275d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, used_ids); 113375d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_rss); 113475d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_tot); 113575d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_swp); 113675d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, swap_attempts); 113775d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, swap_successes); 113875d633cbSKonstantin Belousov error = copyout(&u32.shm_info32, uap->buf, 113975d633cbSKonstantin Belousov sizeof(u32.shm_info32)); 114075d633cbSKonstantin Belousov break; 114175d633cbSKonstantin Belousov case SHM_STAT: 114275d633cbSKonstantin Belousov case IPC_STAT: 114375d633cbSKonstantin Belousov freebsd32_ipcperm_old_out(&u.shmid_ds.shm_perm, 114475d633cbSKonstantin Belousov &u32.shmid_ds32.shm_perm); 114575d633cbSKonstantin Belousov if (u.shmid_ds.shm_segsz > INT32_MAX) 114675d633cbSKonstantin Belousov u32.shmid_ds32.shm_segsz = INT32_MAX; 114775d633cbSKonstantin Belousov else 114875d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_segsz); 114975d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_lpid); 115075d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_cpid); 115175d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_nattch); 115275d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_atime); 115375d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_dtime); 115475d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_ctime); 115575d633cbSKonstantin Belousov u32.shmid_ds32.shm_internal = 0; 115675d633cbSKonstantin Belousov error = copyout(&u32.shmid_ds32, uap->buf, 115775d633cbSKonstantin Belousov sizeof(u32.shmid_ds32)); 115875d633cbSKonstantin Belousov break; 115975d633cbSKonstantin Belousov } 116075d633cbSKonstantin Belousov 116175d633cbSKonstantin Belousov done: 116275d633cbSKonstantin Belousov if (error) { 116375d633cbSKonstantin Belousov /* Invalidate the return value */ 116475d633cbSKonstantin Belousov td->td_retval[0] = -1; 116575d633cbSKonstantin Belousov } 116675d633cbSKonstantin Belousov return (error); 116775d633cbSKonstantin Belousov } 116875d633cbSKonstantin Belousov #endif 116975d633cbSKonstantin Belousov 117075d633cbSKonstantin Belousov int 117175d633cbSKonstantin Belousov freebsd32_shmctl(struct thread *td, struct freebsd32_shmctl_args *uap) 117275d633cbSKonstantin Belousov { 117375d633cbSKonstantin Belousov int error = 0; 
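	/* Native structures and their 32-bit counterparts used for the conversions below. */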
117475d633cbSKonstantin Belousov union { 117575d633cbSKonstantin Belousov struct shmid_ds shmid_ds; 117675d633cbSKonstantin Belousov struct shm_info shm_info; 117775d633cbSKonstantin Belousov struct shminfo shminfo; 117875d633cbSKonstantin Belousov } u; 117975d633cbSKonstantin Belousov union { 118075d633cbSKonstantin Belousov struct shmid_ds32 shmid_ds32; 118175d633cbSKonstantin Belousov struct shm_info32 shm_info32; 118275d633cbSKonstantin Belousov struct shminfo32 shminfo32; 118375d633cbSKonstantin Belousov } u32; 118475d633cbSKonstantin Belousov size_t sz; 118575d633cbSKonstantin Belousov 118675d633cbSKonstantin Belousov if (uap->cmd == IPC_SET) { 118775d633cbSKonstantin Belousov if ((error = copyin(uap->buf, &u32.shmid_ds32, 118875d633cbSKonstantin Belousov sizeof(u32.shmid_ds32)))) 118975d633cbSKonstantin Belousov goto done; 119075d633cbSKonstantin Belousov freebsd32_ipcperm_in(&u32.shmid_ds32.shm_perm, 119175d633cbSKonstantin Belousov &u.shmid_ds.shm_perm); 119275d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_segsz); 119375d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_lpid); 119475d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_cpid); 119575d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_nattch); 119675d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_atime); 119775d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_dtime); 119875d633cbSKonstantin Belousov CP(u32.shmid_ds32, u.shmid_ds, shm_ctime); 119975d633cbSKonstantin Belousov } 120075d633cbSKonstantin Belousov 120175d633cbSKonstantin Belousov error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz); 120275d633cbSKonstantin Belousov if (error) 120375d633cbSKonstantin Belousov goto done; 120475d633cbSKonstantin Belousov 120575d633cbSKonstantin Belousov /* Cases in which we need to copyout */ 120675d633cbSKonstantin Belousov switch (uap->cmd) { 120775d633cbSKonstantin Belousov case IPC_INFO: 120875d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmax); 120975d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmin); 121075d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmmni); 121175d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmseg); 121275d633cbSKonstantin Belousov CP(u.shminfo, u32.shminfo32, shmall); 121375d633cbSKonstantin Belousov error = copyout(&u32.shminfo32, uap->buf, 121475d633cbSKonstantin Belousov sizeof(u32.shminfo32)); 121575d633cbSKonstantin Belousov break; 121675d633cbSKonstantin Belousov case SHM_INFO: 121775d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, used_ids); 121875d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_rss); 121975d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_tot); 122075d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, shm_swp); 122175d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, swap_attempts); 122275d633cbSKonstantin Belousov CP(u.shm_info, u32.shm_info32, swap_successes); 122375d633cbSKonstantin Belousov error = copyout(&u32.shm_info32, uap->buf, 122475d633cbSKonstantin Belousov sizeof(u32.shm_info32)); 122575d633cbSKonstantin Belousov break; 122675d633cbSKonstantin Belousov case SHM_STAT: 122775d633cbSKonstantin Belousov case IPC_STAT: 122875d633cbSKonstantin Belousov freebsd32_ipcperm_out(&u.shmid_ds.shm_perm, 122975d633cbSKonstantin Belousov &u32.shmid_ds32.shm_perm); 123075d633cbSKonstantin Belousov if (u.shmid_ds.shm_segsz > INT32_MAX) 123175d633cbSKonstantin Belousov u32.shmid_ds32.shm_segsz = INT32_MAX; 
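		/* A segment too large for the 32-bit shm_segsz field is reported as INT32_MAX. */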
123275d633cbSKonstantin Belousov else 123375d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_segsz); 123475d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_lpid); 123575d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_cpid); 123675d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_nattch); 123775d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_atime); 123875d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_dtime); 123975d633cbSKonstantin Belousov CP(u.shmid_ds, u32.shmid_ds32, shm_ctime); 124075d633cbSKonstantin Belousov error = copyout(&u32.shmid_ds32, uap->buf, 124175d633cbSKonstantin Belousov sizeof(u32.shmid_ds32)); 124275d633cbSKonstantin Belousov break; 124375d633cbSKonstantin Belousov } 124475d633cbSKonstantin Belousov 124575d633cbSKonstantin Belousov done: 124675d633cbSKonstantin Belousov if (error) { 124775d633cbSKonstantin Belousov /* Invalidate the return value */ 124875d633cbSKonstantin Belousov td->td_retval[0] = -1; 124975d633cbSKonstantin Belousov } 125075d633cbSKonstantin Belousov return (error); 125175d633cbSKonstantin Belousov } 125275d633cbSKonstantin Belousov #endif 125375d633cbSKonstantin Belousov 1254b648d480SJohn Baldwin #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 1255b648d480SJohn Baldwin defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 1256b648d480SJohn Baldwin 125775d633cbSKonstantin Belousov #ifndef CP 1258b648d480SJohn Baldwin #define CP(src, dst, fld) do { (dst).fld = (src).fld; } while (0) 125975d633cbSKonstantin Belousov #endif 1260b648d480SJohn Baldwin 1261b648d480SJohn Baldwin #ifndef _SYS_SYSPROTO_H_ 1262b648d480SJohn Baldwin struct freebsd7_shmctl_args { 1263b648d480SJohn Baldwin int shmid; 1264b648d480SJohn Baldwin int cmd; 1265b648d480SJohn Baldwin struct shmid_ds_old *buf; 1266b648d480SJohn Baldwin }; 1267b648d480SJohn Baldwin #endif 1268b648d480SJohn Baldwin int 12690555fb35SKonstantin Belousov freebsd7_shmctl(struct thread *td, struct freebsd7_shmctl_args *uap) 1270b648d480SJohn Baldwin { 1271b648d480SJohn Baldwin int error = 0; 1272b648d480SJohn Baldwin struct shmid_ds_old old; 1273b648d480SJohn Baldwin struct shmid_ds buf; 1274b648d480SJohn Baldwin size_t bufsz; 1275b648d480SJohn Baldwin 1276b648d480SJohn Baldwin /* 1277b648d480SJohn Baldwin * The only reason IPC_INFO, SHM_INFO, SHM_STAT exist is to support 1278b648d480SJohn Baldwin * Linux binaries. If we see the call come through the FreeBSD ABI, 1279b648d480SJohn Baldwin * return an error back to the user since we do not support this.
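	 * (Linux binaries are expected to reach these commands through the Linux ABI emulation instead.)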
1280b648d480SJohn Baldwin */ 1281b648d480SJohn Baldwin if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO || 1282b648d480SJohn Baldwin uap->cmd == SHM_STAT) 1283b648d480SJohn Baldwin return (EINVAL); 1284b648d480SJohn Baldwin 1285b648d480SJohn Baldwin /* IPC_SET needs to copyin the buffer before calling kern_shmctl */ 1286b648d480SJohn Baldwin if (uap->cmd == IPC_SET) { 1287b648d480SJohn Baldwin if ((error = copyin(uap->buf, &old, sizeof(old)))) 1288b648d480SJohn Baldwin goto done; 1289b648d480SJohn Baldwin ipcperm_old2new(&old.shm_perm, &buf.shm_perm); 1290b648d480SJohn Baldwin CP(old, buf, shm_segsz); 1291b648d480SJohn Baldwin CP(old, buf, shm_lpid); 1292b648d480SJohn Baldwin CP(old, buf, shm_cpid); 1293b648d480SJohn Baldwin CP(old, buf, shm_nattch); 1294b648d480SJohn Baldwin CP(old, buf, shm_atime); 1295b648d480SJohn Baldwin CP(old, buf, shm_dtime); 1296b648d480SJohn Baldwin CP(old, buf, shm_ctime); 1297b648d480SJohn Baldwin } 1298b648d480SJohn Baldwin 1299b648d480SJohn Baldwin error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz); 1300b648d480SJohn Baldwin if (error) 1301b648d480SJohn Baldwin goto done; 1302b648d480SJohn Baldwin 1303b648d480SJohn Baldwin /* Cases in which we need to copyout */ 1304b648d480SJohn Baldwin switch (uap->cmd) { 1305b648d480SJohn Baldwin case IPC_STAT: 1306b648d480SJohn Baldwin ipcperm_new2old(&buf.shm_perm, &old.shm_perm); 1307b648d480SJohn Baldwin if (buf.shm_segsz > INT_MAX) 1308b648d480SJohn Baldwin old.shm_segsz = INT_MAX; 1309b648d480SJohn Baldwin else 1310b648d480SJohn Baldwin CP(buf, old, shm_segsz); 1311b648d480SJohn Baldwin CP(buf, old, shm_lpid); 1312b648d480SJohn Baldwin CP(buf, old, shm_cpid); 1313b648d480SJohn Baldwin if (buf.shm_nattch > SHRT_MAX) 1314b648d480SJohn Baldwin old.shm_nattch = SHRT_MAX; 1315b648d480SJohn Baldwin else 1316b648d480SJohn Baldwin CP(buf, old, shm_nattch); 1317b648d480SJohn Baldwin CP(buf, old, shm_atime); 1318b648d480SJohn Baldwin CP(buf, old, shm_dtime); 1319b648d480SJohn Baldwin CP(buf, old, shm_ctime); 1320b648d480SJohn Baldwin old.shm_internal = NULL; 1321b648d480SJohn Baldwin error = copyout(&old, uap->buf, sizeof(old)); 1322b648d480SJohn Baldwin break; 1323b648d480SJohn Baldwin } 1324b648d480SJohn Baldwin 1325b648d480SJohn Baldwin done: 1326b648d480SJohn Baldwin if (error) { 1327b648d480SJohn Baldwin /* Invalidate the return value */ 1328b648d480SJohn Baldwin td->td_retval[0] = -1; 1329b648d480SJohn Baldwin } 1330b648d480SJohn Baldwin return (error); 1331b648d480SJohn Baldwin } 1332b648d480SJohn Baldwin 1333b648d480SJohn Baldwin #endif /* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 || 1334b648d480SJohn Baldwin COMPAT_FREEBSD7 */ 1335b648d480SJohn Baldwin 1336a723c4e1SDima Dorfman static int 133778525ce3SAlfred Perlstein sysvshm_modload(struct module *module, int cmd, void *arg) 133878525ce3SAlfred Perlstein { 133978525ce3SAlfred Perlstein int error = 0; 134078525ce3SAlfred Perlstein 134178525ce3SAlfred Perlstein switch (cmd) { 134278525ce3SAlfred Perlstein case MOD_LOAD: 134375d633cbSKonstantin Belousov error = shminit(); 134475d633cbSKonstantin Belousov if (error != 0) 134575d633cbSKonstantin Belousov shmunload(); 134678525ce3SAlfred Perlstein break; 134778525ce3SAlfred Perlstein case MOD_UNLOAD: 134878525ce3SAlfred Perlstein error = shmunload(); 134978525ce3SAlfred Perlstein break; 135078525ce3SAlfred Perlstein case MOD_SHUTDOWN: 135178525ce3SAlfred Perlstein break; 135278525ce3SAlfred Perlstein default: 135378525ce3SAlfred Perlstein error = EINVAL; 135478525ce3SAlfred Perlstein break; 
135578525ce3SAlfred Perlstein } 135678525ce3SAlfred Perlstein return (error); 135778525ce3SAlfred Perlstein } 135878525ce3SAlfred Perlstein 1359faa784b7SDag-Erling Smørgrav static moduledata_t sysvshm_mod = { 1360faa784b7SDag-Erling Smørgrav "sysvshm", 136178525ce3SAlfred Perlstein &sysvshm_modload, 136278525ce3SAlfred Perlstein NULL 136378525ce3SAlfred Perlstein }; 136478525ce3SAlfred Perlstein 136571361470SJohn Baldwin DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST); 1366faa784b7SDag-Erling Smørgrav MODULE_VERSION(sysvshm, 1); 1367