/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*-
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003-2005 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project in part by McAfee
 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
 * program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

FEATURE(sysv_shm, "System V shared memory segments support");

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shmalloced;
vm_size_t shm_committed;
static struct shmid_kernel *shmsegs;

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static struct shmid_kernel *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static int shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	131072	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Maximum shared memory segment size");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Minimum shared memory segment size");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
    "Number of shared memory identifiers");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
    "Number of segments per process");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Maximum number of pages available for shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
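/*
 * Example (illustrative, not part of the original code): the CTLFLAG_RW
 * limits above may be tuned at runtime, e.g. "sysctl kern.ipc.shmmax=...",
 * whereas shmmni and shmseg are CTLFLAG_RDTUN and can only be set as
 * loader(8) tunables; they are fetched in shminit() below.
 */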
Peron "Current number of shared memory segments allocated"); 1853d903220SDoug Rabson 1863d903220SDoug Rabson static int 1873d903220SDoug Rabson shm_find_segment_by_key(key) 1883d903220SDoug Rabson key_t key; 1893d903220SDoug Rabson { 1903d903220SDoug Rabson int i; 1913d903220SDoug Rabson 192255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 193921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 194921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 195b618bb96SAlfred Perlstein return (i); 196b618bb96SAlfred Perlstein return (-1); 1973d903220SDoug Rabson } 1983d903220SDoug Rabson 199921d05b9SRobert Watson static struct shmid_kernel * 2002332251cSMax Khon shm_find_segment_by_shmid(int shmid) 2013d903220SDoug Rabson { 2023d903220SDoug Rabson int segnum; 203921d05b9SRobert Watson struct shmid_kernel *shmseg; 2043d903220SDoug Rabson 2053d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 206255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 207b618bb96SAlfred Perlstein return (NULL); 2083d903220SDoug Rabson shmseg = &shmsegs[segnum]; 209921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2102332251cSMax Khon (!shm_allow_removed && 211921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 212921d05b9SRobert Watson shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid)) 213b618bb96SAlfred Perlstein return (NULL); 214b618bb96SAlfred Perlstein return (shmseg); 2153d903220SDoug Rabson } 2163d903220SDoug Rabson 217921d05b9SRobert Watson static struct shmid_kernel * 2182332251cSMax Khon shm_find_segment_by_shmidx(int segnum) 219491dec93SMichael Reifenberger { 220921d05b9SRobert Watson struct shmid_kernel *shmseg; 221491dec93SMichael Reifenberger 222491dec93SMichael Reifenberger if (segnum < 0 || segnum >= shmalloced) 223b618bb96SAlfred Perlstein return (NULL); 224491dec93SMichael Reifenberger shmseg = &shmsegs[segnum]; 225921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2262332251cSMax Khon (!shm_allow_removed && 227921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0)) 228b618bb96SAlfred Perlstein return (NULL); 229b618bb96SAlfred Perlstein return (shmseg); 230491dec93SMichael Reifenberger } 231491dec93SMichael Reifenberger 2323d903220SDoug Rabson static void 2333d903220SDoug Rabson shm_deallocate_segment(shmseg) 234921d05b9SRobert Watson struct shmid_kernel *shmseg; 2353d903220SDoug Rabson { 23645329b60SKonstantin Belousov vm_size_t size; 2373d903220SDoug Rabson 2380cddd8f0SMatthew Dillon GIANT_REQUIRED; 2390cddd8f0SMatthew Dillon 240b648d480SJohn Baldwin vm_object_deallocate(shmseg->object); 241b648d480SJohn Baldwin shmseg->object = NULL; 242b648d480SJohn Baldwin size = round_page(shmseg->u.shm_segsz); 2433d903220SDoug Rabson shm_committed -= btoc(size); 2443d903220SDoug Rabson shm_nused--; 245921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 24614cedfc8SRobert Watson #ifdef MAC 24730d239bcSRobert Watson mac_sysvshm_cleanup(shmseg); 24814cedfc8SRobert Watson #endif 2493d903220SDoug Rabson } 2503d903220SDoug Rabson 2513d903220SDoug Rabson static int 2523db161e0SMatthew Dillon shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 2533d903220SDoug Rabson { 254921d05b9SRobert Watson struct shmid_kernel *shmseg; 2553d903220SDoug Rabson int segnum, result; 25645329b60SKonstantin Belousov vm_size_t size; 2573d903220SDoug Rabson 2580cddd8f0SMatthew Dillon GIANT_REQUIRED; 259028f979dSDima Dorfman 2603d903220SDoug Rabson segnum = 
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	vm_size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->u.shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
#ifdef MAC
	struct shmid_kernel *shmsegptr;
#endif
	int i;
	int error = 0;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr);
	if (error != 0)
		goto done2;
#endif
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
		    M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
#ifdef MAC
	error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg);
	if (error != 0)
		goto done2;
#endif
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->u.shm_segsz);
	prot = VM_PROT_READ;
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	vm_object_reference(shmseg->object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->object,
	    0, &attach_va, size, (flags & MAP_FIXED) ?
	    VMFS_NO_SPACE : VMFS_ANY_SPACE, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->object);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_kernel *shmseg;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	/*
	 * It is possible that kern_shmctl is being called from the Linux ABI
	 * layer, in which case, we will need to implement IPC_INFO.  It should
	 * be noted that other shmctl calls will be funneled through here for
	 * Linux binaries as well.
	 *
	 * NB: The Linux ABI layer will convert this data to structure(s) more
	 * consistent with the Linux ABI.
	 */
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd);
	if (error != 0)
		goto done2;
#endif
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}
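/*
 * Added descriptive note: the shmctl() wrapper below copies the user's
 * struct shmid_ds in (for IPC_SET) and out (for IPC_STAT), while
 * kern_shmctl() above operates on kernel-space buffers so that it can also
 * be reused by the 32-bit and Linux compatibility layers.
 */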
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif
int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/*
	 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exist is to support
	 * Linux binaries.  If we see the call come through the FreeBSD ABI,
	 * return an error back to the user since we do not support this.
	 */
	if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
	    uap->cmd == SHM_STAT)
		return (EINVAL);

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_kernel *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
#ifdef MAC
	error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg);
	if (error != 0)
		return (error);
#endif
	if (uap->size != 0 && uap->size > shmseg->u.shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return (0);
}

static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->u.shm_perm.key = uap->key;
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	shm_object = vm_pager_allocate(shm_use_phys ?
	    OBJT_PHYS : OBJT_SWAP,
	    0, size, VM_PROT_DEFAULT, 0, cred);
	if (shm_object == NULL)
		return (ENOMEM);
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->object = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_segsz = uap->size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#ifdef MAC
	mac_sysvshm_create(cred, shmseg);
#endif
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}
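/*
 * Illustrative userland usage of the syscalls implemented above (a minimal
 * sketch using the standard <sys/ipc.h>/<sys/shm.h> API, not part of this
 * file; error checking omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	 // attach read/write
 *	p[0] = 1;			 // use the shared page
 *	shmdt(p);			 // detach
 *	shmctl(id, IPC_RMID, NULL);	 // mark the segment for removal
 */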

static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	mtx_lock(&Giant);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
	mtx_unlock(&Giant);
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		mtx_lock(&Giant);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		mtx_unlock(&Giant);
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_kernel *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		newsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_sysvshm_init(&newsegs[i]);
#endif
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
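/*
 * Added descriptive note: the syscall registration tables below are
 * installed by shminit() through syscall_helper_register() (and their
 * 32-bit counterparts through syscall32_helper_register()) and removed
 * again by shmunload(), which is what lets SysV shared memory be built
 * as a loadable module.
 */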
defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43)) 83175d633cbSKonstantin Belousov SYSCALL_INIT_HELPER(shmsys), 83275d633cbSKonstantin Belousov #endif 83375d633cbSKonstantin Belousov SYSCALL_INIT_LAST 83475d633cbSKonstantin Belousov }; 83575d633cbSKonstantin Belousov 83675d633cbSKonstantin Belousov #ifdef COMPAT_FREEBSD32 83775d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32.h> 83875d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_ipc.h> 83975d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_proto.h> 84075d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_signal.h> 84175d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_syscall.h> 84275d633cbSKonstantin Belousov #include <compat/freebsd32/freebsd32_util.h> 84375d633cbSKonstantin Belousov 84475d633cbSKonstantin Belousov static struct syscall_helper_data shm32_syscalls[] = { 84575d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(shmat), 84675d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(shmdt), 84775d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(shmget), 84875d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmsys), 84975d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd32_shmctl), 85075d633cbSKonstantin Belousov #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \ 85175d633cbSKonstantin Belousov defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) 85275d633cbSKonstantin Belousov SYSCALL32_INIT_HELPER(freebsd7_freebsd32_shmctl), 85375d633cbSKonstantin Belousov #endif 85475d633cbSKonstantin Belousov SYSCALL_INIT_LAST 85575d633cbSKonstantin Belousov }; 85675d633cbSKonstantin Belousov #endif 85775d633cbSKonstantin Belousov 85875d633cbSKonstantin Belousov static int 85978525ce3SAlfred Perlstein shminit() 8603d903220SDoug Rabson { 86175d633cbSKonstantin Belousov int i, error; 862255108f3SPeter Wemm 8634d9d1e82SRuslan Ermilov #ifndef BURN_BRIDGES 8644d9d1e82SRuslan Ermilov if (TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall) != 0) 8654d9d1e82SRuslan Ermilov printf("kern.ipc.shmmaxpgs is now called kern.ipc.shmall!\n"); 8664d9d1e82SRuslan Ermilov #endif 8674d9d1e82SRuslan Ermilov TUNABLE_ULONG_FETCH("kern.ipc.shmall", &shminfo.shmall); 8684d9d1e82SRuslan Ermilov 8694d9d1e82SRuslan Ermilov /* Initialize shmmax dealing with possible overflow. */ 8709d4156aeSAlfred Perlstein for (i = PAGE_SIZE; i > 0; i--) { 871a4c24c66SJohn Baldwin shminfo.shmmax = shminfo.shmall * i; 8725015c68aSAlfred Perlstein if (shminfo.shmmax >= shminfo.shmall) 8735015c68aSAlfred Perlstein break; 8745015c68aSAlfred Perlstein } 8754d9d1e82SRuslan Ermilov 8769baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmmin", &shminfo.shmmin); 8779baea4b4SChristian S.J. Peron TUNABLE_ULONG_FETCH("kern.ipc.shmmni", &shminfo.shmmni); 8789baea4b4SChristian S.J. 
	TUNABLE_ULONG_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_sysvshm_init(&shmsegs[i]);
#endif
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;

	error = syscall_helper_register(shm_syscalls);
	if (error != 0)
		return (error);
#ifdef COMPAT_FREEBSD32
	error = syscall32_helper_register(shm32_syscalls);
	if (error != 0)
		return (error);
#endif
	return (0);
}

static int
shmunload()
{
	int i;

	if (shm_nused > 0)
		return (EBUSY);

#ifdef COMPAT_FREEBSD32
	syscall32_helper_unregister(shm32_syscalls);
#endif
	syscall_helper_unregister(shm_syscalls);

	for (i = 0; i < shmalloced; i++) {
#ifdef MAC
		mac_sysvshm_destroy(&shmsegs[i]);
#endif
		/*
		 * Objects might be still mapped into the processes
		 * address spaces.  Actual free would happen on the
		 * last mapping destruction.
		 */
		if (shmsegs[i].u.shm_perm.mode != SHMSEG_FREE)
			vm_object_deallocate(shmsegs[i].object);
	}
	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
struct oshmid_ds {
	struct ipc_perm_old shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	u_short shm_cpid;		/* pid, creator */
	u_short shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(struct thread *td, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
#ifdef MAC
		error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd);
		if (error != 0)
			goto done2;
#endif
		ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm);
		outbuf.shm_segsz = shmseg->u.shm_segsz;
		outbuf.shm_cpid = shmseg->u.shm_cpid;
		outbuf.shm_lpid = shmseg->u.shm_lpid;
		outbuf.shm_nattch = shmseg->u.shm_nattch;
		outbuf.shm_atime = shmseg->u.shm_atime;
		outbuf.shm_dtime = shmseg->u.shm_dtime;
		outbuf.shm_ctime = shmseg->u.shm_ctime;
		outbuf.shm_handle = shmseg->object;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		error = freebsd7_shmctl(td, (struct freebsd7_shmctl_args *)uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)freebsd7_shmctl
};

int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int which;
		int a2;
		int a3;
		int a4;
	} */ *uap;
{
	int error;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

#endif /* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */

#ifdef COMPAT_FREEBSD32

int
freebsd32_shmsys(struct thread *td, struct freebsd32_shmsys_args *uap)
{

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
	switch (uap->which) {
	case 0: {	/* shmat */
		struct shmat_args ap;

		ap.shmid = uap->a2;
		ap.shmaddr = PTRIN(uap->a3);
		ap.shmflg = uap->a4;
		return (sysent[SYS_shmat].sy_call(td, &ap));
	}
	case 2: {	/* shmdt */
		struct shmdt_args ap;

		ap.shmaddr = PTRIN(uap->a2);
		return (sysent[SYS_shmdt].sy_call(td, &ap));
	}
	case 3: {	/* shmget */
		struct shmget_args ap;

		ap.key = uap->a2;
		ap.size = uap->a3;
		ap.shmflg = uap->a4;
		return (sysent[SYS_shmget].sy_call(td, &ap));
	}
	case 4: {	/* shmctl */
		struct freebsd7_freebsd32_shmctl_args ap;

		ap.shmid = uap->a2;
		ap.cmd = uap->a3;
		ap.buf = PTRIN(uap->a4);
		return (freebsd7_freebsd32_shmctl(td, &ap));
	}
	case 1:		/* oshmctl */
	default:
		return (EINVAL);
	}
#else
	return (nosys(td, NULL));
#endif
}

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
int
freebsd7_freebsd32_shmctl(struct thread *td,
    struct freebsd7_freebsd32_shmctl_args *uap)
{
	int error = 0;
	union {
		struct shmid_ds shmid_ds;
		struct shm_info shm_info;
		struct shminfo shminfo;
	} u;
	union {
		struct shmid_ds32_old shmid_ds32;
		struct shm_info32 shm_info32;
		struct shminfo32 shminfo32;
	} u32;
	size_t sz;

	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &u32.shmid_ds32,
		    sizeof(u32.shmid_ds32))))
			goto done;
		freebsd32_ipcperm_old_in(&u32.shmid_ds32.shm_perm,
		    &u.shmid_ds.shm_perm);
		CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
		CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
		CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
		CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
		CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
        switch (uap->cmd) {
        case IPC_INFO:
                CP(u.shminfo, u32.shminfo32, shmmax);
                CP(u.shminfo, u32.shminfo32, shmmin);
                CP(u.shminfo, u32.shminfo32, shmmni);
                CP(u.shminfo, u32.shminfo32, shmseg);
                CP(u.shminfo, u32.shminfo32, shmall);
                error = copyout(&u32.shminfo32, uap->buf,
                    sizeof(u32.shminfo32));
                break;
        case SHM_INFO:
                CP(u.shm_info, u32.shm_info32, used_ids);
                CP(u.shm_info, u32.shm_info32, shm_rss);
                CP(u.shm_info, u32.shm_info32, shm_tot);
                CP(u.shm_info, u32.shm_info32, shm_swp);
                CP(u.shm_info, u32.shm_info32, swap_attempts);
                CP(u.shm_info, u32.shm_info32, swap_successes);
                error = copyout(&u32.shm_info32, uap->buf,
                    sizeof(u32.shm_info32));
                break;
        case SHM_STAT:
        case IPC_STAT:
                freebsd32_ipcperm_old_out(&u.shmid_ds.shm_perm,
                    &u32.shmid_ds32.shm_perm);
                if (u.shmid_ds.shm_segsz > INT32_MAX)
                        u32.shmid_ds32.shm_segsz = INT32_MAX;
                else
                        CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
                CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
                CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
                CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
                CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
                CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
                CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
                u32.shmid_ds32.shm_internal = 0;
                error = copyout(&u32.shmid_ds32, uap->buf,
                    sizeof(u32.shmid_ds32));
                break;
        }

done:
        if (error) {
                /* Invalidate the return value */
                td->td_retval[0] = -1;
        }
        return (error);
}
#endif

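/*
 * freebsd32_shmctl() performs the same conversion dance as
 * freebsd7_freebsd32_shmctl() above, but against the current 32-bit
 * shmid_ds layout (struct shmid_ds32) rather than the pre-FreeBSD 8 one
 * (struct shmid_ds32_old), so it uses the freebsd32_ipcperm_in()/_out()
 * converters instead of the _old variants.
 */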
int
freebsd32_shmctl(struct thread *td, struct freebsd32_shmctl_args *uap)
{
        int error = 0;
        union {
                struct shmid_ds shmid_ds;
                struct shm_info shm_info;
                struct shminfo shminfo;
        } u;
        union {
                struct shmid_ds32 shmid_ds32;
                struct shm_info32 shm_info32;
                struct shminfo32 shminfo32;
        } u32;
        size_t sz;

        if (uap->cmd == IPC_SET) {
                if ((error = copyin(uap->buf, &u32.shmid_ds32,
                    sizeof(u32.shmid_ds32))))
                        goto done;
                freebsd32_ipcperm_in(&u32.shmid_ds32.shm_perm,
                    &u.shmid_ds.shm_perm);
                CP(u32.shmid_ds32, u.shmid_ds, shm_segsz);
                CP(u32.shmid_ds32, u.shmid_ds, shm_lpid);
                CP(u32.shmid_ds32, u.shmid_ds, shm_cpid);
                CP(u32.shmid_ds32, u.shmid_ds, shm_nattch);
                CP(u32.shmid_ds32, u.shmid_ds, shm_atime);
                CP(u32.shmid_ds32, u.shmid_ds, shm_dtime);
                CP(u32.shmid_ds32, u.shmid_ds, shm_ctime);
        }

        error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&u, &sz);
        if (error)
                goto done;

        /* Cases in which we need to copyout */
        switch (uap->cmd) {
        case IPC_INFO:
                CP(u.shminfo, u32.shminfo32, shmmax);
                CP(u.shminfo, u32.shminfo32, shmmin);
                CP(u.shminfo, u32.shminfo32, shmmni);
                CP(u.shminfo, u32.shminfo32, shmseg);
                CP(u.shminfo, u32.shminfo32, shmall);
                error = copyout(&u32.shminfo32, uap->buf,
                    sizeof(u32.shminfo32));
                break;
        case SHM_INFO:
                CP(u.shm_info, u32.shm_info32, used_ids);
                CP(u.shm_info, u32.shm_info32, shm_rss);
                CP(u.shm_info, u32.shm_info32, shm_tot);
                CP(u.shm_info, u32.shm_info32, shm_swp);
                CP(u.shm_info, u32.shm_info32, swap_attempts);
                CP(u.shm_info, u32.shm_info32, swap_successes);
                error = copyout(&u32.shm_info32, uap->buf,
                    sizeof(u32.shm_info32));
                break;
        case SHM_STAT:
        case IPC_STAT:
                freebsd32_ipcperm_out(&u.shmid_ds.shm_perm,
                    &u32.shmid_ds32.shm_perm);
                if (u.shmid_ds.shm_segsz > INT32_MAX)
                        u32.shmid_ds32.shm_segsz = INT32_MAX;
                else
                        CP(u.shmid_ds, u32.shmid_ds32, shm_segsz);
                CP(u.shmid_ds, u32.shmid_ds32, shm_lpid);
                CP(u.shmid_ds, u32.shmid_ds32, shm_cpid);
                CP(u.shmid_ds, u32.shmid_ds32, shm_nattch);
                CP(u.shmid_ds, u32.shmid_ds32, shm_atime);
                CP(u.shmid_ds, u32.shmid_ds32, shm_dtime);
                CP(u.shmid_ds, u32.shmid_ds32, shm_ctime);
                error = copyout(&u32.shmid_ds32, uap->buf,
                    sizeof(u32.shmid_ds32));
                break;
        }

done:
        if (error) {
                /* Invalidate the return value */
                td->td_retval[0] = -1;
        }
        return (error);
}
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)

#ifndef CP
#define CP(src, dst, fld)       do { (dst).fld = (src).fld; } while (0)
#endif

#ifndef _SYS_SYSPROTO_H_
struct freebsd7_shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds_old *buf;
};
#endif
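/*
 * freebsd7_shmctl() services the pre-FreeBSD 8 shmctl(2) ABI, translating
 * between the old struct shmid_ds_old and the current struct shmid_ds.
 * The clamping below (shm_segsz to INT_MAX, shm_nattch to SHRT_MAX)
 * presumably reflects the narrower integer fields of the old structure.
 * CP() just copies a single field; CP(old, buf, shm_segsz), for example,
 * expands to buf.shm_segsz = old.shm_segsz.
 */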
int
freebsd7_shmctl(td, uap)
        struct thread *td;
        struct freebsd7_shmctl_args *uap;
{
        int error = 0;
        struct shmid_ds_old old;
        struct shmid_ds buf;
        size_t bufsz;

        /*
         * The only reason IPC_INFO, SHM_INFO and SHM_STAT exist is to
         * support Linux binaries.  If we see the call come through the
         * FreeBSD ABI, return an error back to the user since we do not
         * support this.
         */
        if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
            uap->cmd == SHM_STAT)
                return (EINVAL);

        /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
        if (uap->cmd == IPC_SET) {
                if ((error = copyin(uap->buf, &old, sizeof(old))))
                        goto done;
                ipcperm_old2new(&old.shm_perm, &buf.shm_perm);
                CP(old, buf, shm_segsz);
                CP(old, buf, shm_lpid);
                CP(old, buf, shm_cpid);
                CP(old, buf, shm_nattch);
                CP(old, buf, shm_atime);
                CP(old, buf, shm_dtime);
                CP(old, buf, shm_ctime);
        }

        error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
        if (error)
                goto done;

        /* Cases in which we need to copyout */
        switch (uap->cmd) {
        case IPC_STAT:
                ipcperm_new2old(&buf.shm_perm, &old.shm_perm);
                if (buf.shm_segsz > INT_MAX)
                        old.shm_segsz = INT_MAX;
                else
                        CP(buf, old, shm_segsz);
                CP(buf, old, shm_lpid);
                CP(buf, old, shm_cpid);
                if (buf.shm_nattch > SHRT_MAX)
                        old.shm_nattch = SHRT_MAX;
                else
                        CP(buf, old, shm_nattch);
                CP(buf, old, shm_atime);
                CP(buf, old, shm_dtime);
                CP(buf, old, shm_ctime);
                old.shm_internal = NULL;
                error = copyout(&old, uap->buf, sizeof(old));
                break;
        }

done:
        if (error) {
                /* Invalidate the return value */
                td->td_retval[0] = -1;
        }
        return (error);
}

#endif  /* COMPAT_FREEBSD4 || COMPAT_FREEBSD5 || COMPAT_FREEBSD6 ||
           COMPAT_FREEBSD7 */

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
        int error = 0;

        switch (cmd) {
        case MOD_LOAD:
                error = shminit();
                if (error != 0)
                        shmunload();
                break;
        case MOD_UNLOAD:
                error = shmunload();
                break;
        case MOD_SHUTDOWN:
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

static moduledata_t sysvshm_mod = {
        "sysvshm",
        &sysvshm_modload,
        NULL
};

DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
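/*
 * For reference, a minimal userland sketch of the System V shared memory
 * API that the handlers in this file back (ordinary application code, not
 * part of the kernel; error checking omitted).  shmctl(IPC_STAT) is the
 * path that eventually reaches kern_shmctl() above:
 *
 *      #include <sys/types.h>
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      void *p = shmat(id, NULL, 0);
 *      struct shmid_ds ds;
 *      shmctl(id, IPC_STAT, &ds);
 *      shmdt(p);
 *      shmctl(id, IPC_RMID, NULL);
 */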