/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*-
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003-2005 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project in part by McAfee
 * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
 * program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shmalloced;
vm_size_t shm_committed;
static struct shmid_kernel *shmsegs;

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static struct shmid_kernel *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
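
/*
 * Segment lifecycle: a slot in shmsegs[] cycles through the SHMSEG_*
 * states above.  FREE slots are available to shmget_allocate_segment(),
 * ALLOCATED segments are live, REMOVED segments have been marked for
 * deletion by IPC_RMID but linger until their attach count reaches
 * zero, and WANTED flags a nascent segment that another thread is
 * sleeping on in shmget_existing().
 *
 * Each process that has attached at least one segment carries an array
 * of shminfo.shmseg struct shmmap_state slots in p->p_vmspace->vm_shm;
 * a slot whose shmid is -1 is unused.
 */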

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Maximum shared memory segment size");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Minimum shared memory segment size");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
    "Number of shared memory identifiers");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
    "Number of segments per process");
SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Maximum number of pages available for shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "",
    "Current number of shared memory segments allocated");
Peron "Current number of shared memory segments allocated"); 1823d903220SDoug Rabson 1833d903220SDoug Rabson static int 1843d903220SDoug Rabson shm_find_segment_by_key(key) 1853d903220SDoug Rabson key_t key; 1863d903220SDoug Rabson { 1873d903220SDoug Rabson int i; 1883d903220SDoug Rabson 189255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 190921d05b9SRobert Watson if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && 191921d05b9SRobert Watson shmsegs[i].u.shm_perm.key == key) 192b618bb96SAlfred Perlstein return (i); 193b618bb96SAlfred Perlstein return (-1); 1943d903220SDoug Rabson } 1953d903220SDoug Rabson 196921d05b9SRobert Watson static struct shmid_kernel * 1972332251cSMax Khon shm_find_segment_by_shmid(int shmid) 1983d903220SDoug Rabson { 1993d903220SDoug Rabson int segnum; 200921d05b9SRobert Watson struct shmid_kernel *shmseg; 2013d903220SDoug Rabson 2023d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 203255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 204b618bb96SAlfred Perlstein return (NULL); 2053d903220SDoug Rabson shmseg = &shmsegs[segnum]; 206921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2072332251cSMax Khon (!shm_allow_removed && 208921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) || 209921d05b9SRobert Watson shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid)) 210b618bb96SAlfred Perlstein return (NULL); 211b618bb96SAlfred Perlstein return (shmseg); 2123d903220SDoug Rabson } 2133d903220SDoug Rabson 214921d05b9SRobert Watson static struct shmid_kernel * 2152332251cSMax Khon shm_find_segment_by_shmidx(int segnum) 216491dec93SMichael Reifenberger { 217921d05b9SRobert Watson struct shmid_kernel *shmseg; 218491dec93SMichael Reifenberger 219491dec93SMichael Reifenberger if (segnum < 0 || segnum >= shmalloced) 220b618bb96SAlfred Perlstein return (NULL); 221491dec93SMichael Reifenberger shmseg = &shmsegs[segnum]; 222921d05b9SRobert Watson if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 || 2232332251cSMax Khon (!shm_allow_removed && 224921d05b9SRobert Watson (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0)) 225b618bb96SAlfred Perlstein return (NULL); 226b618bb96SAlfred Perlstein return (shmseg); 227491dec93SMichael Reifenberger } 228491dec93SMichael Reifenberger 2293d903220SDoug Rabson static void 2303d903220SDoug Rabson shm_deallocate_segment(shmseg) 231921d05b9SRobert Watson struct shmid_kernel *shmseg; 2323d903220SDoug Rabson { 23345329b60SKonstantin Belousov vm_size_t size; 2343d903220SDoug Rabson 2350cddd8f0SMatthew Dillon GIANT_REQUIRED; 2360cddd8f0SMatthew Dillon 237921d05b9SRobert Watson vm_object_deallocate(shmseg->u.shm_internal); 238921d05b9SRobert Watson shmseg->u.shm_internal = NULL; 23965067cc8SKonstantin Belousov size = round_page(shmseg->shm_bsegsz); 2403d903220SDoug Rabson shm_committed -= btoc(size); 2413d903220SDoug Rabson shm_nused--; 242921d05b9SRobert Watson shmseg->u.shm_perm.mode = SHMSEG_FREE; 24314cedfc8SRobert Watson #ifdef MAC 24430d239bcSRobert Watson mac_sysvshm_cleanup(shmseg); 24514cedfc8SRobert Watson #endif 2463d903220SDoug Rabson } 2473d903220SDoug Rabson 2483d903220SDoug Rabson static int 2493db161e0SMatthew Dillon shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s) 2503d903220SDoug Rabson { 251921d05b9SRobert Watson struct shmid_kernel *shmseg; 2523d903220SDoug Rabson int segnum, result; 25345329b60SKonstantin Belousov vm_size_t size; 2543d903220SDoug Rabson 2550cddd8f0SMatthew Dillon GIANT_REQUIRED; 256028f979dSDima Dorfman 2573d903220SDoug Rabson segnum = 
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	vm_size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_bsegsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
#ifdef MAC
	struct shmid_kernel *shmsegptr;
#endif
	int i;
	int error = 0;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	error = mac_sysvshm_check_shmdt(td->td_ucred, shmsegptr);
	if (error != 0)
		goto done2;
#endif
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif
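
/*
 * For orientation, the userland sequence these handlers service looks
 * roughly like the following sketch (illustrative only; error checking
 * omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	/- lands in kern_shmat() below
 *	p[0] = 1;			/- touches the shared object
 *	shmdt(p);			/- handled by shmdt() above
 *	shmctl(id, IPC_RMID, NULL);	/- marks the segment SHMSEG_REMOVED
 */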
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
		    M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
#ifdef MAC
	error = mac_sysvshm_check_shmat(td->td_ucred, shmseg, shmflg);
	if (error != 0)
		goto done2;
#endif
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_bsegsz);
	prot = VM_PROT_READ;
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}
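
	/*
	 * Map the segment's backing VM object at attach_va.  The
	 * reference taken below is donated to the new map entry (and
	 * dropped again if vm_map_find() fails); the mapping is then
	 * marked VM_INHERIT_SHARE so that a child created by fork()
	 * shares the attachment with its parent.
	 */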
	vm_object_reference(shmseg->u.shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal,
	    0, &attach_va, size, (flags & MAP_FIXED) ? VMFS_NO_SPACE :
	    VMFS_ANY_SPACE, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->u.shm_internal);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_kernel *shmseg;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	/*
	 * It is possible that kern_shmctl is being called from the Linux ABI
	 * layer, in which case, we will need to implement IPC_INFO.  It should
	 * be noted that other shmctl calls will be funneled through here for
	 * Linux binaries as well.
	 *
	 * NB: The Linux ABI layer will convert this data to structure(s) more
	 * consistent with the Linux ABI.
	 */
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, cmd);
	if (error != 0)
		goto done2;
#endif
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}
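
/*
 * shmctl(2), native FreeBSD ABI: a thin wrapper around kern_shmctl()
 * that rejects the Linux-only commands up front, copies the caller's
 * struct shmid_ds in for IPC_SET, and copies the result back out for
 * IPC_STAT.
 */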
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif
int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/*
	 * The only reason IPC_INFO, SHM_INFO, SHM_STAT exists is to support
	 * Linux binaries.  If we see the call come through the FreeBSD ABI,
	 * return an error back to the user since we do not support this.
	 */
	if (uap->cmd == IPC_INFO || uap->cmd == SHM_INFO ||
	    uap->cmd == SHM_STAT)
		return (EINVAL);

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_kernel *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
#ifdef MAC
	error = mac_sysvshm_check_shmget(td->td_ucred, shmseg, uap->shmflg);
	if (error != 0)
		return (error);
#endif
	if (uap->size != 0 && uap->size > shmseg->shm_bsegsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return (0);
}
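
/*
 * Create a new segment: find (or make, via shmrealloc()) a free slot,
 * back the segment with an OBJT_PHYS or OBJT_SWAP pager object
 * according to kern.ipc.shm_use_phys, and initialize the IPC
 * permission and accounting fields.
 */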
static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->u.shm_perm.key = uap->key;
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	shm_object = vm_pager_allocate(shm_use_phys ? OBJT_PHYS : OBJT_SWAP,
	    0, size, VM_PROT_DEFAULT, 0, cred);
	if (shm_object == NULL)
		return (ENOMEM);
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->u.shm_internal = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_segsz = uap->size;
	shmseg->shm_bsegsz = uap->size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#ifdef MAC
	mac_sysvshm_create(cred, shmseg);
#endif
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}
static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	mtx_lock(&Giant);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
	mtx_unlock(&Giant);
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		mtx_lock(&Giant);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		mtx_unlock(&Giant);
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_kernel *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	/*
	 * Initialize the new slots in the new array; the old array only
	 * has shmalloced entries, so indexing it past that point would
	 * write out of bounds.
	 */
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		newsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_sysvshm_init(&newsegs[i]);
#endif
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}
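
/*
 * Module initialization: pull in the loader tunables, derive shmmax
 * from shmall so the byte and page limits stay consistent, allocate
 * the initial shmsegs[] array, and install the fork/exit hooks that
 * keep the per-process attach tables in sync.
 */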
static void
shminit()
{
	int i;

	TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * i;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_ULONG_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_ULONG_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_ULONG_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_sysvshm_init(&shmsegs[i]);
#endif
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

static int
shmunload()
{
#ifdef MAC
	int i;
#endif

	if (shm_nused > 0)
		return (EBUSY);

#ifdef MAC
	for (i = 0; i < shmalloced; i++)
		mac_sysvshm_destroy(&shmsegs[i]);
#endif
	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
struct oshmid_ds {
	struct ipc_perm_old shm_perm;	/* operation perms */
	int shm_segsz;			/* size of segment (bytes) */
	u_short shm_cpid;		/* pid, creator */
	u_short shm_lpid;		/* pid, last operation */
	short shm_nattch;		/* no. of current attaches */
	time_t shm_atime;		/* last attach time */
	time_t shm_dtime;		/* last detach time */
	time_t shm_ctime;		/* last change time */
	void *shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
#ifdef MAC
		error = mac_sysvshm_check_shmctl(td->td_ucred, shmseg, uap->cmd);
		if (error != 0)
			goto done2;
#endif
		ipcperm_new2old(&shmseg->u.shm_perm, &outbuf.shm_perm);
		outbuf.shm_segsz = shmseg->u.shm_segsz;
		outbuf.shm_cpid = shmseg->u.shm_cpid;
		outbuf.shm_lpid = shmseg->u.shm_lpid;
		outbuf.shm_nattch = shmseg->u.shm_nattch;
		outbuf.shm_atime = shmseg->u.shm_atime;
		outbuf.shm_dtime = shmseg->u.shm_dtime;
		outbuf.shm_ctime = shmseg->u.shm_ctime;
		outbuf.shm_handle = shmseg->object;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		error = freebsd7_shmctl(td, (struct shmctl_args *)uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

SYSCALL_MODULE_HELPER(shmsys);
#endif	/* i386 && (COMPAT_FREEBSD4 || COMPAT_43) */

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod, SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);