1c3aac50fSPeter Wemm /* $FreeBSD$ */ 23d903220SDoug Rabson /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ 33d903220SDoug Rabson 43d903220SDoug Rabson /* 53d903220SDoug Rabson * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved. 63d903220SDoug Rabson * 73d903220SDoug Rabson * Redistribution and use in source and binary forms, with or without 83d903220SDoug Rabson * modification, are permitted provided that the following conditions 93d903220SDoug Rabson * are met: 103d903220SDoug Rabson * 1. Redistributions of source code must retain the above copyright 113d903220SDoug Rabson * notice, this list of conditions and the following disclaimer. 123d903220SDoug Rabson * 2. Redistributions in binary form must reproduce the above copyright 133d903220SDoug Rabson * notice, this list of conditions and the following disclaimer in the 143d903220SDoug Rabson * documentation and/or other materials provided with the distribution. 153d903220SDoug Rabson * 3. All advertising materials mentioning features or use of this software 163d903220SDoug Rabson * must display the following acknowledgement: 173d903220SDoug Rabson * This product includes software developed by Adam Glass and Charles 183d903220SDoug Rabson * Hannum. 193d903220SDoug Rabson * 4. The names of the authors may not be used to endorse or promote products 203d903220SDoug Rabson * derived from this software without specific prior written permission. 213d903220SDoug Rabson * 223d903220SDoug Rabson * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 233d903220SDoug Rabson * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 243d903220SDoug Rabson * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
253d903220SDoug Rabson * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 263d903220SDoug Rabson * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 273d903220SDoug Rabson * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 283d903220SDoug Rabson * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 293d903220SDoug Rabson * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 303d903220SDoug Rabson * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 313d903220SDoug Rabson * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 323d903220SDoug Rabson */ 333d903220SDoug Rabson 345591b823SEivind Eklund #include "opt_compat.h" 35e9822d92SJoerg Wunsch #include "opt_rlimit.h" 36255108f3SPeter Wemm #include "opt_sysvipc.h" 37511b67b7SGarrett Wollman 383d903220SDoug Rabson #include <sys/param.h> 39725db531SBruce Evans #include <sys/systm.h> 403d903220SDoug Rabson #include <sys/kernel.h> 41fb919e4dSMark Murray #include <sys/lock.h> 42255108f3SPeter Wemm #include <sys/sysctl.h> 433d903220SDoug Rabson #include <sys/shm.h> 443d903220SDoug Rabson #include <sys/proc.h> 453d903220SDoug Rabson #include <sys/malloc.h> 463d903220SDoug Rabson #include <sys/mman.h> 479dceb26bSJohn Baldwin #include <sys/mutex.h> 483d903220SDoug Rabson #include <sys/stat.h> 4978525ce3SAlfred Perlstein #include <sys/syscall.h> 50725db531SBruce Evans #include <sys/sysent.h> 51fb919e4dSMark Murray #include <sys/sysproto.h> 52cb1f0db9SRobert Watson #include <sys/jail.h> 533d903220SDoug Rabson 543d903220SDoug Rabson #include <vm/vm.h> 55efeaf95aSDavid Greenman #include <vm/vm_param.h> 56efeaf95aSDavid Greenman #include <vm/pmap.h> 57a51f7119SJohn Dyson #include <vm/vm_object.h> 583d903220SDoug Rabson #include <vm/vm_map.h> 591c7c3c6aSMatthew Dillon #include <vm/vm_page.h> 60ae9b8c3aSJohn Dyson #include <vm/vm_pager.h> 613d903220SDoug Rabson 62a1c995b6SPoul-Henning Kamp static 
MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments"); 6355166637SPoul-Henning Kamp 64725db531SBruce Evans struct oshmctl_args; 65cb226aaaSPoul-Henning Kamp static int oshmctl __P((struct proc *p, struct oshmctl_args *uap)); 66255108f3SPeter Wemm 67028f979dSDima Dorfman static int shmget_allocate_segment __P((struct proc *p, 68028f979dSDima Dorfman struct shmget_args *uap, int mode)); 69028f979dSDima Dorfman static int shmget_existing __P((struct proc *p, struct shmget_args *uap, 70028f979dSDima Dorfman int mode, int segnum)); 71725db531SBruce Evans 72725db531SBruce Evans /* XXX casting to (sy_call_t *) is bogus, as usual. */ 73303b270bSEivind Eklund static sy_call_t *shmcalls[] = { 74725db531SBruce Evans (sy_call_t *)shmat, (sy_call_t *)oshmctl, 75725db531SBruce Evans (sy_call_t *)shmdt, (sy_call_t *)shmget, 76725db531SBruce Evans (sy_call_t *)shmctl 77725db531SBruce Evans }; 783d903220SDoug Rabson 793d903220SDoug Rabson #define SHMSEG_FREE 0x0200 803d903220SDoug Rabson #define SHMSEG_REMOVED 0x0400 813d903220SDoug Rabson #define SHMSEG_ALLOCATED 0x0800 823d903220SDoug Rabson #define SHMSEG_WANTED 0x1000 833d903220SDoug Rabson 84255108f3SPeter Wemm static int shm_last_free, shm_nused, shm_committed, shmalloced; 85255108f3SPeter Wemm static struct shmid_ds *shmsegs; 863d903220SDoug Rabson 873d903220SDoug Rabson struct shm_handle { 88a51f7119SJohn Dyson /* vm_offset_t kva; */ 89a51f7119SJohn Dyson vm_object_t shm_object; 903d903220SDoug Rabson }; 913d903220SDoug Rabson 923d903220SDoug Rabson struct shmmap_state { 933d903220SDoug Rabson vm_offset_t va; 943d903220SDoug Rabson int shmid; 953d903220SDoug Rabson }; 963d903220SDoug Rabson 973d903220SDoug Rabson static void shm_deallocate_segment __P((struct shmid_ds *)); 983d903220SDoug Rabson static int shm_find_segment_by_key __P((key_t)); 993d903220SDoug Rabson static struct shmid_ds *shm_find_segment_by_shmid __P((int)); 1003d903220SDoug Rabson static int shm_delete_mapping __P((struct proc *, struct 
shmmap_state *)); 101255108f3SPeter Wemm static void shmrealloc __P((void)); 10278525ce3SAlfred Perlstein static void shminit __P((void)); 10378525ce3SAlfred Perlstein static int sysvshm_modload __P((struct module *, int, void *)); 10478525ce3SAlfred Perlstein static int shmunload __P((void)); 10578525ce3SAlfred Perlstein static void shmexit_myhook __P((struct proc *p)); 10678525ce3SAlfred Perlstein static void shmfork_myhook __P((struct proc *p1, struct proc *p2)); 107a723c4e1SDima Dorfman static int sysctl_shmsegs __P((SYSCTL_HANDLER_ARGS)); 108255108f3SPeter Wemm 109255108f3SPeter Wemm /* 110028f979dSDima Dorfman * Tuneable values. 111255108f3SPeter Wemm */ 112255108f3SPeter Wemm #ifndef SHMMAXPGS 113028f979dSDima Dorfman #define SHMMAXPGS 8192 /* Note: sysv shared memory is swap backed. */ 114255108f3SPeter Wemm #endif 115255108f3SPeter Wemm #ifndef SHMMAX 116255108f3SPeter Wemm #define SHMMAX (SHMMAXPGS*PAGE_SIZE) 117255108f3SPeter Wemm #endif 118255108f3SPeter Wemm #ifndef SHMMIN 119255108f3SPeter Wemm #define SHMMIN 1 120255108f3SPeter Wemm #endif 121255108f3SPeter Wemm #ifndef SHMMNI 1221766b2e5SMatthew Dillon #define SHMMNI 192 123255108f3SPeter Wemm #endif 124255108f3SPeter Wemm #ifndef SHMSEG 1251766b2e5SMatthew Dillon #define SHMSEG 128 126255108f3SPeter Wemm #endif 127255108f3SPeter Wemm #ifndef SHMALL 128255108f3SPeter Wemm #define SHMALL (SHMMAXPGS) 129255108f3SPeter Wemm #endif 130255108f3SPeter Wemm 131255108f3SPeter Wemm struct shminfo shminfo = { 132255108f3SPeter Wemm SHMMAX, 133255108f3SPeter Wemm SHMMIN, 134255108f3SPeter Wemm SHMMNI, 135255108f3SPeter Wemm SHMSEG, 136255108f3SPeter Wemm SHMALL 137255108f3SPeter Wemm }; 138255108f3SPeter Wemm 1398b03c8edSMatthew Dillon static int shm_use_phys; 1408b03c8edSMatthew Dillon 141255108f3SPeter Wemm SYSCTL_DECL(_kern_ipc); 142255108f3SPeter Wemm SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, ""); 143255108f3SPeter Wemm SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, 
&shminfo.shmmin, 0, ""); 144255108f3SPeter Wemm SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, ""); 145a02f3136SBrian Feldman SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RD, &shminfo.shmseg, 0, ""); 146255108f3SPeter Wemm SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, ""); 147028f979dSDima Dorfman SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, 148028f979dSDima Dorfman &shm_use_phys, 0, ""); 149a723c4e1SDima Dorfman SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD, 150a723c4e1SDima Dorfman NULL, 0, sysctl_shmsegs, "", ""); 1513d903220SDoug Rabson 1523d903220SDoug Rabson static int 1533d903220SDoug Rabson shm_find_segment_by_key(key) 1543d903220SDoug Rabson key_t key; 1553d903220SDoug Rabson { 1563d903220SDoug Rabson int i; 1573d903220SDoug Rabson 158255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 1593d903220SDoug Rabson if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) && 1603d903220SDoug Rabson shmsegs[i].shm_perm.key == key) 1613d903220SDoug Rabson return i; 1623d903220SDoug Rabson return -1; 1633d903220SDoug Rabson } 1643d903220SDoug Rabson 1653d903220SDoug Rabson static struct shmid_ds * 1663d903220SDoug Rabson shm_find_segment_by_shmid(shmid) 1673d903220SDoug Rabson int shmid; 1683d903220SDoug Rabson { 1693d903220SDoug Rabson int segnum; 1703d903220SDoug Rabson struct shmid_ds *shmseg; 1713d903220SDoug Rabson 1723d903220SDoug Rabson segnum = IPCID_TO_IX(shmid); 173255108f3SPeter Wemm if (segnum < 0 || segnum >= shmalloced) 1743d903220SDoug Rabson return NULL; 1753d903220SDoug Rabson shmseg = &shmsegs[segnum]; 1763d903220SDoug Rabson if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED)) 1773d903220SDoug Rabson != SHMSEG_ALLOCATED || 1783d903220SDoug Rabson shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) 1793d903220SDoug Rabson return NULL; 1803d903220SDoug Rabson return shmseg; 1813d903220SDoug Rabson } 1823d903220SDoug Rabson 1833d903220SDoug Rabson static void 1843d903220SDoug 
Rabson shm_deallocate_segment(shmseg) 1853d903220SDoug Rabson struct shmid_ds *shmseg; 1863d903220SDoug Rabson { 1873d903220SDoug Rabson struct shm_handle *shm_handle; 1883d903220SDoug Rabson size_t size; 1893d903220SDoug Rabson 1900cddd8f0SMatthew Dillon GIANT_REQUIRED; 1910cddd8f0SMatthew Dillon 1923d903220SDoug Rabson shm_handle = shmseg->shm_internal; 193a51f7119SJohn Dyson vm_object_deallocate(shm_handle->shm_object); 1943d903220SDoug Rabson free((caddr_t)shm_handle, M_SHM); 1953d903220SDoug Rabson shmseg->shm_internal = NULL; 196a51f7119SJohn Dyson size = round_page(shmseg->shm_segsz); 1973d903220SDoug Rabson shm_committed -= btoc(size); 1983d903220SDoug Rabson shm_nused--; 1993d903220SDoug Rabson shmseg->shm_perm.mode = SHMSEG_FREE; 2003d903220SDoug Rabson } 2013d903220SDoug Rabson 2023d903220SDoug Rabson static int 2033d903220SDoug Rabson shm_delete_mapping(p, shmmap_s) 2043d903220SDoug Rabson struct proc *p; 2053d903220SDoug Rabson struct shmmap_state *shmmap_s; 2063d903220SDoug Rabson { 2073d903220SDoug Rabson struct shmid_ds *shmseg; 2083d903220SDoug Rabson int segnum, result; 2093d903220SDoug Rabson size_t size; 2103d903220SDoug Rabson 2110cddd8f0SMatthew Dillon GIANT_REQUIRED; 212028f979dSDima Dorfman 2133d903220SDoug Rabson segnum = IPCID_TO_IX(shmmap_s->shmid); 2143d903220SDoug Rabson shmseg = &shmsegs[segnum]; 215aa8de40aSPoul-Henning Kamp size = round_page(shmseg->shm_segsz); 216028f979dSDima Dorfman result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, 217028f979dSDima Dorfman shmmap_s->va + size); 2183d903220SDoug Rabson if (result != KERN_SUCCESS) 2193d903220SDoug Rabson return EINVAL; 2203d903220SDoug Rabson shmmap_s->shmid = -1; 221227ee8a1SPoul-Henning Kamp shmseg->shm_dtime = time_second; 2223d903220SDoug Rabson if ((--shmseg->shm_nattch <= 0) && 2233d903220SDoug Rabson (shmseg->shm_perm.mode & SHMSEG_REMOVED)) { 2243d903220SDoug Rabson shm_deallocate_segment(shmseg); 2253d903220SDoug Rabson shm_last_free = segnum; 2263d903220SDoug 
Rabson } 2273d903220SDoug Rabson return 0; 2283d903220SDoug Rabson } 2293d903220SDoug Rabson 230b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 2313d903220SDoug Rabson struct shmdt_args { 2323d903220SDoug Rabson void *shmaddr; 2333d903220SDoug Rabson }; 234b5d5c0c9SPeter Wemm #endif 235b5d5c0c9SPeter Wemm 236b6a4b4f9SMatthew Dillon /* 237b6a4b4f9SMatthew Dillon * MPSAFE 238b6a4b4f9SMatthew Dillon */ 2393d903220SDoug Rabson int 240cb226aaaSPoul-Henning Kamp shmdt(p, uap) 2413d903220SDoug Rabson struct proc *p; 2423d903220SDoug Rabson struct shmdt_args *uap; 2433d903220SDoug Rabson { 2443d903220SDoug Rabson struct shmmap_state *shmmap_s; 2453d903220SDoug Rabson int i; 246b6a4b4f9SMatthew Dillon int error = 0; 2473d903220SDoug Rabson 248b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 249b6a4b4f9SMatthew Dillon 250b6a4b4f9SMatthew Dillon if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 251b6a4b4f9SMatthew Dillon error = ENOSYS; 252b6a4b4f9SMatthew Dillon goto done2; 253b6a4b4f9SMatthew Dillon } 254cb1f0db9SRobert Watson 2553d903220SDoug Rabson shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 256b6a4b4f9SMatthew Dillon if (shmmap_s == NULL) { 257b6a4b4f9SMatthew Dillon error = EINVAL; 258b6a4b4f9SMatthew Dillon goto done2; 259b6a4b4f9SMatthew Dillon } 260b6a4b4f9SMatthew Dillon for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 2613d903220SDoug Rabson if (shmmap_s->shmid != -1 && 262b6a4b4f9SMatthew Dillon shmmap_s->va == (vm_offset_t)uap->shmaddr) { 2633d903220SDoug Rabson break; 264b6a4b4f9SMatthew Dillon } 265b6a4b4f9SMatthew Dillon } 266b6a4b4f9SMatthew Dillon if (i == shminfo.shmseg) { 267b6a4b4f9SMatthew Dillon error = EINVAL; 268b6a4b4f9SMatthew Dillon goto done2; 269b6a4b4f9SMatthew Dillon } 270a8dbafbeSDima Dorfman error = shm_delete_mapping(p, shmmap_s); 271b6a4b4f9SMatthew Dillon done2: 272b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 273b6a4b4f9SMatthew Dillon return (error); 2743d903220SDoug Rabson } 2753d903220SDoug Rabson 276b5d5c0c9SPeter Wemm #ifndef 
_SYS_SYSPROTO_H_ 2773d903220SDoug Rabson struct shmat_args { 2783d903220SDoug Rabson int shmid; 2793d903220SDoug Rabson void *shmaddr; 2803d903220SDoug Rabson int shmflg; 2813d903220SDoug Rabson }; 282b5d5c0c9SPeter Wemm #endif 283b5d5c0c9SPeter Wemm 284b6a4b4f9SMatthew Dillon /* 285b6a4b4f9SMatthew Dillon * MPSAFE 286b6a4b4f9SMatthew Dillon */ 2873d903220SDoug Rabson int 288cb226aaaSPoul-Henning Kamp shmat(p, uap) 2893d903220SDoug Rabson struct proc *p; 2903d903220SDoug Rabson struct shmat_args *uap; 2913d903220SDoug Rabson { 292b6a4b4f9SMatthew Dillon int i, flags; 2933d903220SDoug Rabson struct shmid_ds *shmseg; 2943d903220SDoug Rabson struct shmmap_state *shmmap_s = NULL; 295a51f7119SJohn Dyson struct shm_handle *shm_handle; 2963d903220SDoug Rabson vm_offset_t attach_va; 2973d903220SDoug Rabson vm_prot_t prot; 2983d903220SDoug Rabson vm_size_t size; 299a51f7119SJohn Dyson int rv; 300b6a4b4f9SMatthew Dillon int error = 0; 3013d903220SDoug Rabson 302b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 3030cddd8f0SMatthew Dillon 304b6a4b4f9SMatthew Dillon if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 305b6a4b4f9SMatthew Dillon error = ENOSYS; 306b6a4b4f9SMatthew Dillon goto done2; 307b6a4b4f9SMatthew Dillon } 308cb1f0db9SRobert Watson 3093d903220SDoug Rabson shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 3103d903220SDoug Rabson if (shmmap_s == NULL) { 3113d903220SDoug Rabson size = shminfo.shmseg * sizeof(struct shmmap_state); 3123d903220SDoug Rabson shmmap_s = malloc(size, M_SHM, M_WAITOK); 3133d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) 3143d903220SDoug Rabson shmmap_s[i].shmid = -1; 3153d903220SDoug Rabson p->p_vmspace->vm_shm = (caddr_t)shmmap_s; 3163d903220SDoug Rabson } 3173d903220SDoug Rabson shmseg = shm_find_segment_by_shmid(uap->shmid); 318b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 319b6a4b4f9SMatthew Dillon error = EINVAL; 320b6a4b4f9SMatthew Dillon goto done2; 321b6a4b4f9SMatthew Dillon } 3221c308b81SPoul-Henning Kamp error = 
ipcperm(p, &shmseg->shm_perm, 323797f2d22SPoul-Henning Kamp (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); 324797f2d22SPoul-Henning Kamp if (error) 325b6a4b4f9SMatthew Dillon goto done2; 3263d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++) { 3273d903220SDoug Rabson if (shmmap_s->shmid == -1) 3283d903220SDoug Rabson break; 3293d903220SDoug Rabson shmmap_s++; 3303d903220SDoug Rabson } 331b6a4b4f9SMatthew Dillon if (i >= shminfo.shmseg) { 332b6a4b4f9SMatthew Dillon error = EMFILE; 333b6a4b4f9SMatthew Dillon goto done2; 334b6a4b4f9SMatthew Dillon } 335aa8de40aSPoul-Henning Kamp size = round_page(shmseg->shm_segsz); 336af25d10cSAlan Cox #ifdef VM_PROT_READ_IS_EXEC 337af25d10cSAlan Cox prot = VM_PROT_READ | VM_PROT_EXECUTE; 338af25d10cSAlan Cox #else 3393d903220SDoug Rabson prot = VM_PROT_READ; 340af25d10cSAlan Cox #endif 3413d903220SDoug Rabson if ((uap->shmflg & SHM_RDONLY) == 0) 3423d903220SDoug Rabson prot |= VM_PROT_WRITE; 3433d903220SDoug Rabson flags = MAP_ANON | MAP_SHARED; 3443d903220SDoug Rabson if (uap->shmaddr) { 3453d903220SDoug Rabson flags |= MAP_FIXED; 346b6a4b4f9SMatthew Dillon if (uap->shmflg & SHM_RND) { 3473d903220SDoug Rabson attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1); 348b6a4b4f9SMatthew Dillon } else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) { 3493d903220SDoug Rabson attach_va = (vm_offset_t)uap->shmaddr; 350b6a4b4f9SMatthew Dillon } else { 351b6a4b4f9SMatthew Dillon error = EINVAL; 352b6a4b4f9SMatthew Dillon goto done2; 353b6a4b4f9SMatthew Dillon } 3543d903220SDoug Rabson } else { 355028f979dSDima Dorfman /* 356028f979dSDima Dorfman * This is just a hint to vm_map_find() about where to 357028f979dSDima Dorfman * put it. 
358028f979dSDima Dorfman */ 359028f979dSDima Dorfman attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr 360028f979dSDima Dorfman + MAXTSIZ + MAXDSIZ); 3613d903220SDoug Rabson } 362a51f7119SJohn Dyson 363a51f7119SJohn Dyson shm_handle = shmseg->shm_internal; 364a51f7119SJohn Dyson vm_object_reference(shm_handle->shm_object); 365a51f7119SJohn Dyson rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object, 366a51f7119SJohn Dyson 0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0); 367a51f7119SJohn Dyson if (rv != KERN_SUCCESS) { 368b6a4b4f9SMatthew Dillon error = ENOMEM; 369b6a4b4f9SMatthew Dillon goto done2; 370a51f7119SJohn Dyson } 3710463028cSJohn Dyson vm_map_inherit(&p->p_vmspace->vm_map, 3720463028cSJohn Dyson attach_va, attach_va + size, VM_INHERIT_SHARE); 3730463028cSJohn Dyson 3743d903220SDoug Rabson shmmap_s->va = attach_va; 3753d903220SDoug Rabson shmmap_s->shmid = uap->shmid; 3763d903220SDoug Rabson shmseg->shm_lpid = p->p_pid; 377227ee8a1SPoul-Henning Kamp shmseg->shm_atime = time_second; 3783d903220SDoug Rabson shmseg->shm_nattch++; 379cb226aaaSPoul-Henning Kamp p->p_retval[0] = attach_va; 380b6a4b4f9SMatthew Dillon done2: 381b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 382b6a4b4f9SMatthew Dillon return (error); 3833d903220SDoug Rabson } 3843d903220SDoug Rabson 3858bec0921SDoug Rabson struct oshmid_ds { 3868bec0921SDoug Rabson struct ipc_perm shm_perm; /* operation perms */ 3878bec0921SDoug Rabson int shm_segsz; /* size of segment (bytes) */ 3888bec0921SDoug Rabson ushort shm_cpid; /* pid, creator */ 3898bec0921SDoug Rabson ushort shm_lpid; /* pid, last operation */ 3908bec0921SDoug Rabson short shm_nattch; /* no. 
of current attaches */ 3918bec0921SDoug Rabson time_t shm_atime; /* last attach time */ 3928bec0921SDoug Rabson time_t shm_dtime; /* last detach time */ 3938bec0921SDoug Rabson time_t shm_ctime; /* last change time */ 3948bec0921SDoug Rabson void *shm_handle; /* internal handle for shm segment */ 3958bec0921SDoug Rabson }; 3968bec0921SDoug Rabson 3978bec0921SDoug Rabson struct oshmctl_args { 3988bec0921SDoug Rabson int shmid; 3998bec0921SDoug Rabson int cmd; 4008bec0921SDoug Rabson struct oshmid_ds *ubuf; 4018bec0921SDoug Rabson }; 4028bec0921SDoug Rabson 403b6a4b4f9SMatthew Dillon /* 404b6a4b4f9SMatthew Dillon * MPSAFE 405b6a4b4f9SMatthew Dillon */ 40687b6de2bSPoul-Henning Kamp static int 407cb226aaaSPoul-Henning Kamp oshmctl(p, uap) 4088bec0921SDoug Rabson struct proc *p; 4098bec0921SDoug Rabson struct oshmctl_args *uap; 4108bec0921SDoug Rabson { 4118bec0921SDoug Rabson #ifdef COMPAT_43 412b6a4b4f9SMatthew Dillon int error = 0; 4138bec0921SDoug Rabson struct shmid_ds *shmseg; 4148bec0921SDoug Rabson struct oshmid_ds outbuf; 4158bec0921SDoug Rabson 416b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 417b6a4b4f9SMatthew Dillon 418b6a4b4f9SMatthew Dillon if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 419b6a4b4f9SMatthew Dillon error = ENOSYS; 420b6a4b4f9SMatthew Dillon goto done2; 421b6a4b4f9SMatthew Dillon } 422cb1f0db9SRobert Watson 4238bec0921SDoug Rabson shmseg = shm_find_segment_by_shmid(uap->shmid); 424b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 425b6a4b4f9SMatthew Dillon error = EINVAL; 426b6a4b4f9SMatthew Dillon goto done2; 427b6a4b4f9SMatthew Dillon } 4288bec0921SDoug Rabson switch (uap->cmd) { 4298bec0921SDoug Rabson case IPC_STAT: 4301c308b81SPoul-Henning Kamp error = ipcperm(p, &shmseg->shm_perm, IPC_R); 431797f2d22SPoul-Henning Kamp if (error) 432b6a4b4f9SMatthew Dillon goto done2; 4338bec0921SDoug Rabson outbuf.shm_perm = shmseg->shm_perm; 4348bec0921SDoug Rabson outbuf.shm_segsz = shmseg->shm_segsz; 4358bec0921SDoug Rabson outbuf.shm_cpid = 
shmseg->shm_cpid; 4368bec0921SDoug Rabson outbuf.shm_lpid = shmseg->shm_lpid; 4378bec0921SDoug Rabson outbuf.shm_nattch = shmseg->shm_nattch; 4388bec0921SDoug Rabson outbuf.shm_atime = shmseg->shm_atime; 4398bec0921SDoug Rabson outbuf.shm_dtime = shmseg->shm_dtime; 4408bec0921SDoug Rabson outbuf.shm_ctime = shmseg->shm_ctime; 4418bec0921SDoug Rabson outbuf.shm_handle = shmseg->shm_internal; 442797f2d22SPoul-Henning Kamp error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf)); 443797f2d22SPoul-Henning Kamp if (error) 444b6a4b4f9SMatthew Dillon goto done2; 4458bec0921SDoug Rabson break; 4468bec0921SDoug Rabson default: 447725db531SBruce Evans /* XXX casting to (sy_call_t *) is bogus, as usual. */ 448b6a4b4f9SMatthew Dillon error = ((sy_call_t *)shmctl)(p, uap); 449b6a4b4f9SMatthew Dillon break; 4508bec0921SDoug Rabson } 451b6a4b4f9SMatthew Dillon done2: 452b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 453b6a4b4f9SMatthew Dillon return (error); 4548bec0921SDoug Rabson #else 4558bec0921SDoug Rabson return EINVAL; 4568bec0921SDoug Rabson #endif 4578bec0921SDoug Rabson } 4588bec0921SDoug Rabson 459b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 4603d903220SDoug Rabson struct shmctl_args { 4613d903220SDoug Rabson int shmid; 4623d903220SDoug Rabson int cmd; 463b5d5c0c9SPeter Wemm struct shmid_ds *buf; 4643d903220SDoug Rabson }; 465b5d5c0c9SPeter Wemm #endif 466b5d5c0c9SPeter Wemm 467b6a4b4f9SMatthew Dillon /* 468b6a4b4f9SMatthew Dillon * MPSAFE 469b6a4b4f9SMatthew Dillon */ 4703d903220SDoug Rabson int 471cb226aaaSPoul-Henning Kamp shmctl(p, uap) 4723d903220SDoug Rabson struct proc *p; 4733d903220SDoug Rabson struct shmctl_args *uap; 4743d903220SDoug Rabson { 475b6a4b4f9SMatthew Dillon int error = 0; 4763d903220SDoug Rabson struct shmid_ds inbuf; 4773d903220SDoug Rabson struct shmid_ds *shmseg; 4783d903220SDoug Rabson 479b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 4800cddd8f0SMatthew Dillon 481b6a4b4f9SMatthew Dillon if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 
482b6a4b4f9SMatthew Dillon error = ENOSYS; 483b6a4b4f9SMatthew Dillon goto done2; 484b6a4b4f9SMatthew Dillon } 485cb1f0db9SRobert Watson 4863d903220SDoug Rabson shmseg = shm_find_segment_by_shmid(uap->shmid); 487b6a4b4f9SMatthew Dillon if (shmseg == NULL) { 488b6a4b4f9SMatthew Dillon error = EINVAL; 489b6a4b4f9SMatthew Dillon goto done2; 490b6a4b4f9SMatthew Dillon } 4913d903220SDoug Rabson switch (uap->cmd) { 4923d903220SDoug Rabson case IPC_STAT: 4931c308b81SPoul-Henning Kamp error = ipcperm(p, &shmseg->shm_perm, IPC_R); 494797f2d22SPoul-Henning Kamp if (error) 495b6a4b4f9SMatthew Dillon goto done2; 496b5d5c0c9SPeter Wemm error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf)); 497797f2d22SPoul-Henning Kamp if (error) 498b6a4b4f9SMatthew Dillon goto done2; 4993d903220SDoug Rabson break; 5003d903220SDoug Rabson case IPC_SET: 5011c308b81SPoul-Henning Kamp error = ipcperm(p, &shmseg->shm_perm, IPC_M); 502797f2d22SPoul-Henning Kamp if (error) 503b6a4b4f9SMatthew Dillon goto done2; 504b5d5c0c9SPeter Wemm error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf)); 505797f2d22SPoul-Henning Kamp if (error) 506b6a4b4f9SMatthew Dillon goto done2; 5073d903220SDoug Rabson shmseg->shm_perm.uid = inbuf.shm_perm.uid; 5083d903220SDoug Rabson shmseg->shm_perm.gid = inbuf.shm_perm.gid; 5093d903220SDoug Rabson shmseg->shm_perm.mode = 5103d903220SDoug Rabson (shmseg->shm_perm.mode & ~ACCESSPERMS) | 5113d903220SDoug Rabson (inbuf.shm_perm.mode & ACCESSPERMS); 512227ee8a1SPoul-Henning Kamp shmseg->shm_ctime = time_second; 5133d903220SDoug Rabson break; 5143d903220SDoug Rabson case IPC_RMID: 5151c308b81SPoul-Henning Kamp error = ipcperm(p, &shmseg->shm_perm, IPC_M); 516797f2d22SPoul-Henning Kamp if (error) 517b6a4b4f9SMatthew Dillon goto done2; 5183d903220SDoug Rabson shmseg->shm_perm.key = IPC_PRIVATE; 5193d903220SDoug Rabson shmseg->shm_perm.mode |= SHMSEG_REMOVED; 5203d903220SDoug Rabson if (shmseg->shm_nattch <= 0) { 5213d903220SDoug Rabson shm_deallocate_segment(shmseg); 
5223d903220SDoug Rabson shm_last_free = IPCID_TO_IX(uap->shmid); 5233d903220SDoug Rabson } 5243d903220SDoug Rabson break; 5253d903220SDoug Rabson #if 0 5263d903220SDoug Rabson case SHM_LOCK: 5273d903220SDoug Rabson case SHM_UNLOCK: 5283d903220SDoug Rabson #endif 5293d903220SDoug Rabson default: 530b6a4b4f9SMatthew Dillon error = EINVAL; 531b6a4b4f9SMatthew Dillon break; 5323d903220SDoug Rabson } 533b6a4b4f9SMatthew Dillon done2: 534b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 535b6a4b4f9SMatthew Dillon return (error); 5363d903220SDoug Rabson } 5373d903220SDoug Rabson 538b5d5c0c9SPeter Wemm #ifndef _SYS_SYSPROTO_H_ 5393d903220SDoug Rabson struct shmget_args { 5403d903220SDoug Rabson key_t key; 5413d903220SDoug Rabson size_t size; 5423d903220SDoug Rabson int shmflg; 5433d903220SDoug Rabson }; 544b5d5c0c9SPeter Wemm #endif 545b5d5c0c9SPeter Wemm 5463d903220SDoug Rabson static int 547cb226aaaSPoul-Henning Kamp shmget_existing(p, uap, mode, segnum) 5483d903220SDoug Rabson struct proc *p; 5493d903220SDoug Rabson struct shmget_args *uap; 5503d903220SDoug Rabson int mode; 5513d903220SDoug Rabson int segnum; 5523d903220SDoug Rabson { 5533d903220SDoug Rabson struct shmid_ds *shmseg; 5543d903220SDoug Rabson int error; 5553d903220SDoug Rabson 5563d903220SDoug Rabson shmseg = &shmsegs[segnum]; 5573d903220SDoug Rabson if (shmseg->shm_perm.mode & SHMSEG_REMOVED) { 5583d903220SDoug Rabson /* 5593d903220SDoug Rabson * This segment is in the process of being allocated. Wait 5603d903220SDoug Rabson * until it's done, and look the key up again (in case the 5613d903220SDoug Rabson * allocation failed or it was freed). 
5623d903220SDoug Rabson */ 5633d903220SDoug Rabson shmseg->shm_perm.mode |= SHMSEG_WANTED; 564797f2d22SPoul-Henning Kamp error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0); 565797f2d22SPoul-Henning Kamp if (error) 5663d903220SDoug Rabson return error; 5673d903220SDoug Rabson return EAGAIN; 5683d903220SDoug Rabson } 569dc92aa57SAlan Cox if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 570dc92aa57SAlan Cox return EEXIST; 5711c308b81SPoul-Henning Kamp error = ipcperm(p, &shmseg->shm_perm, mode); 572797f2d22SPoul-Henning Kamp if (error) 5733d903220SDoug Rabson return error; 5743d903220SDoug Rabson if (uap->size && uap->size > shmseg->shm_segsz) 5753d903220SDoug Rabson return EINVAL; 576cb226aaaSPoul-Henning Kamp p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 5773d903220SDoug Rabson return 0; 5783d903220SDoug Rabson } 5793d903220SDoug Rabson 5803d903220SDoug Rabson static int 581cb226aaaSPoul-Henning Kamp shmget_allocate_segment(p, uap, mode) 5823d903220SDoug Rabson struct proc *p; 5833d903220SDoug Rabson struct shmget_args *uap; 5843d903220SDoug Rabson int mode; 5853d903220SDoug Rabson { 586a51f7119SJohn Dyson int i, segnum, shmid, size; 5873d903220SDoug Rabson struct ucred *cred = p->p_ucred; 5883d903220SDoug Rabson struct shmid_ds *shmseg; 5893d903220SDoug Rabson struct shm_handle *shm_handle; 5903d903220SDoug Rabson 5910cddd8f0SMatthew Dillon GIANT_REQUIRED; 5920cddd8f0SMatthew Dillon 5933d903220SDoug Rabson if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 5943d903220SDoug Rabson return EINVAL; 595028f979dSDima Dorfman if (shm_nused >= shminfo.shmmni) /* Any shmids left? */ 5963d903220SDoug Rabson return ENOSPC; 5979e609ddeSJoerg Wunsch size = round_page(uap->size); 5983d903220SDoug Rabson if (shm_committed + btoc(size) > shminfo.shmall) 5993d903220SDoug Rabson return ENOMEM; 6003d903220SDoug Rabson if (shm_last_free < 0) { 601028f979dSDima Dorfman shmrealloc(); /* Maybe expand the shmsegs[] array. 
*/ 602255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 6033d903220SDoug Rabson if (shmsegs[i].shm_perm.mode & SHMSEG_FREE) 6043d903220SDoug Rabson break; 605255108f3SPeter Wemm if (i == shmalloced) 606255108f3SPeter Wemm return ENOSPC; 6073d903220SDoug Rabson segnum = i; 6083d903220SDoug Rabson } else { 6093d903220SDoug Rabson segnum = shm_last_free; 6103d903220SDoug Rabson shm_last_free = -1; 6113d903220SDoug Rabson } 6123d903220SDoug Rabson shmseg = &shmsegs[segnum]; 6133d903220SDoug Rabson /* 6143d903220SDoug Rabson * In case we sleep in malloc(), mark the segment present but deleted 6153d903220SDoug Rabson * so that noone else tries to create the same key. 6163d903220SDoug Rabson */ 6173d903220SDoug Rabson shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 6183d903220SDoug Rabson shmseg->shm_perm.key = uap->key; 6193d903220SDoug Rabson shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; 6203d903220SDoug Rabson shm_handle = (struct shm_handle *) 6213d903220SDoug Rabson malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK); 6223d903220SDoug Rabson shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 623a51f7119SJohn Dyson 624ae9b8c3aSJohn Dyson /* 625ae9b8c3aSJohn Dyson * We make sure that we have allocated a pager before we need 626ae9b8c3aSJohn Dyson * to. 
627ae9b8c3aSJohn Dyson */ 6288b03c8edSMatthew Dillon if (shm_use_phys) { 62924488c74SPeter Wemm shm_handle->shm_object = 63024488c74SPeter Wemm vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0); 6318b03c8edSMatthew Dillon } else { 632a51f7119SJohn Dyson shm_handle->shm_object = 6336cde7a16SDavid Greenman vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0); 6348b03c8edSMatthew Dillon } 635069e9bc1SDoug Rabson vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING); 636069e9bc1SDoug Rabson vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT); 637cbd8ec09SJohn Dyson 6383d903220SDoug Rabson shmseg->shm_internal = shm_handle; 6393d903220SDoug Rabson shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; 6403d903220SDoug Rabson shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; 6413d903220SDoug Rabson shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | 6423d903220SDoug Rabson (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 6433d903220SDoug Rabson shmseg->shm_segsz = uap->size; 6443d903220SDoug Rabson shmseg->shm_cpid = p->p_pid; 6453d903220SDoug Rabson shmseg->shm_lpid = shmseg->shm_nattch = 0; 6463d903220SDoug Rabson shmseg->shm_atime = shmseg->shm_dtime = 0; 647227ee8a1SPoul-Henning Kamp shmseg->shm_ctime = time_second; 6483d903220SDoug Rabson shm_committed += btoc(size); 6493d903220SDoug Rabson shm_nused++; 6503d903220SDoug Rabson if (shmseg->shm_perm.mode & SHMSEG_WANTED) { 6513d903220SDoug Rabson /* 6523d903220SDoug Rabson * Somebody else wanted this key while we were asleep. Wake 6533d903220SDoug Rabson * them up now. 
6543d903220SDoug Rabson */ 6553d903220SDoug Rabson shmseg->shm_perm.mode &= ~SHMSEG_WANTED; 6563d903220SDoug Rabson wakeup((caddr_t)shmseg); 6573d903220SDoug Rabson } 658cb226aaaSPoul-Henning Kamp p->p_retval[0] = shmid; 6593d903220SDoug Rabson return 0; 6603d903220SDoug Rabson } 6613d903220SDoug Rabson 662b6a4b4f9SMatthew Dillon /* 663b6a4b4f9SMatthew Dillon * MPSAFE 664b6a4b4f9SMatthew Dillon */ 6653d903220SDoug Rabson int 666cb226aaaSPoul-Henning Kamp shmget(p, uap) 6673d903220SDoug Rabson struct proc *p; 6683d903220SDoug Rabson struct shmget_args *uap; 6693d903220SDoug Rabson { 670b6a4b4f9SMatthew Dillon int segnum, mode; 671b6a4b4f9SMatthew Dillon int error; 6723d903220SDoug Rabson 673b6a4b4f9SMatthew Dillon mtx_lock(&Giant); 674b6a4b4f9SMatthew Dillon 675b6a4b4f9SMatthew Dillon if (!jail_sysvipc_allowed && jailed(p->p_ucred)) { 676b6a4b4f9SMatthew Dillon error = ENOSYS; 677b6a4b4f9SMatthew Dillon goto done2; 678b6a4b4f9SMatthew Dillon } 679cb1f0db9SRobert Watson 6803d903220SDoug Rabson mode = uap->shmflg & ACCESSPERMS; 6813d903220SDoug Rabson if (uap->key != IPC_PRIVATE) { 6823d903220SDoug Rabson again: 6833d903220SDoug Rabson segnum = shm_find_segment_by_key(uap->key); 6843d903220SDoug Rabson if (segnum >= 0) { 685cb226aaaSPoul-Henning Kamp error = shmget_existing(p, uap, mode, segnum); 6863d903220SDoug Rabson if (error == EAGAIN) 6873d903220SDoug Rabson goto again; 688b6a4b4f9SMatthew Dillon goto done2; 6893d903220SDoug Rabson } 690b6a4b4f9SMatthew Dillon if ((uap->shmflg & IPC_CREAT) == 0) { 691b6a4b4f9SMatthew Dillon error = ENOENT; 692b6a4b4f9SMatthew Dillon goto done2; 6933d903220SDoug Rabson } 694b6a4b4f9SMatthew Dillon } 695b6a4b4f9SMatthew Dillon error = shmget_allocate_segment(p, uap, mode); 696b6a4b4f9SMatthew Dillon done2: 697b6a4b4f9SMatthew Dillon mtx_unlock(&Giant); 698b6a4b4f9SMatthew Dillon return (error); 6993d903220SDoug Rabson } 7003d903220SDoug Rabson 701b6a4b4f9SMatthew Dillon /* 702b6a4b4f9SMatthew Dillon * MPSAFE 703b6a4b4f9SMatthew 
 */
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	mtx_lock(&Giant);

	/* SysV IPC is unavailable to jailed processes unless enabled. */
	if (!jail_sysvipc_allowed && jailed(p->p_ucred)) {
		error = ENOSYS;
		goto done2;
	}

	/* Bounds-check the subcall index before dispatching through it. */
	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) {
		error = EINVAL;
		goto done2;
	}
	/* Dispatch to the selected shm* handler with the remaining args. */
	error = (*shmcalls[uap->which])(p, &uap->a2);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Fork hook: give the child its own copy of the parent's shm attach
 * table and bump shm_nattch on every segment the parent has attached.
 */
static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; 7493d903220SDoug Rabson } 7503d903220SDoug Rabson 75178525ce3SAlfred Perlstein static void 75278525ce3SAlfred Perlstein shmexit_myhook(p) 7533d903220SDoug Rabson struct proc *p; 7543d903220SDoug Rabson { 7553d903220SDoug Rabson struct shmmap_state *shmmap_s; 7563d903220SDoug Rabson int i; 7573d903220SDoug Rabson 7580cddd8f0SMatthew Dillon GIANT_REQUIRED; 7590cddd8f0SMatthew Dillon 7603d903220SDoug Rabson shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm; 7613d903220SDoug Rabson for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) 7623d903220SDoug Rabson if (shmmap_s->shmid != -1) 7633d903220SDoug Rabson shm_delete_mapping(p, shmmap_s); 7643d903220SDoug Rabson free((caddr_t)p->p_vmspace->vm_shm, M_SHM); 7653d903220SDoug Rabson p->p_vmspace->vm_shm = NULL; 7663d903220SDoug Rabson } 7673d903220SDoug Rabson 768255108f3SPeter Wemm static void 769255108f3SPeter Wemm shmrealloc(void) 770255108f3SPeter Wemm { 771255108f3SPeter Wemm int i; 772255108f3SPeter Wemm struct shmid_ds *newsegs; 773255108f3SPeter Wemm 774255108f3SPeter Wemm if (shmalloced >= shminfo.shmmni) 775255108f3SPeter Wemm return; 776255108f3SPeter Wemm 777255108f3SPeter Wemm newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 778255108f3SPeter Wemm if (newsegs == NULL) 779255108f3SPeter Wemm return; 780255108f3SPeter Wemm for (i = 0; i < shmalloced; i++) 781255108f3SPeter Wemm bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 782255108f3SPeter Wemm for (; i < shminfo.shmmni; i++) { 783255108f3SPeter Wemm shmsegs[i].shm_perm.mode = SHMSEG_FREE; 784255108f3SPeter Wemm shmsegs[i].shm_perm.seq = 0; 785255108f3SPeter Wemm } 786255108f3SPeter Wemm free(shmsegs, M_SHM); 787255108f3SPeter Wemm shmsegs = newsegs; 788255108f3SPeter Wemm shmalloced = shminfo.shmmni; 789255108f3SPeter Wemm } 790255108f3SPeter Wemm 791255108f3SPeter Wemm static void 79278525ce3SAlfred Perlstein shminit() 7933d903220SDoug Rabson { 7943d903220SDoug Rabson int i; 

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	/* All segments start out free. */
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	/* Hook process fork/exit so attach state follows the process. */
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

/*
 * Module teardown: refuse while any segment is in use, otherwise
 * release the segment array and unhook from fork/exit.
 */
static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

/* Sysctl handler: export the raw segment array to userland. */
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

/* Module event handler for the sysvshm loadable module. */
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

/* Register the system calls this module implements (name, arg count). */
SYSCALL_MODULE_HELPER(shmsys, 4);
SYSCALL_MODULE_HELPER(shmat, 3);
SYSCALL_MODULE_HELPER(shmctl, 3);
SYSCALL_MODULE_HELPER(shmdt, 1);
SYSCALL_MODULE_HELPER(shmget, 3);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);