/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Adam Glass and Charles
 *      Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
        (sy_call_t *)shmat, (sy_call_t *)oshmctl,
        (sy_call_t *)shmdt, (sy_call_t *)shmget,
        (sy_call_t *)shmctl
};

#define SHMSEG_FREE             0x0200
#define SHMSEG_REMOVED          0x0400
#define SHMSEG_ALLOCATED        0x0800
#define SHMSEG_WANTED           0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
        /* vm_offset_t kva; */
        vm_object_t shm_object;
};

struct shmmap_state {
        vm_offset_t va;
        int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tunable values.
 */
#ifndef SHMMAXPGS
#define SHMMAXPGS       8192    /* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define SHMMAX  (SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define SHMMIN  1
#endif
#ifndef SHMMNI
#define SHMMNI  192
#endif
#ifndef SHMSEG
#define SHMSEG  128
#endif
#ifndef SHMALL
#define SHMALL  (SHMMAXPGS)
#endif

struct shminfo shminfo = {
        SHMMAX,
        SHMMIN,
        SHMMNI,
        SHMSEG,
        SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");

static int
shm_find_segment_by_key(key)
        key_t key;
{
        int i;

        for (i = 0; i < shmalloced; i++)
                if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
                    shmsegs[i].shm_perm.key == key)
                        return (i);
        return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
        int segnum;
        struct shmid_ds *shmseg;

        segnum = IPCID_TO_IX(shmid);
        if (segnum < 0 || segnum >= shmalloced)
                return (NULL);
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
             (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
            shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
                return (NULL);
        return (shmseg);
}

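/*
 * Like shm_find_segment_by_shmid(), but takes a raw index into the
 * shmsegs[] array (as used by SHM_STAT) instead of an IPC id, so there
 * is no sequence number to validate.
 */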
static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
        struct shmid_ds *shmseg;

        if (segnum < 0 || segnum >= shmalloced)
                return (NULL);
        shmseg = &shmsegs[segnum];
        if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
            (!shm_allow_removed &&
             (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
                return (NULL);
        return (shmseg);
}

static void
shm_deallocate_segment(shmseg)
        struct shmid_ds *shmseg;
{
        struct shm_handle *shm_handle;
        size_t size;

        GIANT_REQUIRED;

        shm_handle = shmseg->shm_internal;
        vm_object_deallocate(shm_handle->shm_object);
        free(shm_handle, M_SHM);
        shmseg->shm_internal = NULL;
        size = round_page(shmseg->shm_segsz);
        shm_committed -= btoc(size);
        shm_nused--;
        shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
        struct shmid_ds *shmseg;
        int segnum, result;
        size_t size;

        GIANT_REQUIRED;

        segnum = IPCID_TO_IX(shmmap_s->shmid);
        shmseg = &shmsegs[segnum];
        size = round_page(shmseg->shm_segsz);
        result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
        if (result != KERN_SUCCESS)
                return (EINVAL);
        shmmap_s->shmid = -1;
        shmseg->shm_dtime = time_second;
        if ((--shmseg->shm_nattch <= 0) &&
            (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
                shm_deallocate_segment(shmseg);
                shm_last_free = segnum;
        }
        return (0);
}

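/*
 * shmdt(2): detach the shared memory segment mapped at shmaddr from the
 * calling process.  The last detach of a segment already marked
 * SHMSEG_REMOVED frees it via shm_delete_mapping().
 */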
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
        const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(td, uap)
        struct thread *td;
        struct shmdt_args *uap;
{
        struct proc *p = td->td_proc;
        struct shmmap_state *shmmap_s;
        int i;
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                error = EINVAL;
                goto done2;
        }
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
                if (shmmap_s->shmid != -1 &&
                    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
                        break;
                }
        }
        if (i == shminfo.shmseg) {
                error = EINVAL;
                goto done2;
        }
        error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
        mtx_unlock(&Giant);
        return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
        int shmid;
        const void *shmaddr;
        int shmflg;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
        struct thread *td;
        int shmid;
        const void *shmaddr;
        int shmflg;
{
        struct proc *p = td->td_proc;
        int i, flags;
        struct shmid_ds *shmseg;
        struct shmmap_state *shmmap_s = NULL;
        struct shm_handle *shm_handle;
        vm_offset_t attach_va;
        vm_prot_t prot;
        vm_size_t size;
        int rv;
        int error = 0;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmmap_s = p->p_vmspace->vm_shm;
        if (shmmap_s == NULL) {
                size = shminfo.shmseg * sizeof(struct shmmap_state);
                shmmap_s = malloc(size, M_SHM, M_WAITOK);
                for (i = 0; i < shminfo.shmseg; i++)
                        shmmap_s[i].shmid = -1;
                p->p_vmspace->vm_shm = shmmap_s;
        }
        shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        error = ipcperm(td, &shmseg->shm_perm,
            (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
        if (error)
                goto done2;
        for (i = 0; i < shminfo.shmseg; i++) {
                if (shmmap_s->shmid == -1)
                        break;
                shmmap_s++;
        }
        if (i >= shminfo.shmseg) {
                error = EMFILE;
                goto done2;
        }
        size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
        prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
        prot = VM_PROT_READ;
#endif
        if ((shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON | MAP_SHARED;
        if (shmaddr) {
                flags |= MAP_FIXED;
                if (shmflg & SHM_RND) {
                        attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
                } else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
                        attach_va = (vm_offset_t)shmaddr;
                } else {
                        error = EINVAL;
                        goto done2;
                }
        } else {
                /*
                 * This is just a hint to vm_map_find() about where to
                 * put it.
                 */
                PROC_LOCK(p);
                attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
                    lim_max(p, RLIMIT_DATA));
                PROC_UNLOCK(p);
        }

        shm_handle = shmseg->shm_internal;
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
            0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(shm_handle->shm_object);
                error = ENOMEM;
                goto done2;
        }
        vm_map_inherit(&p->p_vmspace->vm_map,
            attach_va, attach_va + size, VM_INHERIT_SHARE);

        shmmap_s->va = attach_va;
        shmmap_s->shmid = shmid;
        shmseg->shm_lpid = p->p_pid;
        shmseg->shm_atime = time_second;
        shmseg->shm_nattch++;
        td->td_retval[0] = attach_va;
done2:
        mtx_unlock(&Giant);
        return (error);
}

int
shmat(td, uap)
        struct thread *td;
        struct shmat_args *uap;
{
        return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

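/*
 * Old shmid_ds layout and argument structure, used only by the
 * COMPAT_43 oshmctl() entry point below.
 */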
struct oshmid_ds {
        struct ipc_perm shm_perm;       /* operation perms */
        int     shm_segsz;              /* size of segment (bytes) */
        u_short shm_cpid;               /* pid, creator */
        u_short shm_lpid;               /* pid, last operation */
        short   shm_nattch;             /* no. of current attaches */
        time_t  shm_atime;              /* last attach time */
        time_t  shm_dtime;              /* last detach time */
        time_t  shm_ctime;              /* last change time */
        void    *shm_handle;            /* internal handle for shm segment */
};

struct oshmctl_args {
        int shmid;
        int cmd;
        struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
        struct thread *td;
        struct oshmctl_args *uap;
{
#ifdef COMPAT_43
        int error = 0;
        struct shmid_ds *shmseg;
        struct oshmid_ds outbuf;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        shmseg = shm_find_segment_by_shmid(uap->shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (uap->cmd) {
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                outbuf.shm_perm = shmseg->shm_perm;
                outbuf.shm_segsz = shmseg->shm_segsz;
                outbuf.shm_cpid = shmseg->shm_cpid;
                outbuf.shm_lpid = shmseg->shm_lpid;
                outbuf.shm_nattch = shmseg->shm_nattch;
                outbuf.shm_atime = shmseg->shm_atime;
                outbuf.shm_dtime = shmseg->shm_dtime;
                outbuf.shm_ctime = shmseg->shm_ctime;
                outbuf.shm_handle = shmseg->shm_internal;
                error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
                if (error)
                        goto done2;
                break;
        default:
                /* XXX casting to (sy_call_t *) is bogus, as usual. */
                error = ((sy_call_t *)shmctl)(td, uap);
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
#else
        return (EINVAL);
#endif
}

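/*
 * kern_shmctl() implements shmctl(2) on kernel-space buffers; its
 * callers (the native shmctl() below and compatibility layers) perform
 * the user-space copyin/copyout.
 */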
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
        int shmid;
        int cmd;
        struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
        struct thread *td;
        int shmid;
        int cmd;
        void *buf;
        size_t *bufsz;
{
        int error = 0;
        struct shmid_ds *shmseg;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);

        mtx_lock(&Giant);
        switch (cmd) {
        case IPC_INFO:
                memcpy(buf, &shminfo, sizeof(shminfo));
                if (bufsz)
                        *bufsz = sizeof(shminfo);
                td->td_retval[0] = shmalloced;
                goto done2;
        case SHM_INFO: {
                struct shm_info shm_info;
                shm_info.used_ids = shm_nused;
                shm_info.shm_rss = 0;           /* XXX where to get this from? */
                shm_info.shm_tot = 0;           /* XXX where to get this from? */
                shm_info.shm_swp = 0;           /* XXX where to get this from? */
                shm_info.swap_attempts = 0;     /* XXX where to get this from? */
                shm_info.swap_successes = 0;    /* XXX where to get this from? */
                memcpy(buf, &shm_info, sizeof(shm_info));
                if (bufsz)
                        *bufsz = sizeof(shm_info);
                td->td_retval[0] = shmalloced;
                goto done2;
        }
        }
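        /* SHM_STAT takes a raw index into shmsegs[], not an IPC id. */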
        if (cmd == SHM_STAT)
                shmseg = shm_find_segment_by_shmidx(shmid);
        else
                shmseg = shm_find_segment_by_shmid(shmid);
        if (shmseg == NULL) {
                error = EINVAL;
                goto done2;
        }
        switch (cmd) {
        case SHM_STAT:
        case IPC_STAT:
                error = ipcperm(td, &shmseg->shm_perm, IPC_R);
                if (error)
                        goto done2;
                memcpy(buf, shmseg, sizeof(struct shmid_ds));
                if (bufsz)
                        *bufsz = sizeof(struct shmid_ds);
                if (cmd == SHM_STAT)
                        td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
                break;
        case IPC_SET: {
                struct shmid_ds *shmid;

                shmid = (struct shmid_ds *)buf;
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                shmseg->shm_perm.uid = shmid->shm_perm.uid;
                shmseg->shm_perm.gid = shmid->shm_perm.gid;
                shmseg->shm_perm.mode =
                    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
                    (shmid->shm_perm.mode & ACCESSPERMS);
                shmseg->shm_ctime = time_second;
                break;
        }
        case IPC_RMID:
                error = ipcperm(td, &shmseg->shm_perm, IPC_M);
                if (error)
                        goto done2;
                shmseg->shm_perm.key = IPC_PRIVATE;
                shmseg->shm_perm.mode |= SHMSEG_REMOVED;
                if (shmseg->shm_nattch <= 0) {
                        shm_deallocate_segment(shmseg);
                        shm_last_free = IPCID_TO_IX(shmid);
                }
                break;
#if 0
        case SHM_LOCK:
        case SHM_UNLOCK:
#endif
        default:
                error = EINVAL;
                break;
        }
done2:
        mtx_unlock(&Giant);
        return (error);
}

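/*
 * System call entry point for shmctl(2): copies the user buffer in for
 * IPC_SET, dispatches to kern_shmctl(), and copies results back out for
 * the commands that return data.
 */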
int
shmctl(td, uap)
        struct thread *td;
        struct shmctl_args *uap;
{
        int error = 0;
        struct shmid_ds buf;
        size_t bufsz;

        /* IPC_SET needs to copyin the buffer before calling kern_shmctl */
        if (uap->cmd == IPC_SET) {
                if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
                        goto done;
        }

        error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
        if (error)
                goto done;

        /* Cases in which we need to copyout */
        switch (uap->cmd) {
        case IPC_INFO:
        case SHM_INFO:
        case SHM_STAT:
        case IPC_STAT:
                error = copyout(&buf, uap->buf, bufsz);
                break;
        }

done:
        if (error) {
                /* Invalidate the return value */
                td->td_retval[0] = -1;
        }
        return (error);
}


#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
        key_t key;
        size_t size;
        int shmflg;
};
#endif

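/*
 * Handle shmget(2) for a key that already exists: honor
 * IPC_CREAT|IPC_EXCL, check permissions and size, and return the
 * existing segment's IPC id.
 */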
static int
shmget_existing(td, uap, mode, segnum)
        struct thread *td;
        struct shmget_args *uap;
        int mode;
        int segnum;
{
        struct shmid_ds *shmseg;
        int error;

        shmseg = &shmsegs[segnum];
        if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
                /*
                 * This segment is in the process of being allocated.  Wait
                 * until it's done, and look the key up again (in case the
                 * allocation failed or it was freed).
                 */
                shmseg->shm_perm.mode |= SHMSEG_WANTED;
                error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
                if (error)
                        return (error);
                return (EAGAIN);
        }
        if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
                return (EEXIST);
        error = ipcperm(td, &shmseg->shm_perm, mode);
        if (error)
                return (error);
        if (uap->size && uap->size > shmseg->shm_segsz)
                return (EINVAL);
        td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
        return (0);
}

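/*
 * Create a new segment for shmget(2): claim a free slot in shmsegs[],
 * back it with a swap- or physical-memory VM object, and initialize the
 * permissions and accounting fields.
 */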
static int
shmget_allocate_segment(td, uap, mode)
        struct thread *td;
        struct shmget_args *uap;
        int mode;
{
        int i, segnum, shmid, size;
        struct ucred *cred = td->td_ucred;
        struct shmid_ds *shmseg;
        struct shm_handle *shm_handle;

        GIANT_REQUIRED;

        if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                return (EINVAL);
        if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
                return (ENOSPC);
        size = round_page(uap->size);
        if (shm_committed + btoc(size) > shminfo.shmall)
                return (ENOMEM);
        if (shm_last_free < 0) {
                shmrealloc();   /* Maybe expand the shmsegs[] array. */
                for (i = 0; i < shmalloced; i++)
                        if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
                                break;
                if (i == shmalloced)
                        return (ENOSPC);
                segnum = i;
        } else {
                segnum = shm_last_free;
                shm_last_free = -1;
        }
        shmseg = &shmsegs[segnum];
        /*
         * In case we sleep in malloc(), mark the segment present but deleted
         * so that no one else tries to create the same key.
         */
        shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
        shmseg->shm_perm.key = uap->key;
        shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
        shm_handle = (struct shm_handle *)
            malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
        shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

        /*
         * We make sure that we have allocated a pager before we need
         * to.
         */
        if (shm_use_phys) {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
        } else {
                shm_handle->shm_object =
                    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
        }
        VM_OBJECT_LOCK(shm_handle->shm_object);
        vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
        vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
        VM_OBJECT_UNLOCK(shm_handle->shm_object);

        shmseg->shm_internal = shm_handle;
        shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
        shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
        shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
            (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
        shmseg->shm_segsz = uap->size;
        shmseg->shm_cpid = td->td_proc->p_pid;
        shmseg->shm_lpid = shmseg->shm_nattch = 0;
        shmseg->shm_atime = shmseg->shm_dtime = 0;
        shmseg->shm_ctime = time_second;
        shm_committed += btoc(size);
        shm_nused++;
        if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
                /*
                 * Somebody else wanted this key while we were asleep.  Wake
                 * them up now.
                 */
                shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
                wakeup(shmseg);
        }
        td->td_retval[0] = shmid;
        return (0);
}

/*
 * MPSAFE
 */
int
shmget(td, uap)
        struct thread *td;
        struct shmget_args *uap;
{
        int segnum, mode;
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        mtx_lock(&Giant);
        mode = uap->shmflg & ACCESSPERMS;
        if (uap->key != IPC_PRIVATE) {
        again:
                segnum = shm_find_segment_by_key(uap->key);
                if (segnum >= 0) {
                        error = shmget_existing(td, uap, mode, segnum);
                        if (error == EAGAIN)
                                goto again;
                        goto done2;
                }
                if ((uap->shmflg & IPC_CREAT) == 0) {
                        error = ENOENT;
                        goto done2;
                }
        }
        error = shmget_allocate_segment(td, uap, mode);
done2:
        mtx_unlock(&Giant);
        return (error);
}

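/*
 * shmsys(2): historical multiplexer that dispatches to the individual
 * shm*() system calls through the shmcalls[] table.
 */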
/*
 * MPSAFE
 */
int
shmsys(td, uap)
        struct thread *td;
        /* XXX actually varargs. */
        struct shmsys_args /* {
                int     which;
                int     a2;
                int     a3;
                int     a4;
        } */ *uap;
{
        int error;

        if (!jail_sysvipc_allowed && jailed(td->td_ucred))
                return (ENOSYS);
        if (uap->which < 0 ||
            uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
        mtx_lock(&Giant);
        error = (*shmcalls[uap->which])(td, &uap->a2);
        mtx_unlock(&Giant);
        return (error);
}

static void
shmfork_myhook(p1, p2)
        struct proc *p1, *p2;
{
        struct shmmap_state *shmmap_s;
        size_t size;
        int i;

        size = shminfo.shmseg * sizeof(struct shmmap_state);
        shmmap_s = malloc(size, M_SHM, M_WAITOK);
        bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
        p2->p_vmspace->vm_shm = shmmap_s;
        for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
                if (shmmap_s->shmid != -1)
                        shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

static void
shmexit_myhook(struct vmspace *vm)
{
        struct shmmap_state *base, *shm;
        int i;

        GIANT_REQUIRED;

        if ((base = vm->vm_shm) != NULL) {
                vm->vm_shm = NULL;
                for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
                        if (shm->shmid != -1)
                                shm_delete_mapping(vm, shm);
                }
                free(base, M_SHM);
        }
}

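/*
 * Grow shmsegs[] up to the current shminfo.shmmni limit, preserving
 * existing entries; the array never shrinks.
 */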
static void
shmrealloc(void)
{
        int i;
        struct shmid_ds *newsegs;

        if (shmalloced >= shminfo.shmmni)
                return;

        newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
        if (newsegs == NULL)
                return;
        for (i = 0; i < shmalloced; i++)
                bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
        for (; i < shminfo.shmmni; i++) {
                newsegs[i].shm_perm.mode = SHMSEG_FREE;
                newsegs[i].shm_perm.seq = 0;
        }
        free(shmsegs, M_SHM);
        shmsegs = newsegs;
        shmalloced = shminfo.shmmni;
}

static void
shminit()
{
        int i;

        TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
        for (i = PAGE_SIZE; i > 0; i--) {
                shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
                if (shminfo.shmmax >= shminfo.shmall)
                        break;
        }
        TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
        TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
        TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
        TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

        shmalloced = shminfo.shmmni;
        shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
        if (shmsegs == NULL)
                panic("cannot allocate initial memory for sysvshm");
        for (i = 0; i < shmalloced; i++) {
                shmsegs[i].shm_perm.mode = SHMSEG_FREE;
                shmsegs[i].shm_perm.seq = 0;
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
        shmexit_hook = &shmexit_myhook;
        shmfork_hook = &shmfork_myhook;
}

static int
shmunload()
{

        if (shm_nused > 0)
                return (EBUSY);

        free(shmsegs, M_SHM);
        shmexit_hook = NULL;
        shmfork_hook = NULL;
        return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

        return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
        int error = 0;

        switch (cmd) {
        case MOD_LOAD:
                shminit();
                break;
        case MOD_UNLOAD:
                error = shmunload();
                break;
        case MOD_SHUTDOWN:
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

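/*
 * Module glue: register the sysvshm module, its load/unload handler,
 * and the five system calls it provides.
 */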
static moduledata_t sysvshm_mod = {
        "sysvshm",
        &sysvshm_modload,
        NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
    SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);