/* $Id: sysv_shm.c,v 1.39 1998/10/13 08:24:40 dg Exp $ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
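
/*
 * System V compatible shared memory facility (shmget, shmat, shmdt,
 * shmctl and the old multiplexed shmsys entry point).
 *
 * Minimal sketch of how this interface is typically driven from
 * userland; illustrative only, not part of this file, with key and
 * size supplied by the caller:
 *
 *	int id = shmget(key, size, IPC_CREAT | 0644);
 *	void *addr = shmat(id, NULL, 0);
 *	...
 *	shmdt(addr);
 *	shmctl(id, IPC_RMID, NULL);
 */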

#include "opt_compat.h"
#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_inherit.h>

#ifndef _SYS_SYSPROTO_H_
struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap));
#endif

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));
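
/*
 * Dispatch table for the old multiplexed shmsys(2) entry point below;
 * it is indexed by the "which" argument of shmsys().
 */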
/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
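
/*
 * Detach a single mapping from the given process and, if this was the
 * last attach of a segment already marked SHMSEG_REMOVED, free the
 * segment itself.
 */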
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif
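
/*
 * shmdt(2): look up the attach whose address matches shmaddr in the
 * per-process table and remove that mapping.
 */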
int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}
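
/*
 * Old (4.3BSD-era) shmid_ds layout and shmctl interface, compiled only
 * under COMPAT_43; oshmctl() below translates IPC_STAT into this layout
 * and hands every other command to the current shmctl().
 */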
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif
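
/*
 * shmctl(2): IPC_STAT copies the segment descriptor out to the user,
 * IPC_SET updates owner/group/mode, and IPC_RMID marks the segment
 * removed so it is freed once the last attach goes away.
 */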
int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif
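
/*
 * A shmget() request matched an existing key: wait out a segment that
 * is still being set up, check permissions and size, and hand back its
 * identifier.
 */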
static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	shm_handle->shm_object =
	    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}
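
/*
 * Entry point for the old multiplexed shmsys(2) syscall; it dispatches
 * to shmat/oshmctl/shmdt/shmget/shmctl through shmcalls[] based on the
 * "which" argument.
 */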
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

void
shminit(dummy)
	void *dummy;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}