xref: /freebsd/sys/kern/sysv_shm.c (revision f50c4fd81728483c94d956b25d50e46b5386117c)
13d903220SDoug Rabson /*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
29454b2d8SWarner Losh /*-
33d903220SDoug Rabson  * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
43d903220SDoug Rabson  *
53d903220SDoug Rabson  * Redistribution and use in source and binary forms, with or without
63d903220SDoug Rabson  * modification, are permitted provided that the following conditions
73d903220SDoug Rabson  * are met:
83d903220SDoug Rabson  * 1. Redistributions of source code must retain the above copyright
93d903220SDoug Rabson  *    notice, this list of conditions and the following disclaimer.
103d903220SDoug Rabson  * 2. Redistributions in binary form must reproduce the above copyright
113d903220SDoug Rabson  *    notice, this list of conditions and the following disclaimer in the
123d903220SDoug Rabson  *    documentation and/or other materials provided with the distribution.
133d903220SDoug Rabson  * 3. All advertising materials mentioning features or use of this software
143d903220SDoug Rabson  *    must display the following acknowledgement:
153d903220SDoug Rabson  *	This product includes software developed by Adam Glass and Charles
163d903220SDoug Rabson  *	Hannum.
173d903220SDoug Rabson  * 4. The names of the authors may not be used to endorse or promote products
183d903220SDoug Rabson  *    derived from this software without specific prior written permission.
193d903220SDoug Rabson  *
203d903220SDoug Rabson  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
213d903220SDoug Rabson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
223d903220SDoug Rabson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
233d903220SDoug Rabson  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
243d903220SDoug Rabson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
253d903220SDoug Rabson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
263d903220SDoug Rabson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
273d903220SDoug Rabson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
283d903220SDoug Rabson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
293d903220SDoug Rabson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
303d903220SDoug Rabson  */
3114cedfc8SRobert Watson /*-
3214cedfc8SRobert Watson  * Copyright (c) 2003-2005 McAfee, Inc.
3314cedfc8SRobert Watson  * All rights reserved.
3414cedfc8SRobert Watson  *
3514cedfc8SRobert Watson  * This software was developed for the FreeBSD Project in part by McAfee
3614cedfc8SRobert Watson  * Research, the Security Research Division of McAfee, Inc under DARPA/SPAWAR
3714cedfc8SRobert Watson  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS research
3814cedfc8SRobert Watson  * program.
3914cedfc8SRobert Watson  *
4014cedfc8SRobert Watson  * Redistribution and use in source and binary forms, with or without
4114cedfc8SRobert Watson  * modification, are permitted provided that the following conditions
4214cedfc8SRobert Watson  * are met:
4314cedfc8SRobert Watson  * 1. Redistributions of source code must retain the above copyright
4414cedfc8SRobert Watson  *    notice, this list of conditions and the following disclaimer.
4514cedfc8SRobert Watson  * 2. Redistributions in binary form must reproduce the above copyright
4614cedfc8SRobert Watson  *    notice, this list of conditions and the following disclaimer in the
4714cedfc8SRobert Watson  *    documentation and/or other materials provided with the distribution.
4814cedfc8SRobert Watson  *
4914cedfc8SRobert Watson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
5014cedfc8SRobert Watson  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5114cedfc8SRobert Watson  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5214cedfc8SRobert Watson  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
5314cedfc8SRobert Watson  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5414cedfc8SRobert Watson  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5514cedfc8SRobert Watson  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5614cedfc8SRobert Watson  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5714cedfc8SRobert Watson  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5814cedfc8SRobert Watson  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5914cedfc8SRobert Watson  * SUCH DAMAGE.
6014cedfc8SRobert Watson  */
613d903220SDoug Rabson 
62677b542eSDavid E. O'Brien #include <sys/cdefs.h>
63677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
64677b542eSDavid E. O'Brien 
655591b823SEivind Eklund #include "opt_compat.h"
66255108f3SPeter Wemm #include "opt_sysvipc.h"
6714cedfc8SRobert Watson #include "opt_mac.h"
68511b67b7SGarrett Wollman 
693d903220SDoug Rabson #include <sys/param.h>
70725db531SBruce Evans #include <sys/systm.h>
713d903220SDoug Rabson #include <sys/kernel.h>
72fb919e4dSMark Murray #include <sys/lock.h>
73255108f3SPeter Wemm #include <sys/sysctl.h>
743d903220SDoug Rabson #include <sys/shm.h>
753d903220SDoug Rabson #include <sys/proc.h>
763d903220SDoug Rabson #include <sys/malloc.h>
773d903220SDoug Rabson #include <sys/mman.h>
7877409fe1SPoul-Henning Kamp #include <sys/module.h>
799dceb26bSJohn Baldwin #include <sys/mutex.h>
8068ba7a1dSTim J. Robbins #include <sys/resourcevar.h>
813d903220SDoug Rabson #include <sys/stat.h>
8278525ce3SAlfred Perlstein #include <sys/syscall.h>
83f130dcf2SMartin Blapp #include <sys/syscallsubr.h>
84725db531SBruce Evans #include <sys/sysent.h>
85fb919e4dSMark Murray #include <sys/sysproto.h>
86cb1f0db9SRobert Watson #include <sys/jail.h>
8714cedfc8SRobert Watson #include <sys/mac.h>
883d903220SDoug Rabson 
893d903220SDoug Rabson #include <vm/vm.h>
90efeaf95aSDavid Greenman #include <vm/vm_param.h>
91efeaf95aSDavid Greenman #include <vm/pmap.h>
92a51f7119SJohn Dyson #include <vm/vm_object.h>
933d903220SDoug Rabson #include <vm/vm_map.h>
941c7c3c6aSMatthew Dillon #include <vm/vm_page.h>
95ae9b8c3aSJohn Dyson #include <vm/vm_pager.h>
963d903220SDoug Rabson 
97a1c995b6SPoul-Henning Kamp static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");
9855166637SPoul-Henning Kamp 
99fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
100725db531SBruce Evans struct oshmctl_args;
1014d77a549SAlfred Perlstein static int oshmctl(struct thread *td, struct oshmctl_args *uap);
102fbb273bcSPaul Saab #endif
103255108f3SPeter Wemm 
1044d77a549SAlfred Perlstein static int shmget_allocate_segment(struct thread *td,
1054d77a549SAlfred Perlstein     struct shmget_args *uap, int mode);
1064d77a549SAlfred Perlstein static int shmget_existing(struct thread *td, struct shmget_args *uap,
1074d77a549SAlfred Perlstein     int mode, int segnum);
108725db531SBruce Evans 
109fbb273bcSPaul Saab #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
110725db531SBruce Evans /* XXX casting to (sy_call_t *) is bogus, as usual. */
111303b270bSEivind Eklund static sy_call_t *shmcalls[] = {
112725db531SBruce Evans 	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
113725db531SBruce Evans 	(sy_call_t *)shmdt, (sy_call_t *)shmget,
114725db531SBruce Evans 	(sy_call_t *)shmctl
115725db531SBruce Evans };
116fbb273bcSPaul Saab #endif
1173d903220SDoug Rabson 
1183d903220SDoug Rabson #define	SHMSEG_FREE     	0x0200
1193d903220SDoug Rabson #define	SHMSEG_REMOVED  	0x0400
1203d903220SDoug Rabson #define	SHMSEG_ALLOCATED	0x0800
1213d903220SDoug Rabson #define	SHMSEG_WANTED		0x1000
1223d903220SDoug Rabson 
123255108f3SPeter Wemm static int shm_last_free, shm_nused, shm_committed, shmalloced;
124921d05b9SRobert Watson static struct shmid_kernel	*shmsegs;
1253d903220SDoug Rabson 
1263d903220SDoug Rabson struct shmmap_state {
1273d903220SDoug Rabson 	vm_offset_t va;
1283d903220SDoug Rabson 	int shmid;
1293d903220SDoug Rabson };
1303d903220SDoug Rabson 
131921d05b9SRobert Watson static void shm_deallocate_segment(struct shmid_kernel *);
1324d77a549SAlfred Perlstein static int shm_find_segment_by_key(key_t);
133921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmid(int);
134921d05b9SRobert Watson static struct shmid_kernel *shm_find_segment_by_shmidx(int);
1353db161e0SMatthew Dillon static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
1364d77a549SAlfred Perlstein static void shmrealloc(void);
1374d77a549SAlfred Perlstein static void shminit(void);
1384d77a549SAlfred Perlstein static int sysvshm_modload(struct module *, int, void *);
1394d77a549SAlfred Perlstein static int shmunload(void);
1403db161e0SMatthew Dillon static void shmexit_myhook(struct vmspace *vm);
1414d77a549SAlfred Perlstein static void shmfork_myhook(struct proc *p1, struct proc *p2);
1424d77a549SAlfred Perlstein static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);
143255108f3SPeter Wemm 
144255108f3SPeter Wemm /*
145028f979dSDima Dorfman  * Tuneable values.
146255108f3SPeter Wemm  */
147255108f3SPeter Wemm #ifndef SHMMAXPGS
148028f979dSDima Dorfman #define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
149255108f3SPeter Wemm #endif
150255108f3SPeter Wemm #ifndef SHMMAX
151255108f3SPeter Wemm #define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
152255108f3SPeter Wemm #endif
153255108f3SPeter Wemm #ifndef SHMMIN
154255108f3SPeter Wemm #define	SHMMIN	1
155255108f3SPeter Wemm #endif
156255108f3SPeter Wemm #ifndef SHMMNI
1571766b2e5SMatthew Dillon #define	SHMMNI	192
158255108f3SPeter Wemm #endif
159255108f3SPeter Wemm #ifndef SHMSEG
1601766b2e5SMatthew Dillon #define	SHMSEG	128
161255108f3SPeter Wemm #endif
162255108f3SPeter Wemm #ifndef SHMALL
163255108f3SPeter Wemm #define	SHMALL	(SHMMAXPGS)
164255108f3SPeter Wemm #endif
165255108f3SPeter Wemm 
166255108f3SPeter Wemm struct	shminfo shminfo = {
167255108f3SPeter Wemm 	SHMMAX,
168255108f3SPeter Wemm 	SHMMIN,
169255108f3SPeter Wemm 	SHMMNI,
170255108f3SPeter Wemm 	SHMSEG,
171255108f3SPeter Wemm 	SHMALL
172255108f3SPeter Wemm };
173255108f3SPeter Wemm 
1748b03c8edSMatthew Dillon static int shm_use_phys;
1752332251cSMax Khon static int shm_allow_removed;
1768b03c8edSMatthew Dillon 
1779baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
17884f85aedSChristian S.J. Peron     "Maximum shared memory segment size");
1799baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
18084f85aedSChristian S.J. Peron     "Minimum shared memory segment size");
1819baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0,
18284f85aedSChristian S.J. Peron     "Number of shared memory identifiers");
1839baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0,
18484f85aedSChristian S.J. Peron     "Number of segments per process");
1859baea4b4SChristian S.J. Peron SYSCTL_ULONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
18684f85aedSChristian S.J. Peron     "Maximum number of pages available for shared memory");
187028f979dSDima Dorfman SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
18884f85aedSChristian S.J. Peron     &shm_use_phys, 0, "Enable/Disable locking of shared memory pages in core");
1892332251cSMax Khon SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
19084f85aedSChristian S.J. Peron     &shm_allow_removed, 0,
19184f85aedSChristian S.J. Peron     "Enable/Disable attachment to attached segments marked for removal");
192a723c4e1SDima Dorfman SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
19384f85aedSChristian S.J. Peron     NULL, 0, sysctl_shmsegs, "",
19484f85aedSChristian S.J. Peron     "Current number of shared memory segments allocated");
1953d903220SDoug Rabson 
1963d903220SDoug Rabson static int
1973d903220SDoug Rabson shm_find_segment_by_key(key)
1983d903220SDoug Rabson 	key_t key;
1993d903220SDoug Rabson {
2003d903220SDoug Rabson 	int i;
2013d903220SDoug Rabson 
202255108f3SPeter Wemm 	for (i = 0; i < shmalloced; i++)
203921d05b9SRobert Watson 		if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
204921d05b9SRobert Watson 		    shmsegs[i].u.shm_perm.key == key)
205b618bb96SAlfred Perlstein 			return (i);
206b618bb96SAlfred Perlstein 	return (-1);
2073d903220SDoug Rabson }
2083d903220SDoug Rabson 
209921d05b9SRobert Watson static struct shmid_kernel *
2102332251cSMax Khon shm_find_segment_by_shmid(int shmid)
2113d903220SDoug Rabson {
2123d903220SDoug Rabson 	int segnum;
213921d05b9SRobert Watson 	struct shmid_kernel *shmseg;
2143d903220SDoug Rabson 
2153d903220SDoug Rabson 	segnum = IPCID_TO_IX(shmid);
216255108f3SPeter Wemm 	if (segnum < 0 || segnum >= shmalloced)
217b618bb96SAlfred Perlstein 		return (NULL);
2183d903220SDoug Rabson 	shmseg = &shmsegs[segnum];
219921d05b9SRobert Watson 	if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
2202332251cSMax Khon 	    (!shm_allow_removed &&
221921d05b9SRobert Watson 	     (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) ||
222921d05b9SRobert Watson 	    shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid))
223b618bb96SAlfred Perlstein 		return (NULL);
224b618bb96SAlfred Perlstein 	return (shmseg);
2253d903220SDoug Rabson }
2263d903220SDoug Rabson 
227921d05b9SRobert Watson static struct shmid_kernel *
2282332251cSMax Khon shm_find_segment_by_shmidx(int segnum)
229491dec93SMichael Reifenberger {
230921d05b9SRobert Watson 	struct shmid_kernel *shmseg;
231491dec93SMichael Reifenberger 
232491dec93SMichael Reifenberger 	if (segnum < 0 || segnum >= shmalloced)
233b618bb96SAlfred Perlstein 		return (NULL);
234491dec93SMichael Reifenberger 	shmseg = &shmsegs[segnum];
235921d05b9SRobert Watson 	if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
2362332251cSMax Khon 	    (!shm_allow_removed &&
237921d05b9SRobert Watson 	     (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0))
238b618bb96SAlfred Perlstein 		return (NULL);
239b618bb96SAlfred Perlstein 	return (shmseg);
240491dec93SMichael Reifenberger }
241491dec93SMichael Reifenberger 
2423d903220SDoug Rabson static void
2433d903220SDoug Rabson shm_deallocate_segment(shmseg)
244921d05b9SRobert Watson 	struct shmid_kernel *shmseg;
2453d903220SDoug Rabson {
2463d903220SDoug Rabson 	size_t size;
2473d903220SDoug Rabson 
2480cddd8f0SMatthew Dillon 	GIANT_REQUIRED;
2490cddd8f0SMatthew Dillon 
250921d05b9SRobert Watson 	vm_object_deallocate(shmseg->u.shm_internal);
251921d05b9SRobert Watson 	shmseg->u.shm_internal = NULL;
252921d05b9SRobert Watson 	size = round_page(shmseg->u.shm_segsz);
2533d903220SDoug Rabson 	shm_committed -= btoc(size);
2543d903220SDoug Rabson 	shm_nused--;
255921d05b9SRobert Watson 	shmseg->u.shm_perm.mode = SHMSEG_FREE;
25614cedfc8SRobert Watson #ifdef MAC
25714cedfc8SRobert Watson 	mac_cleanup_sysv_shm(shmseg);
25814cedfc8SRobert Watson #endif
2593d903220SDoug Rabson }
2603d903220SDoug Rabson 
/*
 * Detach one per-process shm mapping: unmap it from the process address
 * space and update the segment's bookkeeping.  If this was the last
 * attachment of a segment already marked SHMSEG_REMOVED, the segment
 * itself is torn down.  Giant must be held.
 *
 * Returns 0 on success or EINVAL if the address range could not be
 * removed from the map.
 */
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->u.shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	/* Free the per-process slot and stamp the detach time. */
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	/* Last detach of a removed segment reclaims the segment itself. */
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}
2853d903220SDoug Rabson 
#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;	/* address the segment was attached at */
};
#endif

/*
 * MPSAFE
 */
/*
 * shmdt(2) system call: detach the shared memory segment attached at
 * uap->shmaddr from the calling process.
 *
 * Returns 0 on success; ENOSYS when SysV IPC is disallowed in a jail;
 * EINVAL when the process has no shm mappings or no mapping at the
 * given address; or a MAC framework error.
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
#ifdef MAC
	struct shmid_kernel *shmsegptr;
#endif
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
 	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	/* Find the per-process slot whose attach address matches. */
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	/* MAC check runs against the segment, not the mapping slot. */
	shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
	error = mac_check_sysv_shmdt(td->td_ucred, shmsegptr);
	if (error != 0)
		goto done2;
#endif
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}
3373d903220SDoug Rabson 
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;		/* segment id from shmget(2) */
	const void *shmaddr;	/* requested attach address, or NULL */
	int shmflg;		/* SHM_RDONLY, SHM_RND, ... */
};
#endif

/*
 * MPSAFE
 */
/*
 * Common implementation of shmat(2): map the segment identified by
 * shmid into the calling process's address space.
 *
 * On success the attach address is returned in td->td_retval[0] and the
 * segment's lpid/atime/nattch are updated.  Errors: ENOSYS (jailed),
 * EINVAL (bad id or misaligned shmaddr), EMFILE (per-process attach
 * table full), ENOMEM (vm_map_find failure), or a permission/MAC error.
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	/* Lazily allocate the per-process attach table on first use. */
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
#ifdef MAC
	error = mac_check_sysv_shmat(td->td_ucred, shmseg, shmflg);
	if (error != 0)
		goto done2;
#endif
	/* Find a free slot in the per-process attach table. */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->u.shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		/*
		 * Caller supplied an address: with SHM_RND round it down
		 * to an SHMLBA boundary, otherwise require it to be
		 * aligned already.
		 */
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	/* Take a reference for the mapping; dropped again on failure. */
	vm_object_reference(shmseg->u.shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->u.shm_internal);
		error = ENOMEM;
		goto done2;
	}
	/* Shm mappings are shared with children across fork(). */
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}
4513d903220SDoug Rabson 
452f130dcf2SMartin Blapp int
453f130dcf2SMartin Blapp shmat(td, uap)
454f130dcf2SMartin Blapp 	struct thread *td;
455f130dcf2SMartin Blapp 	struct shmat_args *uap;
456f130dcf2SMartin Blapp {
4572332251cSMax Khon 	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
458f130dcf2SMartin Blapp }
459f130dcf2SMartin Blapp 
#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
/* Old (4.3BSD-era) shmid_ds layout, copied out to legacy binaries. */
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
/*
 * Compatibility shmctl(2) for old i386 binaries.  IPC_STAT is handled
 * here by translating the segment into the legacy oshmid_ds layout;
 * every other command is forwarded to the modern shmctl().
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
#ifdef MAC
		error = mac_check_sysv_shmctl(td->td_ucred, shmseg, uap->cmd);
		if (error != 0)
			goto done2;
#endif
		/* Translate the modern shmid_ds into the legacy layout. */
		outbuf.shm_perm = shmseg->u.shm_perm;
		outbuf.shm_segsz = shmseg->u.shm_segsz;
		outbuf.shm_cpid = shmseg->u.shm_cpid;
		outbuf.shm_lpid = shmseg->u.shm_lpid;
		outbuf.shm_nattch = shmseg->u.shm_nattch;
		outbuf.shm_atime = shmseg->u.shm_atime;
		outbuf.shm_dtime = shmseg->u.shm_dtime;
		outbuf.shm_ctime = shmseg->u.shm_ctime;
		outbuf.shm_handle = shmseg->u.shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* All other commands use the modern code path. */
		error = shmctl(td, (struct shmctl_args *)uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}
#endif
5358bec0921SDoug Rabson 
#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
/*
 * Common implementation of shmctl(2).  `buf' is a kernel-space buffer
 * whose interpretation depends on `cmd'; on commands that produce
 * output, *bufsz (if non-NULL) is set to the number of bytes written.
 *
 * Handles IPC_INFO/SHM_INFO (global state), SHM_STAT/IPC_STAT (per
 * segment, by index or by id respectively), IPC_SET, and IPC_RMID.
 * Returns 0 on success, ENOSYS when jailed, EINVAL for bad ids or
 * unknown commands, or a permission/MAC error.
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_kernel *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	/* Global-information commands need no segment lookup. */
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/*XXX where to get from ? */
		shm_info.shm_tot = 0;	/*XXX where to get from ? */
		shm_info.shm_swp = 0;	/*XXX where to get from ? */
		shm_info.swap_attempts = 0;	/*XXX where to get from ? */
		shm_info.swap_successes = 0;	/*XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	/* SHM_STAT takes a raw table index; everything else a real id. */
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
#ifdef MAC
	error = mac_check_sysv_shmctl(td->td_ucred, shmseg, cmd);
	if (error != 0)
		goto done2;
#endif
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		/* For SHM_STAT, return the real IPC id for the index. */
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
		break;
	case IPC_SET: {
		/* NOTE: this local `shmid' shadows the int parameter. */
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		/* Only owner, group, and the permission bits may change. */
		shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		/*
		 * Mark the segment removed; it is only torn down now if
		 * nothing is attached, otherwise the last detach frees it.
		 */
		shmseg->u.shm_perm.key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}
6473d903220SDoug Rabson 
648f130dcf2SMartin Blapp int
649f130dcf2SMartin Blapp shmctl(td, uap)
650f130dcf2SMartin Blapp 	struct thread *td;
651f130dcf2SMartin Blapp 	struct shmctl_args *uap;
652f130dcf2SMartin Blapp {
653f130dcf2SMartin Blapp 	int error = 0;
654f130dcf2SMartin Blapp 	struct shmid_ds buf;
655f130dcf2SMartin Blapp 	size_t bufsz;
656f130dcf2SMartin Blapp 
657f130dcf2SMartin Blapp 	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
658f130dcf2SMartin Blapp 	if (uap->cmd == IPC_SET) {
659f130dcf2SMartin Blapp 		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
660f130dcf2SMartin Blapp 			goto done;
661f130dcf2SMartin Blapp 	}
662f130dcf2SMartin Blapp 
6632332251cSMax Khon 	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
664f130dcf2SMartin Blapp 	if (error)
665f130dcf2SMartin Blapp 		goto done;
666f130dcf2SMartin Blapp 
667f130dcf2SMartin Blapp 	/* Cases in which we need to copyout */
668f130dcf2SMartin Blapp 	switch (uap->cmd) {
669f130dcf2SMartin Blapp 	case IPC_INFO:
670f130dcf2SMartin Blapp 	case SHM_INFO:
671f130dcf2SMartin Blapp 	case SHM_STAT:
672f130dcf2SMartin Blapp 	case IPC_STAT:
673f130dcf2SMartin Blapp 		error = copyout(&buf, uap->buf, bufsz);
674f130dcf2SMartin Blapp 		break;
675f130dcf2SMartin Blapp 	}
676f130dcf2SMartin Blapp 
677f130dcf2SMartin Blapp done:
678f130dcf2SMartin Blapp 	if (error) {
679f130dcf2SMartin Blapp 		/* Invalidate the return value */
680f130dcf2SMartin Blapp 		td->td_retval[0] = -1;
681f130dcf2SMartin Blapp 	}
682f130dcf2SMartin Blapp 	return (error);
683f130dcf2SMartin Blapp }
684f130dcf2SMartin Blapp 
685f130dcf2SMartin Blapp 
#ifndef _SYS_SYSPROTO_H_
/* Arguments to shmget(2); normally generated into <sys/sysproto.h>. */
struct shmget_args {
	key_t key;		/* IPC key, or IPC_PRIVATE */
	size_t size;		/* requested segment size in bytes */
	int shmflg;		/* IPC_CREAT/IPC_EXCL plus permission bits */
};
#endif
693b5d5c0c9SPeter Wemm 
6943d903220SDoug Rabson static int
695b40ce416SJulian Elischer shmget_existing(td, uap, mode, segnum)
696b40ce416SJulian Elischer 	struct thread *td;
6973d903220SDoug Rabson 	struct shmget_args *uap;
6983d903220SDoug Rabson 	int mode;
6993d903220SDoug Rabson 	int segnum;
7003d903220SDoug Rabson {
701921d05b9SRobert Watson 	struct shmid_kernel *shmseg;
7023d903220SDoug Rabson 	int error;
7033d903220SDoug Rabson 
7043d903220SDoug Rabson 	shmseg = &shmsegs[segnum];
705921d05b9SRobert Watson 	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
7063d903220SDoug Rabson 		/*
7073d903220SDoug Rabson 		 * This segment is in the process of being allocated.  Wait
7083d903220SDoug Rabson 		 * until it's done, and look the key up again (in case the
7093d903220SDoug Rabson 		 * allocation failed or it was freed).
7103d903220SDoug Rabson 		 */
711921d05b9SRobert Watson 		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
7122cc593fdSAlfred Perlstein 		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
713797f2d22SPoul-Henning Kamp 		if (error)
714b618bb96SAlfred Perlstein 			return (error);
715b618bb96SAlfred Perlstein 		return (EAGAIN);
7163d903220SDoug Rabson 	}
717dc92aa57SAlan Cox 	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
718b618bb96SAlfred Perlstein 		return (EEXIST);
71914cedfc8SRobert Watson #ifdef MAC
72014cedfc8SRobert Watson 	error = mac_check_sysv_shmget(td->td_ucred, shmseg, uap->shmflg);
721f50c4fd8SRobert Watson 	if (error != 0)
7227723d5edSRobert Watson 		return (error);
72314cedfc8SRobert Watson #endif
7247723d5edSRobert Watson 	error = ipcperm(td, &shmseg->u.shm_perm, mode);
725797f2d22SPoul-Henning Kamp 	if (error)
726b618bb96SAlfred Perlstein 		return (error);
727921d05b9SRobert Watson 	if (uap->size && uap->size > shmseg->u.shm_segsz)
728b618bb96SAlfred Perlstein 		return (EINVAL);
729921d05b9SRobert Watson 	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
730b618bb96SAlfred Perlstein 	return (0);
7313d903220SDoug Rabson }
7323d903220SDoug Rabson 
/*
 * Create a brand-new shared memory segment: pick a free slot in shmsegs[]
 * (growing the array if needed), back it with a VM object, and fill in the
 * shmid_ds bookkeeping.  Returns the new shm identifier via td_retval[0].
 * Called with Giant held.
 */
static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	/* Enforce the configured per-segment size limits. */
	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	/* shmall caps the total number of pages committed to SysV shm. */
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else  {
		/* Fast path: reuse the slot cached by shm_deallocate. */
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
	 */
	shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->u.shm_perm.key = uap->key;
	/* Bump the sequence number so stale shmids are rejected. */
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		/* Unpageable (physical) backing, per kern.ipc.shm_use_phys. */
		shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	/* Shared mappings: never split, never treat as singly mapped. */
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->u.shm_internal = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	/* Preserve SHMSEG_WANTED so sleepers below get their wakeup. */
	shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_segsz = uap->size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#ifdef MAC
	mac_create_sysv_shm(cred, shmseg);
#endif
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}
8173d903220SDoug Rabson 
818b6a4b4f9SMatthew Dillon /*
819b6a4b4f9SMatthew Dillon  * MPSAFE
820b6a4b4f9SMatthew Dillon  */
8213d903220SDoug Rabson int
822b40ce416SJulian Elischer shmget(td, uap)
823b40ce416SJulian Elischer 	struct thread *td;
8243d903220SDoug Rabson 	struct shmget_args *uap;
8253d903220SDoug Rabson {
826b6a4b4f9SMatthew Dillon 	int segnum, mode;
827b6a4b4f9SMatthew Dillon 	int error;
8283d903220SDoug Rabson 
829c6f55f33SJohn Baldwin 	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
830c6f55f33SJohn Baldwin 		return (ENOSYS);
831b6a4b4f9SMatthew Dillon 	mtx_lock(&Giant);
8323d903220SDoug Rabson 	mode = uap->shmflg & ACCESSPERMS;
8333d903220SDoug Rabson 	if (uap->key != IPC_PRIVATE) {
8343d903220SDoug Rabson 	again:
8353d903220SDoug Rabson 		segnum = shm_find_segment_by_key(uap->key);
8363d903220SDoug Rabson 		if (segnum >= 0) {
837b40ce416SJulian Elischer 			error = shmget_existing(td, uap, mode, segnum);
8383d903220SDoug Rabson 			if (error == EAGAIN)
8393d903220SDoug Rabson 				goto again;
840b6a4b4f9SMatthew Dillon 			goto done2;
8413d903220SDoug Rabson 		}
842b6a4b4f9SMatthew Dillon 		if ((uap->shmflg & IPC_CREAT) == 0) {
843b6a4b4f9SMatthew Dillon 			error = ENOENT;
844b6a4b4f9SMatthew Dillon 			goto done2;
8453d903220SDoug Rabson 		}
846b6a4b4f9SMatthew Dillon 	}
847b40ce416SJulian Elischer 	error = shmget_allocate_segment(td, uap, mode);
848b6a4b4f9SMatthew Dillon done2:
849b6a4b4f9SMatthew Dillon 	mtx_unlock(&Giant);
850b6a4b4f9SMatthew Dillon 	return (error);
8513d903220SDoug Rabson }
8523d903220SDoug Rabson 
/*
 * MPSAFE
 *
 * Old-style shmsys(2) multiplexer: dispatches to shmat/shmctl/shmdt/shmget
 * through the shmcalls[] table.  Only compiled for i386 compatibility
 * (COMPAT_FREEBSD4/COMPAT_43); otherwise it is a stub returning ENOSYS.
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	/* Bounds-check the call number before indexing the table. */
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	/* Pass the remaining varargs (a2..) as the target's uap. */
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
#else
	return (nosys(td, NULL));
#endif
}
8833d903220SDoug Rabson 
88478525ce3SAlfred Perlstein static void
88578525ce3SAlfred Perlstein shmfork_myhook(p1, p2)
8863d903220SDoug Rabson 	struct proc *p1, *p2;
8873d903220SDoug Rabson {
8883d903220SDoug Rabson 	struct shmmap_state *shmmap_s;
8893d903220SDoug Rabson 	size_t size;
8903d903220SDoug Rabson 	int i;
8913d903220SDoug Rabson 
89294ddc707SAlan Cox 	mtx_lock(&Giant);
8933d903220SDoug Rabson 	size = shminfo.shmseg * sizeof(struct shmmap_state);
894a163d034SWarner Losh 	shmmap_s = malloc(size, M_SHM, M_WAITOK);
8952cc593fdSAlfred Perlstein 	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
8962cc593fdSAlfred Perlstein 	p2->p_vmspace->vm_shm = shmmap_s;
8973d903220SDoug Rabson 	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
8983d903220SDoug Rabson 		if (shmmap_s->shmid != -1)
899921d05b9SRobert Watson 			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
90094ddc707SAlan Cox 	mtx_unlock(&Giant);
9013d903220SDoug Rabson }
9023d903220SDoug Rabson 
90378525ce3SAlfred Perlstein static void
9043db161e0SMatthew Dillon shmexit_myhook(struct vmspace *vm)
9053d903220SDoug Rabson {
9063db161e0SMatthew Dillon 	struct shmmap_state *base, *shm;
9073d903220SDoug Rabson 	int i;
9083d903220SDoug Rabson 
9093db161e0SMatthew Dillon 	if ((base = vm->vm_shm) != NULL) {
9103db161e0SMatthew Dillon 		vm->vm_shm = NULL;
9111a276a3fSAlan Cox 		mtx_lock(&Giant);
9123db161e0SMatthew Dillon 		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
9133db161e0SMatthew Dillon 			if (shm->shmid != -1)
9143db161e0SMatthew Dillon 				shm_delete_mapping(vm, shm);
9153db161e0SMatthew Dillon 		}
9161a276a3fSAlan Cox 		mtx_unlock(&Giant);
9173db161e0SMatthew Dillon 		free(base, M_SHM);
9183db161e0SMatthew Dillon 	}
9193d903220SDoug Rabson }
9203d903220SDoug Rabson 
921255108f3SPeter Wemm static void
922255108f3SPeter Wemm shmrealloc(void)
923255108f3SPeter Wemm {
924255108f3SPeter Wemm 	int i;
925921d05b9SRobert Watson 	struct shmid_kernel *newsegs;
926255108f3SPeter Wemm 
927255108f3SPeter Wemm 	if (shmalloced >= shminfo.shmmni)
928255108f3SPeter Wemm 		return;
929255108f3SPeter Wemm 
930a163d034SWarner Losh 	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
931255108f3SPeter Wemm 	if (newsegs == NULL)
932255108f3SPeter Wemm 		return;
933255108f3SPeter Wemm 	for (i = 0; i < shmalloced; i++)
934255108f3SPeter Wemm 		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
935255108f3SPeter Wemm 	for (; i < shminfo.shmmni; i++) {
936921d05b9SRobert Watson 		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
937921d05b9SRobert Watson 		shmsegs[i].u.shm_perm.seq = 0;
93814cedfc8SRobert Watson #ifdef MAC
93914cedfc8SRobert Watson 		mac_init_sysv_shm(&shmsegs[i]);
94014cedfc8SRobert Watson #endif
941255108f3SPeter Wemm 	}
942255108f3SPeter Wemm 	free(shmsegs, M_SHM);
943255108f3SPeter Wemm 	shmsegs = newsegs;
944255108f3SPeter Wemm 	shmalloced = shminfo.shmmni;
945255108f3SPeter Wemm }
946255108f3SPeter Wemm 
/*
 * Initialize the SysV shared memory subsystem: fetch the loader tunables,
 * derive shmmax from shmall, allocate and clear the initial shmsegs[]
 * array, and install the fork/exit hooks.
 */
static void
shminit()
{
	int i;

	TUNABLE_ULONG_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	/*
	 * shmmax (bytes) = shmall (pages) * multiplier, starting at
	 * PAGE_SIZE and backing off until the product no longer wraps
	 * below shmall.
	 */
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * i;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_ULONG_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_ULONG_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_ULONG_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	/* Every slot starts out free with a zeroed sequence number. */
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
#ifdef MAC
		mac_init_sysv_shm(&shmsegs[i]);
#endif
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	/* Hook process fork/exit so attach tables follow process lifetime. */
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}
98078525ce3SAlfred Perlstein 
98178525ce3SAlfred Perlstein static int
98278525ce3SAlfred Perlstein shmunload()
98378525ce3SAlfred Perlstein {
98414cedfc8SRobert Watson #ifdef MAC
98514cedfc8SRobert Watson 	int i;
98614cedfc8SRobert Watson #endif
98778525ce3SAlfred Perlstein 
98878525ce3SAlfred Perlstein 	if (shm_nused > 0)
98978525ce3SAlfred Perlstein 		return (EBUSY);
99078525ce3SAlfred Perlstein 
99114cedfc8SRobert Watson #ifdef MAC
99214cedfc8SRobert Watson 	for (i = 0; i < shmalloced; i++)
99314cedfc8SRobert Watson 		mac_destroy_sysv_shm(&shmsegs[i]);
99414cedfc8SRobert Watson #endif
99578525ce3SAlfred Perlstein 	free(shmsegs, M_SHM);
99678525ce3SAlfred Perlstein 	shmexit_hook = NULL;
99778525ce3SAlfred Perlstein 	shmfork_hook = NULL;
99878525ce3SAlfred Perlstein 	return (0);
99978525ce3SAlfred Perlstein }
100078525ce3SAlfred Perlstein 
100178525ce3SAlfred Perlstein static int
1002a723c4e1SDima Dorfman sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
1003a723c4e1SDima Dorfman {
1004a723c4e1SDima Dorfman 
1005a723c4e1SDima Dorfman 	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
1006a723c4e1SDima Dorfman }
1007a723c4e1SDima Dorfman 
1008a723c4e1SDima Dorfman static int
100978525ce3SAlfred Perlstein sysvshm_modload(struct module *module, int cmd, void *arg)
101078525ce3SAlfred Perlstein {
101178525ce3SAlfred Perlstein 	int error = 0;
101278525ce3SAlfred Perlstein 
101378525ce3SAlfred Perlstein 	switch (cmd) {
101478525ce3SAlfred Perlstein 	case MOD_LOAD:
101578525ce3SAlfred Perlstein 		shminit();
101678525ce3SAlfred Perlstein 		break;
101778525ce3SAlfred Perlstein 	case MOD_UNLOAD:
101878525ce3SAlfred Perlstein 		error = shmunload();
101978525ce3SAlfred Perlstein 		break;
102078525ce3SAlfred Perlstein 	case MOD_SHUTDOWN:
102178525ce3SAlfred Perlstein 		break;
102278525ce3SAlfred Perlstein 	default:
102378525ce3SAlfred Perlstein 		error = EINVAL;
102478525ce3SAlfred Perlstein 		break;
102578525ce3SAlfred Perlstein 	}
102678525ce3SAlfred Perlstein 	return (error);
102778525ce3SAlfred Perlstein }
102878525ce3SAlfred Perlstein 
/* Module descriptor tying the "sysvshm" module to its event handler. */
static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

/* Register the SysV shared memory system calls with the module glue. */
SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

/* Initialize during the SysV shm SYSINIT stage; export module version 1. */
DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
1044