/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
/*-
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_kernel	*shmsegs;

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static struct shmid_kernel *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");

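/*
 * Scan shmsegs[] for an allocated segment whose key matches; return its
 * index, or -1 if no match is found.
 */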
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].u.shm_perm.key == key)
			return (i);
	return (-1);
}

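/*
 * Resolve a shmid to its shmid_kernel.  Returns NULL if the index is out
 * of range, the segment is not allocated, the sequence number is stale,
 * or the segment is marked removed (unless shm_allow_removed is set).
 */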
static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_kernel *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->u.shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

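/*
 * Like shm_find_segment_by_shmid(), but takes a raw segment index and
 * performs no sequence-number check; used by the SHM_STAT path.
 */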
static struct shmid_kernel *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_kernel *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->u.shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) != 0))
		return (NULL);
	return (shmseg);
}

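/*
 * Release the VM object backing a segment and return its pages to the
 * shm_committed pool; the slot is marked SHMSEG_FREE for reuse.
 */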
static void
shm_deallocate_segment(shmseg)
	struct shmid_kernel *shmseg;
{
	size_t size;

	GIANT_REQUIRED;

	vm_object_deallocate(shmseg->u.shm_internal);
	shmseg->u.shm_internal = NULL;
	size = round_page(shmseg->u.shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->u.shm_perm.mode = SHMSEG_FREE;
}

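/*
 * Unmap one attachment from the given vmspace.  The segment itself is
 * deallocated once the last attachment of a removed segment goes away.
 */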
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_kernel *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->u.shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->u.shm_dtime = time_second;
	if ((--shmseg->u.shm_nattch <= 0) &&
	    (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

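/*
 * shmdt(2): detach the attachment that starts at uap->shmaddr from the
 * calling process, if one exists.
 */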
/*
 * MPSAFE
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif

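/*
 * Common attach path for shmat(2) and its compat wrappers: validate the
 * segment and permissions, pick (or verify) an attach address, and map
 * the backing VM object into the process with VM_INHERIT_SHARE.
 */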
/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_kernel *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->u.shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->u.shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	vm_object_reference(shmseg->u.shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->u.shm_internal,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->u.shm_internal);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->u.shm_lpid = p->p_pid;
	shmseg->u.shm_atime = time_second;
	shmseg->u.shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return (kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg));
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

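/*
 * Old shmctl(2) interface, kept for COMPAT_43: only IPC_STAT is handled
 * here, converting into struct oshmid_ds; everything else falls through
 * to shmctl().
 */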
/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_kernel *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->u.shm_perm;
		outbuf.shm_segsz = shmseg->u.shm_segsz;
		outbuf.shm_cpid = shmseg->u.shm_cpid;
		outbuf.shm_lpid = shmseg->u.shm_lpid;
		outbuf.shm_nattch = shmseg->u.shm_nattch;
		outbuf.shm_atime = shmseg->u.shm_atime;
		outbuf.shm_dtime = shmseg->u.shm_dtime;
		outbuf.shm_ctime = shmseg->u.shm_ctime;
		outbuf.shm_handle = shmseg->u.shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		error = shmctl(td, (struct shmctl_args *)uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

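/*
 * Control-path workhorse shared by shmctl(2) and the compat layers.  The
 * caller supplies a kernel buffer; *bufsz (when non-NULL) is set to the
 * number of bytes the caller should copy out.
 */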
/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_kernel *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/* XXX where to get this from? */
		shm_info.shm_tot = 0;	/* XXX where to get this from? */
		shm_info.shm_swp = 0;	/* XXX where to get this from? */
		shm_info.swap_attempts = 0;	/* XXX where to get this from? */
		shm_info.swap_successes = 0;	/* XXX where to get this from? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, &shmseg->u, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->u.shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.uid = shmid->shm_perm.uid;
		shmseg->u.shm_perm.gid = shmid->shm_perm.gid;
		shmseg->u.shm_perm.mode =
		    (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->u.shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->u.shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->u.shm_perm.key = IPC_PRIVATE;
		shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->u.shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

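/*
 * shmctl(2) system call: copyin for IPC_SET, dispatch to kern_shmctl(),
 * then copyout for the commands that return data.  For example, a
 * userland shmctl(id, IPC_RMID, NULL) ends up in the IPC_RMID case of
 * kern_shmctl() above.
 */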
int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

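/*
 * shmget(2) helper for a key that already exists: wait out a segment
 * still being created, honor IPC_CREAT|IPC_EXCL, and check permissions
 * and size before handing back the existing id.
 */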
static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_kernel *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->u.shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->u.shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
	return (0);
}

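/*
 * shmget(2) helper that creates a new segment: find a free slot, back it
 * with a swap- or phys-pager VM object sized to the request, and fill in
 * the ipc_perm and accounting fields.
 */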
static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_kernel *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->u.shm_perm.key = uap->key;
	shmseg->u.shm_perm.seq = (shmseg->u.shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->u.shm_internal = shm_object;
	shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = cred->cr_uid;
	shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = cred->cr_gid;
	shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->u.shm_segsz = uap->size;
	shmseg->u.shm_cpid = td->td_proc->p_pid;
	shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
	shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
	shmseg->u.shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

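/*
 * shmget(2): look the key up (unless it is IPC_PRIVATE) and either return
 * the existing segment or allocate a fresh one.  A minimal userland usage
 * sketch, assuming the standard <sys/shm.h> interface:
 *
 *	int id = shmget(key, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 */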
/*
 * MPSAFE
 */
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}

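/*
 * shmsys(2): historic multiplexed entry point that dispatches to the
 * individual shm* syscalls through the shmcalls[] table.
 */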
/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

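/*
 * Fork hook: give the child a copy of the parent's attachment table and
 * bump the attach count on every mapped segment.
 */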
static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	mtx_lock(&Giant);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
	mtx_unlock(&Giant);
}

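/*
 * Exit hook: tear down all remaining attachments when a vmspace is
 * destroyed.
 */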
static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		mtx_lock(&Giant);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		mtx_unlock(&Giant);
		free(base, M_SHM);
	}
}

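/*
 * Grow shmsegs[] up to shminfo.shmmni entries, preserving existing
 * segments and marking the new slots free.
 */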
static void
shmrealloc(void)
{
	int i;
	struct shmid_kernel *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		newsegs[i].u.shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

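/*
 * Module initialization: fetch tunables, allocate the initial shmsegs[]
 * array, and install the fork and exit hooks.
 */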
static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].u.shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

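/*
 * Module unload: refuse while segments are in use, otherwise release the
 * table and the hooks.
 */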
static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

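/*
 * Sysctl handler exporting the raw shmsegs[] array (kern.ipc.shmsegs).
 */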
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

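/*
 * Module event handler for the sysvshm module.
 */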
static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);