/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

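/*
 * Per-process bookkeeping for attached segments: the table hanging off
 * p_vmspace->vm_shm holds shminfo.shmseg of these entries.  A slot with
 * shmid == -1 is free; otherwise va records where that segment is mapped
 * in the process address space.
 */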
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}
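/*
 * Note on the checks above: a shmid handed to userland packs the slot
 * index and a generation number (shm_perm.seq).  With the IPCID macros
 * from <sys/ipc.h> as commonly defined, IXSEQ_TO_IPCID() yields
 * (seq << 16) | index, so slot 3 with seq 5 becomes 0x50003.  The seq
 * comparison is what rejects a stale id whose slot has been recycled.
 */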

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
		return (NULL);
	return (shmseg);
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	size_t size;

	GIANT_REQUIRED;

	vm_object_deallocate(shmseg->shm_internal);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}
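/*
 * For reference, the userland view of the attach/detach pair above; a
 * minimal sketch of ordinary user code (not part of this file, error
 * handling omitted):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// kernel picks attach_va
 *	p[0] = 'x';			// segment is now mapped shared
 *	shmdt(p);			// must pass the exact attach va
 *	shmctl(id, IPC_RMID, NULL);	// freed once shm_nattch reaches 0
 */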

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	vm_object_reference(shmseg->shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->shm_internal,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->shm_internal);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}
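/*
 * The SHM_RND case in kern_shmat() rounds the caller's address down
 * with "addr & ~(SHMLBA-1)", which assumes SHMLBA is a power of two
 * (on FreeBSD it is PAGE_SIZE).  E.g. with a 4K page size, a shmaddr
 * of 0x12345 attaches at 0x12000; without SHM_RND the same unaligned
 * address fails with EINVAL.
 */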

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/* XXX where to get from ? */
		shm_info.shm_tot = 0;	/* XXX where to get from ? */
		shm_info.shm_swp = 0;	/* XXX where to get from ? */
		shm_info.swap_attempts = 0;	/* XXX where to get from ? */
		shm_info.swap_successes = 0;	/* XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, shmseg, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] = IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.uid = shmid->shm_perm.uid;
		shmseg->shm_perm.gid = shmid->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}
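/*
 * Userland sketch of the control paths above (ordinary user code, not
 * part of this file; error handling omitted):
 *
 *	struct shmid_ds ds;
 *
 *	shmctl(id, IPC_STAT, &ds);	// copied out by shmctl() above
 *	ds.shm_perm.mode = 0600;	// only ACCESSPERMS bits are kept
 *	shmctl(id, IPC_SET, &ds);	// needs IPC_M permission
 *	shmctl(id, IPC_RMID, NULL);	// freed once shm_nattch drops to 0
 */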

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_ds *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->shm_internal = shm_object;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

/*
 * MPSAFE
 */
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}
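/*
 * Callers normally derive the key that shmget() looks up via ftok(3);
 * a sketch (not part of this file):
 *
 *	key_t key = ftok("/some/existing/path", 'a');
 *	int id = shmget(key, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *
 * IPC_PRIVATE bypasses the key lookup entirely and always allocates a
 * fresh segment.
 */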

/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	GIANT_REQUIRED;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}
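/*
 * Example configuration of the limits fetched above; values are
 * illustrative only.  shmmni and shmseg are boot-time tunables
 * (CTLFLAG_RDTUN), e.g. in loader.conf:
 *
 *	kern.ipc.shmmni=512
 *	kern.ipc.shmseg=256
 *
 * The CTLFLAG_RW knobs can also be changed at runtime with sysctl(8):
 *
 *	sysctl kern.ipc.shmmax=134217728
 */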

static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);
947