xref: /freebsd/sys/kern/sysv_shm.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
/* $FreeBSD$ */
/*	$NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_rlimit.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

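/*
 * Per-segment state bits, kept in shm_perm.mode alongside the low
 * ACCESSPERMS permission bits.
 */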
#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds	*shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

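/*
 * Per-process attach state, one entry per possible attach; an array of
 * these hangs off p->p_vmspace->vm_shm.  shmid == -1 marks a free slot.
 */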
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void *));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	1024	/* XXX increase this, it's not in kva! */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	96
#endif
#ifndef SHMSEG
#define	SHMSEG	64
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct	shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

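/*
 * Linear scan of shmsegs[] for an allocated segment matching the given
 * key; returns its index, or -1 if none is found.
 */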
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

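/*
 * Decode a shmid into a segment index and sequence number, and return
 * the segment only if it is live (allocated and not removed) and the
 * sequence numbers match.
 */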
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

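/*
 * Release the VM object backing a segment, free its handle, and update
 * the global page and segment accounting.
 */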
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

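/*
 * Unmap one attach from the process address space and drop the attach
 * count; destroy the segment once the last attach is gone and IPC_RMID
 * has been requested.
 */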
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

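/*
 * shmdt(2): detach the segment attached at shmaddr by searching the
 * per-process attach table for a matching address.
 */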
int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

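/*
 * shmat(2): map a segment into the calling process, allocating the
 * per-process attach table on first use.  An explicit shmaddr must be
 * SHMLBA-aligned unless SHM_RND is given.
 */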
int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		/* Drop the object reference taken above. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}

struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

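/*
 * Old-style (COMPAT_43) shmctl: handles IPC_STAT against the old
 * oshmid_ds layout and punts everything else to shmctl().
 */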
static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

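/*
 * shmctl(2): IPC_STAT copies the segment out, IPC_SET updates owner and
 * permissions, and IPC_RMID marks the segment removed, destroying it
 * immediately if nothing is attached.
 */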
int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

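/*
 * shmget() helper for the case where the key already names a segment:
 * wait out an in-progress creation, enforce IPC_CREAT|IPC_EXCL and
 * permissions, and check the requested size against the segment.
 */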
static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

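/*
 * shmget() helper that creates a new segment: validate the size against
 * the limits, find a free slot, and back the segment with a physical or
 * swap pager object depending on the shm_use_phys sysctl.
 */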
static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure the backing object has a pager allocated before we
	 * need it.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

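/*
 * shmget(2): look up the key (unless IPC_PRIVATE) and either reuse the
 * existing segment or allocate a fresh one, retrying the lookup if a
 * concurrent creation was in progress.
 *
 * A minimal userland sketch of the expected call sequence (illustrative
 * only, error handling omitted):
 *
 *	int id = shmget(ftok("/tmp", 'x'), 4096, IPC_CREAT | 0600);
 *	char *pp = shmat(id, NULL, 0);
 *	pp[0] = 1;
 *	shmdt(pp);
 *	shmctl(id, IPC_RMID, NULL);
 */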
int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}

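/*
 * shmsys(2): historic multiplexed entry point; dispatches to the real
 * handler through the shmcalls[] table above.
 */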
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

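/*
 * Called at fork time: give the child a copy of the parent's attach
 * table and bump the attach count on every mapped segment.
 */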
void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

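/*
 * Called at process exit: detach every remaining segment and free the
 * attach table.
 */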
void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

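/*
 * Grow shmsegs[] to shminfo.shmmni entries if the limit has been raised
 * since the array was last sized, copying the live entries and marking
 * the new tail free.
 */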
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

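/*
 * SYSINIT hook: size the initial shmsegs[] array from shminfo.shmmni
 * and mark every slot free.
 */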
static void
shminit(dummy)
	void *dummy;
{
	int i;

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);