/* $FreeBSD$ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_rlimit.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p,
    struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap,
    int mode, int segnum));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
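/*
 * Dispatch table for the legacy shmsys() multiplexer below: the "which"
 * argument indexes this array, so 0 = shmat, 1 = old-style shmctl,
 * 2 = shmdt, 3 = shmget, 4 = new-style shmctl.
 */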
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void *));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	1024	/* XXX increase this, it's not in kva! */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	96
#endif
#ifndef SHMSEG
#define	SHMSEG	64
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}
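/*
 * Drop one mapping: unmap the region from the process address space,
 * free the shmmap_state slot, stamp the detach time, and destroy the
 * segment once the last attachment of an IPC_RMID-removed segment
 * goes away.
 */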
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va,
	    shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}
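/*
 * Attach a segment to the calling process.  A caller-supplied address is
 * used as-is (or rounded down to an SHMLBA boundary under SHM_RND); with
 * a NULL shmaddr the mapping is placed above the text and data regions,
 * leaving the exact address to vm_map_find().
 */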
#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr
		    + MAXTSIZ + MAXDSIZ);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
	    0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		/* Drop the object reference taken above. */
		vm_object_deallocate(shm_handle->shm_object);
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}
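/*
 * Old-style shmid_ds layout, consumed by the COMPAT_43 oshmctl() below;
 * note the narrower pid and attach-count fields.
 */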
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
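/*
 * Create a new segment.  The slot is marked ALLOCATED|REMOVED before the
 * handle is malloc'd so that a concurrent shmget() on the same key sleeps
 * in shmget_existing() rather than creating a duplicate; SHMSEG_WANTED
 * records that such a waiter needs a wakeup once setup completes.
 */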
static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}
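/*
 * Legacy multiplexed entry point: uap->which selects the operation from
 * shmcalls[] above, and the remaining arguments are handed through to it
 * unchanged (hence the varargs note below).
 */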
int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* Initialize the new slots, not the old (smaller) array. */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(dummy)
	void *dummy;
{
	int i;

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);