/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl(struct thread *td, struct oshmctl_args *uap);

static int shmget_allocate_segment(struct thread *td,
    struct shmget_args *uap, int mode);
static int shmget_existing(struct thread *td, struct shmget_args *uap,
    int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static struct shmid_ds *shm_find_segment_by_shmidx(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void);
static int sysvshm_modload(struct module *, int, void *);
static int shmunload(void);
static void shmexit_myhook(struct vmspace *vm);
static void shmfork_myhook(struct proc *p1, struct proc *p2);
static int sysctl_shmsegs(SYSCTL_HANDLER_ARGS);

/*
 * Tuneable values.
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* Note: sysv shared memory is swap backed. */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;
static int shm_allow_removed;

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RDTUN, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RDTUN, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
    &shm_use_phys, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLFLAG_RD,
    NULL, 0, sysctl_shmsegs, "", "");

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return (i);
	return (-1);
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

static struct shmid_ds *
shm_find_segment_by_shmidx(int segnum)
{
	struct shmid_ds *shmseg;

	if (segnum < 0 || segnum >= shmalloced)
		return (NULL);
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0))
		return (NULL);
	return (shmseg);
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	size_t size;

	GIANT_REQUIRED;

	vm_object_deallocate(shmseg->shm_internal);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	GIANT_REQUIRED;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return (EINVAL);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	const void *shmaddr;
};
#endif

/*
 * MPSAFE
 */
int
shmdt(td, uap)
	struct thread *td;
	struct shmdt_args *uap;
{
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done2;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr) {
			break;
		}
	}
	if (i == shminfo.shmseg) {
		error = EINVAL;
		goto done2;
	}
	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
	mtx_unlock(&Giant);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	const void *shmaddr;
	int shmflg;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmat(td, shmid, shmaddr, shmflg)
	struct thread *td;
	int shmid;
	const void *shmaddr;
	int shmflg;
{
	struct proc *p = td->td_proc;
	int i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;
	int error = 0;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmmap_s = p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	error = ipcperm(td, &shmseg->shm_perm,
	    (shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done2;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done2;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (shmaddr) {
		flags |= MAP_FIXED;
		if (shmflg & SHM_RND) {
			attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)shmaddr;
		} else {
			error = EINVAL;
			goto done2;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
		 */
		PROC_LOCK(p);
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(p, RLIMIT_DATA));
		PROC_UNLOCK(p);
	}

	vm_object_reference(shmseg->shm_internal);
	rv = vm_map_find(&p->p_vmspace->vm_map, shmseg->shm_internal,
	    0, &attach_va, size, (flags & MAP_FIXED) ? 0 : 1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shmseg->shm_internal);
		error = ENOMEM;
		goto done2;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
	    attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	td->td_retval[0] = attach_va;
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmat(td, uap)
	struct thread *td;
	struct shmat_args *uap;
{
	return kern_shmat(td, uap->shmid, uap->shmaddr, uap->shmflg);
}

struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	u_short	shm_cpid;		/* pid, creator */
	u_short	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPSAFE
 */
static int
oshmctl(td, uap)
	struct thread *td;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error = 0;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout(&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			goto done2;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = ((sy_call_t *)shmctl)(td, uap);
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
#else
	return (EINVAL);
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * MPSAFE
 */
int
kern_shmctl(td, shmid, cmd, buf, bufsz)
	struct thread *td;
	int shmid;
	int cmd;
	void *buf;
	size_t *bufsz;
{
	int error = 0;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);

	mtx_lock(&Giant);
	switch (cmd) {
	case IPC_INFO:
		memcpy(buf, &shminfo, sizeof(shminfo));
		if (bufsz)
			*bufsz = sizeof(shminfo);
		td->td_retval[0] = shmalloced;
		goto done2;
	case SHM_INFO: {
		struct shm_info shm_info;
		shm_info.used_ids = shm_nused;
		shm_info.shm_rss = 0;	/* XXX where to get from ? */
		shm_info.shm_tot = 0;	/* XXX where to get from ? */
		shm_info.shm_swp = 0;	/* XXX where to get from ? */
		shm_info.swap_attempts = 0;	/* XXX where to get from ? */
		shm_info.swap_successes = 0;	/* XXX where to get from ? */
		memcpy(buf, &shm_info, sizeof(shm_info));
		if (bufsz)
			*bufsz = sizeof(shm_info);
		td->td_retval[0] = shmalloced;
		goto done2;
	}
	}
	if (cmd == SHM_STAT)
		shmseg = shm_find_segment_by_shmidx(shmid);
	else
		shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done2;
	}
	switch (cmd) {
	case SHM_STAT:
	case IPC_STAT:
		error = ipcperm(td, &shmseg->shm_perm, IPC_R);
		if (error)
			goto done2;
		memcpy(buf, shmseg, sizeof(struct shmid_ds));
		if (bufsz)
			*bufsz = sizeof(struct shmid_ds);
		if (cmd == SHM_STAT)
			td->td_retval[0] =
			    IXSEQ_TO_IPCID(shmid, shmseg->shm_perm);
		break;
	case IPC_SET: {
		struct shmid_ds *shmid;

		shmid = (struct shmid_ds *)buf;
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.uid = shmid->shm_perm.uid;
		shmseg->shm_perm.gid = shmid->shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (shmid->shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	}
	case IPC_RMID:
		error = ipcperm(td, &shmseg->shm_perm, IPC_M);
		if (error)
			goto done2;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done2:
	mtx_unlock(&Giant);
	return (error);
}

int
shmctl(td, uap)
	struct thread *td;
	struct shmctl_args *uap;
{
	int error = 0;
	struct shmid_ds buf;
	size_t bufsz;

	/* IPC_SET needs to copyin the buffer before calling kern_shmctl */
	if (uap->cmd == IPC_SET) {
		if ((error = copyin(uap->buf, &buf, sizeof(struct shmid_ds))))
			goto done;
	}

	error = kern_shmctl(td, uap->shmid, uap->cmd, (void *)&buf, &bufsz);
	if (error)
		goto done;

	/* Cases in which we need to copyout */
	switch (uap->cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		error = copyout(&buf, uap->buf, bufsz);
		break;
	}

done:
	if (error) {
		/* Invalidate the return value */
		td->td_retval[0] = -1;
	}
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(td, uap, mode, segnum)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated. Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return (error);
		return (EAGAIN);
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	error = ipcperm(td, &shmseg->shm_perm, mode);
	if (error)
		return (error);
	if (uap->size && uap->size > shmseg->shm_segsz)
		return (EINVAL);
	td->td_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

static int
shmget_allocate_segment(td, uap, mode)
	struct thread *td;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = td->td_ucred;
	struct shmid_ds *shmseg;
	vm_object_t shm_object;

	GIANT_REQUIRED;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* Any shmids left? */
		return (ENOSPC);
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	if (shm_last_free < 0) {
		shmrealloc();	/* Maybe expand the shmsegs[] array. */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return (ENOSPC);
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	VM_OBJECT_LOCK(shm_object);
	vm_object_clear_flag(shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shm_object);

	shmseg->shm_internal = shm_object;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = td->td_proc->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep. Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup(shmseg);
	}
	td->td_retval[0] = shmid;
	return (0);
}

/*
 * MPSAFE
 */
int
shmget(td, uap)
	struct thread *td;
	struct shmget_args *uap;
{
	int segnum, mode;
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	mtx_lock(&Giant);
	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(td, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done2;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done2;
		}
	}
	error = shmget_allocate_segment(td, uap, mode);
done2:
	mtx_unlock(&Giant);
	return (error);
}

/*
 * MPSAFE
 */
int
shmsys(td, uap)
	struct thread *td;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{
	int error;

	if (!jail_sysvipc_allowed && jailed(td->td_ucred))
		return (ENOSYS);
	if (uap->which < 0 ||
	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	mtx_lock(&Giant);
	error = (*shmcalls[uap->which])(td, &uap->a2);
	mtx_unlock(&Giant);
	return (error);
}

static void
shmfork_myhook(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
	p2->p_vmspace->vm_shm = shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

static void
shmexit_myhook(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	GIANT_REQUIRED;

	if ((base = vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		free(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* Mark the newly added slots free in the new array. */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit()
{
	int i;

	TUNABLE_INT_FETCH("kern.ipc.shmmaxpgs", &shminfo.shmall);
	for (i = PAGE_SIZE; i > 0; i--) {
		shminfo.shmmax = shminfo.shmall * i;
		if (shminfo.shmmax >= shminfo.shmall)
			break;
	}
	TUNABLE_INT_FETCH("kern.ipc.shmmin", &shminfo.shmmin);
	TUNABLE_INT_FETCH("kern.ipc.shmmni", &shminfo.shmmni);
	TUNABLE_INT_FETCH("kern.ipc.shmseg", &shminfo.shmseg);
	TUNABLE_INT_FETCH("kern.ipc.shm_use_phys", &shm_use_phys);

	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
	shmexit_hook = &shmexit_myhook;
	shmfork_hook = &shmfork_myhook;
}

static int
shmunload()
{

	if (shm_nused > 0)
		return (EBUSY);

	free(shmsegs, M_SHM);
	shmexit_hook = NULL;
	shmfork_hook = NULL;
	return (0);
}

static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{

	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
}

static int
sysvshm_modload(struct module *module, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD:
		shminit();
		break;
	case MOD_UNLOAD:
		error = shmunload();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static moduledata_t sysvshm_mod = {
	"sysvshm",
	&sysvshm_modload,
	NULL
};

SYSCALL_MODULE_HELPER(shmsys);
SYSCALL_MODULE_HELPER(shmat);
SYSCALL_MODULE_HELPER(shmctl);
SYSCALL_MODULE_HELPER(shmdt);
SYSCALL_MODULE_HELPER(shmget);

DECLARE_MODULE(sysvshm, sysvshm_mod,
	SI_SUB_SYSV_SHM, SI_ORDER_FIRST);
MODULE_VERSION(sysvshm, 1);