/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Inter-Process Communication Shared Memory Facility.
 *
 * See os/ipc.c for a description of common IPC functionality.
 *
 * Resource controls
 * -----------------
 *
 * Control:	zone.max-shm-ids (rc_zone_shmmni)
 * Description:	Maximum number of shared memory ids allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	project.max-shm-ids (rc_project_shmmni)
 * Description:	Maximum number of shared memory ids allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, one id
 *   is allocated.  If the id allocation doesn't succeed, shmget()
 *   fails and errno is set to ENOSPC.  Upon successful shmctl(,
 *   IPC_RMID) the id is deallocated.
 *
 * Control:	zone.max-shm-memory (rc_zone_shmmax)
 * Description:	Total amount of shared memory allowed a zone.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
 *
 * Control:	project.max-shm-memory (rc_project_shmmax)
 * Description:	Total amount of shared memory allowed a project.
 *
 *   When shmget() is used to allocate a shared memory segment, the
 *   segment's size is allocated against this limit.  If the space
 *   allocation doesn't succeed, shmget() fails and errno is set to
 *   EINVAL.  The size will be deallocated once the last process has
 *   detached the segment and the segment has been successfully
 *   shmctl(, IPC_RMID)ed.
 */
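
/*
 * Illustrative userland sketch (not part of this module) of how the
 * controls above surface through the API; the error checks simply
 * mirror the descriptions above, see also shmget(2) and shmctl(2):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <errno.h>
 *
 *	int id = shmget(IPC_PRIVATE, 8192, IPC_CREAT | 0600);
 *	if (id == -1 && errno == ENOSPC)
 *		... a max-shm-ids limit was hit ...
 *	else if (id == -1 && errno == EINVAL)
 *		... a max-shm-memory limit was hit, or the size was bad ...
 *	else if (id != -1) {
 *		... attach, use, detach ...
 *		(void) shmctl(id, IPC_RMID, NULL);
 *	}
 */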

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/sysmacros.h>
#include <sys/tuneable.h>
#include <sys/vm.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/lwpchan_impl.h>
#include <sys/avl.h>
#include <sys/modctl.h>
#include <sys/syscall.h>
#include <sys/task.h>
#include <sys/project.h>
#include <sys/policy.h>
#include <sys/zone.h>

#include <sys/ipc.h>
#include <sys/ipc_impl.h>
#include <sys/shm.h>
#include <sys/shm_impl.h>

#include <vm/hat.h>
#include <vm/seg.h>
#include <vm/as.h>
#include <vm/seg_vn.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/vpage.h>
#include <vm/seg_spt.h>

#include <c2/audit.h>

static int shmem_lock(struct anon_map *amp);
static void shmem_unlock(struct anon_map *amp, uint_t lck);
static void sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags,
	kshmid_t *id);
static void shm_rm_amp(struct anon_map *amp, uint_t lckflag);
static void shm_dtor(kipc_perm_t *);
static void shm_rmid(kipc_perm_t *);
static void shm_remove_zone(zoneid_t, void *);

/*
 * Semantics for share_page_table and ism_off:
 *
 * These are hooks in /etc/system - only for internal testing purposes.
 *
 * Setting share_page_table automatically turns on the SHM_SHARE_MMU (ISM)
 * flag in a call to shmat(2).  In other words, with share_page_table set,
 * you always get ISM, even if, say, DISM is specified.  It should really
 * be called "ism_on".
 *
 * Setting ism_off turns off the SHM_SHARE_MMU flag from the flags passed to
 * shmat(2).
 *
 * If both share_page_table and ism_off are set, share_page_table prevails.
 *
 * Although these tunables should probably be removed, they do have some
 * external exposure; as long as they exist, they should at least work
 * sensibly.
 */

int share_page_table;
int ism_off;

/*
 * The following tunables are obsolete.  Though for compatibility we
 * still read and interpret shminfo_shmmax and shminfo_shmmni (see
 * os/project.c), the preferred mechanism for administrating the IPC
 * Shared Memory facility is through the resource controls described at
 * the top of this file.
 */
size_t	shminfo_shmmax = 0x800000;	/* (obsolete) */
int	shminfo_shmmni = 100;		/* (obsolete) */
size_t	shminfo_shmmin = 1;		/* (obsolete) */
int	shminfo_shmseg = 6;		/* (obsolete) */

extern rctl_hndl_t rc_zone_shmmax;
extern rctl_hndl_t rc_zone_shmmni;
extern rctl_hndl_t rc_project_shmmax;
extern rctl_hndl_t rc_project_shmmni;
static ipc_service_t *shm_svc;
static zone_key_t shm_zone_key;
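
/*
 * Administrative sketch (values and project name hypothetical): the
 * obsolete tunables above were historically set in /etc/system, e.g.
 *
 *	set shmsys:shminfo_shmmax = 0x2000000
 *	set shmsys:shminfo_shmmni = 512
 *
 * while the preferred resource controls are administered with prctl(1),
 * e.g.
 *
 *	prctl -n project.max-shm-memory -v 2gb -r -i project user.dbms
 */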

/*
 * Module linkage information for the kernel.
 */
static uintptr_t shmsys(int, uintptr_t, uintptr_t, uintptr_t);

static struct sysent ipcshm_sysent = {
	4,
#ifdef	_SYSCALL32_IMPL
	SE_ARGC | SE_NOUNLOAD | SE_64RVAL,
#else	/* _SYSCALL32_IMPL */
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
#endif	/* _SYSCALL32_IMPL */
	(int (*)())shmsys
};

#ifdef	_SYSCALL32_IMPL
static struct sysent ipcshm_sysent32 = {
	4,
	SE_ARGC | SE_NOUNLOAD | SE_32RVAL1,
	(int (*)())shmsys
};
#endif	/* _SYSCALL32_IMPL */

static struct modlsys modlsys = {
	&mod_syscallops, "System V shared memory", &ipcshm_sysent
};

#ifdef	_SYSCALL32_IMPL
static struct modlsys modlsys32 = {
	&mod_syscallops32, "32-bit System V shared memory", &ipcshm_sysent32
};
#endif	/* _SYSCALL32_IMPL */

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlsys,
#ifdef	_SYSCALL32_IMPL
	&modlsys32,
#endif
	NULL
};


int
_init(void)
{
	int result;

	shm_svc = ipcs_create("shmids", rc_project_shmmni, rc_zone_shmmni,
	    sizeof (kshmid_t), shm_dtor, shm_rmid, AT_IPC_SHM,
	    offsetof(ipc_rqty_t, ipcq_shmmni));
	zone_key_create(&shm_zone_key, NULL, shm_remove_zone, NULL);

	if ((result = mod_install(&modlinkage)) == 0)
		return (0);

	(void) zone_key_delete(shm_zone_key);
	ipcs_destroy(shm_svc);

	return (result);
}

int
_fini(void)
{
	return (EBUSY);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Shmat (attach shared segment) system call.
 */
static int
shmat(int shmid, caddr_t uaddr, int uflags, uintptr_t *rvp)
{
	kshmid_t *sp;	/* shared memory header ptr */
	size_t	size;
	int	error = 0;
	proc_t *pp = curproc;
	struct as *as = pp->p_as;
	struct segvn_crargs	crargs;	/* segvn create arguments */
	kmutex_t	*lock;
	struct seg	*segspt = NULL;
	caddr_t		addr = uaddr;
	int		flags = (uflags & SHMAT_VALID_FLAGS_MASK);
	int		useISM;
	uchar_t		prot = PROT_ALL;
	int result;

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);
	if (error = ipcperm_access(&sp->shm_perm, SHM_R, CRED()))
		goto errret;
	if ((flags & SHM_RDONLY) == 0 &&
	    (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (spt_invalid(flags)) {
		error = EINVAL;
		goto errret;
	}
	if (ism_off)
		flags = flags & ~SHM_SHARE_MMU;
	if (share_page_table) {
		flags = flags & ~SHM_PAGEABLE;
		flags = flags | SHM_SHARE_MMU;
	}
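
	/*
	 * Net effect of the two tunables on the flags just computed
	 * (summarizing the block comment near their definitions):
	 *
	 *	share_page_table  ism_off	result
	 *	        0            0		flags used as passed in
	 *	        0            1		SHM_SHARE_MMU cleared; an
	 *					ISM request becomes a plain
	 *					attach
	 *	        1            -		SHM_SHARE_MMU forced on,
	 *					SHM_PAGEABLE (DISM) cleared
	 */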
	useISM = (spt_locked(flags) || spt_pageable(flags));
	if (useISM && (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED())))
		goto errret;
	if (useISM && isspt(sp)) {
		uint_t newsptflags = flags | spt_flags(sp->shm_sptseg);
		/*
		 * If trying to change an existing {D}ISM segment from ISM
		 * to DISM or vice versa, return error.  Note that this
		 * validation of flags needs to be done after the effect of
		 * tunables such as ism_off and share_page_table, for
		 * semantics that are consistent with the tunables' settings.
		 */
		if (spt_invalid(newsptflags)) {
			error = EINVAL;
			goto errret;
		}
	}
	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	size = sp->shm_amp->size;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);

	/* somewhere to record spt info for final detach */
	if (sp->shm_sptinfo == NULL)
		sp->shm_sptinfo = kmem_zalloc(sizeof (sptinfo_t), KM_SLEEP);

	as_rangelock(as);

	if (useISM) {
		/*
		 * Handle ISM
		 */
		uint_t	n, share_szc;
		size_t	share_size;
		struct	shm_data ssd;
		uintptr_t align_hint;

		n = page_num_pagesizes();
		if (n < 2) {	/* large pages aren't supported */
			as_rangeunlock(as);
			error = EINVAL;
			goto errret;
		}

		/*
		 * Pick a share pagesize to use, if (!isspt(sp)).
		 * Otherwise use the already chosen page size.
		 *
		 * For the initial shmat (!isspt(sp)), where sptcreate is
		 * called, map_pgsz is called to recommend a [D]ISM pagesize,
		 * important for systems which offer more than one potential
		 * [D]ISM pagesize.
		 * If the shmat is just to attach to an already created
		 * [D]ISM segment, then use the previously selected page size.
		 */
		if (!isspt(sp)) {
			share_size = map_pgsz(MAPPGSZ_ISM,
			    pp, addr, size, NULL);
			if (share_size == 0) {
				as_rangeunlock(as);
				error = EINVAL;
				goto errret;
			}
			share_szc = page_szc(share_size);
		} else {
			share_szc = sp->shm_sptseg->s_szc;
			share_size = page_get_pagesize(share_szc);
		}
		size = P2ROUNDUP(size, share_size);

		align_hint = share_size;
#if defined(__i386) || defined(__amd64)
		/*
		 * For 64 bit amd64, we want to share an entire page table
		 * if possible.  We know (ugh) that there are 512 entries
		 * in a page table.  The number for 32 bit non-PAE should be
		 * 1024, but I'm not going to special case that.  Note using
		 * 512 won't cause a failure below; it retries with
		 * align_hint set to share_size.
		 */
		while (size >= 512 * (uint64_t)align_hint)
			align_hint *= 512;
#endif /* __i386 || __amd64 */
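
		/*
		 * Worked example of the loop above (assuming 4K base pages
		 * and a 2M large page, as on amd64): for a 3G segment with
		 * share_size = 2M, the first pass sees 3G >= 512 * 2M = 1G
		 * and raises align_hint to 1G; the second pass sees
		 * 3G < 512 * 1G and stops.  A 1G-aligned attach then lets
		 * the segment share whole page-table pages.
		 */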

#if defined(__sparcv9)
		if (addr == 0 && curproc->p_model == DATAMODEL_LP64) {
			/*
			 * If no address has been passed in, and this is a
			 * 64-bit process, we'll try to find an address
			 * in the predict-ISM zone.
			 */
			caddr_t predbase = (caddr_t)PREDISM_1T_BASE;
			size_t len = PREDISM_BOUND - PREDISM_1T_BASE;

			as_purge(as);
			if (as_gap(as, size + share_size, &predbase, &len,
			    AH_LO, (caddr_t)NULL) != -1) {
				/*
				 * We found an address which looks like a
				 * candidate.  We want to round it up, and
				 * then check that it's a valid user range.
				 * This assures that we won't fail below.
				 */
				addr = (caddr_t)P2ROUNDUP((uintptr_t)predbase,
				    share_size);

				if (valid_usr_range(addr, size, prot,
				    as, as->a_userlimit) != RANGE_OKAY) {
					addr = 0;
				}
			}
		}
#endif /* __sparcv9 */

		if (addr == 0) {
			for (;;) {
				addr = (caddr_t)align_hint;
				map_addr(&addr, size, 0ll, 1, MAP_ALIGN);
				if (addr != NULL || align_hint == share_size)
					break;
				align_hint = share_size;
			}
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
			ASSERT(((uintptr_t)addr & (align_hint - 1)) == 0);
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			/*
			 * Check that the address range
			 *  1) is properly aligned
			 *  2) is correct in unix terms
			 *  3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use spt aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & (share_size - 1)) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				/*
				 * We try to accommodate processors which
				 * may not support execute permissions on
				 * all ISM segments by trying the check
				 * again but without PROT_EXEC.
				 */
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len, AH_LO,
			    (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		if (!isspt(sp)) {
			error = sptcreate(size, &segspt, sp->shm_amp, prot,
			    flags, share_szc);
			if (error) {
				as_rangeunlock(as);
				goto errret;
			}
			sp->shm_sptinfo->sptas = segspt->s_as;
			sp->shm_sptseg = segspt;
			sp->shm_sptprot = prot;
			sp->shm_lkcnt = 0;
		} else if ((prot & sp->shm_sptprot) != sp->shm_sptprot) {
			/*
			 * Ensure we're attaching to an ISM segment with
			 * fewer or equal permissions than what we're
			 * allowed.  Fail if the segment has more
			 * permissions than what we're allowed.
			 */
			error = EACCES;
			as_rangeunlock(as);
			goto errret;
		}

		ssd.shm_sptseg = sp->shm_sptseg;
		ssd.shm_sptas = sp->shm_sptinfo->sptas;
		ssd.shm_amp = sp->shm_amp;
		error = as_map(as, addr, size, segspt_shmattach, &ssd);
		if (error == 0)
			sp->shm_ismattch++; /* keep count of ISM attaches */
	} else {

		/*
		 * Normal case.
		 */
		if (flags & SHM_RDONLY)
			prot &= ~PROT_WRITE;

		if (addr == 0) {
			/* Let the system pick the attach address */
			map_addr(&addr, size, 0ll, 1, 0);
			if (addr == NULL) {
				as_rangeunlock(as);
				error = ENOMEM;
				goto errret;
			}
		} else {
			/* Use the user-supplied attach address */
			caddr_t base;
			size_t len;

			if (flags & SHM_RND)
				addr = (caddr_t)((uintptr_t)addr &
				    ~(SHMLBA - 1));
			/*
			 * Check that the address range
			 *  1) is properly aligned
			 *  2) is correct in unix terms
			 *  3) is within an unmapped address segment
			 */
			base = addr;
			len = size;		/* use aligned size */
			/* XXX - in SunOS, is sp->shm_segsz */
			if ((uintptr_t)base & PAGEOFFSET) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
			result = valid_usr_range(base, len, prot, as,
			    as->a_userlimit);
			if (result == RANGE_BADPROT) {
				prot &= ~PROT_EXEC;
				result = valid_usr_range(base, len, prot, as,
				    as->a_userlimit);
			}
			as_purge(as);
			if (result != RANGE_OKAY ||
			    as_gap(as, len, &base, &len,
			    AH_LO, (caddr_t)NULL) != 0) {
				error = EINVAL;
				as_rangeunlock(as);
				goto errret;
			}
		}

		/* Initialize the create arguments and map the segment */
		crargs = *(struct segvn_crargs *)zfod_argsp;
		crargs.offset = 0;
		crargs.type = MAP_SHARED;
		crargs.amp = sp->shm_amp;
		crargs.prot = prot;
		crargs.maxprot = crargs.prot;
		crargs.flags = 0;

		error = as_map(as, addr, size, segvn_create, &crargs);
	}

	as_rangeunlock(as);
	if (error)
		goto errret;

	/* record shmem range for the detach */
	sa_add(pp, addr, (size_t)size, useISM ? SHMSA_ISM : 0, sp);
	*rvp = (uintptr_t)addr;

	sp->shm_atime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_hold(shm_svc, (kipc_perm_t *)sp);
errret:
	mutex_exit(lock);
	return (error);
}

static void
shm_dtor(kipc_perm_t *perm)
{
	kshmid_t *sp = (kshmid_t *)perm;
	uint_t cnt;
	size_t rsize;

	if (sp->shm_sptinfo) {
		if (isspt(sp))
			sptdestroy(sp->shm_sptinfo->sptas, sp->shm_amp);
		kmem_free(sp->shm_sptinfo, sizeof (sptinfo_t));
	}

	ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER);
	cnt = --sp->shm_amp->refcnt;
	ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
	ASSERT(cnt == 0);
	shm_rm_amp(sp->shm_amp, sp->shm_lkcnt);

	if (sp->shm_perm.ipc_id != IPC_ID_INVAL) {
		rsize = ptob(btopr(sp->shm_segsz));
		ipcs_lock(shm_svc);
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax -= rsize;
		sp->shm_perm.ipc_zone->zone_shmmax -= rsize;
		ipcs_unlock(shm_svc);
	}
}

/* ARGSUSED */
static void
shm_rmid(kipc_perm_t *perm)
{
	/* nothing to do */
}
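
/*
 * Illustrative userland sketch (not part of this module): the attach
 * count maintained by shmat()/shmdt() is visible via IPC_STAT, using
 * the standard shmid_ds fields (see shmctl(2)):
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		(void) printf("%lu attached, %lu bytes\n",
 *		    (ulong_t)ds.shm_nattch, (ulong_t)ds.shm_segsz);
 */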

/*
 * Shmctl system call.
 */
/* ARGSUSED */
static int
shmctl(int shmid, int cmd, void *arg)
{
	kshmid_t		*sp;	/* shared memory header ptr */
	STRUCT_DECL(shmid_ds, ds);	/* for SVR4 IPC_SET */
	int			error = 0;
	struct cred		*cr = CRED();
	kmutex_t		*lock;
	model_t			mdl = get_udatamodel();
	struct shmid_ds64	ds64;
	shmatt_t		nattch;

	STRUCT_INIT(ds, mdl);

	/*
	 * Perform pre- or non-lookup actions (e.g. copyins, RMID).
	 */
	switch (cmd) {
	case IPC_SET:
		if (copyin(arg, STRUCT_BUF(ds), STRUCT_SIZE(ds)))
			return (EFAULT);
		break;

	case IPC_SET64:
		if (copyin(arg, &ds64, sizeof (struct shmid_ds64)))
			return (EFAULT);
		break;

	case IPC_RMID:
		return (ipc_rmid(shm_svc, shmid, cr));
	}

	if ((lock = ipc_lookup(shm_svc, shmid, (kipc_perm_t **)&sp)) == NULL)
		return (EINVAL);

	switch (cmd) {
	/* Set ownership and permissions. */
	case IPC_SET:
		if (error = ipcperm_set(shm_svc, cr, &sp->shm_perm,
		    &STRUCT_BUF(ds)->shm_perm, mdl))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT:
		if (error = ipcperm_access(&sp->shm_perm, SHM_R, cr))
			break;

		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat(&STRUCT_BUF(ds)->shm_perm, &sp->shm_perm, mdl);
		STRUCT_FSET(ds, shm_segsz, sp->shm_segsz);
		STRUCT_FSETP(ds, shm_amp, NULL);	/* kernel addr */
		STRUCT_FSET(ds, shm_lkcnt, sp->shm_lkcnt);
		STRUCT_FSET(ds, shm_lpid, sp->shm_lpid);
		STRUCT_FSET(ds, shm_cpid, sp->shm_cpid);
		STRUCT_FSET(ds, shm_nattch, nattch);
		STRUCT_FSET(ds, shm_cnattch, sp->shm_ismattch);
		STRUCT_FSET(ds, shm_atime, sp->shm_atime);
		STRUCT_FSET(ds, shm_dtime, sp->shm_dtime);
		STRUCT_FSET(ds, shm_ctime, sp->shm_ctime);

		mutex_exit(lock);
		if (copyout(STRUCT_BUF(ds), arg, STRUCT_SIZE(ds)))
			return (EFAULT);

		return (0);

	case IPC_SET64:
		if (error = ipcperm_set64(shm_svc, cr,
		    &sp->shm_perm, &ds64.shmx_perm))
			break;
		sp->shm_ctime = gethrestime_sec();
		break;

	case IPC_STAT64:
		nattch = sp->shm_perm.ipc_ref - 1;

		ipcperm_stat64(&ds64.shmx_perm, &sp->shm_perm);
		ds64.shmx_segsz = sp->shm_segsz;
		ds64.shmx_lkcnt = sp->shm_lkcnt;
		ds64.shmx_lpid = sp->shm_lpid;
		ds64.shmx_cpid = sp->shm_cpid;
		ds64.shmx_nattch = nattch;
		ds64.shmx_cnattch = sp->shm_ismattch;
		ds64.shmx_atime = sp->shm_atime;
		ds64.shmx_dtime = sp->shm_dtime;
		ds64.shmx_ctime = sp->shm_ctime;

		mutex_exit(lock);
		if (copyout(&ds64, arg, sizeof (struct shmid_ds64)))
			return (EFAULT);

		return (0);

	/* Lock segment in memory */
	case SHM_LOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		if (!isspt(sp) && (sp->shm_lkcnt++ == 0)) {
			if (error = shmem_lock(sp->shm_amp)) {
				ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock,
				    RW_WRITER);
				cmn_err(CE_NOTE,
				    "shmctl - couldn't lock %ld pages into memory",
				    sp->shm_amp->size);
				ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock);
				error = ENOMEM;
				sp->shm_lkcnt--;
				shmem_unlock(sp->shm_amp, 0);
			}
		}
		break;

	/* Unlock segment */
	case SHM_UNLOCK:
		if ((error = secpolicy_lock_memory(cr)) != 0)
			break;

		if (!isspt(sp)) {
			if (sp->shm_lkcnt && (--sp->shm_lkcnt == 0)) {
				shmem_unlock(sp->shm_amp, 1);
			}
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	mutex_exit(lock);
	return (error);
}
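
/*
 * Illustrative userland sketch (not part of this module): the
 * SHM_LOCK/SHM_UNLOCK pair handled above; both require the privilege
 * checked by secpolicy_lock_memory(), and the third shmctl() argument
 * is not examined for these commands:
 *
 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
 *		... segment pages are now locked in memory ...
 *		(void) shmctl(id, SHM_UNLOCK, NULL);
 *	}
 */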

static void
shm_detach(proc_t *pp, segacct_t *sap)
{
	kshmid_t	*sp = sap->sa_id;
	size_t		len = sap->sa_len;
	caddr_t		addr = sap->sa_addr;

	/*
	 * Discard lwpchan mappings.
	 */
	if (pp->p_lcp != NULL)
		lwpchan_delete_mapping(pp, addr, addr + len);
	(void) as_unmap(pp->p_as, addr, len);

	/*
	 * Perform some detach-time accounting.
	 */
	(void) ipc_lock(shm_svc, sp->shm_perm.ipc_id);
	if (sap->sa_flags & SHMSA_ISM)
		sp->shm_ismattch--;
	sp->shm_dtime = gethrestime_sec();
	sp->shm_lpid = pp->p_pid;
	ipc_rele(shm_svc, (kipc_perm_t *)sp);	/* Drops lock */

	kmem_free(sap, sizeof (segacct_t));
}

static int
shmdt(caddr_t addr)
{
	proc_t *pp = curproc;
	segacct_t *sap, template;

	mutex_enter(&pp->p_lock);
	prbarrier(pp);			/* block /proc.  See shmgetid(). */

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((pp->p_segacct == NULL) ||
	    ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	if (sap->sa_addr != addr) {
		mutex_exit(&pp->p_lock);
		return (EINVAL);
	}
	avl_remove(pp->p_segacct, sap);
	mutex_exit(&pp->p_lock);

	shm_detach(pp, sap);

	return (0);
}

/*
 * Remove all shared memory segments associated with a given zone.
 * Called by zone_shutdown when the zone is halted.
 */
/*ARGSUSED1*/
static void
shm_remove_zone(zoneid_t zoneid, void *arg)
{
	ipc_remove_zone(shm_svc, zoneid);
}

/*
 * Shmget (create new shmem) system call.
 */
static int
shmget(key_t key, size_t size, int shmflg, uintptr_t *rvp)
{
	proc_t		*pp = curproc;
	kshmid_t	*sp;
	kmutex_t	*lock;
	int		error;

top:
	if (error = ipc_get(shm_svc, key, shmflg, (kipc_perm_t **)&sp, &lock))
		return (error);

	if (!IPC_FREE(&sp->shm_perm)) {
		/*
		 * A segment with the requested key exists.
		 */
		if (size > sp->shm_segsz) {
			mutex_exit(lock);
			return (EINVAL);
		}
	} else {
		/*
		 * A new segment should be created.
		 */
		size_t npages = btopr(size);
		size_t rsize = ptob(npages);

		/*
		 * Check rsize and the per-project and per-zone limit on
		 * shared memory.  Checking rsize handles both the size == 0
		 * case and the size < ULONG_MAX & PAGEMASK case (i.e.
		 * rounding up wraps a size_t).
		 */
		mutex_enter(&pp->p_lock);
		if (rsize == 0 ||
		    (rctl_test(rc_project_shmmax,
		    pp->p_task->tk_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    pp->p_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {

			mutex_exit(&pp->p_lock);
			mutex_exit(lock);
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		mutex_exit(&pp->p_lock);
		mutex_exit(lock);

		if (anon_resv(rsize) == 0) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (ENOMEM);
		}

		sp->shm_amp = anonmap_alloc(rsize, rsize);

		/*
		 * Store the original user's requested size, in bytes,
		 * rather than the page-aligned size.  The former is
		 * used for IPC_STAT and shmget() lookups.  The latter
		 * is saved in the anon_map structure and is used for
		 * calls to the vm layer.
		 */
		sp->shm_segsz = size;
		sp->shm_atime = sp->shm_dtime = 0;
		sp->shm_ctime = gethrestime_sec();
		sp->shm_lpid = (pid_t)0;
		sp->shm_cpid = curproc->p_pid;
		sp->shm_ismattch = 0;
		sp->shm_sptinfo = NULL;
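
		/*
		 * Example of the size accounting above (assuming 8K
		 * pages): shmget(key, 1000, ...) reserves rsize = 8192
		 * bytes of anonymous memory and charges 8192 against the
		 * shmmax controls, while shm_segsz stays 1000, which is
		 * what IPC_STAT reports and what later shmget() size
		 * checks compare against.
		 */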

		/*
		 * Check limits one last time, push id into global
		 * visibility, and update resource usage counts.
		 */
		if (error = ipc_commit_begin(shm_svc, key, shmflg,
		    (kipc_perm_t *)sp)) {
			if (error == EAGAIN)
				goto top;
			return (error);
		}

		if ((rctl_test(rc_project_shmmax,
		    sp->shm_perm.ipc_proj->kpj_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY) ||
		    (rctl_test(rc_zone_shmmax,
		    sp->shm_perm.ipc_zone->zone_rctls, pp, rsize,
		    RCA_SAFE) & RCT_DENY)) {
			ipc_cleanup(shm_svc, (kipc_perm_t *)sp);
			return (EINVAL);
		}
		sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax += rsize;
		sp->shm_perm.ipc_zone->zone_shmmax += rsize;

		lock = ipc_commit_end(shm_svc, &sp->shm_perm);
	}

#ifdef C2_AUDIT
	if (audit_active)
		audit_ipcget(AT_IPC_SHM, (void *)sp);
#endif

	*rvp = (uintptr_t)(sp->shm_perm.ipc_id);

	mutex_exit(lock);
	return (0);
}

/*
 * shmids system call.
 */
static int
shmids(int *buf, uint_t nids, uint_t *pnids)
{
	return (ipc_ids(shm_svc, buf, nids, pnids));
}

/*
 * System entry point for shmat, shmctl, shmdt, and shmget system calls.
 */
static uintptr_t
shmsys(int opcode, uintptr_t a0, uintptr_t a1, uintptr_t a2)
{
	int	error;
	uintptr_t r_val = 0;

	switch (opcode) {
	case SHMAT:
		error = shmat((int)a0, (caddr_t)a1, (int)a2, &r_val);
		break;
	case SHMCTL:
		error = shmctl((int)a0, (int)a1, (void *)a2);
		break;
	case SHMDT:
		error = shmdt((caddr_t)a0);
		break;
	case SHMGET:
		error = shmget((key_t)a0, (size_t)a1, (int)a2, &r_val);
		break;
	case SHMIDS:
		error = shmids((int *)a0, (uint_t)a1, (uint_t *)a2);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error)
		return ((uintptr_t)set_errno(error));

	return (r_val);
}

/*
 * segacct_t comparator
 * This works as expected, with one minor change: the first of two real
 * segments with equal addresses is considered to be 'greater than' the
 * second.  We only return equal when searching using a template, in
 * which case we explicitly set the template segment's length to 0
 * (which is invalid for a real segment).
 */
static int
shm_sacompar(const void *x, const void *y)
{
	segacct_t *sa1 = (segacct_t *)x;
	segacct_t *sa2 = (segacct_t *)y;

	if (sa1->sa_addr < sa2->sa_addr) {
		return (-1);
	} else if (sa2->sa_len != 0) {
		if (sa1->sa_addr >= sa2->sa_addr + sa2->sa_len) {
			return (1);
		} else if (sa1->sa_len != 0) {
			return (1);
		} else {
			return (0);
		}
	} else if (sa1->sa_addr > sa2->sa_addr) {
		return (1);
	} else {
		return (0);
	}
}
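
/*
 * Worked example of the comparator (addresses hypothetical): with a
 * tree node covering [0x10000, 0x10000 + 0x2000) and a template
 * { sa_addr = 0x11000, sa_len = 0 }, avl_find() compares the template
 * (sa1) against the node (sa2), falls into the (sa2->sa_len != 0) arm,
 * and returns 0: a template matches any node whose range contains its
 * address.  Callers that need an exact match, such as shmdt(), then
 * verify sa_addr themselves.
 */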

/*
 * add this record to the segacct list.
 */
static void
sa_add(struct proc *pp, caddr_t addr, size_t len, ulong_t flags, kshmid_t *id)
{
	segacct_t *nsap;
	avl_tree_t *tree = NULL;
	avl_index_t where;

	nsap = kmem_alloc(sizeof (segacct_t), KM_SLEEP);
	nsap->sa_addr = addr;
	nsap->sa_len = len;
	nsap->sa_flags = flags;
	nsap->sa_id = id;

	if (pp->p_segacct == NULL)
		tree = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);			/* block /proc.  See shmgetid(). */

	if (pp->p_segacct == NULL) {
		avl_create(tree, shm_sacompar, sizeof (segacct_t),
		    offsetof(segacct_t, sa_tree));
		pp->p_segacct = tree;
	} else if (tree) {
		kmem_free(tree, sizeof (avl_tree_t));
	}

	/*
	 * We can ignore the result of avl_find, as the comparator will
	 * never return equal for segments with non-zero length.  This
	 * is a necessary hack to get around the fact that we do, in
	 * fact, have duplicate keys.
	 */
	(void) avl_find(pp->p_segacct, nsap, &where);
	avl_insert(pp->p_segacct, nsap, where);

	mutex_exit(&pp->p_lock);
}

/*
 * Duplicate parent's segacct records in child.
 */
void
shmfork(struct proc *ppp, struct proc *cpp)
{
	segacct_t *sap;
	kshmid_t *sp;
	kmutex_t *mp;

	ASSERT(ppp->p_segacct != NULL);

	/*
	 * We are the only lwp running in the parent so nobody can
	 * mess with our p_segacct list.  Thus it is safe to traverse
	 * the list without holding p_lock.  This is essential because
	 * we can't hold p_lock during a KM_SLEEP allocation.
	 */
	for (sap = (segacct_t *)avl_first(ppp->p_segacct); sap != NULL;
	    sap = (segacct_t *)AVL_NEXT(ppp->p_segacct, sap)) {
		sa_add(cpp, sap->sa_addr, sap->sa_len, sap->sa_flags,
		    sap->sa_id);
		sp = sap->sa_id;
		mp = ipc_lock(shm_svc, sp->shm_perm.ipc_id);
		if (sap->sa_flags & SHMSA_ISM)
			sp->shm_ismattch++;
		ipc_hold(shm_svc, (kipc_perm_t *)sp);
		mutex_exit(mp);
	}
}

/*
 * Detach shared memory segments from exiting process.
 */
void
shmexit(struct proc *pp)
{
	segacct_t *sap;
	avl_tree_t *tree;
	void *cookie = NULL;

	ASSERT(pp->p_segacct != NULL);

	mutex_enter(&pp->p_lock);
	prbarrier(pp);
	tree = pp->p_segacct;
	pp->p_segacct = NULL;
	mutex_exit(&pp->p_lock);

	while ((sap = avl_destroy_nodes(tree, &cookie)) != NULL)
		(void) shm_detach(pp, sap);

	avl_destroy(tree);
	kmem_free(tree, sizeof (avl_tree_t));
}

/*
 * At this time pages should be in memory, so just lock them.
 */
static void
lock_again(size_t npages, struct anon_map *amp)
{
	struct anon *ap;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	ulong_t anon_idx;
	anon_sync_obj_t cookie;

	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);

	for (anon_idx = 0; npages != 0; anon_idx++, npages--) {

		anon_array_enter(amp, anon_idx, &cookie);
		ap = anon_get_ptr(amp->ahp, anon_idx);
		swap_xlate(ap, &vp, &off);
		anon_array_exit(&cookie);

		pp = page_lookup(vp, (u_offset_t)off, SE_SHARED);
		if (pp == NULL) {
			panic("lock_again: page not in the system");
			/*NOTREACHED*/
		}
		(void) page_pp_lock(pp, 0, 0);
		page_unlock(pp);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
}

/*
 * check if this segment is already locked.
 */
/*ARGSUSED*/
static int
check_locked(struct as *as, struct segvn_data *svd, size_t npages)
{
	struct vpage *vpp = svd->vpage;
	size_t i;

	if (svd->vpage == NULL)
		return (0);		/* unlocked */

	SEGVN_LOCK_ENTER(as, &svd->lock, RW_READER);
	for (i = 0; i < npages; i++, vpp++) {
		if (VPP_ISPPLOCK(vpp) == 0) {
			SEGVN_LOCK_EXIT(as, &svd->lock);
			return (1);	/* partially locked */
		}
	}
	SEGVN_LOCK_EXIT(as, &svd->lock);
	return (2);			/* locked */
}


/*
 * Attach the shared memory segment to the process
 * address space and lock the pages.
 */
static int
shmem_lock(struct anon_map *amp)
{
	size_t npages = btopr(amp->size);
	struct seg *seg;
	struct as *as;
	struct segvn_crargs crargs;
	struct segvn_data *svd;
	proc_t *p = curproc;
	caddr_t addr;
	uint_t error, ret;
	caddr_t seg_base;
	size_t	seg_sz;

	as = p->p_as;
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	/* check if shared memory is already attached */
	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
		svd = (struct segvn_data *)seg->s_data;
		if ((seg->s_ops == &segvn_ops) && (svd->amp == amp) &&
		    (amp->size == seg->s_size)) {
			switch (ret = check_locked(as, svd, npages)) {
			case 0:			/* unlocked */
			case 1:			/* partially locked */
				seg_base = seg->s_base;
				seg_sz = seg->s_size;

				AS_LOCK_EXIT(as, &as->a_lock);
				if ((error = as_ctl(as, seg_base, seg_sz,
				    MC_LOCK, 0, 0, NULL, 0)) == 0)
					lock_again(npages, amp);
				(void) as_ctl(as, seg_base, seg_sz, MC_UNLOCK,
				    0, 0, NULL, NULL);
				return (error);
			case 2:			/* locked */
				AS_LOCK_EXIT(as, &as->a_lock);
				lock_again(npages, amp);
				return (0);
			default:
				cmn_err(CE_WARN, "shmem_lock: deflt %d", ret);
				break;
			}
		}
	}
	AS_LOCK_EXIT(as, &as->a_lock);

	/* attach shm segment to our address space */
	as_rangelock(as);
	map_addr(&addr, amp->size, 0ll, 1, 0);
	if (addr == NULL) {
		as_rangeunlock(as);
		return (ENOMEM);
	}

	/* Initialize the create arguments and map the segment */
	crargs = *(struct segvn_crargs *)zfod_argsp;	/* structure copy */
	crargs.offset = (u_offset_t)0;
	crargs.type = MAP_SHARED;
	crargs.amp = amp;
	crargs.prot = PROT_ALL;
	crargs.maxprot = crargs.prot;
	crargs.flags = 0;

	error = as_map(as, addr, amp->size, segvn_create, &crargs);
	as_rangeunlock(as);
	if (!error) {
		if ((error = as_ctl(as, addr, amp->size, MC_LOCK, 0, 0,
		    NULL, 0)) == 0) {
			lock_again(npages, amp);
		}
		(void) as_unmap(as, addr, amp->size);
	}
	return (error);
}

/*
 * Unlock shared memory
 */
static void
shmem_unlock(struct anon_map *amp, uint_t lck)
{
	struct anon *ap;
	pgcnt_t npages = btopr(amp->size);
	struct vnode *vp;
	struct page *pp;
	anoff_t off;
	ulong_t anon_idx;

	for (anon_idx = 0; anon_idx < npages; anon_idx++) {

		if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
			if (lck) {
				panic("shmem_unlock: null app");
				/*NOTREACHED*/
			}
			continue;
		}
		swap_xlate(ap, &vp, &off);
		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			if (lck) {
				panic("shmem_unlock: page not in the system");
				/*NOTREACHED*/
			}
			continue;
		}
		if (pp->p_lckcnt) {
			page_pp_unlock(pp, 0, 0);
		}
		page_unlock(pp);
	}
}

/*
 * We call this routine when we have removed all references to this
 * amp.  This means all shmdt()s and the IPC_RMID have been done.
 */
static void
shm_rm_amp(struct anon_map *amp, uint_t lckflag)
{
	/*
	 * If we are finally deleting the
	 * shared memory, and if no one did
	 * the SHM_UNLOCK, we must do it now.
	 */
	shmem_unlock(amp, lckflag);

	/*
	 * Free up the anon_map.
	 */
	lgrp_shm_policy_fini(amp, NULL);
	if (amp->a_szc != 0) {
		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
		anon_shmap_free_pages(amp, 0, amp->size);
		ANON_LOCK_EXIT(&amp->a_rwlock);
	} else {
		anon_free(amp->ahp, 0, amp->size);
	}
	anon_unresv(amp->swresv);
	anonmap_free(amp);
}

/*
 * Return the shared memory id for the process's virtual address.
 * Return SHMID_NONE if addr is not within a SysV shared memory segment.
 * Return SHMID_FREE if addr's SysV shared memory segment's id has been freed.
 *
 * shmgetid() is called from code in /proc with the process locked but
 * with pp->p_lock not held.  The address space lock is held, so we
 * cannot grab pp->p_lock here due to lock-ordering constraints.
 * Because of all this, modifications to the p_segacct list must only
 * be made after calling prbarrier() to ensure the process is not locked.
 * See shmdt() and sa_add(), above.  shmgetid() may also be called on a
 * thread's own process without the process locked.
 */
int
shmgetid(proc_t *pp, caddr_t addr)
{
	segacct_t *sap, template;

	ASSERT(MUTEX_NOT_HELD(&pp->p_lock));
	ASSERT((pp->p_proc_flag & P_PR_LOCK) || pp == curproc);

	if (pp->p_segacct == NULL)
		return (SHMID_NONE);

	template.sa_addr = addr;
	template.sa_len = 0;
	if ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)
		return (SHMID_NONE);

	if (IPC_FREE(&sap->sa_id->shm_perm))
		return (SHMID_FREE);

	return (sap->sa_id->shm_perm.ipc_id);
}