Lines Matching +full:shared +full:- +full:memory
30 * University Copyright- Copyright (c) 1982, 1986, 1988
34 * University Acknowledgment- Portions of this document are derived from
40 * Inter-Process Communication Shared Memory Facility.
45 * -----------------
47 * Control: zone.max-shm-ids (rc_zone_shmmni)
48 * Description: Maximum number of shared memory ids allowed a zone.
50 * When shmget() is used to allocate a shared memory segment, one id
55 * Control: project.max-shm-ids (rc_project_shmmni)
56 * Description: Maximum number of shared memory ids allowed a project.
58 * When shmget() is used to allocate a shared memory segment, one id
63 * Control: zone.max-shm-memory (rc_zone_shmmax)
64 * Description: Total amount of shared memory allowed a zone.
66 * When shmget() is used to allocate a shared memory segment, the
73 * Control: project.max-shm-memory (rc_project_shmmax)
74 * Description: Total amount of shared memory allowed a project.
76 * When shmget() is used to allocate a shared memory segment, the
139 * These are hooks in /etc/system - only for internal testing purpose.
161 * Shared Memory facility is through the resource controls described at
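(Reference sketch, not part of shm.c: the four controls above are consumed by shmget(2) at segment-creation time. A minimal user-level caller looks like the following; one id is counted against the max-shm-ids controls, the page-rounded size is charged against the max-shm-memory controls, and the call simply fails if a control would be exceeded.)

#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int
main(void)
{
	size_t size = 64UL * 1024 * 1024;	/* request 64MB */
	int id;

	/* One id per segment; the size is charged after page rounding. */
	if ((id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600)) == -1) {
		perror("shmget");	/* e.g. a max-shm-* control was hit */
		return (1);
	}
	(void) printf("shmid = %d\n", id);
	return (0);
}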
200 &mod_syscallops, "System V shared memory", &ipcshm_sysent
205 &mod_syscallops32, "32-bit System V shared memory", &ipcshm_sysent32
251 * Shmat (attach shared segment) system call.
256 kshmid_t *sp; /* shared memory header ptr */ in shmat()
260 struct as *as = pp->p_as; in shmat()
272 if (error = ipcperm_access(&sp->shm_perm, SHM_R, CRED())) in shmat()
275 (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED()))) in shmat()
288 if (useISM && (error = ipcperm_access(&sp->shm_perm, SHM_W, CRED()))) in shmat()
291 uint_t newsptflags = flags | spt_flags(sp->shm_sptseg); in shmat()
304 ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER); in shmat()
305 size = sp->shm_amp->size; in shmat()
306 ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock); in shmat()
309 if (sp->shm_sptinfo == NULL) in shmat()
310 sp->shm_sptinfo = kmem_zalloc(sizeof (sptinfo_t), KM_SLEEP); in shmat()
343 share_szc = sp->shm_sptseg->s_szc; in shmat()
371 pp->p_model == DATAMODEL_LP64 && AS_TYPE_64BIT(as)) { in shmat()
374 * 64-bit process, we'll try to find an address in shmat()
375 * in the predict-ISM zone. in shmat()
378 size_t len = PREDISM_BOUND - PREDISM_1T_BASE; in shmat()
382 AH_LO, (caddr_t)NULL) != -1) { in shmat()
393 as, as->a_userlimit) != RANGE_OKAY) { in shmat()
413 ASSERT(((uintptr_t)addr & (align_hint - 1)) == 0); in shmat()
415 /* Use the user-supplied attach address */ in shmat()
427 /* XXX - in SunOS, is sp->shm_segsz */ in shmat()
428 if ((uintptr_t)base & (share_size - 1)) { in shmat()
434 as->a_userlimit); in shmat()
444 as->a_userlimit); in shmat()
457 error = sptcreate(size, &segspt, sp->shm_amp, prot, in shmat()
463 sp->shm_sptinfo->sptas = segspt->s_as; in shmat()
464 sp->shm_sptseg = segspt; in shmat()
465 sp->shm_sptprot = prot; in shmat()
466 } else if ((prot & sp->shm_sptprot) != sp->shm_sptprot) { in shmat()
478 ssd.shm_sptseg = sp->shm_sptseg; in shmat()
479 ssd.shm_sptas = sp->shm_sptinfo->sptas; in shmat()
480 ssd.shm_amp = sp->shm_amp; in shmat()
483 sp->shm_ismattch++; /* keep count of ISM attaches */ in shmat()
501 /* Use the user-supplied attach address */ in shmat()
507 ~(SHMLBA - 1)); in shmat()
516 /* XXX - in SunOS, is sp->shm_segsz */ in shmat()
523 as->a_userlimit); in shmat()
527 as->a_userlimit); in shmat()
543 crargs.amp = sp->shm_amp; in shmat()
559 sp->shm_atime = gethrestime_sec(); in shmat()
560 sp->shm_lpid = pp->p_pid; in shmat()
564 * Tell machine specific code that lwp has mapped shared memory in shmat()
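(Reference sketch, not from this file: the lines above cover both attach paths in shmat() -- ISM through sptcreate()/segspt and a regular segvn mapping. From user level the same choice is made with the flags passed to shmat(2); SHM_SHARE_MMU is the Solaris flag that requests ISM and is an assumption outside the standards.)

#include <sys/shm.h>

/*
 * Attach a segment, preferring ISM (SHM_SHARE_MMU) and falling back
 * to an ordinary attach if the ISM request is rejected.
 */
void *
attach_segment(int shmid)
{
	void *va = shmat(shmid, NULL, SHM_SHARE_MMU);

	if (va == (void *)-1)
		va = shmat(shmid, NULL, 0);
	return (va == (void *)-1 ? NULL : va);
}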
580 ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER); in shm_dtor()
581 anonmap_purge(sp->shm_amp); in shm_dtor()
582 ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock); in shm_dtor()
584 if (sp->shm_sptinfo) { in shm_dtor()
586 sptdestroy(sp->shm_sptinfo->sptas, sp->shm_amp); in shm_dtor()
587 sp->shm_lkcnt = 0; in shm_dtor()
589 kmem_free(sp->shm_sptinfo, sizeof (sptinfo_t)); in shm_dtor()
592 if (sp->shm_lkcnt > 0) { in shm_dtor()
593 shmem_unlock(sp, sp->shm_amp); in shm_dtor()
594 sp->shm_lkcnt = 0; in shm_dtor()
597 ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, RW_WRITER); in shm_dtor()
598 cnt = --sp->shm_amp->refcnt; in shm_dtor()
599 ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock); in shm_dtor()
603 if (sp->shm_perm.ipc_id != IPC_ID_INVAL) { in shm_dtor()
604 rsize = ptob(btopr(sp->shm_segsz)); in shm_dtor()
606 sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax -= rsize; in shm_dtor()
607 sp->shm_perm.ipc_zone_ref.zref_zone->zone_shmmax -= rsize; in shm_dtor()
626 kshmid_t *sp; /* shared memory header ptr */ in shmctl()
638 * Perform pre- or non-lookup actions (e.g. copyins, RMID). in shmctl()
661 if (error = ipcperm_set(shm_svc, cr, &sp->shm_perm, in shmctl()
662 &STRUCT_BUF(ds)->shm_perm, mdl)) in shmctl()
664 sp->shm_ctime = gethrestime_sec(); in shmctl()
668 if (error = ipcperm_access(&sp->shm_perm, SHM_R, cr)) in shmctl()
671 nattch = sp->shm_perm.ipc_ref - 1; in shmctl()
673 ipcperm_stat(&STRUCT_BUF(ds)->shm_perm, &sp->shm_perm, mdl); in shmctl()
674 STRUCT_FSET(ds, shm_segsz, sp->shm_segsz); in shmctl()
676 STRUCT_FSET(ds, shm_lkcnt, sp->shm_lkcnt); in shmctl()
677 STRUCT_FSET(ds, shm_lpid, sp->shm_lpid); in shmctl()
678 STRUCT_FSET(ds, shm_cpid, sp->shm_cpid); in shmctl()
680 STRUCT_FSET(ds, shm_cnattch, sp->shm_ismattch); in shmctl()
681 STRUCT_FSET(ds, shm_atime, sp->shm_atime); in shmctl()
682 STRUCT_FSET(ds, shm_dtime, sp->shm_dtime); in shmctl()
683 STRUCT_FSET(ds, shm_ctime, sp->shm_ctime); in shmctl()
693 &sp->shm_perm, &ds64.shmx_perm)) in shmctl()
695 sp->shm_ctime = gethrestime_sec(); in shmctl()
699 nattch = sp->shm_perm.ipc_ref - 1; in shmctl()
701 ipcperm_stat64(&ds64.shmx_perm, &sp->shm_perm); in shmctl()
702 ds64.shmx_segsz = sp->shm_segsz; in shmctl()
703 ds64.shmx_lkcnt = sp->shm_lkcnt; in shmctl()
704 ds64.shmx_lpid = sp->shm_lpid; in shmctl()
705 ds64.shmx_cpid = sp->shm_cpid; in shmctl()
707 ds64.shmx_cnattch = sp->shm_ismattch; in shmctl()
708 ds64.shmx_atime = sp->shm_atime; in shmctl()
709 ds64.shmx_dtime = sp->shm_dtime; in shmctl()
710 ds64.shmx_ctime = sp->shm_ctime; in shmctl()
718 /* Lock segment in memory */ in shmctl()
724 if (sp->shm_lkcnt >= USHRT_MAX) { in shmctl()
728 if (!isspt(sp) && (sp->shm_lkcnt++ == 0)) { in shmctl()
729 if (error = shmem_lock(sp, sp->shm_amp)) { in shmctl()
730 ANON_LOCK_ENTER(&sp->shm_amp->a_rwlock, in shmctl()
732 cmn_err(CE_NOTE, "shmctl - couldn't lock %ld" in shmctl()
733 " pages into memory", sp->shm_amp->size); in shmctl()
734 ANON_LOCK_EXIT(&sp->shm_amp->a_rwlock); in shmctl()
736 sp->shm_lkcnt--; in shmctl()
746 if (sp->shm_lkcnt && (--sp->shm_lkcnt == 0)) { in shmctl()
747 shmem_unlock(sp, sp->shm_amp); in shmctl()
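(Reference sketch, not from this file: the IPC_STAT and SHM_LOCK/SHM_UNLOCK handling above is reached through shmctl(2); locking is assumed to require the appropriate privilege.)

#include <sys/shm.h>
#include <stdio.h>

void
lock_and_stat(int shmid)
{
	struct shmid_ds ds;

	/* Lock the segment's pages in memory; a privileged operation. */
	if (shmctl(shmid, SHM_LOCK, NULL) == -1)
		perror("SHM_LOCK");

	/* IPC_STAT returns the bookkeeping filled in by the code above. */
	if (shmctl(shmid, IPC_STAT, &ds) == 0)
		(void) printf("segsz=%lu nattch=%lu\n",
		    (unsigned long)ds.shm_segsz,
		    (unsigned long)ds.shm_nattch);
}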
762 kshmid_t *sp = sap->sa_id; in shm_detach()
763 size_t len = sap->sa_len; in shm_detach()
764 caddr_t addr = sap->sa_addr; in shm_detach()
769 if (pp->p_lcp != NULL) in shm_detach()
771 (void) as_unmap(pp->p_as, addr, len); in shm_detach()
774 * Perform some detach-time accounting. in shm_detach()
776 (void) ipc_lock(shm_svc, sp->shm_perm.ipc_id); in shm_detach()
777 if (sap->sa_flags & SHMSA_ISM) in shm_detach()
778 sp->shm_ismattch--; in shm_detach()
779 sp->shm_dtime = gethrestime_sec(); in shm_detach()
780 sp->shm_lpid = pp->p_pid; in shm_detach()
792 mutex_enter(&pp->p_lock); in shmdt()
797 if ((pp->p_segacct == NULL) || in shmdt()
798 ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL)) { in shmdt()
799 mutex_exit(&pp->p_lock); in shmdt()
802 if (sap->sa_addr != addr) { in shmdt()
803 mutex_exit(&pp->p_lock); in shmdt()
806 avl_remove(pp->p_segacct, sap); in shmdt()
807 mutex_exit(&pp->p_lock); in shmdt()
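(Reference sketch, not from this file: shmdt(2) drives the shm_detach()/shmdt() lines above -- the segacct entry is removed under p_lock and the detach time and pid are recorded.)

#include <sys/shm.h>
#include <stdio.h>

void
detach_segment(void *va)
{
	/* va must be an address previously returned by shmat(). */
	if (shmdt(va) == -1)
		perror("shmdt");
}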
815 * Remove all shared memory segments associated with a given zone.
840 if (!IPC_FREE(&sp->shm_perm)) { in shmget()
844 if (size > sp->shm_segsz) { in shmget()
856 * Check rsize and the per-project and per-zone limit on in shmget()
857 * shared memory. Checking rsize handles both the size == 0 in shmget()
863 pp->p_task->tk_proj->kpj_rctls, pp, rsize, in shmget()
866 pp->p_zone->zone_rctls, pp, rsize, in shmget()
869 mutex_exit(&pp->p_lock); in shmget()
874 mutex_exit(&pp->p_lock); in shmget()
890 * ipcs_cleanup() -> shm_dtor() -> shm_rm_amp(). If in shmget()
895 sp->shm_amp = anonmap_alloc(rsize, rsize, ANON_SLEEP); in shmget()
896 sp->shm_amp->a_sp = sp; in shmget()
899 * rather than the page-aligned size. The former is in shmget()
904 sp->shm_segsz = size; in shmget()
905 sp->shm_atime = sp->shm_dtime = 0; in shmget()
906 sp->shm_ctime = gethrestime_sec(); in shmget()
907 sp->shm_lpid = (pid_t)0; in shmget()
908 sp->shm_cpid = curproc->p_pid; in shmget()
909 sp->shm_ismattch = 0; in shmget()
910 sp->shm_sptinfo = NULL; in shmget()
923 sp->shm_perm.ipc_proj->kpj_rctls, pp, rsize, in shmget()
926 sp->shm_perm.ipc_zone_ref.zref_zone->zone_rctls, pp, rsize, in shmget()
931 sp->shm_perm.ipc_proj->kpj_data.kpd_shmmax += rsize; in shmget()
932 sp->shm_perm.ipc_zone_ref.zref_zone->zone_shmmax += rsize; in shmget()
934 lock = ipc_commit_end(shm_svc, &sp->shm_perm); in shmget()
940 *rvp = (uintptr_t)(sp->shm_perm.ipc_id); in shmget()
1005 if (sa1->sa_addr < sa2->sa_addr) { in shm_sacompar()
1006 return (-1); in shm_sacompar()
1007 } else if (sa2->sa_len != 0) { in shm_sacompar()
1008 if (sa1->sa_addr >= sa2->sa_addr + sa2->sa_len) { in shm_sacompar()
1010 } else if (sa1->sa_len != 0) { in shm_sacompar()
1015 } else if (sa1->sa_addr > sa2->sa_addr) { in shm_sacompar()
1033 nsap->sa_addr = addr; in sa_add()
1034 nsap->sa_len = len; in sa_add()
1035 nsap->sa_flags = flags; in sa_add()
1036 nsap->sa_id = id; in sa_add()
1038 if (pp->p_segacct == NULL) in sa_add()
1041 mutex_enter(&pp->p_lock); in sa_add()
1044 if (pp->p_segacct == NULL) { in sa_add()
1047 pp->p_segacct = tree; in sa_add()
1054 * never return equal for segments with non-zero length. This in sa_add()
1058 (void) avl_find(pp->p_segacct, nsap, &where); in sa_add()
1059 avl_insert(pp->p_segacct, nsap, where); in sa_add()
1061 mutex_exit(&pp->p_lock); in sa_add()
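(Illustrative restatement, not the kernel comparator verbatim: the sa_add()/shm_sacompar() lines above rely on a comparator that orders segments by start address yet treats a zero-length key as a point probe, so avl_find() can answer both "where to insert" and "which segment contains this address". The shape of that idea, with hypothetical range_t names, is roughly:)

#include <stddef.h>
#include <stdint.h>

typedef struct range {
	uintptr_t	r_addr;
	size_t		r_len;		/* 0 means "point lookup" */
} range_t;

/* First argument is the lookup key, second an entry already in the tree. */
static int
range_compar(const void *x, const void *y)
{
	const range_t *key = x;
	const range_t *node = y;

	if (key->r_addr < node->r_addr)
		return (-1);
	if (node->r_len != 0) {
		if (key->r_addr >= node->r_addr + node->r_len)
			return (1);
		/* Inside the node's range: equal only for a point probe. */
		return (key->r_len != 0 ? 1 : 0);
	}
	return (key->r_addr > node->r_addr ? 1 : 0);
}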
1074 ASSERT(ppp->p_segacct != NULL); in shmfork()
1082 for (sap = (segacct_t *)avl_first(ppp->p_segacct); sap != NULL; in shmfork()
1083 sap = (segacct_t *)AVL_NEXT(ppp->p_segacct, sap)) { in shmfork()
1084 sa_add(cpp, sap->sa_addr, sap->sa_len, sap->sa_flags, in shmfork()
1085 sap->sa_id); in shmfork()
1086 sp = sap->sa_id; in shmfork()
1087 mp = ipc_lock(shm_svc, sp->shm_perm.ipc_id); in shmfork()
1088 if (sap->sa_flags & SHMSA_ISM) in shmfork()
1089 sp->shm_ismattch++; in shmfork()
1096 * Detach shared memory segments from exiting process.
1105 ASSERT(pp->p_segacct != NULL); in shmexit()
1107 mutex_enter(&pp->p_lock); in shmexit()
1109 tree = pp->p_segacct; in shmexit()
1110 pp->p_segacct = NULL; in shmexit()
1111 mutex_exit(&pp->p_lock); in shmexit()
1121 * At this time pages should be in memory, so just lock them.
1133 mutex_enter(&sp->shm_mlock); in lock_again()
1134 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); in lock_again()
1135 for (anon_idx = 0; npages != 0; anon_idx++, npages--) { in lock_again()
1138 ap = anon_get_ptr(amp->ahp, anon_idx); in lock_again()
1149 ASSERT(pp->p_lckcnt > 0); in lock_again()
1153 ANON_LOCK_EXIT(&amp->a_rwlock); in lock_again()
1154 mutex_exit(&sp->shm_mlock); in lock_again()
1158 * Attach the shared memory segment to the process
1164 size_t npages = btopr(amp->size); in shmem_lock()
1173 sp->shm_lkpages = npages; in shmem_lock()
1184 error = as_map(as, 0x0, amp->size, segvn_create, &crargs); in shmem_lock()
1186 if ((error = as_ctl(as, 0x0, amp->size, MC_LOCK, 0, 0, in shmem_lock()
1190 (void) as_unmap(as, 0x0, amp->size); in shmem_lock()
1198 * Unlock shared memory
1204 pgcnt_t npages = sp->shm_lkpages; in shmem_unlock()
1213 proj = sp->shm_perm.ipc_proj; in shmem_unlock()
1214 mutex_enter(&sp->shm_mlock); in shmem_unlock()
1215 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); in shmem_unlock()
1219 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) { in shmem_unlock()
1234 ASSERT(pp->p_lckcnt > 0); in shmem_unlock()
1236 if (pp->p_lckcnt == 0) in shmem_unlock()
1246 ANON_LOCK_EXIT(&amp->a_rwlock); in shmem_unlock()
1247 mutex_exit(&sp->shm_mlock); in shmem_unlock()
1257 struct anon_map *amp = sp->shm_amp; in shm_rm_amp()
1260 zone = sp->shm_perm.ipc_zone_ref.zref_zone; in shm_rm_amp()
1266 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); in shm_rm_amp()
1267 if (amp->a_szc != 0) { in shm_rm_amp()
1268 anon_shmap_free_pages(amp, 0, amp->size); in shm_rm_amp()
1270 anon_free(amp->ahp, 0, amp->size); in shm_rm_amp()
1272 ANON_LOCK_EXIT(&amp->a_rwlock); in shm_rm_amp()
1273 anon_unresv_zone(amp->swresv, zone); in shm_rm_amp()
1278 * Return the shared memory id for the process's virtual address.
1279 * Return SHMID_NONE if addr is not within a SysV shared memory segment.
1280 * Return SHMID_FREE if addr's SysV shared memory segment's id has been freed.
1283 * with pp->p_lock not held. The address space lock is held, so we
1284 * cannot grab pp->p_lock here due to lock-ordering constraints.
1295 ASSERT(MUTEX_NOT_HELD(&pp->p_lock)); in shmgetid()
1296 ASSERT((pp->p_proc_flag & P_PR_LOCK) || pp == curproc); in shmgetid()
1298 if (pp->p_segacct == NULL) in shmgetid()
1303 if ((sap = avl_find(pp->p_segacct, &template, NULL)) == NULL) in shmgetid()
1306 if (IPC_FREE(&sap->sa_id->shm_perm)) in shmgetid()
1309 return (sap->sa_id->shm_perm.ipc_id); in shmgetid()