/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "umem_base.h"
#include "vmem_base.h"

#include <unistd.h>

/*
 * The following functions are for pre- and post-fork1(2) handling. See
 * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
 */

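/*
 * fork1(2) runs these handlers in the order registered by
 * umem_forkhandler_init(), below: umem_lockup() in the parent just
 * before the fork, then umem_release() in the parent and
 * umem_release_child() in the child just after it.  Because the
 * forking thread holds every lock in the library across the fork,
 * the child never inherits a lock owned by a thread that does not
 * exist in it.
 */

/*
 * Grab all of a single cache's locks: every per-CPU cc_lock, then the
 * depot lock, then the cache lock itself.
 */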
static void
umem_lockup_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_lock(&cp->cache_cpu[idx].cc_lock);

	(void) mutex_lock(&cp->cache_depot_lock);
	(void) mutex_lock(&cp->cache_lock);
}

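/*
 * Drop a single cache's locks in the reverse of the order
 * umem_lockup_cache() acquired them.
 */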
static void
umem_release_cache(umem_cache_t *cp)
{
	int idx;
	int ncpus = cp->cache_cpu_mask + 1;

	(void) mutex_unlock(&cp->cache_lock);
	(void) mutex_unlock(&cp->cache_depot_lock);

	for (idx = 0; idx < ncpus; idx++)
		(void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
}

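/*
 * Grab a log's per-CPU locks, then its global lock.  A log that was
 * never enabled has a NULL header and nothing to lock.
 */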
static void
umem_lockup_log_header(umem_log_header_t *lhp)
{
	int idx;
	if (lhp == NULL)
		return;
	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);

	(void) mutex_lock(&lhp->lh_lock);
}

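/*
 * Drop a log's locks in the reverse of the order
 * umem_lockup_log_header() acquired them.
 */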
static void
umem_release_log_header(umem_log_header_t *lhp)
{
	int idx;
	if (lhp == NULL)
		return;

	(void) mutex_unlock(&lhp->lh_lock);

	for (idx = 0; idx < umem_max_ncpus; idx++)
		(void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
}

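/*
 * The pre-fork handler: acquire every lock in the library so that no
 * lock is held in mid-operation while the address space is duplicated.
 */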
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

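	/*
	 * Wake anyone waiting on umem_update_cv; the waiters must then
	 * reacquire umem_update_lock, which we hold across the fork, so
	 * no thread is left sleeping on the condition variable.
	 */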
	(void) cond_broadcast(&umem_update_cv);
}

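/*
 * The post-fork handler: undo umem_lockup().  In the child, also
 * repair update state owned by threads that did not survive the fork
 * (only the forking thread exists in the child).
 */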
static void
umem_do_release(int as_child)
{
	umem_cache_t *cp;
	int cleanup_update = 0;

	/*
	 * Clean up the update state if we are the child process and
	 * another thread was processing updates.
	 */
	if (as_child) {
		if (umem_update_thr != thr_self()) {
			umem_update_thr = 0;
			cleanup_update = 1;
		}
		if (umem_st_update_thr != thr_self()) {
			umem_st_update_thr = 0;
			cleanup_update = 1;
		}
	}

	if (cleanup_update) {
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list. This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				cp->cache_uflags &= ~UMU_ACTIVE;
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);

	vmem_sbrk_release();
	vmem_release();

	(void) mutex_unlock(&umem_init_lock);
}

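/* The post-fork handler run in the parent. */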
static void
umem_release(void)
{
	umem_do_release(0);
}

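/* The post-fork handler run in the child. */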
static void
umem_release_child(void)
{
	umem_do_release(1);
}

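/*
 * Register the handlers with the C library.  This is expected to be
 * called once, during libumem's initialization.
 */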
void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to. The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}