xref: /titanic_41/usr/src/lib/libumem/common/umem_fork.c (revision 70025d765b044c6d8594bb965a2247a61e991a99)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "mtlib.h"
30 #include "umem_base.h"
31 #include "vmem_base.h"
32 
33 #include <unistd.h>
34 
35 /*
36  * The following functions are for pre- and post-fork1(2) handling.  See
37  * "Lock Ordering" in lib/libumem/common/umem.c for the lock ordering used.
38  */
39 
40 static void
41 umem_lockup_cache(umem_cache_t *cp)
42 {
43 	int idx;
44 	int ncpus = cp->cache_cpu_mask + 1;
45 
46 	for (idx = 0; idx < ncpus; idx++)
47 		(void) mutex_lock(&cp->cache_cpu[idx].cc_lock);
48 
49 	(void) mutex_lock(&cp->cache_depot_lock);
50 	(void) mutex_lock(&cp->cache_lock);
51 }
52 
53 static void
54 umem_release_cache(umem_cache_t *cp)
55 {
56 	int idx;
57 	int ncpus = cp->cache_cpu_mask + 1;
58 
59 	(void) mutex_unlock(&cp->cache_lock);
60 	(void) mutex_unlock(&cp->cache_depot_lock);
61 
62 	for (idx = 0; idx < ncpus; idx++)
63 		(void) mutex_unlock(&cp->cache_cpu[idx].cc_lock);
64 }
65 
66 static void
67 umem_lockup_log_header(umem_log_header_t *lhp)
68 {
69 	int idx;
70 	if (lhp == NULL)
71 		return;
72 	for (idx = 0; idx < umem_max_ncpus; idx++)
73 		(void) mutex_lock(&lhp->lh_cpu[idx].clh_lock);
74 
75 	(void) mutex_lock(&lhp->lh_lock);
76 }
77 
78 static void
79 umem_release_log_header(umem_log_header_t *lhp)
80 {
81 	int idx;
82 	if (lhp == NULL)
83 		return;
84 
85 	(void) mutex_unlock(&lhp->lh_lock);
86 
87 	for (idx = 0; idx < umem_max_ncpus; idx++)
88 		(void) mutex_unlock(&lhp->lh_cpu[idx].clh_lock);
89 }
90 
/*
 * pre-fork1(2) handler: acquire every libumem lock so that no lock is
 * held by another (non-forking) thread across the fork.  The acquisition
 * order here must match "Lock Ordering" in umem.c; umem_do_release()
 * drops the same locks in the reverse order.
 */
static void
umem_lockup(void)
{
	umem_cache_t *cp;

	(void) mutex_lock(&umem_init_lock);
	/*
	 * If another thread is busy initializing the library, we must
	 * wait for it to complete (by calling umem_init()) before allowing
	 * the fork() to proceed.
	 */
	if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
		(void) mutex_unlock(&umem_init_lock);
		(void) umem_init();
		(void) mutex_lock(&umem_init_lock);
	}

	/* vmem and the sbrk backend have their own lockup routines. */
	vmem_lockup();
	vmem_sbrk_lockup();

	(void) mutex_lock(&umem_cache_lock);
	(void) mutex_lock(&umem_update_lock);
	(void) mutex_lock(&umem_flags_lock);

	/*
	 * Lock every cache.  The list is walked backwards (cache_prev)
	 * here; umem_do_release() walks it forwards, so locks are
	 * released in the reverse order of acquisition.
	 */
	umem_lockup_cache(&umem_null_cache);
	for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
	    cp = cp->cache_prev)
		umem_lockup_cache(cp);

	umem_lockup_log_header(umem_transaction_log);
	umem_lockup_log_header(umem_content_log);
	umem_lockup_log_header(umem_failure_log);
	umem_lockup_log_header(umem_slab_log);

	/*
	 * Wake any thread waiting on umem_update_cv; it cannot run until
	 * the fork completes and umem_update_lock is released.
	 */
	(void) cond_broadcast(&umem_update_cv);

}
128 
/*
 * post-fork1(2) handler: release everything umem_lockup() acquired, in
 * the reverse order.  as_child is nonzero when running in the child
 * process, where threads other than the forking one no longer exist and
 * their in-flight update state must be repaired before the locks are
 * dropped.
 */
static void
umem_do_release(int as_child)
{
	umem_cache_t *cp;
	int cleanup_update = 0;

	/*
	 * Clean up the update state if we are the child process and
	 * another thread was processing updates.
	 */
	if (as_child) {
		if (umem_update_thr != thr_self()) {
			umem_update_thr = 0;
			cleanup_update = 1;
		}
		if (umem_st_update_thr != thr_self()) {
			umem_st_update_thr = 0;
			cleanup_update = 1;
		}
	}

	if (cleanup_update) {
		/* The reaping thread is gone in the child; mark reaping done. */
		umem_reaping = UMEM_REAP_DONE;

		for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
		    cp = cp->cache_next) {
			/* Drop any pending notification flag. */
			if (cp->cache_uflags & UMU_NOTIFY)
				cp->cache_uflags &= ~UMU_NOTIFY;

			/*
			 * If the cache is active, we just re-add it to
			 * the update list.  This will re-do any active
			 * updates on the cache, but that won't break
			 * anything.
			 *
			 * The worst that can happen is a cache has
			 * its magazines rescaled twice, instead of once.
			 */
			if (cp->cache_uflags & UMU_ACTIVE) {
				umem_cache_t *cnext, *cprev;

				ASSERT(cp->cache_unext == NULL &&
				    cp->cache_uprev == NULL);

				/* Tail-insert cp on the update list. */
				cp->cache_uflags &= ~UMU_ACTIVE;
				cp->cache_unext = cnext = &umem_null_cache;
				cp->cache_uprev = cprev =
				    umem_null_cache.cache_uprev;
				cnext->cache_uprev = cp;
				cprev->cache_unext = cp;
			}
		}
	}

	/* Log headers: reverse of the lockup order in umem_lockup(). */
	umem_release_log_header(umem_slab_log);
	umem_release_log_header(umem_failure_log);
	umem_release_log_header(umem_content_log);
	umem_release_log_header(umem_transaction_log);

	/*
	 * Caches were locked walking cache_prev; walking cache_next here
	 * releases them in the reverse order of acquisition.
	 */
	for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
	    cp = cp->cache_next)
		umem_release_cache(cp);
	umem_release_cache(&umem_null_cache);

	(void) mutex_unlock(&umem_flags_lock);
	(void) mutex_unlock(&umem_update_lock);
	(void) mutex_unlock(&umem_cache_lock);

	vmem_sbrk_release();
	vmem_release();

	(void) mutex_unlock(&umem_init_lock);
}
202 
/* post-fork1(2) handler for the parent process. */
static void
umem_release(void)
{
	umem_do_release(0);
}
208 
/* post-fork1(2) handler for the child process (repairs update state). */
static void
umem_release_child(void)
{
	umem_do_release(1);
}
214 
/*
 * Register the pre- and post-fork handlers with libc.  Called once
 * during library initialization.
 */
void
umem_forkhandler_init(void)
{
	/*
	 * There is no way to unregister these atfork functions,
	 * but we don't need to.  The dynamic linker and libc take
	 * care of unregistering them if/when the library is unloaded.
	 */
	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);
}
225