xref: /titanic_54/usr/src/uts/common/os/netstack.c (revision 0a0e9771ca0211c15f3ac4466b661c145feeb9e4)
1f4b3ec61Sdh155122 /*
2f4b3ec61Sdh155122  * CDDL HEADER START
3f4b3ec61Sdh155122  *
4f4b3ec61Sdh155122  * The contents of this file are subject to the terms of the
5f4b3ec61Sdh155122  * Common Development and Distribution License (the "License").
6f4b3ec61Sdh155122  * You may not use this file except in compliance with the License.
7f4b3ec61Sdh155122  *
8f4b3ec61Sdh155122  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9f4b3ec61Sdh155122  * or http://www.opensolaris.org/os/licensing.
10f4b3ec61Sdh155122  * See the License for the specific language governing permissions
11f4b3ec61Sdh155122  * and limitations under the License.
12f4b3ec61Sdh155122  *
13f4b3ec61Sdh155122  * When distributing Covered Code, include this CDDL HEADER in each
14f4b3ec61Sdh155122  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15f4b3ec61Sdh155122  * If applicable, add the following below this CDDL HEADER, with the
16f4b3ec61Sdh155122  * fields enclosed by brackets "[]" replaced with your own identifying
17f4b3ec61Sdh155122  * information: Portions Copyright [yyyy] [name of copyright owner]
18f4b3ec61Sdh155122  *
19f4b3ec61Sdh155122  * CDDL HEADER END
20f4b3ec61Sdh155122  */
21f4b3ec61Sdh155122 
22f4b3ec61Sdh155122 /*
23*0a0e9771SDarren Reed  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24f4b3ec61Sdh155122  * Use is subject to license terms.
25f4b3ec61Sdh155122  */
26f4b3ec61Sdh155122 
27f4b3ec61Sdh155122 #include <sys/param.h>
28f4b3ec61Sdh155122 #include <sys/sysmacros.h>
29f4b3ec61Sdh155122 #include <sys/vm.h>
30f4b3ec61Sdh155122 #include <sys/proc.h>
31f4b3ec61Sdh155122 #include <sys/tuneable.h>
32f4b3ec61Sdh155122 #include <sys/systm.h>
33f4b3ec61Sdh155122 #include <sys/cmn_err.h>
34f4b3ec61Sdh155122 #include <sys/debug.h>
35f4b3ec61Sdh155122 #include <sys/sdt.h>
36f4b3ec61Sdh155122 #include <sys/mutex.h>
37f4b3ec61Sdh155122 #include <sys/bitmap.h>
38f4b3ec61Sdh155122 #include <sys/atomic.h>
39f4b3ec61Sdh155122 #include <sys/kobj.h>
40f4b3ec61Sdh155122 #include <sys/disp.h>
41f4b3ec61Sdh155122 #include <vm/seg_kmem.h>
42f4b3ec61Sdh155122 #include <sys/zone.h>
43f4b3ec61Sdh155122 #include <sys/netstack.h>
44f4b3ec61Sdh155122 
45f4b3ec61Sdh155122 /*
46f4b3ec61Sdh155122  * The zone key we use so that the zones framework can tell us about new
47f4b3ec61Sdh155122  * zones, which we use to create new stacks.
48f4b3ec61Sdh155122  */
49f4b3ec61Sdh155122 static zone_key_t netstack_zone_key;
50f4b3ec61Sdh155122 
51f4b3ec61Sdh155122 static int	netstack_initialized = 0;
52f4b3ec61Sdh155122 
53f4b3ec61Sdh155122 /*
54f4b3ec61Sdh155122  * Track the registered netstacks.
55f4b3ec61Sdh155122  * The global lock protects
56f4b3ec61Sdh155122  * - ns_reg
57f4b3ec61Sdh155122  * - the list starting at netstack_head and following the netstack_next
58f4b3ec61Sdh155122  *   pointers.
59f4b3ec61Sdh155122  */
60f4b3ec61Sdh155122 static kmutex_t netstack_g_lock;
61f4b3ec61Sdh155122 
62f4b3ec61Sdh155122 /*
63f4b3ec61Sdh155122  * Registry of netstack modules with their create/shutdown/destroy functions.
64f4b3ec61Sdh155122  */
65f4b3ec61Sdh155122 static struct netstack_registry	ns_reg[NS_MAX];
66f4b3ec61Sdh155122 
67f4b3ec61Sdh155122 /*
68f4b3ec61Sdh155122  * Global list of existing stacks.  We use this when a new zone with
69f4b3ec61Sdh155122  * an exclusive IP instance is created.
70f4b3ec61Sdh155122  *
71f4b3ec61Sdh155122  * Note that in some cases a netstack_t needs to stay around after the zone
72f4b3ec61Sdh155122  * has gone away. This is because there might be outstanding references
73f4b3ec61Sdh155122  * (from TCP TIME_WAIT connections, IPsec state, etc). The netstack_t data
74f4b3ec61Sdh155122  * structure and all the foo_stack_t's hanging off of it will be cleaned up
75f4b3ec61Sdh155122  * when the last reference to it is dropped.
76f4b3ec61Sdh155122  * However, the same zone might be rebooted. That is handled using the
77f4b3ec61Sdh155122  * assumption that the zones framework picks a new zoneid each time a zone
78f4b3ec61Sdh155122  * is (re)booted. We assert for that condition in netstack_zone_create().
79f4b3ec61Sdh155122  * Thus the old netstack_t can take its time for things to time out.
80f4b3ec61Sdh155122  */
81f4b3ec61Sdh155122 static netstack_t *netstack_head;
82f4b3ec61Sdh155122 
83f4b3ec61Sdh155122 /*
84f4b3ec61Sdh155122  * To support kstat_create_netstack() using kstat_zone_add we need
85f4b3ec61Sdh155122  * to track both
86f4b3ec61Sdh155122  *  - all zoneids that use the global/shared stack
87f4b3ec61Sdh155122  *  - all kstats that have been added for the shared stack
88f4b3ec61Sdh155122  */
89f4b3ec61Sdh155122 struct shared_zone_list {
90f4b3ec61Sdh155122 	struct shared_zone_list *sz_next;
91f4b3ec61Sdh155122 	zoneid_t		sz_zoneid;
92f4b3ec61Sdh155122 };
93f4b3ec61Sdh155122 
94f4b3ec61Sdh155122 struct shared_kstat_list {
95f4b3ec61Sdh155122 	struct shared_kstat_list *sk_next;
96f4b3ec61Sdh155122 	kstat_t			 *sk_kstat;
97f4b3ec61Sdh155122 };
98f4b3ec61Sdh155122 
99f4b3ec61Sdh155122 static kmutex_t netstack_shared_lock;	/* protects the following two */
100f4b3ec61Sdh155122 static struct shared_zone_list	*netstack_shared_zones;
101f4b3ec61Sdh155122 static struct shared_kstat_list	*netstack_shared_kstats;
102f4b3ec61Sdh155122 
103f4b3ec61Sdh155122 static void	*netstack_zone_create(zoneid_t zoneid);
104f4b3ec61Sdh155122 static void	netstack_zone_shutdown(zoneid_t zoneid, void *arg);
105f4b3ec61Sdh155122 static void	netstack_zone_destroy(zoneid_t zoneid, void *arg);
106f4b3ec61Sdh155122 
107f4b3ec61Sdh155122 static void	netstack_shared_zone_add(zoneid_t zoneid);
108f4b3ec61Sdh155122 static void	netstack_shared_zone_remove(zoneid_t zoneid);
109f4b3ec61Sdh155122 static void	netstack_shared_kstat_add(kstat_t *ks);
110f4b3ec61Sdh155122 static void	netstack_shared_kstat_remove(kstat_t *ks);
111f4b3ec61Sdh155122 
11223f4867fSnordmark typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);
113f4b3ec61Sdh155122 
114bd41d0a8Snordmark static void	apply_all_netstacks(int, applyfn_t *);
115bd41d0a8Snordmark static void	apply_all_modules(netstack_t *, applyfn_t *);
116bd41d0a8Snordmark static void	apply_all_modules_reverse(netstack_t *, applyfn_t *);
117bd41d0a8Snordmark static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
118bd41d0a8Snordmark static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
119bd41d0a8Snordmark static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
120bd41d0a8Snordmark static boolean_t wait_for_zone_creator(netstack_t *, kmutex_t *);
121bd41d0a8Snordmark static boolean_t wait_for_nms_inprogress(netstack_t *, nm_state_t *,
122bd41d0a8Snordmark     kmutex_t *);
123bd41d0a8Snordmark 
124f4b3ec61Sdh155122 void
125f4b3ec61Sdh155122 netstack_init(void)
126f4b3ec61Sdh155122 {
127f4b3ec61Sdh155122 	mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
128f4b3ec61Sdh155122 	mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);
129f4b3ec61Sdh155122 
130f4b3ec61Sdh155122 	netstack_initialized = 1;
131f4b3ec61Sdh155122 
132f4b3ec61Sdh155122 	/*
133f4b3ec61Sdh155122 	 * We want to be informed each time a zone is created or
134f4b3ec61Sdh155122 	 * destroyed in the kernel, so we can maintain the
135f4b3ec61Sdh155122 	 * stack instance information.
136f4b3ec61Sdh155122 	 */
137f4b3ec61Sdh155122 	zone_key_create(&netstack_zone_key, netstack_zone_create,
138f4b3ec61Sdh155122 	    netstack_zone_shutdown, netstack_zone_destroy);
139f4b3ec61Sdh155122 }
140f4b3ec61Sdh155122 
141f4b3ec61Sdh155122 /*
142f4b3ec61Sdh155122  * Register a new module with the framework.
143f4b3ec61Sdh155122  * This registers interest in changes to the set of netstacks.
144f4b3ec61Sdh155122  * The createfn and destroyfn are required, but the shutdownfn can be
145f4b3ec61Sdh155122  * NULL.
146f4b3ec61Sdh155122  * Note that due to the current zsd implementation, when the create
147f4b3ec61Sdh155122  * function is called the zone isn't fully present, thus functions
148f4b3ec61Sdh155122  * like zone_find_by_* will fail, hence the create function cannot
149f4b3ec61Sdh155122  * use many zones kernel functions, including zcmn_err().
150f4b3ec61Sdh155122  */
151f4b3ec61Sdh155122 void
152f4b3ec61Sdh155122 netstack_register(int moduleid,
153f4b3ec61Sdh155122     void *(*module_create)(netstackid_t, netstack_t *),
154f4b3ec61Sdh155122     void (*module_shutdown)(netstackid_t, void *),
155f4b3ec61Sdh155122     void (*module_destroy)(netstackid_t, void *))
156f4b3ec61Sdh155122 {
157f4b3ec61Sdh155122 	netstack_t *ns;
158f4b3ec61Sdh155122 
159f4b3ec61Sdh155122 	ASSERT(netstack_initialized);
160f4b3ec61Sdh155122 	ASSERT(moduleid >= 0 && moduleid < NS_MAX);
161f4b3ec61Sdh155122 	ASSERT(module_create != NULL);
162f4b3ec61Sdh155122 
163bd41d0a8Snordmark 	/*
164bd41d0a8Snordmark 	 * Make instances created after this point in time run the create
165bd41d0a8Snordmark 	 * callback.
166bd41d0a8Snordmark 	 */
167f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
168f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_create == NULL);
169f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_flags == 0);
170f4b3ec61Sdh155122 	ns_reg[moduleid].nr_create = module_create;
171f4b3ec61Sdh155122 	ns_reg[moduleid].nr_shutdown = module_shutdown;
172f4b3ec61Sdh155122 	ns_reg[moduleid].nr_destroy = module_destroy;
173f4b3ec61Sdh155122 	ns_reg[moduleid].nr_flags = NRF_REGISTERED;
174f4b3ec61Sdh155122 
175f4b3ec61Sdh155122 	/*
176f4b3ec61Sdh155122 	 * Determine the set of stacks that exist before we drop the lock.
177bd41d0a8Snordmark 	 * Set NSS_CREATE_NEEDED for each of those.
178f4b3ec61Sdh155122 	 * netstacks which have been deleted will have NSS_CREATE_COMPLETED
179f4b3ec61Sdh155122 	 * set, but check NSF_CLOSING to be sure.
180f4b3ec61Sdh155122 	 */
181f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
182bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
183bd41d0a8Snordmark 
184f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
185f4b3ec61Sdh155122 		if (!(ns->netstack_flags & NSF_CLOSING) &&
186bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
187bd41d0a8Snordmark 			nms->nms_flags |= NSS_CREATE_NEEDED;
188f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__create__needed,
189f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
190f4b3ec61Sdh155122 		}
191f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
192f4b3ec61Sdh155122 	}
193f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
194f4b3ec61Sdh155122 
195f4b3ec61Sdh155122 	/*
196bd41d0a8Snordmark 	 * At this point in time a new instance can be created or an instance
197bd41d0a8Snordmark 	 * can be destroyed, or some other module can register or unregister.
198bd41d0a8Snordmark 	 * Make sure we either run all the create functions for this moduleid
199bd41d0a8Snordmark 	 * or we wait for any other creators for this moduleid.
200f4b3ec61Sdh155122 	 */
201bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_create);
202f4b3ec61Sdh155122 }
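
/*
 * Illustrative sketch of how a module is expected to use this interface;
 * "foo", NS_FOO, foo_stack_t and the foo_stack_* callbacks below are
 * hypothetical names used for illustration only, not identifiers from this
 * source tree. A module typically registers from its _init() routine and
 * unregisters from _fini():
 *
 *	static void *
 *	foo_stack_create(netstackid_t stackid, netstack_t *ns)
 *	{
 *		foo_stack_t *fs;
 *
 *		fs = kmem_zalloc(sizeof (*fs), KM_SLEEP);
 *		fs->fs_stackid = stackid;
 *		return (fs);
 *	}
 *
 *	static void
 *	foo_stack_destroy(netstackid_t stackid, void *arg)
 *	{
 *		kmem_free(arg, sizeof (foo_stack_t));
 *	}
 *
 *	int
 *	_init(void)
 *	{
 *		netstack_register(NS_FOO, foo_stack_create, NULL,
 *		    foo_stack_destroy);
 *		...
 *	}
 *
 *	int
 *	_fini(void)
 *	{
 *		...
 *		netstack_unregister(NS_FOO);
 *		...
 *	}
 *
 * Passing NULL for the shutdown callback is allowed, as noted above; the
 * create callback runs for every existing and future stack instance.
 */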
203f4b3ec61Sdh155122 
204f4b3ec61Sdh155122 void
205f4b3ec61Sdh155122 netstack_unregister(int moduleid)
206f4b3ec61Sdh155122 {
207f4b3ec61Sdh155122 	netstack_t *ns;
208f4b3ec61Sdh155122 
209f4b3ec61Sdh155122 	ASSERT(moduleid >= 0 && moduleid < NS_MAX);
210f4b3ec61Sdh155122 
211f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_create != NULL);
212f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);
213f4b3ec61Sdh155122 
214f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
215f4b3ec61Sdh155122 	/*
216f4b3ec61Sdh155122 	 * Determine the set of stacks that exist before we drop the lock.
217bd41d0a8Snordmark 	 * Set NSS_SHUTDOWN_NEEDED and NSS_DESTROY_NEEDED for each of those.
218bd41d0a8Snordmark 	 * That ensures that when we return all the callbacks for existing
219bd41d0a8Snordmark 	 * instances have completed. And since we set NRF_DYING no new
220bd41d0a8Snordmark 	 * instances can use this module.
221f4b3ec61Sdh155122 	 */
222f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
223bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
224bd41d0a8Snordmark 
225f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
226f4b3ec61Sdh155122 		if (ns_reg[moduleid].nr_shutdown != NULL &&
227bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
228bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
229bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
230f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
231f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
232f4b3ec61Sdh155122 		}
233f4b3ec61Sdh155122 		if ((ns_reg[moduleid].nr_flags & NRF_REGISTERED) &&
234f4b3ec61Sdh155122 		    ns_reg[moduleid].nr_destroy != NULL &&
235bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
236bd41d0a8Snordmark 		    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
237bd41d0a8Snordmark 			nms->nms_flags |= NSS_DESTROY_NEEDED;
238f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__needed,
239f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
240f4b3ec61Sdh155122 		}
241f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
242f4b3ec61Sdh155122 	}
243bd41d0a8Snordmark 	/*
244bd41d0a8Snordmark 	 * Prevent any new netstack from calling the registered create
245bd41d0a8Snordmark 	 * function, while keeping the function pointers in place until the
246bd41d0a8Snordmark 	 * shutdown and destroy callbacks are complete.
247bd41d0a8Snordmark 	 */
248bd41d0a8Snordmark 	ns_reg[moduleid].nr_flags |= NRF_DYING;
249f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
250f4b3ec61Sdh155122 
251bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_shutdown);
252bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_destroy);
253f4b3ec61Sdh155122 
254f4b3ec61Sdh155122 	/*
255bd41d0a8Snordmark 	 * Clear the nms_flags so that we can handle this module
256f4b3ec61Sdh155122 	 * being loaded again.
257bd41d0a8Snordmark 	 * Also remove the registered functions.
258f4b3ec61Sdh155122 	 */
259f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
260bd41d0a8Snordmark 	ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);
261bd41d0a8Snordmark 	ASSERT(ns_reg[moduleid].nr_flags & NRF_DYING);
262f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
263bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
264bd41d0a8Snordmark 
265f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
266bd41d0a8Snordmark 		if (nms->nms_flags & NSS_DESTROY_COMPLETED) {
267bd41d0a8Snordmark 			nms->nms_flags = 0;
268f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__done,
269f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
270f4b3ec61Sdh155122 		}
271f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
272f4b3ec61Sdh155122 	}
273f4b3ec61Sdh155122 
274f4b3ec61Sdh155122 	ns_reg[moduleid].nr_create = NULL;
275f4b3ec61Sdh155122 	ns_reg[moduleid].nr_shutdown = NULL;
276f4b3ec61Sdh155122 	ns_reg[moduleid].nr_destroy = NULL;
277f4b3ec61Sdh155122 	ns_reg[moduleid].nr_flags = 0;
278f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
279f4b3ec61Sdh155122 }
280f4b3ec61Sdh155122 
281f4b3ec61Sdh155122 /*
282f4b3ec61Sdh155122  * Look up and/or allocate a netstack for this zone.
283f4b3ec61Sdh155122  */
284f4b3ec61Sdh155122 static void *
285f4b3ec61Sdh155122 netstack_zone_create(zoneid_t zoneid)
286f4b3ec61Sdh155122 {
287f4b3ec61Sdh155122 	netstackid_t stackid;
288f4b3ec61Sdh155122 	netstack_t *ns;
289f4b3ec61Sdh155122 	netstack_t **nsp;
290f4b3ec61Sdh155122 	zone_t	*zone;
291f4b3ec61Sdh155122 	int i;
292f4b3ec61Sdh155122 
293f4b3ec61Sdh155122 	ASSERT(netstack_initialized);
294f4b3ec61Sdh155122 
295f4b3ec61Sdh155122 	zone = zone_find_by_id_nolock(zoneid);
296f4b3ec61Sdh155122 	ASSERT(zone != NULL);
297f4b3ec61Sdh155122 
298f4b3ec61Sdh155122 	if (zone->zone_flags & ZF_NET_EXCL) {
299f4b3ec61Sdh155122 		stackid = zoneid;
300f4b3ec61Sdh155122 	} else {
301f4b3ec61Sdh155122 		/* Use the stack instance shared with the global zone */
302f4b3ec61Sdh155122 		stackid = GLOBAL_NETSTACKID;
303f4b3ec61Sdh155122 	}
304f4b3ec61Sdh155122 
305f4b3ec61Sdh155122 	/* Allocate even if it isn't needed; simplifies locking */
306f4b3ec61Sdh155122 	ns = (netstack_t *)kmem_zalloc(sizeof (netstack_t), KM_SLEEP);
307f4b3ec61Sdh155122 
308f4b3ec61Sdh155122 	/* Check if there is a matching stack instance */
309f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
310f4b3ec61Sdh155122 	for (nsp = &netstack_head; *nsp != NULL;
311f4b3ec61Sdh155122 	    nsp = &((*nsp)->netstack_next)) {
312f4b3ec61Sdh155122 		if ((*nsp)->netstack_stackid == stackid) {
313f4b3ec61Sdh155122 			/*
314f4b3ec61Sdh155122 			 * Should never find a pre-existing exclusive stack
315f4b3ec61Sdh155122 			 */
316f4b3ec61Sdh155122 			ASSERT(stackid == GLOBAL_NETSTACKID);
317f4b3ec61Sdh155122 			kmem_free(ns, sizeof (netstack_t));
318f4b3ec61Sdh155122 			ns = *nsp;
319f4b3ec61Sdh155122 			mutex_enter(&ns->netstack_lock);
320f4b3ec61Sdh155122 			ns->netstack_numzones++;
321f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
322f4b3ec61Sdh155122 			mutex_exit(&netstack_g_lock);
323f4b3ec61Sdh155122 			DTRACE_PROBE1(netstack__inc__numzones,
324f4b3ec61Sdh155122 			    netstack_t *, ns);
325f4b3ec61Sdh155122 			/* Record that we have a new shared stack zone */
326f4b3ec61Sdh155122 			netstack_shared_zone_add(zoneid);
327f4b3ec61Sdh155122 			zone->zone_netstack = ns;
328f4b3ec61Sdh155122 			return (ns);
329f4b3ec61Sdh155122 		}
330f4b3ec61Sdh155122 	}
331f4b3ec61Sdh155122 	/* Not found */
332f4b3ec61Sdh155122 	mutex_init(&ns->netstack_lock, NULL, MUTEX_DEFAULT, NULL);
333bd41d0a8Snordmark 	cv_init(&ns->netstack_cv, NULL, CV_DEFAULT, NULL);
334f4b3ec61Sdh155122 	ns->netstack_stackid = zoneid;
335f4b3ec61Sdh155122 	ns->netstack_numzones = 1;
336f4b3ec61Sdh155122 	ns->netstack_refcnt = 1; /* Decremented by netstack_zone_destroy */
337f4b3ec61Sdh155122 	ns->netstack_flags = NSF_UNINIT;
338f4b3ec61Sdh155122 	*nsp = ns;
339f4b3ec61Sdh155122 	zone->zone_netstack = ns;
340f4b3ec61Sdh155122 
341bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
342bd41d0a8Snordmark 	/*
343bd41d0a8Snordmark 	 * Mark this netstack as having a CREATE running so
344bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
345bd41d0a8Snordmark 	 * the existing create callbacks to complete in moduleid order
346bd41d0a8Snordmark 	 */
347bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_CREATE;
348bd41d0a8Snordmark 
349f4b3ec61Sdh155122 	/*
350f4b3ec61Sdh155122 	 * Determine the set of module create functions that need to be
351f4b3ec61Sdh155122 	 * called before we drop the lock.
352bd41d0a8Snordmark 	 * Set NSS_CREATE_NEEDED for each of those.
353bd41d0a8Snordmark 	 * Skip any with NRF_DYING set, since those are in the process of
354bd41d0a8Snordmark 	 * going away, by checking for flags being exactly NRF_REGISTERED.
355f4b3ec61Sdh155122 	 */
356f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
357bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
358bd41d0a8Snordmark 
359bd41d0a8Snordmark 		cv_init(&nms->nms_cv, NULL, CV_DEFAULT, NULL);
360bd41d0a8Snordmark 
361bd41d0a8Snordmark 		if ((ns_reg[i].nr_flags == NRF_REGISTERED) &&
362bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
363bd41d0a8Snordmark 			nms->nms_flags |= NSS_CREATE_NEEDED;
364f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__create__needed,
365f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
366f4b3ec61Sdh155122 		}
367f4b3ec61Sdh155122 	}
368bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
369f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
370f4b3ec61Sdh155122 
371bd41d0a8Snordmark 	apply_all_modules(ns, netstack_apply_create);
372f4b3ec61Sdh155122 
373bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
374f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
375f4b3ec61Sdh155122 	ns->netstack_flags &= ~NSF_UNINIT;
376bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_CREATE);
377bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_CREATE;
378bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
379f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
380f4b3ec61Sdh155122 
381f4b3ec61Sdh155122 	return (ns);
382f4b3ec61Sdh155122 }
383f4b3ec61Sdh155122 
384f4b3ec61Sdh155122 /* ARGSUSED */
385f4b3ec61Sdh155122 static void
386f4b3ec61Sdh155122 netstack_zone_shutdown(zoneid_t zoneid, void *arg)
387f4b3ec61Sdh155122 {
388f4b3ec61Sdh155122 	netstack_t *ns = (netstack_t *)arg;
389f4b3ec61Sdh155122 	int i;
390f4b3ec61Sdh155122 
391f4b3ec61Sdh155122 	ASSERT(arg != NULL);
392f4b3ec61Sdh155122 
393f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
394f4b3ec61Sdh155122 	ASSERT(ns->netstack_numzones > 0);
395f4b3ec61Sdh155122 	if (ns->netstack_numzones != 1) {
396f4b3ec61Sdh155122 		/* Stack instance being used by other zone */
397f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
398f4b3ec61Sdh155122 		ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
399f4b3ec61Sdh155122 		return;
400f4b3ec61Sdh155122 	}
401f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
402f4b3ec61Sdh155122 
403f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
404bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
405bd41d0a8Snordmark 	/*
406bd41d0a8Snordmark 	 * Mark this netstack as having a SHUTDOWN running so
407bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
408bd41d0a8Snordmark 	 * the existing shutdown callbacks to complete in reverse moduleid order
409bd41d0a8Snordmark 	 */
410bd41d0a8Snordmark 	ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
411bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_SHUTDOWN;
412bd41d0a8Snordmark 
413f4b3ec61Sdh155122 	/*
414f4b3ec61Sdh155122 	 * Determine the set of registered modules before we drop the lock.
415bd41d0a8Snordmark 	 * Set NSS_SHUTDOWN_NEEDED for each of those.
416f4b3ec61Sdh155122 	 */
417f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
418bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
419bd41d0a8Snordmark 
420f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
421f4b3ec61Sdh155122 		    ns_reg[i].nr_shutdown != NULL &&
422bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
423bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
424bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
425f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
426f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
427f4b3ec61Sdh155122 		}
428f4b3ec61Sdh155122 	}
429bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
430f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
431f4b3ec61Sdh155122 
43223f4867fSnordmark 	/*
43323f4867fSnordmark 	 * Call the shutdown function for all registered modules for this
43423f4867fSnordmark 	 * netstack.
43523f4867fSnordmark 	 */
4367ddc9b1aSDarren Reed 	apply_all_modules_reverse(ns, netstack_apply_shutdown);
437bd41d0a8Snordmark 
438bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
439bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
440bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_SHUTDOWN);
441bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_SHUTDOWN;
442bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
443bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
444f4b3ec61Sdh155122 }
445f4b3ec61Sdh155122 
446f4b3ec61Sdh155122 /*
447f4b3ec61Sdh155122  * Common routine to release a zone.
448f4b3ec61Sdh155122  * If this was the last zone using the stack instance then prepare for
449f4b3ec61Sdh155122  * the refcnt dropping to zero to free the stack instance.
450f4b3ec61Sdh155122  */
451f4b3ec61Sdh155122 /* ARGSUSED */
452f4b3ec61Sdh155122 static void
453f4b3ec61Sdh155122 netstack_zone_destroy(zoneid_t zoneid, void *arg)
454f4b3ec61Sdh155122 {
455f4b3ec61Sdh155122 	netstack_t *ns = (netstack_t *)arg;
456f4b3ec61Sdh155122 
457f4b3ec61Sdh155122 	ASSERT(arg != NULL);
458f4b3ec61Sdh155122 
459f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
460f4b3ec61Sdh155122 	ASSERT(ns->netstack_numzones > 0);
461f4b3ec61Sdh155122 	ns->netstack_numzones--;
462f4b3ec61Sdh155122 	if (ns->netstack_numzones != 0) {
463f4b3ec61Sdh155122 		/* Stack instance being used by other zone */
464f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
465f4b3ec61Sdh155122 		ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
466f4b3ec61Sdh155122 		/* Record that a shared stack zone has gone away */
467f4b3ec61Sdh155122 		netstack_shared_zone_remove(zoneid);
468f4b3ec61Sdh155122 		return;
469f4b3ec61Sdh155122 	}
470f4b3ec61Sdh155122 	/*
47123f4867fSnordmark 	 * Set CLOSING so that the netstack_find_by_* functions will not find it.
472f4b3ec61Sdh155122 	 */
473f4b3ec61Sdh155122 	ns->netstack_flags |= NSF_CLOSING;
474f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
475f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__dec__numzones, netstack_t *, ns);
476f4b3ec61Sdh155122 	/* No other thread can call zone_destroy for this stack */
477f4b3ec61Sdh155122 
478f4b3ec61Sdh155122 	/*
479f4b3ec61Sdh155122 	 * Decrease refcnt to account for the one in netstack_zone_create()
480f4b3ec61Sdh155122 	 */
481f4b3ec61Sdh155122 	netstack_rele(ns);
482f4b3ec61Sdh155122 }
483f4b3ec61Sdh155122 
484f4b3ec61Sdh155122 /*
485f4b3ec61Sdh155122  * Called when the reference count drops to zero.
486f4b3ec61Sdh155122  * Call the destroy functions for each registered module.
487f4b3ec61Sdh155122  */
488f4b3ec61Sdh155122 static void
489f4b3ec61Sdh155122 netstack_stack_inactive(netstack_t *ns)
490f4b3ec61Sdh155122 {
491f4b3ec61Sdh155122 	int i;
492f4b3ec61Sdh155122 
493f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
494bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
495bd41d0a8Snordmark 	/*
496bd41d0a8Snordmark 	 * Mark this netstack as having a DESTROY running so
497bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
498bd41d0a8Snordmark 	 * the existing destroy callbacks to complete in reverse moduleid order
499bd41d0a8Snordmark 	 */
500bd41d0a8Snordmark 	ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
501bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_DESTROY;
502f4b3ec61Sdh155122 	/*
503f4b3ec61Sdh155122 	 * If the shutdown callback wasn't called earlier (e.g., if this is
504bd41d0a8Snordmark 	 * a netstack shared between multiple zones), then we schedule it now.
505bd41d0a8Snordmark 	 *
506bd41d0a8Snordmark 	 * Determine the set of registered modules before we drop the lock.
507bd41d0a8Snordmark 	 * Set NSS_DESTROY_NEEDED for each of those. That
508bd41d0a8Snordmark 	 * ensures that when we return all the callbacks for existing
509bd41d0a8Snordmark 	 * instances have completed.
510f4b3ec61Sdh155122 	 */
511f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
512bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
513bd41d0a8Snordmark 
514f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
515f4b3ec61Sdh155122 		    ns_reg[i].nr_shutdown != NULL &&
516bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
517bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
518bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
519f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
520f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
521f4b3ec61Sdh155122 		}
522bd41d0a8Snordmark 
523f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
524f4b3ec61Sdh155122 		    ns_reg[i].nr_destroy != NULL &&
525bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
526bd41d0a8Snordmark 		    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
527bd41d0a8Snordmark 			nms->nms_flags |= NSS_DESTROY_NEEDED;
528f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__needed,
529f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
530f4b3ec61Sdh155122 		}
531f4b3ec61Sdh155122 	}
532bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
533f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
534f4b3ec61Sdh155122 
53523f4867fSnordmark 	/*
53623f4867fSnordmark 	 * Call the shutdown and destroy functions for all registered modules
53723f4867fSnordmark 	 * for this netstack.
538bd41d0a8Snordmark 	 *
539bd41d0a8Snordmark 	 * Since there are some ordering dependencies between the modules we
540bd41d0a8Snordmark 	 * tear them down in the reverse order of what was used to create them.
541bd41d0a8Snordmark 	 *
542bd41d0a8Snordmark 	 * Since a netstack_t is never reused (when a zone is rebooted it gets
543bd41d0a8Snordmark 	 * a new zoneid == netstackid i.e. a new netstack_t is allocated) we
544bd41d0a8Snordmark 	 * leave nms_flags the way it is i.e. with NSS_DESTROY_COMPLETED set.
545bd41d0a8Snordmark 	 * That is different than in the netstack_unregister() case.
54623f4867fSnordmark 	 */
5477ddc9b1aSDarren Reed 	apply_all_modules_reverse(ns, netstack_apply_shutdown);
548bd41d0a8Snordmark 	apply_all_modules_reverse(ns, netstack_apply_destroy);
549f4b3ec61Sdh155122 
550bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
551f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
552bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_DESTROY);
553bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_DESTROY;
554bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
555f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
556f4b3ec61Sdh155122 }
557f4b3ec61Sdh155122 
55823f4867fSnordmark /*
55923f4867fSnordmark  * Apply a function to all netstacks for a particular moduleid.
56023f4867fSnordmark  *
561bd41d0a8Snordmark  * If there is any zone activity (due to a zone being created, shut down,
562bd41d0a8Snordmark  * or destroyed) we wait for that to complete before we proceed. This ensures
563bd41d0a8Snordmark  * that the moduleids are processed in order when a zone is created or
564bd41d0a8Snordmark  * destroyed.
565bd41d0a8Snordmark  *
56623f4867fSnordmark  * The applyfn has to drop netstack_g_lock if it does some work.
567bd41d0a8Snordmark  * In that case we don't follow netstack_next,
568bd41d0a8Snordmark  * even if it is possible to do so without any hazards. This is
56923f4867fSnordmark  * because we want the design to allow for the list of netstacks threaded
57023f4867fSnordmark  * by netstack_next to change in any arbitrary way during the time the
57123f4867fSnordmark  * lock was dropped.
57223f4867fSnordmark  *
57323f4867fSnordmark  * It is safe to restart the loop at netstack_head since the applyfn
57423f4867fSnordmark  * changes netstack_m_state as it processes things, so a subsequent
57523f4867fSnordmark  * pass through will have no effect in applyfn, hence the loop will terminate
57623f4867fSnordmark  * in at worst O(N^2).
57723f4867fSnordmark  */
578f4b3ec61Sdh155122 static void
57923f4867fSnordmark apply_all_netstacks(int moduleid, applyfn_t *applyfn)
580f4b3ec61Sdh155122 {
581f4b3ec61Sdh155122 	netstack_t *ns;
582f4b3ec61Sdh155122 
58323f4867fSnordmark 	mutex_enter(&netstack_g_lock);
58423f4867fSnordmark 	ns = netstack_head;
585f4b3ec61Sdh155122 	while (ns != NULL) {
586bd41d0a8Snordmark 		if (wait_for_zone_creator(ns, &netstack_g_lock)) {
58723f4867fSnordmark 			/* Lock dropped - restart at head */
588bd41d0a8Snordmark 			ns = netstack_head;
589bd41d0a8Snordmark 		} else if ((applyfn)(&netstack_g_lock, ns, moduleid)) {
590bd41d0a8Snordmark 			/* Lock dropped - restart at head */
59123f4867fSnordmark 			ns = netstack_head;
59223f4867fSnordmark 		} else {
59323f4867fSnordmark 			ns = ns->netstack_next;
59423f4867fSnordmark 		}
59523f4867fSnordmark 	}
59623f4867fSnordmark 	mutex_exit(&netstack_g_lock);
59723f4867fSnordmark }
59823f4867fSnordmark 
59923f4867fSnordmark /*
60023f4867fSnordmark  * Apply a function to all moduleids for a particular netstack.
60123f4867fSnordmark  *
60223f4867fSnordmark  * Since the netstack linkage doesn't matter in this case we can
60323f4867fSnordmark  * ignore whether the function drops the lock.
60423f4867fSnordmark  */
60523f4867fSnordmark static void
60623f4867fSnordmark apply_all_modules(netstack_t *ns, applyfn_t *applyfn)
60723f4867fSnordmark {
60823f4867fSnordmark 	int i;
60923f4867fSnordmark 
61023f4867fSnordmark 	mutex_enter(&netstack_g_lock);
611f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
612f4b3ec61Sdh155122 		/*
613bd41d0a8Snordmark 		 * We don't care whether the lock was dropped
614bd41d0a8Snordmark 		 * since we are not iterating over netstack_head.
615f4b3ec61Sdh155122 		 */
616bd41d0a8Snordmark 		(void) (applyfn)(&netstack_g_lock, ns, i);
617f4b3ec61Sdh155122 	}
61823f4867fSnordmark 	mutex_exit(&netstack_g_lock);
619f4b3ec61Sdh155122 }
620f4b3ec61Sdh155122 
62123f4867fSnordmark /* Like the above but in reverse moduleid order */
622f4b3ec61Sdh155122 static void
62323f4867fSnordmark apply_all_modules_reverse(netstack_t *ns, applyfn_t *applyfn)
624f4b3ec61Sdh155122 {
625f4b3ec61Sdh155122 	int i;
626f4b3ec61Sdh155122 
62723f4867fSnordmark 	mutex_enter(&netstack_g_lock);
628f4b3ec61Sdh155122 	for (i = NS_MAX-1; i >= 0; i--) {
629f4b3ec61Sdh155122 		/*
630bd41d0a8Snordmark 		 * We don't care whether the lock was dropped
631bd41d0a8Snordmark 		 * since we are not iterating over netstack_head.
632f4b3ec61Sdh155122 		 */
633bd41d0a8Snordmark 		(void) (applyfn)(&netstack_g_lock, ns, i);
634f4b3ec61Sdh155122 	}
63523f4867fSnordmark 	mutex_exit(&netstack_g_lock);
636f4b3ec61Sdh155122 }
637f4b3ec61Sdh155122 
638f4b3ec61Sdh155122 /*
639bd41d0a8Snordmark  * Call the create function for the ns and moduleid if CREATE_NEEDED
640bd41d0a8Snordmark  * is set.
641bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
642bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
643bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
64423f4867fSnordmark  *
645bd41d0a8Snordmark  * When we call the create function, we temporarily drop the netstack_lock
646bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
647bd41d0a8Snordmark  * re-evaluate the state.
648f4b3ec61Sdh155122  */
649bd41d0a8Snordmark static boolean_t
650bd41d0a8Snordmark netstack_apply_create(kmutex_t *lockp, netstack_t *ns, int moduleid)
651f4b3ec61Sdh155122 {
652bd41d0a8Snordmark 	void *result;
653bd41d0a8Snordmark 	netstackid_t stackid;
654bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
655bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
656bd41d0a8Snordmark 
657bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
658bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
659bd41d0a8Snordmark 
660bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
661bd41d0a8Snordmark 		dropped = B_TRUE;
662bd41d0a8Snordmark 
663bd41d0a8Snordmark 	if (nms->nms_flags & NSS_CREATE_NEEDED) {
664bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_CREATE_NEEDED;
665bd41d0a8Snordmark 		nms->nms_flags |= NSS_CREATE_INPROGRESS;
666bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__inprogress,
667bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
668bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
669bd41d0a8Snordmark 		mutex_exit(lockp);
670bd41d0a8Snordmark 		dropped = B_TRUE;
671bd41d0a8Snordmark 
672bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_create != NULL);
673bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
674bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__start,
675bd41d0a8Snordmark 		    netstackid_t, stackid,
676bd41d0a8Snordmark 		    netstack_t *, ns);
677bd41d0a8Snordmark 		result = (ns_reg[moduleid].nr_create)(stackid, ns);
678bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__end,
679bd41d0a8Snordmark 		    void *, result, netstack_t *, ns);
680bd41d0a8Snordmark 
681bd41d0a8Snordmark 		ASSERT(result != NULL);
682bd41d0a8Snordmark 		mutex_enter(lockp);
683bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
684bd41d0a8Snordmark 		ns->netstack_modules[moduleid] = result;
685bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_CREATE_INPROGRESS;
686bd41d0a8Snordmark 		nms->nms_flags |= NSS_CREATE_COMPLETED;
687bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
688bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__completed,
689bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
690bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
691bd41d0a8Snordmark 		return (dropped);
69223f4867fSnordmark 	} else {
693bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
694bd41d0a8Snordmark 		return (dropped);
69523f4867fSnordmark 	}
696f4b3ec61Sdh155122 }
697f4b3ec61Sdh155122 
698f4b3ec61Sdh155122 /*
699bd41d0a8Snordmark  * Call the shutdown function for the ns and moduleid if SHUTDOWN_NEEDED
700bd41d0a8Snordmark  * is set.
701bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
702bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
703bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
704f4b3ec61Sdh155122  *
705bd41d0a8Snordmark  * When we call the shutdown function, we temporarily drop the netstack_lock
706bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
707bd41d0a8Snordmark  * re-evaluate the state.
708f4b3ec61Sdh155122  */
709bd41d0a8Snordmark static boolean_t
710bd41d0a8Snordmark netstack_apply_shutdown(kmutex_t *lockp, netstack_t *ns, int moduleid)
711f4b3ec61Sdh155122 {
712bd41d0a8Snordmark 	netstackid_t stackid;
713bd41d0a8Snordmark 	void * netstack_module;
714bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
715bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
716bd41d0a8Snordmark 
717bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
718bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
719bd41d0a8Snordmark 
720bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
721bd41d0a8Snordmark 		dropped = B_TRUE;
722bd41d0a8Snordmark 
723bd41d0a8Snordmark 	if (nms->nms_flags & NSS_SHUTDOWN_NEEDED) {
724bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_SHUTDOWN_NEEDED;
725bd41d0a8Snordmark 		nms->nms_flags |= NSS_SHUTDOWN_INPROGRESS;
726bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__inprogress,
727bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
728bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
729bd41d0a8Snordmark 		mutex_exit(lockp);
730bd41d0a8Snordmark 		dropped = B_TRUE;
731bd41d0a8Snordmark 
732bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_shutdown != NULL);
733bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
734bd41d0a8Snordmark 		netstack_module = ns->netstack_modules[moduleid];
735bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__start,
736bd41d0a8Snordmark 		    netstackid_t, stackid,
737bd41d0a8Snordmark 		    void *, netstack_module);
738bd41d0a8Snordmark 		(ns_reg[moduleid].nr_shutdown)(stackid, netstack_module);
739bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__shutdown__end,
740bd41d0a8Snordmark 		    netstack_t *, ns);
741bd41d0a8Snordmark 
742bd41d0a8Snordmark 		mutex_enter(lockp);
743bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
744bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_SHUTDOWN_INPROGRESS;
745bd41d0a8Snordmark 		nms->nms_flags |= NSS_SHUTDOWN_COMPLETED;
746bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
747bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__completed,
748bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
749bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
750bd41d0a8Snordmark 		return (dropped);
751bd41d0a8Snordmark 	} else {
752bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
753bd41d0a8Snordmark 		return (dropped);
754bd41d0a8Snordmark 	}
755f4b3ec61Sdh155122 }
756f4b3ec61Sdh155122 
757f4b3ec61Sdh155122 /*
758bd41d0a8Snordmark  * Call the destroy function for the ns and moduleid if DESTROY_NEEDED
759bd41d0a8Snordmark  * is set.
760bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
761bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
762bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
763f4b3ec61Sdh155122  *
764bd41d0a8Snordmark  * When we call the destroy function, we temporarily drop the netstack_lock
765bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
766bd41d0a8Snordmark  * re-evaluate the state.
767f4b3ec61Sdh155122  */
768bd41d0a8Snordmark static boolean_t
769bd41d0a8Snordmark netstack_apply_destroy(kmutex_t *lockp, netstack_t *ns, int moduleid)
770f4b3ec61Sdh155122 {
771bd41d0a8Snordmark 	netstackid_t stackid;
772bd41d0a8Snordmark 	void *netstack_module;
773bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
774bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
775bd41d0a8Snordmark 
776bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
777bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
778bd41d0a8Snordmark 
779bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
780bd41d0a8Snordmark 		dropped = B_TRUE;
781bd41d0a8Snordmark 
782bd41d0a8Snordmark 	if (nms->nms_flags & NSS_DESTROY_NEEDED) {
783bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_DESTROY_NEEDED;
784bd41d0a8Snordmark 		nms->nms_flags |= NSS_DESTROY_INPROGRESS;
785bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__inprogress,
786bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
787bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
788bd41d0a8Snordmark 		mutex_exit(lockp);
789bd41d0a8Snordmark 		dropped = B_TRUE;
790bd41d0a8Snordmark 
791bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_destroy != NULL);
792bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
793bd41d0a8Snordmark 		netstack_module = ns->netstack_modules[moduleid];
794bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__start,
795bd41d0a8Snordmark 		    netstackid_t, stackid,
796bd41d0a8Snordmark 		    void *, netstack_module);
797bd41d0a8Snordmark 		(ns_reg[moduleid].nr_destroy)(stackid, netstack_module);
798bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__destroy__end,
799bd41d0a8Snordmark 		    netstack_t *, ns);
800bd41d0a8Snordmark 
801bd41d0a8Snordmark 		mutex_enter(lockp);
802bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
803bd41d0a8Snordmark 		ns->netstack_modules[moduleid] = NULL;
804bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_DESTROY_INPROGRESS;
805bd41d0a8Snordmark 		nms->nms_flags |= NSS_DESTROY_COMPLETED;
806bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
807bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__completed,
808bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
809bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
810bd41d0a8Snordmark 		return (dropped);
811bd41d0a8Snordmark 	} else {
812bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
813bd41d0a8Snordmark 		return (dropped);
814bd41d0a8Snordmark 	}
815f4b3ec61Sdh155122 }
816f4b3ec61Sdh155122 
817f4b3ec61Sdh155122 /*
818bd41d0a8Snordmark  * If somebody is creating the netstack (due to a new zone being created)
819bd41d0a8Snordmark  * then we wait for them to complete. This ensures that any additional
820bd41d0a8Snordmark  * netstack_register() doesn't cause the create functions to run out of
821bd41d0a8Snordmark  * order.
822bd41d0a8Snordmark  * Note that we do not need such a global wait in the case of the shutdown
823bd41d0a8Snordmark  * and destroy callbacks, since in that case it is sufficient for both
824bd41d0a8Snordmark  * threads to set NEEDED and wait for INPROGRESS to ensure ordering.
825bd41d0a8Snordmark  * Returns true if lockp was temporarily dropped while waiting.
826f4b3ec61Sdh155122  */
827bd41d0a8Snordmark static boolean_t
828bd41d0a8Snordmark wait_for_zone_creator(netstack_t *ns, kmutex_t *lockp)
829f4b3ec61Sdh155122 {
830bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
831bd41d0a8Snordmark 
832bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
833bd41d0a8Snordmark 	while (ns->netstack_flags & NSF_ZONE_CREATE) {
834bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__wait__zone__inprogress,
835bd41d0a8Snordmark 		    netstack_t *, ns);
836bd41d0a8Snordmark 		if (lockp != NULL) {
837bd41d0a8Snordmark 			dropped = B_TRUE;
838bd41d0a8Snordmark 			mutex_exit(lockp);
839bd41d0a8Snordmark 		}
840bd41d0a8Snordmark 		cv_wait(&ns->netstack_cv, &ns->netstack_lock);
841bd41d0a8Snordmark 		if (lockp != NULL) {
842bd41d0a8Snordmark 			/* First drop netstack_lock to preserve order */
843bd41d0a8Snordmark 			mutex_exit(&ns->netstack_lock);
844bd41d0a8Snordmark 			mutex_enter(lockp);
845bd41d0a8Snordmark 			mutex_enter(&ns->netstack_lock);
846bd41d0a8Snordmark 		}
847bd41d0a8Snordmark 	}
848bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
849bd41d0a8Snordmark 	return (dropped);
850bd41d0a8Snordmark }
851bd41d0a8Snordmark 
852f4b3ec61Sdh155122 /*
853bd41d0a8Snordmark  * Wait for any INPROGRESS flag to be cleared for the netstack/moduleid
854bd41d0a8Snordmark  * combination.
855bd41d0a8Snordmark  * Returns true if lockp was temporarily dropped while waiting.
856f4b3ec61Sdh155122  */
857bd41d0a8Snordmark static boolean_t
858bd41d0a8Snordmark wait_for_nms_inprogress(netstack_t *ns, nm_state_t *nms, kmutex_t *lockp)
859bd41d0a8Snordmark {
860bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
861bd41d0a8Snordmark 
862bd41d0a8Snordmark 	while (nms->nms_flags & NSS_ALL_INPROGRESS) {
863bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__wait__nms__inprogress,
864bd41d0a8Snordmark 		    netstack_t *, ns, nm_state_t *, nms);
865bd41d0a8Snordmark 		if (lockp != NULL) {
866bd41d0a8Snordmark 			dropped = B_TRUE;
867bd41d0a8Snordmark 			mutex_exit(lockp);
868bd41d0a8Snordmark 		}
869bd41d0a8Snordmark 		cv_wait(&nms->nms_cv, &ns->netstack_lock);
870bd41d0a8Snordmark 		if (lockp != NULL) {
871bd41d0a8Snordmark 			/* First drop netstack_lock to preserve order */
872bd41d0a8Snordmark 			mutex_exit(&ns->netstack_lock);
873bd41d0a8Snordmark 			mutex_enter(lockp);
874bd41d0a8Snordmark 			mutex_enter(&ns->netstack_lock);
875bd41d0a8Snordmark 		}
876bd41d0a8Snordmark 	}
877bd41d0a8Snordmark 	return (dropped);
878f4b3ec61Sdh155122 }
879f4b3ec61Sdh155122 
880f4b3ec61Sdh155122 /*
881f4b3ec61Sdh155122  * Get the stack instance used in the caller's zone.
882f4b3ec61Sdh155122  * Increases the reference count; caller must do a netstack_rele().
883f4b3ec61Sdh155122  * It can't be called after zone_destroy() has started.
884f4b3ec61Sdh155122  */
885fd006805Snordmark netstack_t *
886f4b3ec61Sdh155122 netstack_get_current(void)
887f4b3ec61Sdh155122 {
888f4b3ec61Sdh155122 	netstack_t *ns;
889f4b3ec61Sdh155122 
890f4b3ec61Sdh155122 	ns = curproc->p_zone->zone_netstack;
891f4b3ec61Sdh155122 	ASSERT(ns != NULL);
892f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
893f4b3ec61Sdh155122 		return (NULL);
894f4b3ec61Sdh155122 
895f4b3ec61Sdh155122 	netstack_hold(ns);
896f4b3ec61Sdh155122 
897f4b3ec61Sdh155122 	return (ns);
898f4b3ec61Sdh155122 }
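
/*
 * Sketch of the hold/release pattern expected of callers; foo_stack_t and
 * netstack_foo below are hypothetical names standing in for a module's
 * per-stack state hanging off the netstack_t. Every successful
 * netstack_get_current()/netstack_find_by_*() call must be balanced by a
 * netstack_rele() once the caller is done with the per-stack state:
 *
 *	netstack_t *ns;
 *	foo_stack_t *fs;
 *
 *	if ((ns = netstack_get_current()) == NULL)
 *		return (ENXIO);
 *	fs = ns->netstack_foo;
 *	... use fs; it remains valid until the rele ...
 *	netstack_rele(ns);
 *
 * The NULL return covers the NSF_UNINIT/NSF_CLOSING window while the
 * caller's zone is being created or torn down.
 */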
899f4b3ec61Sdh155122 
900f4b3ec61Sdh155122 /*
901f4b3ec61Sdh155122  * Find a stack instance given the cred.
902f4b3ec61Sdh155122  * This is used by the modules to potentially allow for a future when
903f4b3ec61Sdh155122  * something other than the zoneid is used to determine the stack.
904f4b3ec61Sdh155122  */
905f4b3ec61Sdh155122 netstack_t *
906f4b3ec61Sdh155122 netstack_find_by_cred(const cred_t *cr)
907f4b3ec61Sdh155122 {
908f4b3ec61Sdh155122 	zoneid_t zoneid = crgetzoneid(cr);
909f4b3ec61Sdh155122 
910f4b3ec61Sdh155122 	/* Handle the case when cr_zone is NULL */
911f4b3ec61Sdh155122 	if (zoneid == (zoneid_t)-1)
912f4b3ec61Sdh155122 		zoneid = GLOBAL_ZONEID;
913f4b3ec61Sdh155122 
914f4b3ec61Sdh155122 	/* For performance ... */
915f4b3ec61Sdh155122 	if (curproc->p_zone->zone_id == zoneid)
916f4b3ec61Sdh155122 		return (netstack_get_current());
917f4b3ec61Sdh155122 	else
918f4b3ec61Sdh155122 		return (netstack_find_by_zoneid(zoneid));
919f4b3ec61Sdh155122 }
920f4b3ec61Sdh155122 
921f4b3ec61Sdh155122 /*
922f4b3ec61Sdh155122  * Find a stack instance given the zoneid.
923f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
924f4b3ec61Sdh155122  * netstack_rele().
925f4b3ec61Sdh155122  *
926f4b3ec61Sdh155122  * If there is no exact match then assume the shared stack instance
927f4b3ec61Sdh155122  * matches.
928f4b3ec61Sdh155122  *
929f4b3ec61Sdh155122  * Skip the uninitialized ones.
930f4b3ec61Sdh155122  */
931f4b3ec61Sdh155122 netstack_t *
932f4b3ec61Sdh155122 netstack_find_by_zoneid(zoneid_t zoneid)
933f4b3ec61Sdh155122 {
934f4b3ec61Sdh155122 	netstack_t *ns;
935f4b3ec61Sdh155122 	zone_t *zone;
936f4b3ec61Sdh155122 
937f4b3ec61Sdh155122 	zone = zone_find_by_id(zoneid);
938f4b3ec61Sdh155122 
939f4b3ec61Sdh155122 	if (zone == NULL)
940f4b3ec61Sdh155122 		return (NULL);
941f4b3ec61Sdh155122 
942f4b3ec61Sdh155122 	ns = zone->zone_netstack;
943f4b3ec61Sdh155122 	ASSERT(ns != NULL);
944f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
945f4b3ec61Sdh155122 		ns = NULL;
946f4b3ec61Sdh155122 	else
947f4b3ec61Sdh155122 		netstack_hold(ns);
948f4b3ec61Sdh155122 
949f4b3ec61Sdh155122 	zone_rele(zone);
950f4b3ec61Sdh155122 	return (ns);
951f4b3ec61Sdh155122 }
952f4b3ec61Sdh155122 
953f4b3ec61Sdh155122 /*
954bd41d0a8Snordmark  * Find a stack instance given the zoneid. Can only be called from
955bd41d0a8Snordmark  * the create callback. See the comments in zone_find_by_id_nolock for why
956bd41d0a8Snordmark  * that limitation exists.
957bd41d0a8Snordmark  *
958f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
959f4b3ec61Sdh155122  * netstack_rele().
960f4b3ec61Sdh155122  *
961f4b3ec61Sdh155122  * If there is no exact match then assume the shared stack instance
962f4b3ec61Sdh155122  * matches.
963f4b3ec61Sdh155122  *
964f4b3ec61Sdh155122  * Skip the uninitialized ones.
965f4b3ec61Sdh155122  */
966f4b3ec61Sdh155122 netstack_t *
967f4b3ec61Sdh155122 netstack_find_by_zoneid_nolock(zoneid_t zoneid)
968f4b3ec61Sdh155122 {
969f4b3ec61Sdh155122 	netstack_t *ns;
970f4b3ec61Sdh155122 	zone_t *zone;
971f4b3ec61Sdh155122 
972f4b3ec61Sdh155122 	zone = zone_find_by_id_nolock(zoneid);
973f4b3ec61Sdh155122 
974f4b3ec61Sdh155122 	if (zone == NULL)
975f4b3ec61Sdh155122 		return (NULL);
976f4b3ec61Sdh155122 
977f4b3ec61Sdh155122 	ns = zone->zone_netstack;
978f4b3ec61Sdh155122 	ASSERT(ns != NULL);
979f4b3ec61Sdh155122 
980f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
981f4b3ec61Sdh155122 		ns = NULL;
982f4b3ec61Sdh155122 	else
983f4b3ec61Sdh155122 		netstack_hold(ns);
984f4b3ec61Sdh155122 
985bd41d0a8Snordmark 	/* zone_find_by_id_nolock does not have a hold on the zone */
986f4b3ec61Sdh155122 	return (ns);
987f4b3ec61Sdh155122 }
988f4b3ec61Sdh155122 
989f4b3ec61Sdh155122 /*
990f4b3ec61Sdh155122  * Find a stack instance given the stackid, requiring an exact match.
991f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
992f4b3ec61Sdh155122  * netstack_rele().
993f4b3ec61Sdh155122  *
994f4b3ec61Sdh155122  * Skip the uninitialized ones.
995f4b3ec61Sdh155122  */
996f4b3ec61Sdh155122 netstack_t *
997f4b3ec61Sdh155122 netstack_find_by_stackid(netstackid_t stackid)
998f4b3ec61Sdh155122 {
999f4b3ec61Sdh155122 	netstack_t *ns;
1000f4b3ec61Sdh155122 
1001f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
1002f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1003f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
1004f4b3ec61Sdh155122 		if (ns->netstack_stackid == stackid &&
1005f4b3ec61Sdh155122 		    !(ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))) {
1006f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
1007f4b3ec61Sdh155122 			netstack_hold(ns);
1008f4b3ec61Sdh155122 			mutex_exit(&netstack_g_lock);
1009f4b3ec61Sdh155122 			return (ns);
1010f4b3ec61Sdh155122 		}
1011f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
1012f4b3ec61Sdh155122 	}
1013f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
1014f4b3ec61Sdh155122 	return (NULL);
1015f4b3ec61Sdh155122 }
1016f4b3ec61Sdh155122 
1017f4b3ec61Sdh155122 void
1018f4b3ec61Sdh155122 netstack_rele(netstack_t *ns)
1019f4b3ec61Sdh155122 {
1020f4b3ec61Sdh155122 	netstack_t **nsp;
1021f4b3ec61Sdh155122 	boolean_t found;
1022f4b3ec61Sdh155122 	int refcnt, numzones;
1023bd41d0a8Snordmark 	int i;
1024f4b3ec61Sdh155122 
1025f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
1026f4b3ec61Sdh155122 	ASSERT(ns->netstack_refcnt > 0);
1027f4b3ec61Sdh155122 	ns->netstack_refcnt--;
1028f4b3ec61Sdh155122 	/*
1029f4b3ec61Sdh155122 	 * As we drop the lock additional netstack_rele()s can come in
1030f4b3ec61Sdh155122 	 * and decrement the refcnt to zero and free the netstack_t.
1031f4b3ec61Sdh155122 	 * Store the values in local variables and if we were not the last
1032f4b3ec61Sdh155122 	 * then don't reference the netstack_t after that.
1033f4b3ec61Sdh155122 	 */
1034f4b3ec61Sdh155122 	refcnt = ns->netstack_refcnt;
1035f4b3ec61Sdh155122 	numzones = ns->netstack_numzones;
1036f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__dec__ref, netstack_t *, ns);
1037f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
1038f4b3ec61Sdh155122 
1039f4b3ec61Sdh155122 	if (refcnt == 0 && numzones == 0) {
1040f4b3ec61Sdh155122 		/*
1041f4b3ec61Sdh155122 		 * Time to call the destroy functions and free up
1042f4b3ec61Sdh155122 		 * the structure
1043f4b3ec61Sdh155122 		 */
1044f4b3ec61Sdh155122 		netstack_stack_inactive(ns);
1045f4b3ec61Sdh155122 
104623f4867fSnordmark 		/* Make sure nothing increased the references */
104723f4867fSnordmark 		ASSERT(ns->netstack_refcnt == 0);
104823f4867fSnordmark 		ASSERT(ns->netstack_numzones == 0);
104923f4867fSnordmark 
1050f4b3ec61Sdh155122 		/* Finally remove from list of netstacks */
1051f4b3ec61Sdh155122 		mutex_enter(&netstack_g_lock);
1052f4b3ec61Sdh155122 		found = B_FALSE;
1053f4b3ec61Sdh155122 		for (nsp = &netstack_head; *nsp != NULL;
1054f4b3ec61Sdh155122 		    nsp = &(*nsp)->netstack_next) {
1055f4b3ec61Sdh155122 			if (*nsp == ns) {
1056f4b3ec61Sdh155122 				*nsp = ns->netstack_next;
1057f4b3ec61Sdh155122 				ns->netstack_next = NULL;
1058f4b3ec61Sdh155122 				found = B_TRUE;
1059f4b3ec61Sdh155122 				break;
1060f4b3ec61Sdh155122 			}
1061f4b3ec61Sdh155122 		}
1062f4b3ec61Sdh155122 		ASSERT(found);
1063f4b3ec61Sdh155122 		mutex_exit(&netstack_g_lock);
1064f4b3ec61Sdh155122 
106523f4867fSnordmark 		/* Make sure nothing increased the references */
106623f4867fSnordmark 		ASSERT(ns->netstack_refcnt == 0);
106723f4867fSnordmark 		ASSERT(ns->netstack_numzones == 0);
106823f4867fSnordmark 
1069f4b3ec61Sdh155122 		ASSERT(ns->netstack_flags & NSF_CLOSING);
1070bd41d0a8Snordmark 
1071bd41d0a8Snordmark 		for (i = 0; i < NS_MAX; i++) {
1072bd41d0a8Snordmark 			nm_state_t *nms = &ns->netstack_m_state[i];
1073bd41d0a8Snordmark 
1074bd41d0a8Snordmark 			cv_destroy(&nms->nms_cv);
1075bd41d0a8Snordmark 		}
1076bd41d0a8Snordmark 		mutex_destroy(&ns->netstack_lock);
1077bd41d0a8Snordmark 		cv_destroy(&ns->netstack_cv);
1078f4b3ec61Sdh155122 		kmem_free(ns, sizeof (*ns));
1079f4b3ec61Sdh155122 	}
1080f4b3ec61Sdh155122 }
1081f4b3ec61Sdh155122 
1082f4b3ec61Sdh155122 void
1083f4b3ec61Sdh155122 netstack_hold(netstack_t *ns)
1084f4b3ec61Sdh155122 {
1085f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
1086f4b3ec61Sdh155122 	ns->netstack_refcnt++;
1087f4b3ec61Sdh155122 	ASSERT(ns->netstack_refcnt > 0);
1088f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
1089f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__inc__ref, netstack_t *, ns);
1090f4b3ec61Sdh155122 }
1091f4b3ec61Sdh155122 
1092f4b3ec61Sdh155122 /*
1093f4b3ec61Sdh155122  * To support kstat_create_netstack() using kstat_zone_add we need
1094f4b3ec61Sdh155122  * to track both
1095f4b3ec61Sdh155122  *  - all zoneids that use the global/shared stack
1096f4b3ec61Sdh155122  *  - all kstats that have been added for the shared stack
1097f4b3ec61Sdh155122  */
1098f4b3ec61Sdh155122 kstat_t *
1099f4b3ec61Sdh155122 kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
1100f4b3ec61Sdh155122     char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
1101f4b3ec61Sdh155122     netstackid_t ks_netstackid)
1102f4b3ec61Sdh155122 {
1103f4b3ec61Sdh155122 	kstat_t *ks;
1104f4b3ec61Sdh155122 
1105f4b3ec61Sdh155122 	if (ks_netstackid == GLOBAL_NETSTACKID) {
1106f4b3ec61Sdh155122 		ks = kstat_create_zone(ks_module, ks_instance, ks_name,
1107f4b3ec61Sdh155122 		    ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
1108f4b3ec61Sdh155122 		if (ks != NULL)
1109f4b3ec61Sdh155122 			netstack_shared_kstat_add(ks);
1110f4b3ec61Sdh155122 		return (ks);
1111f4b3ec61Sdh155122 	} else {
1112f4b3ec61Sdh155122 		zoneid_t zoneid = ks_netstackid;
1113f4b3ec61Sdh155122 
1114f4b3ec61Sdh155122 		return (kstat_create_zone(ks_module, ks_instance, ks_name,
1115f4b3ec61Sdh155122 		    ks_class, ks_type, ks_ndata, ks_flags, zoneid));
1116f4b3ec61Sdh155122 	}
1117f4b3ec61Sdh155122 }
1118f4b3ec61Sdh155122 
1119f4b3ec61Sdh155122 void
1120f4b3ec61Sdh155122 kstat_delete_netstack(kstat_t *ks, netstackid_t ks_netstackid)
1121f4b3ec61Sdh155122 {
1122f4b3ec61Sdh155122 	if (ks_netstackid == GLOBAL_NETSTACKID) {
1123f4b3ec61Sdh155122 		netstack_shared_kstat_remove(ks);
1124f4b3ec61Sdh155122 	}
1125f4b3ec61Sdh155122 	kstat_delete(ks);
1126f4b3ec61Sdh155122 }
1127f4b3ec61Sdh155122 
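/*
 * Usage sketch (illustrative only; the module name "examplemod" and the
 * kstat layout are hypothetical): a per-stack module would typically
 * create and delete its kstats against the stack's id, so that kstats
 * for the shared stack get exported to every zone using it:
 *
 *	ks = kstat_create_netstack("examplemod", 0, "stats", "net",
 *	    KSTAT_TYPE_NAMED, 3, 0, stackid);
 *	if (ks != NULL) {
 *		... initialize ks->ks_data ...
 *		kstat_install(ks);
 *	}
 *	...
 *	kstat_delete_netstack(ks, stackid);
 */
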
1128f4b3ec61Sdh155122 static void
1129f4b3ec61Sdh155122 netstack_shared_zone_add(zoneid_t zoneid)
1130f4b3ec61Sdh155122 {
1131f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1132f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1133f4b3ec61Sdh155122 
1134f4b3ec61Sdh155122 	sz = (struct shared_zone_list *)kmem_zalloc(sizeof (*sz), KM_SLEEP);
1135f4b3ec61Sdh155122 	sz->sz_zoneid = zoneid;
1136f4b3ec61Sdh155122 
1137f4b3ec61Sdh155122 	/* Insert in list */
1138f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1139f4b3ec61Sdh155122 	sz->sz_next = netstack_shared_zones;
1140f4b3ec61Sdh155122 	netstack_shared_zones = sz;
1141f4b3ec61Sdh155122 
1142f4b3ec61Sdh155122 	/*
1143f4b3ec61Sdh155122 	 * Perform kstat_zone_add for each existing shared stack kstat.
1144f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock across kstat_zone_add.
1145f4b3ec61Sdh155122 	 */
1146f4b3ec61Sdh155122 	for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
1147f4b3ec61Sdh155122 		kstat_zone_add(sk->sk_kstat, zoneid);
1148f4b3ec61Sdh155122 	}
1149f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1150f4b3ec61Sdh155122 }
1151f4b3ec61Sdh155122 
1152f4b3ec61Sdh155122 static void
1153f4b3ec61Sdh155122 netstack_shared_zone_remove(zoneid_t zoneid)
1154f4b3ec61Sdh155122 {
1155f4b3ec61Sdh155122 	struct shared_zone_list **szp, *sz;
1156f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1157f4b3ec61Sdh155122 
1158f4b3ec61Sdh155122 	/* Find in list */
1159f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1160f4b3ec61Sdh155122 	sz = NULL;
1161f4b3ec61Sdh155122 	for (szp = &netstack_shared_zones; *szp != NULL;
1162f4b3ec61Sdh155122 	    szp = &((*szp)->sz_next)) {
1163f4b3ec61Sdh155122 		if ((*szp)->sz_zoneid == zoneid) {
1164f4b3ec61Sdh155122 			sz = *szp;
1165f4b3ec61Sdh155122 			break;
1166f4b3ec61Sdh155122 		}
1167f4b3ec61Sdh155122 	}
1168f4b3ec61Sdh155122 	/* We must find it */
1169f4b3ec61Sdh155122 	ASSERT(sz != NULL);
1170f4b3ec61Sdh155122 	*szp = sz->sz_next;
1171f4b3ec61Sdh155122 	sz->sz_next = NULL;
1172f4b3ec61Sdh155122 
1173f4b3ec61Sdh155122 	/*
1174f4b3ec61Sdh155122 	 * Perform kstat_zone_remove for each existing shared stack kstat.
1175f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock across kstat_zone_remove.
1176f4b3ec61Sdh155122 	 */
1177f4b3ec61Sdh155122 	for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
1178f4b3ec61Sdh155122 		kstat_zone_remove(sk->sk_kstat, zoneid);
1179f4b3ec61Sdh155122 	}
1180f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1181f4b3ec61Sdh155122 
1182f4b3ec61Sdh155122 	kmem_free(sz, sizeof (*sz));
1183f4b3ec61Sdh155122 }
1184f4b3ec61Sdh155122 
1185f4b3ec61Sdh155122 static void
1186f4b3ec61Sdh155122 netstack_shared_kstat_add(kstat_t *ks)
1187f4b3ec61Sdh155122 {
1188f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1189f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1190f4b3ec61Sdh155122 
1191f4b3ec61Sdh155122 	sk = (struct shared_kstat_list *)kmem_zalloc(sizeof (*sk), KM_SLEEP);
1192f4b3ec61Sdh155122 	sk->sk_kstat = ks;
1193f4b3ec61Sdh155122 
1194f4b3ec61Sdh155122 	/* Insert in list */
1195f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1196f4b3ec61Sdh155122 	sk->sk_next = netstack_shared_kstats;
1197f4b3ec61Sdh155122 	netstack_shared_kstats = sk;
1198f4b3ec61Sdh155122 
1199f4b3ec61Sdh155122 	/*
1200f4b3ec61Sdh155122 	 * Perform kstat_zone_add for each existing shared stack zone.
1201f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock across kstat_zone_add.
1202f4b3ec61Sdh155122 	 */
1203f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1204f4b3ec61Sdh155122 		kstat_zone_add(ks, sz->sz_zoneid);
1205f4b3ec61Sdh155122 	}
1206f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1207f4b3ec61Sdh155122 }
1208f4b3ec61Sdh155122 
1209f4b3ec61Sdh155122 static void
1210f4b3ec61Sdh155122 netstack_shared_kstat_remove(kstat_t *ks)
1211f4b3ec61Sdh155122 {
1212f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1213f4b3ec61Sdh155122 	struct shared_kstat_list **skp, *sk;
1214f4b3ec61Sdh155122 
1215f4b3ec61Sdh155122 	/* Find in list */
1216f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1217f4b3ec61Sdh155122 	sk = NULL;
1218f4b3ec61Sdh155122 	for (skp = &netstack_shared_kstats; *skp != NULL;
1219f4b3ec61Sdh155122 	    skp = &((*skp)->sk_next)) {
1220f4b3ec61Sdh155122 		if ((*skp)->sk_kstat == ks) {
1221f4b3ec61Sdh155122 			sk = *skp;
1222f4b3ec61Sdh155122 			break;
1223f4b3ec61Sdh155122 		}
1224f4b3ec61Sdh155122 	}
1225f4b3ec61Sdh155122 	/* Must find it */
1226f4b3ec61Sdh155122 	ASSERT(sk != NULL);
1227f4b3ec61Sdh155122 	*skp = sk->sk_next;
1228f4b3ec61Sdh155122 	sk->sk_next = NULL;
1229f4b3ec61Sdh155122 
1230f4b3ec61Sdh155122 	/*
1231f4b3ec61Sdh155122 	 * Perform kstat_zone_remove for each existing shared stack zone.
1232f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock across kstat_zone_remove.
1233f4b3ec61Sdh155122 	 */
1234f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1235f4b3ec61Sdh155122 		kstat_zone_remove(ks, sz->sz_zoneid);
1236f4b3ec61Sdh155122 	}
1237f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1238f4b3ec61Sdh155122 	kmem_free(sk, sizeof (*sk));
1239f4b3ec61Sdh155122 }
1240f4b3ec61Sdh155122 
1241f4b3ec61Sdh155122 /*
1242f4b3ec61Sdh155122  * Return B_TRUE if the zoneid is one of the zones using the global/shared stack.
1243f4b3ec61Sdh155122  */
1244f4b3ec61Sdh155122 static boolean_t
1245f4b3ec61Sdh155122 netstack_find_shared_zoneid(zoneid_t zoneid)
1246f4b3ec61Sdh155122 {
1247f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1248f4b3ec61Sdh155122 
1249f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1250f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1251f4b3ec61Sdh155122 		if (sz->sz_zoneid == zoneid) {
1252f4b3ec61Sdh155122 			mutex_exit(&netstack_shared_lock);
1253f4b3ec61Sdh155122 			return (B_TRUE);
1254f4b3ec61Sdh155122 		}
1255f4b3ec61Sdh155122 	}
1256f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1257f4b3ec61Sdh155122 	return (B_FALSE);
1258f4b3ec61Sdh155122 }
1259f4b3ec61Sdh155122 
1260f4b3ec61Sdh155122 /*
1261f4b3ec61Sdh155122  * Hide the fact that zoneids and netstackids are allocated from
1262f4b3ec61Sdh155122  * the same space in the current implementation.
1263bd41d0a8Snordmark  * We currently do not check that the stackid/zoneid is valid, since there
1264bd41d0a8Snordmark  * is no need to.  Callers are nevertheless expected to pass only ids that
1265bd41d0a8Snordmark  * are valid.
1266f4b3ec61Sdh155122  */
1267f4b3ec61Sdh155122 zoneid_t
1268f4b3ec61Sdh155122 netstackid_to_zoneid(netstackid_t stackid)
1269f4b3ec61Sdh155122 {
1270f4b3ec61Sdh155122 	return (stackid);
1271f4b3ec61Sdh155122 }
1272f4b3ec61Sdh155122 
1273f4b3ec61Sdh155122 netstackid_t
1274f4b3ec61Sdh155122 zoneid_to_netstackid(zoneid_t zoneid)
1275f4b3ec61Sdh155122 {
1276f4b3ec61Sdh155122 	if (netstack_find_shared_zoneid(zoneid))
1277f4b3ec61Sdh155122 		return (GLOBAL_ZONEID);
1278f4b3ec61Sdh155122 	else
1279f4b3ec61Sdh155122 		return (zoneid);
1280f4b3ec61Sdh155122 }
1281f4b3ec61Sdh155122 
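/*
 * Illustrative example (zone ids are hypothetical): a zone with an
 * exclusive IP instance maps to a stackid equal to its zoneid, while a
 * zone using the shared stack maps to the global stack:
 *
 *	zoneid_to_netstackid(7) == 7
 *	zoneid_to_netstackid(5) == GLOBAL_ZONEID
 *
 * Since zoneids and netstackids are allocated from the same space in the
 * current implementation, the latter value is the same as
 * GLOBAL_NETSTACKID.
 */
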
1282*0a0e9771SDarren Reed zoneid_t
1283*0a0e9771SDarren Reed netstack_get_zoneid(netstack_t *ns)
1284*0a0e9771SDarren Reed {
1285*0a0e9771SDarren Reed 	return (netstackid_to_zoneid(ns->netstack_stackid));
1286*0a0e9771SDarren Reed }
1287*0a0e9771SDarren Reed 
1288f4b3ec61Sdh155122 /*
1289f4b3ec61Sdh155122  * Simplistic support for walking all the stack instances via a handle.
1290f4b3ec61Sdh155122  * Example usage:
1291f4b3ec61Sdh155122  *	netstack_handle_t nh;
1292f4b3ec61Sdh155122  *	netstack_t *ns;
1293f4b3ec61Sdh155122  *
1294f4b3ec61Sdh155122  *	netstack_next_init(&nh);
1295f4b3ec61Sdh155122  *	while ((ns = netstack_next(&nh)) != NULL) {
1296f4b3ec61Sdh155122  *		do something;
1297f4b3ec61Sdh155122  *		netstack_rele(ns);
1298f4b3ec61Sdh155122  *	}
1299f4b3ec61Sdh155122  *	netstack_next_fini(&nh);
1300f4b3ec61Sdh155122  */
1301f4b3ec61Sdh155122 void
1302f4b3ec61Sdh155122 netstack_next_init(netstack_handle_t *handle)
1303f4b3ec61Sdh155122 {
1304f4b3ec61Sdh155122 	*handle = 0;
1305f4b3ec61Sdh155122 }
1306f4b3ec61Sdh155122 
1307f4b3ec61Sdh155122 /* ARGSUSED */
1308f4b3ec61Sdh155122 void
1309f4b3ec61Sdh155122 netstack_next_fini(netstack_handle_t *handle)
1310f4b3ec61Sdh155122 {
1311f4b3ec61Sdh155122 }
1312f4b3ec61Sdh155122 
1313f4b3ec61Sdh155122 netstack_t *
1314f4b3ec61Sdh155122 netstack_next(netstack_handle_t *handle)
1315f4b3ec61Sdh155122 {
1316f4b3ec61Sdh155122 	netstack_t *ns;
1317f4b3ec61Sdh155122 	int i, end;
1318f4b3ec61Sdh155122 
1319f4b3ec61Sdh155122 	end = *handle;
1320f4b3ec61Sdh155122 	/* Skip the first *handle instances in the walk */
1321f4b3ec61Sdh155122 
1322f4b3ec61Sdh155122 	/* Check whether there is a matching stack instance */
1323f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
1324f4b3ec61Sdh155122 	ns = netstack_head;
1325f4b3ec61Sdh155122 	for (i = 0; i < end; i++) {
1326f4b3ec61Sdh155122 		if (ns == NULL)
1327f4b3ec61Sdh155122 			break;
1328f4b3ec61Sdh155122 		ns = ns->netstack_next;
1329f4b3ec61Sdh155122 	}
1330f4b3ec61Sdh155122 	/* Skip stacks that are still initializing or are closing */
1331f4b3ec61Sdh155122 	while (ns != NULL) {
1332f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
1333f4b3ec61Sdh155122 		if ((ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING)) == 0) {
1334f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
1335f4b3ec61Sdh155122 			break;
1336f4b3ec61Sdh155122 		}
1337f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
1338f4b3ec61Sdh155122 		end++;
1339f4b3ec61Sdh155122 		ns = ns->netstack_next;
1340f4b3ec61Sdh155122 	}
1341f4b3ec61Sdh155122 	if (ns != NULL) {
1342f4b3ec61Sdh155122 		*handle = end + 1;
1343f4b3ec61Sdh155122 		netstack_hold(ns);
1344f4b3ec61Sdh155122 	}
1345f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
1346f4b3ec61Sdh155122 	return (ns);
1347f4b3ec61Sdh155122 }
1348