xref: /titanic_53/usr/src/uts/common/os/netstack.c (revision bd41d0a82bd89bc81d63ae5dfc2ba4245f74ea6c)
1f4b3ec61Sdh155122 /*
2f4b3ec61Sdh155122  * CDDL HEADER START
3f4b3ec61Sdh155122  *
4f4b3ec61Sdh155122  * The contents of this file are subject to the terms of the
5f4b3ec61Sdh155122  * Common Development and Distribution License (the "License").
6f4b3ec61Sdh155122  * You may not use this file except in compliance with the License.
7f4b3ec61Sdh155122  *
8f4b3ec61Sdh155122  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9f4b3ec61Sdh155122  * or http://www.opensolaris.org/os/licensing.
10f4b3ec61Sdh155122  * See the License for the specific language governing permissions
11f4b3ec61Sdh155122  * and limitations under the License.
12f4b3ec61Sdh155122  *
13f4b3ec61Sdh155122  * When distributing Covered Code, include this CDDL HEADER in each
14f4b3ec61Sdh155122  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15f4b3ec61Sdh155122  * If applicable, add the following below this CDDL HEADER, with the
16f4b3ec61Sdh155122  * fields enclosed by brackets "[]" replaced with your own identifying
17f4b3ec61Sdh155122  * information: Portions Copyright [yyyy] [name of copyright owner]
18f4b3ec61Sdh155122  *
19f4b3ec61Sdh155122  * CDDL HEADER END
20f4b3ec61Sdh155122  */
21f4b3ec61Sdh155122 
22f4b3ec61Sdh155122 /*
23*bd41d0a8Snordmark  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24f4b3ec61Sdh155122  * Use is subject to license terms.
25f4b3ec61Sdh155122  */
26f4b3ec61Sdh155122 
27f4b3ec61Sdh155122 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28f4b3ec61Sdh155122 
29f4b3ec61Sdh155122 #include <sys/param.h>
30f4b3ec61Sdh155122 #include <sys/sysmacros.h>
31f4b3ec61Sdh155122 #include <sys/vm.h>
32f4b3ec61Sdh155122 #include <sys/proc.h>
33f4b3ec61Sdh155122 #include <sys/tuneable.h>
34f4b3ec61Sdh155122 #include <sys/systm.h>
35f4b3ec61Sdh155122 #include <sys/cmn_err.h>
36f4b3ec61Sdh155122 #include <sys/debug.h>
37f4b3ec61Sdh155122 #include <sys/sdt.h>
38f4b3ec61Sdh155122 #include <sys/mutex.h>
39f4b3ec61Sdh155122 #include <sys/bitmap.h>
40f4b3ec61Sdh155122 #include <sys/atomic.h>
41f4b3ec61Sdh155122 #include <sys/kobj.h>
42f4b3ec61Sdh155122 #include <sys/disp.h>
43f4b3ec61Sdh155122 #include <vm/seg_kmem.h>
44f4b3ec61Sdh155122 #include <sys/zone.h>
45f4b3ec61Sdh155122 #include <sys/netstack.h>
46f4b3ec61Sdh155122 
47f4b3ec61Sdh155122 /*
48f4b3ec61Sdh155122  * What we use so that the zones framework can tell us about new zones,
49f4b3ec61Sdh155122  * which we use to create new stacks.
50f4b3ec61Sdh155122  */
51f4b3ec61Sdh155122 static zone_key_t netstack_zone_key;
52f4b3ec61Sdh155122 
53f4b3ec61Sdh155122 static int	netstack_initialized = 0;
54f4b3ec61Sdh155122 
55f4b3ec61Sdh155122 /*
56f4b3ec61Sdh155122  * Track the registered netstacks.
57f4b3ec61Sdh155122  * The global lock protects
58f4b3ec61Sdh155122  * - ns_reg
59f4b3ec61Sdh155122  * - the list starting at netstack_head and following the netstack_next
60f4b3ec61Sdh155122  *   pointers.
61f4b3ec61Sdh155122  */
62f4b3ec61Sdh155122 static kmutex_t netstack_g_lock;
63f4b3ec61Sdh155122 
64f4b3ec61Sdh155122 /*
65f4b3ec61Sdh155122  * Registry of netstacks with their create/shutdown/destory functions.
66f4b3ec61Sdh155122  */
67f4b3ec61Sdh155122 static struct netstack_registry	ns_reg[NS_MAX];
68f4b3ec61Sdh155122 
69f4b3ec61Sdh155122 /*
70f4b3ec61Sdh155122  * Global list of existing stacks.  We use this when a new zone with
71f4b3ec61Sdh155122  * an exclusive IP instance is created.
72f4b3ec61Sdh155122  *
73f4b3ec61Sdh155122  * Note that in some cases a netstack_t needs to stay around after the zone
74f4b3ec61Sdh155122  * has gone away. This is because there might be outstanding references
75f4b3ec61Sdh155122  * (from TCP TIME_WAIT connections, IPsec state, etc). The netstack_t data
76f4b3ec61Sdh155122  * structure and all the foo_stack_t's hanging off of it will be cleaned up
77f4b3ec61Sdh155122  * when the last reference to it is dropped.
78f4b3ec61Sdh155122  * However, the same zone might be rebooted. That is handled using the
79f4b3ec61Sdh155122  * assumption that the zones framework picks a new zoneid each time a zone
80f4b3ec61Sdh155122  * is (re)booted. We assert for that condition in netstack_zone_create().
81f4b3ec61Sdh155122  * Thus the old netstack_t can take its time for things to time out.
82f4b3ec61Sdh155122  */
83f4b3ec61Sdh155122 static netstack_t *netstack_head;
84f4b3ec61Sdh155122 
85f4b3ec61Sdh155122 /*
86f4b3ec61Sdh155122  * To support kstat_create_netstack() using kstat_zone_add we need
87f4b3ec61Sdh155122  * to track both
88f4b3ec61Sdh155122  *  - all zoneids that use the global/shared stack
89f4b3ec61Sdh155122  *  - all kstats that have been added for the shared stack
90f4b3ec61Sdh155122  */
91f4b3ec61Sdh155122 struct shared_zone_list {
92f4b3ec61Sdh155122 	struct shared_zone_list *sz_next;
93f4b3ec61Sdh155122 	zoneid_t		sz_zoneid;
94f4b3ec61Sdh155122 };
95f4b3ec61Sdh155122 
96f4b3ec61Sdh155122 struct shared_kstat_list {
97f4b3ec61Sdh155122 	struct shared_kstat_list *sk_next;
98f4b3ec61Sdh155122 	kstat_t			 *sk_kstat;
99f4b3ec61Sdh155122 };
100f4b3ec61Sdh155122 
101f4b3ec61Sdh155122 static kmutex_t netstack_shared_lock;	/* protects the following two */
102f4b3ec61Sdh155122 static struct shared_zone_list	*netstack_shared_zones;
103f4b3ec61Sdh155122 static struct shared_kstat_list	*netstack_shared_kstats;
104f4b3ec61Sdh155122 
105f4b3ec61Sdh155122 static void	*netstack_zone_create(zoneid_t zoneid);
106f4b3ec61Sdh155122 static void	netstack_zone_shutdown(zoneid_t zoneid, void *arg);
107f4b3ec61Sdh155122 static void	netstack_zone_destroy(zoneid_t zoneid, void *arg);
108f4b3ec61Sdh155122 
109f4b3ec61Sdh155122 static void	netstack_shared_zone_add(zoneid_t zoneid);
110f4b3ec61Sdh155122 static void	netstack_shared_zone_remove(zoneid_t zoneid);
111f4b3ec61Sdh155122 static void	netstack_shared_kstat_add(kstat_t *ks);
112f4b3ec61Sdh155122 static void	netstack_shared_kstat_remove(kstat_t *ks);
113f4b3ec61Sdh155122 
11423f4867fSnordmark typedef boolean_t applyfn_t(kmutex_t *, netstack_t *, int);
115f4b3ec61Sdh155122 
116*bd41d0a8Snordmark static void	apply_all_netstacks(int, applyfn_t *);
117*bd41d0a8Snordmark static void	apply_all_modules(netstack_t *, applyfn_t *);
118*bd41d0a8Snordmark static void	apply_all_modules_reverse(netstack_t *, applyfn_t *);
119*bd41d0a8Snordmark static boolean_t netstack_apply_create(kmutex_t *, netstack_t *, int);
120*bd41d0a8Snordmark static boolean_t netstack_apply_shutdown(kmutex_t *, netstack_t *, int);
121*bd41d0a8Snordmark static boolean_t netstack_apply_destroy(kmutex_t *, netstack_t *, int);
122*bd41d0a8Snordmark static boolean_t wait_for_zone_creator(netstack_t *, kmutex_t *);
123*bd41d0a8Snordmark static boolean_t wait_for_nms_inprogress(netstack_t *, nm_state_t *,
124*bd41d0a8Snordmark     kmutex_t *);
125*bd41d0a8Snordmark 
126f4b3ec61Sdh155122 void
127f4b3ec61Sdh155122 netstack_init(void)
128f4b3ec61Sdh155122 {
129f4b3ec61Sdh155122 	mutex_init(&netstack_g_lock, NULL, MUTEX_DEFAULT, NULL);
130f4b3ec61Sdh155122 	mutex_init(&netstack_shared_lock, NULL, MUTEX_DEFAULT, NULL);
131f4b3ec61Sdh155122 
132f4b3ec61Sdh155122 	netstack_initialized = 1;
133f4b3ec61Sdh155122 
134f4b3ec61Sdh155122 	/*
135f4b3ec61Sdh155122 	 * We want to be informed each time a zone is created or
136f4b3ec61Sdh155122 	 * destroyed in the kernel, so we can maintain the
137f4b3ec61Sdh155122 	 * stack instance information.
138f4b3ec61Sdh155122 	 */
139f4b3ec61Sdh155122 	zone_key_create(&netstack_zone_key, netstack_zone_create,
140f4b3ec61Sdh155122 	    netstack_zone_shutdown, netstack_zone_destroy);
141f4b3ec61Sdh155122 }
142f4b3ec61Sdh155122 
143f4b3ec61Sdh155122 /*
144f4b3ec61Sdh155122  * Register a new module with the framework.
145f4b3ec61Sdh155122  * This registers interest in changes to the set of netstacks.
146f4b3ec61Sdh155122  * The createfn and destroyfn are required, but the shutdownfn can be
147f4b3ec61Sdh155122  * NULL.
148f4b3ec61Sdh155122  * Note that due to the current zsd implementation, when the create
149f4b3ec61Sdh155122  * function is called the zone isn't fully present, thus functions
150f4b3ec61Sdh155122  * like zone_find_by_* will fail, hence the create function can not
151f4b3ec61Sdh155122  * use many zones kernel functions including zcmn_err().
152f4b3ec61Sdh155122  */
153f4b3ec61Sdh155122 void
154f4b3ec61Sdh155122 netstack_register(int moduleid,
155f4b3ec61Sdh155122     void *(*module_create)(netstackid_t, netstack_t *),
156f4b3ec61Sdh155122     void (*module_shutdown)(netstackid_t, void *),
157f4b3ec61Sdh155122     void (*module_destroy)(netstackid_t, void *))
158f4b3ec61Sdh155122 {
159f4b3ec61Sdh155122 	netstack_t *ns;
160f4b3ec61Sdh155122 
161f4b3ec61Sdh155122 	ASSERT(netstack_initialized);
162f4b3ec61Sdh155122 	ASSERT(moduleid >= 0 && moduleid < NS_MAX);
163f4b3ec61Sdh155122 	ASSERT(module_create != NULL);
164f4b3ec61Sdh155122 
165*bd41d0a8Snordmark 	/*
166*bd41d0a8Snordmark 	 * Make instances created after this point in time run the create
167*bd41d0a8Snordmark 	 * callback.
168*bd41d0a8Snordmark 	 */
169f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
170f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_create == NULL);
171f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_flags == 0);
172f4b3ec61Sdh155122 	ns_reg[moduleid].nr_create = module_create;
173f4b3ec61Sdh155122 	ns_reg[moduleid].nr_shutdown = module_shutdown;
174f4b3ec61Sdh155122 	ns_reg[moduleid].nr_destroy = module_destroy;
175f4b3ec61Sdh155122 	ns_reg[moduleid].nr_flags = NRF_REGISTERED;
176f4b3ec61Sdh155122 
177f4b3ec61Sdh155122 	/*
178f4b3ec61Sdh155122 	 * Determine the set of stacks that exist before we drop the lock.
179*bd41d0a8Snordmark 	 * Set NSS_CREATE_NEEDED for each of those.
180f4b3ec61Sdh155122 	 * netstacks which have been deleted will have NSS_CREATE_COMPLETED
181f4b3ec61Sdh155122 	 * set, but check NSF_CLOSING to be sure.
182f4b3ec61Sdh155122 	 */
183f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
184*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
185*bd41d0a8Snordmark 
186f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
187f4b3ec61Sdh155122 		if (!(ns->netstack_flags & NSF_CLOSING) &&
188*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
189*bd41d0a8Snordmark 			nms->nms_flags |= NSS_CREATE_NEEDED;
190f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__create__needed,
191f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
192f4b3ec61Sdh155122 		}
193f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
194f4b3ec61Sdh155122 	}
195f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
196f4b3ec61Sdh155122 
197f4b3ec61Sdh155122 	/*
198*bd41d0a8Snordmark 	 * At this point in time a new instance can be created or an instance
199*bd41d0a8Snordmark 	 * can be destroyed, or some other module can register or unregister.
200*bd41d0a8Snordmark 	 * Make sure we either run all the create functions for this moduleid
201*bd41d0a8Snordmark 	 * or we wait for any other creators for this moduleid.
202f4b3ec61Sdh155122 	 */
203*bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_create);
204f4b3ec61Sdh155122 }
205f4b3ec61Sdh155122 
206f4b3ec61Sdh155122 void
207f4b3ec61Sdh155122 netstack_unregister(int moduleid)
208f4b3ec61Sdh155122 {
209f4b3ec61Sdh155122 	netstack_t *ns;
210f4b3ec61Sdh155122 
211f4b3ec61Sdh155122 	ASSERT(moduleid >= 0 && moduleid < NS_MAX);
212f4b3ec61Sdh155122 
213f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_create != NULL);
214f4b3ec61Sdh155122 	ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);
215f4b3ec61Sdh155122 
216f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
217f4b3ec61Sdh155122 	/*
218f4b3ec61Sdh155122 	 * Determine the set of stacks that exist before we drop the lock.
219*bd41d0a8Snordmark 	 * Set NSS_SHUTDOWN_NEEDED and NSS_DESTROY_NEEDED for each of those.
220*bd41d0a8Snordmark 	 * That ensures that when we return all the callbacks for existing
221*bd41d0a8Snordmark 	 * instances have completed. And since we set NRF_DYING no new
222*bd41d0a8Snordmark 	 * instances can use this module.
223f4b3ec61Sdh155122 	 */
224f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
225*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
226*bd41d0a8Snordmark 
227f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
228f4b3ec61Sdh155122 		if (ns_reg[moduleid].nr_shutdown != NULL &&
229*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
230*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
231*bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
232f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
233f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
234f4b3ec61Sdh155122 		}
235f4b3ec61Sdh155122 		if ((ns_reg[moduleid].nr_flags & NRF_REGISTERED) &&
236f4b3ec61Sdh155122 		    ns_reg[moduleid].nr_destroy != NULL &&
237*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
238*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
239*bd41d0a8Snordmark 			nms->nms_flags |= NSS_DESTROY_NEEDED;
240f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__needed,
241f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
242f4b3ec61Sdh155122 		}
243f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
244f4b3ec61Sdh155122 	}
245*bd41d0a8Snordmark 	/*
246*bd41d0a8Snordmark 	 * Prevent any new netstack from calling the registered create
247*bd41d0a8Snordmark 	 * function, while keeping the function pointers in place until the
248*bd41d0a8Snordmark 	 * shutdown and destroy callbacks are complete.
249*bd41d0a8Snordmark 	 */
250*bd41d0a8Snordmark 	ns_reg[moduleid].nr_flags |= NRF_DYING;
251f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
252f4b3ec61Sdh155122 
253*bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_shutdown);
254*bd41d0a8Snordmark 	apply_all_netstacks(moduleid, netstack_apply_destroy);
255f4b3ec61Sdh155122 
256f4b3ec61Sdh155122 	/*
257*bd41d0a8Snordmark 	 * Clear the nms_flags so that we can handle this module
258f4b3ec61Sdh155122 	 * being loaded again.
259*bd41d0a8Snordmark 	 * Also remove the registered functions.
260f4b3ec61Sdh155122 	 */
261f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
262*bd41d0a8Snordmark 	ASSERT(ns_reg[moduleid].nr_flags & NRF_REGISTERED);
263*bd41d0a8Snordmark 	ASSERT(ns_reg[moduleid].nr_flags & NRF_DYING);
264f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
265*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[moduleid];
266*bd41d0a8Snordmark 
267f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
268*bd41d0a8Snordmark 		if (nms->nms_flags & NSS_DESTROY_COMPLETED) {
269*bd41d0a8Snordmark 			nms->nms_flags = 0;
270f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__done,
271f4b3ec61Sdh155122 			    netstack_t *, ns, int, moduleid);
272f4b3ec61Sdh155122 		}
273f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
274f4b3ec61Sdh155122 	}
275f4b3ec61Sdh155122 
276f4b3ec61Sdh155122 	ns_reg[moduleid].nr_create = NULL;
277f4b3ec61Sdh155122 	ns_reg[moduleid].nr_shutdown = NULL;
278f4b3ec61Sdh155122 	ns_reg[moduleid].nr_destroy = NULL;
279f4b3ec61Sdh155122 	ns_reg[moduleid].nr_flags = 0;
280f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
281f4b3ec61Sdh155122 }
282f4b3ec61Sdh155122 
283f4b3ec61Sdh155122 /*
284f4b3ec61Sdh155122  * Lookup and/or allocate a netstack for this zone.
285f4b3ec61Sdh155122  */
286f4b3ec61Sdh155122 static void *
287f4b3ec61Sdh155122 netstack_zone_create(zoneid_t zoneid)
288f4b3ec61Sdh155122 {
289f4b3ec61Sdh155122 	netstackid_t stackid;
290f4b3ec61Sdh155122 	netstack_t *ns;
291f4b3ec61Sdh155122 	netstack_t **nsp;
292f4b3ec61Sdh155122 	zone_t	*zone;
293f4b3ec61Sdh155122 	int i;
294f4b3ec61Sdh155122 
295f4b3ec61Sdh155122 	ASSERT(netstack_initialized);
296f4b3ec61Sdh155122 
297f4b3ec61Sdh155122 	zone = zone_find_by_id_nolock(zoneid);
298f4b3ec61Sdh155122 	ASSERT(zone != NULL);
299f4b3ec61Sdh155122 
300f4b3ec61Sdh155122 	if (zone->zone_flags & ZF_NET_EXCL) {
301f4b3ec61Sdh155122 		stackid = zoneid;
302f4b3ec61Sdh155122 	} else {
303f4b3ec61Sdh155122 		/* Look for the stack instance for the global */
304f4b3ec61Sdh155122 		stackid = GLOBAL_NETSTACKID;
305f4b3ec61Sdh155122 	}
306f4b3ec61Sdh155122 
307f4b3ec61Sdh155122 	/* Allocate even if it isn't needed; simplifies locking */
308f4b3ec61Sdh155122 	ns = (netstack_t *)kmem_zalloc(sizeof (netstack_t), KM_SLEEP);
309f4b3ec61Sdh155122 
310f4b3ec61Sdh155122 	/* Look if there is a matching stack instance */
311f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
312f4b3ec61Sdh155122 	for (nsp = &netstack_head; *nsp != NULL;
313f4b3ec61Sdh155122 	    nsp = &((*nsp)->netstack_next)) {
314f4b3ec61Sdh155122 		if ((*nsp)->netstack_stackid == stackid) {
315f4b3ec61Sdh155122 			/*
316f4b3ec61Sdh155122 			 * Should never find a pre-existing exclusive stack
317f4b3ec61Sdh155122 			 */
318f4b3ec61Sdh155122 			ASSERT(stackid == GLOBAL_NETSTACKID);
319f4b3ec61Sdh155122 			kmem_free(ns, sizeof (netstack_t));
320f4b3ec61Sdh155122 			ns = *nsp;
321f4b3ec61Sdh155122 			mutex_enter(&ns->netstack_lock);
322f4b3ec61Sdh155122 			ns->netstack_numzones++;
323f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
324f4b3ec61Sdh155122 			mutex_exit(&netstack_g_lock);
325f4b3ec61Sdh155122 			DTRACE_PROBE1(netstack__inc__numzones,
326f4b3ec61Sdh155122 			    netstack_t *, ns);
327f4b3ec61Sdh155122 			/* Record that we have a new shared stack zone */
328f4b3ec61Sdh155122 			netstack_shared_zone_add(zoneid);
329f4b3ec61Sdh155122 			zone->zone_netstack = ns;
330f4b3ec61Sdh155122 			return (ns);
331f4b3ec61Sdh155122 		}
332f4b3ec61Sdh155122 	}
333f4b3ec61Sdh155122 	/* Not found */
334f4b3ec61Sdh155122 	mutex_init(&ns->netstack_lock, NULL, MUTEX_DEFAULT, NULL);
335*bd41d0a8Snordmark 	cv_init(&ns->netstack_cv, NULL, CV_DEFAULT, NULL);
336f4b3ec61Sdh155122 	ns->netstack_stackid = zoneid;
337f4b3ec61Sdh155122 	ns->netstack_numzones = 1;
338f4b3ec61Sdh155122 	ns->netstack_refcnt = 1; /* Decremented by netstack_zone_destroy */
339f4b3ec61Sdh155122 	ns->netstack_flags = NSF_UNINIT;
340f4b3ec61Sdh155122 	*nsp = ns;
341f4b3ec61Sdh155122 	zone->zone_netstack = ns;
342f4b3ec61Sdh155122 
343*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
344*bd41d0a8Snordmark 	/*
345*bd41d0a8Snordmark 	 * Mark this netstack as having a CREATE running so
346*bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
347*bd41d0a8Snordmark 	 * the existing create callbacks to complete in moduleid order
348*bd41d0a8Snordmark 	 */
349*bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_CREATE;
350*bd41d0a8Snordmark 
351f4b3ec61Sdh155122 	/*
352f4b3ec61Sdh155122 	 * Determine the set of module create functions that need to be
353f4b3ec61Sdh155122 	 * called before we drop the lock.
354*bd41d0a8Snordmark 	 * Set NSS_CREATE_NEEDED for each of those.
355*bd41d0a8Snordmark 	 * Skip any with NRF_DYING set, since those are in the process of
356*bd41d0a8Snordmark 	 * going away, by checking for flags being exactly NRF_REGISTERED.
357f4b3ec61Sdh155122 	 */
358f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
359*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
360*bd41d0a8Snordmark 
361*bd41d0a8Snordmark 		cv_init(&nms->nms_cv, NULL, CV_DEFAULT, NULL);
362*bd41d0a8Snordmark 
363*bd41d0a8Snordmark 		if ((ns_reg[i].nr_flags == NRF_REGISTERED) &&
364*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_ALL) == 0) {
365*bd41d0a8Snordmark 			nms->nms_flags |= NSS_CREATE_NEEDED;
366f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__create__needed,
367f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
368f4b3ec61Sdh155122 		}
369f4b3ec61Sdh155122 	}
370*bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
371f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
372f4b3ec61Sdh155122 
373*bd41d0a8Snordmark 	apply_all_modules(ns, netstack_apply_create);
374f4b3ec61Sdh155122 
375*bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
376f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
377f4b3ec61Sdh155122 	ns->netstack_flags &= ~NSF_UNINIT;
378*bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_CREATE);
379*bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_CREATE;
380*bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
381f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
382f4b3ec61Sdh155122 
383f4b3ec61Sdh155122 	return (ns);
384f4b3ec61Sdh155122 }
385f4b3ec61Sdh155122 
386f4b3ec61Sdh155122 /* ARGSUSED */
387f4b3ec61Sdh155122 static void
388f4b3ec61Sdh155122 netstack_zone_shutdown(zoneid_t zoneid, void *arg)
389f4b3ec61Sdh155122 {
390f4b3ec61Sdh155122 	netstack_t *ns = (netstack_t *)arg;
391f4b3ec61Sdh155122 	int i;
392f4b3ec61Sdh155122 
393f4b3ec61Sdh155122 	ASSERT(arg != NULL);
394f4b3ec61Sdh155122 
395f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
396f4b3ec61Sdh155122 	ASSERT(ns->netstack_numzones > 0);
397f4b3ec61Sdh155122 	if (ns->netstack_numzones != 1) {
398f4b3ec61Sdh155122 		/* Stack instance being used by other zone */
399f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
400f4b3ec61Sdh155122 		ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
401f4b3ec61Sdh155122 		return;
402f4b3ec61Sdh155122 	}
403f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
404f4b3ec61Sdh155122 
405f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
406*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
407*bd41d0a8Snordmark 	/*
408*bd41d0a8Snordmark 	 * Mark this netstack as having a SHUTDOWN running so
409*bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
410*bd41d0a8Snordmark 	 * the existing create callbacks to complete in moduleid order
411*bd41d0a8Snordmark 	 */
412*bd41d0a8Snordmark 	ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
413*bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_SHUTDOWN;
414*bd41d0a8Snordmark 
415f4b3ec61Sdh155122 	/*
416f4b3ec61Sdh155122 	 * Determine the set of stacks that exist before we drop the lock.
417*bd41d0a8Snordmark 	 * Set NSS_SHUTDOWN_NEEDED for each of those.
418f4b3ec61Sdh155122 	 */
419f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
420*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
421*bd41d0a8Snordmark 
422f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
423f4b3ec61Sdh155122 		    ns_reg[i].nr_shutdown != NULL &&
424*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
425*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
426*bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
427f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
428f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
429f4b3ec61Sdh155122 		}
430f4b3ec61Sdh155122 	}
431*bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
432f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
433f4b3ec61Sdh155122 
43423f4867fSnordmark 	/*
43523f4867fSnordmark 	 * Call the shutdown function for all registered modules for this
43623f4867fSnordmark 	 * netstack.
43723f4867fSnordmark 	 */
438*bd41d0a8Snordmark 	apply_all_modules(ns, netstack_apply_shutdown);
439*bd41d0a8Snordmark 
440*bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
441*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
442*bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_SHUTDOWN);
443*bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_SHUTDOWN;
444*bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
445*bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
446f4b3ec61Sdh155122 }
447f4b3ec61Sdh155122 
448f4b3ec61Sdh155122 /*
449f4b3ec61Sdh155122  * Common routine to release a zone.
450f4b3ec61Sdh155122  * If this was the last zone using the stack instance then prepare to
451f4b3ec61Sdh155122  * have the refcnt dropping to zero free the zone.
452f4b3ec61Sdh155122  */
453f4b3ec61Sdh155122 /* ARGSUSED */
454f4b3ec61Sdh155122 static void
455f4b3ec61Sdh155122 netstack_zone_destroy(zoneid_t zoneid, void *arg)
456f4b3ec61Sdh155122 {
457f4b3ec61Sdh155122 	netstack_t *ns = (netstack_t *)arg;
458f4b3ec61Sdh155122 
459f4b3ec61Sdh155122 	ASSERT(arg != NULL);
460f4b3ec61Sdh155122 
461f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
462f4b3ec61Sdh155122 	ASSERT(ns->netstack_numzones > 0);
463f4b3ec61Sdh155122 	ns->netstack_numzones--;
464f4b3ec61Sdh155122 	if (ns->netstack_numzones != 0) {
465f4b3ec61Sdh155122 		/* Stack instance being used by other zone */
466f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
467f4b3ec61Sdh155122 		ASSERT(ns->netstack_stackid == GLOBAL_NETSTACKID);
468f4b3ec61Sdh155122 		/* Record that we a shared stack zone has gone away */
469f4b3ec61Sdh155122 		netstack_shared_zone_remove(zoneid);
470f4b3ec61Sdh155122 		return;
471f4b3ec61Sdh155122 	}
472f4b3ec61Sdh155122 	/*
47323f4867fSnordmark 	 * Set CLOSING so that netstack_find_by will not find it.
474f4b3ec61Sdh155122 	 */
475f4b3ec61Sdh155122 	ns->netstack_flags |= NSF_CLOSING;
476f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
477f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__dec__numzones, netstack_t *, ns);
478f4b3ec61Sdh155122 	/* No other thread can call zone_destroy for this stack */
479f4b3ec61Sdh155122 
480f4b3ec61Sdh155122 	/*
481f4b3ec61Sdh155122 	 * Decrease refcnt to account for the one in netstack_zone_init()
482f4b3ec61Sdh155122 	 */
483f4b3ec61Sdh155122 	netstack_rele(ns);
484f4b3ec61Sdh155122 }
485f4b3ec61Sdh155122 
486f4b3ec61Sdh155122 /*
487f4b3ec61Sdh155122  * Called when the reference count drops to zero.
488f4b3ec61Sdh155122  * Call the destroy functions for each registered module.
489f4b3ec61Sdh155122  */
490f4b3ec61Sdh155122 static void
491f4b3ec61Sdh155122 netstack_stack_inactive(netstack_t *ns)
492f4b3ec61Sdh155122 {
493f4b3ec61Sdh155122 	int i;
494f4b3ec61Sdh155122 
495f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
496*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
497*bd41d0a8Snordmark 	/*
498*bd41d0a8Snordmark 	 * Mark this netstack as having a DESTROY running so
499*bd41d0a8Snordmark 	 * any netstack_register/netstack_unregister waits for
500*bd41d0a8Snordmark 	 * the existing destroy callbacks to complete in reverse moduleid order
501*bd41d0a8Snordmark 	 */
502*bd41d0a8Snordmark 	ASSERT(!(ns->netstack_flags & NSF_ZONE_INPROGRESS));
503*bd41d0a8Snordmark 	ns->netstack_flags |= NSF_ZONE_DESTROY;
504f4b3ec61Sdh155122 	/*
505f4b3ec61Sdh155122 	 * If the shutdown callback wasn't called earlier (e.g., if this is
506*bd41d0a8Snordmark 	 * a netstack shared between multiple zones), then we schedule it now.
507*bd41d0a8Snordmark 	 *
508*bd41d0a8Snordmark 	 * Determine the set of stacks that exist before we drop the lock.
509*bd41d0a8Snordmark 	 * Set NSS_DESTROY_NEEDED for each of those. That
510*bd41d0a8Snordmark 	 * ensures that when we return all the callbacks for existing
511*bd41d0a8Snordmark 	 * instances have completed.
512f4b3ec61Sdh155122 	 */
513f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
514*bd41d0a8Snordmark 		nm_state_t *nms = &ns->netstack_m_state[i];
515*bd41d0a8Snordmark 
516f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
517f4b3ec61Sdh155122 		    ns_reg[i].nr_shutdown != NULL &&
518*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
519*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_SHUTDOWN_ALL) == 0) {
520*bd41d0a8Snordmark 			nms->nms_flags |= NSS_SHUTDOWN_NEEDED;
521f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__shutdown__needed,
522f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
523f4b3ec61Sdh155122 		}
524*bd41d0a8Snordmark 
525f4b3ec61Sdh155122 		if ((ns_reg[i].nr_flags & NRF_REGISTERED) &&
526f4b3ec61Sdh155122 		    ns_reg[i].nr_destroy != NULL &&
527*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_CREATE_COMPLETED) &&
528*bd41d0a8Snordmark 		    (nms->nms_flags & NSS_DESTROY_ALL) == 0) {
529*bd41d0a8Snordmark 			nms->nms_flags |= NSS_DESTROY_NEEDED;
530f4b3ec61Sdh155122 			DTRACE_PROBE2(netstack__destroy__needed,
531f4b3ec61Sdh155122 			    netstack_t *, ns, int, i);
532f4b3ec61Sdh155122 		}
533f4b3ec61Sdh155122 	}
534*bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
535f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
536f4b3ec61Sdh155122 
53723f4867fSnordmark 	/*
53823f4867fSnordmark 	 * Call the shutdown and destroy functions for all registered modules
53923f4867fSnordmark 	 * for this netstack.
540*bd41d0a8Snordmark 	 *
541*bd41d0a8Snordmark 	 * Since there are some ordering dependencies between the modules we
542*bd41d0a8Snordmark 	 * tear them down in the reverse order of what was used to create them.
543*bd41d0a8Snordmark 	 *
544*bd41d0a8Snordmark 	 * Since a netstack_t is never reused (when a zone is rebooted it gets
545*bd41d0a8Snordmark 	 * a new zoneid == netstackid i.e. a new netstack_t is allocated) we
546*bd41d0a8Snordmark 	 * leave nms_flags the way it is i.e. with NSS_DESTROY_COMPLETED set.
547*bd41d0a8Snordmark 	 * That is different than in the netstack_unregister() case.
54823f4867fSnordmark 	 */
549*bd41d0a8Snordmark 	apply_all_modules(ns, netstack_apply_shutdown);
550*bd41d0a8Snordmark 	apply_all_modules_reverse(ns, netstack_apply_destroy);
551f4b3ec61Sdh155122 
552*bd41d0a8Snordmark 	/* Tell any waiting netstack_register/netstack_unregister to proceed */
553f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
554*bd41d0a8Snordmark 	ASSERT(ns->netstack_flags & NSF_ZONE_DESTROY);
555*bd41d0a8Snordmark 	ns->netstack_flags &= ~NSF_ZONE_DESTROY;
556*bd41d0a8Snordmark 	cv_broadcast(&ns->netstack_cv);
557f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
558f4b3ec61Sdh155122 }
559f4b3ec61Sdh155122 
56023f4867fSnordmark /*
56123f4867fSnordmark  * Apply a function to all netstacks for a particular moduleid.
56223f4867fSnordmark  *
563*bd41d0a8Snordmark  * If there is any zone activity (due to a zone being created, shutdown,
564*bd41d0a8Snordmark  * or destroyed) we wait for that to complete before we proceed. This ensures
565*bd41d0a8Snordmark  * that the moduleids are processed in order when a zone is created or
566*bd41d0a8Snordmark  * destroyed.
567*bd41d0a8Snordmark  *
56823f4867fSnordmark  * The applyfn has to drop netstack_g_lock if it does some work.
569*bd41d0a8Snordmark  * In that case we don't follow netstack_next,
570*bd41d0a8Snordmark  * even if it is possible to do so without any hazards. This is
57123f4867fSnordmark  * because we want the design to allow for the list of netstacks threaded
57223f4867fSnordmark  * by netstack_next to change in any arbitrary way during the time the
57323f4867fSnordmark  * lock was dropped.
57423f4867fSnordmark  *
57523f4867fSnordmark  * It is safe to restart the loop at netstack_head since the applyfn
57623f4867fSnordmark  * changes netstack_m_state as it processes things, so a subsequent
57723f4867fSnordmark  * pass through will have no effect in applyfn, hence the loop will terminate
57823f4867fSnordmark  * in at worst O(N^2).
57923f4867fSnordmark  */
580f4b3ec61Sdh155122 static void
58123f4867fSnordmark apply_all_netstacks(int moduleid, applyfn_t *applyfn)
582f4b3ec61Sdh155122 {
583f4b3ec61Sdh155122 	netstack_t *ns;
584f4b3ec61Sdh155122 
58523f4867fSnordmark 	mutex_enter(&netstack_g_lock);
58623f4867fSnordmark 	ns = netstack_head;
587f4b3ec61Sdh155122 	while (ns != NULL) {
588*bd41d0a8Snordmark 		if (wait_for_zone_creator(ns, &netstack_g_lock)) {
58923f4867fSnordmark 			/* Lock dropped - restart at head */
590*bd41d0a8Snordmark 			ns = netstack_head;
591*bd41d0a8Snordmark 		} else if ((applyfn)(&netstack_g_lock, ns, moduleid)) {
592*bd41d0a8Snordmark 			/* Lock dropped - restart at head */
59323f4867fSnordmark 			ns = netstack_head;
59423f4867fSnordmark 		} else {
59523f4867fSnordmark 			ns = ns->netstack_next;
59623f4867fSnordmark 		}
59723f4867fSnordmark 	}
59823f4867fSnordmark 	mutex_exit(&netstack_g_lock);
59923f4867fSnordmark }
60023f4867fSnordmark 
60123f4867fSnordmark /*
60223f4867fSnordmark  * Apply a function to all moduleids for a particular netstack.
60323f4867fSnordmark  *
60423f4867fSnordmark  * Since the netstack linkage doesn't matter in this case we can
60523f4867fSnordmark  * ignore whether the function drops the lock.
60623f4867fSnordmark  */
60723f4867fSnordmark static void
60823f4867fSnordmark apply_all_modules(netstack_t *ns, applyfn_t *applyfn)
60923f4867fSnordmark {
61023f4867fSnordmark 	int i;
61123f4867fSnordmark 
61223f4867fSnordmark 	mutex_enter(&netstack_g_lock);
613f4b3ec61Sdh155122 	for (i = 0; i < NS_MAX; i++) {
614f4b3ec61Sdh155122 		/*
615*bd41d0a8Snordmark 		 * We don't care whether the lock was dropped
616*bd41d0a8Snordmark 		 * since we are not iterating over netstack_head.
617f4b3ec61Sdh155122 		 */
618*bd41d0a8Snordmark 		(void) (applyfn)(&netstack_g_lock, ns, i);
619f4b3ec61Sdh155122 	}
62023f4867fSnordmark 	mutex_exit(&netstack_g_lock);
621f4b3ec61Sdh155122 }
622f4b3ec61Sdh155122 
62323f4867fSnordmark /* Like the above but in reverse moduleid order */
624f4b3ec61Sdh155122 static void
62523f4867fSnordmark apply_all_modules_reverse(netstack_t *ns, applyfn_t *applyfn)
626f4b3ec61Sdh155122 {
627f4b3ec61Sdh155122 	int i;
628f4b3ec61Sdh155122 
62923f4867fSnordmark 	mutex_enter(&netstack_g_lock);
630f4b3ec61Sdh155122 	for (i = NS_MAX-1; i >= 0; i--) {
631f4b3ec61Sdh155122 		/*
632*bd41d0a8Snordmark 		 * We don't care whether the lock was dropped
633*bd41d0a8Snordmark 		 * since we are not iterating over netstack_head.
634f4b3ec61Sdh155122 		 */
635*bd41d0a8Snordmark 		(void) (applyfn)(&netstack_g_lock, ns, i);
636f4b3ec61Sdh155122 	}
63723f4867fSnordmark 	mutex_exit(&netstack_g_lock);
638f4b3ec61Sdh155122 }
639f4b3ec61Sdh155122 
640f4b3ec61Sdh155122 /*
641*bd41d0a8Snordmark  * Call the create function for the ns and moduleid if CREATE_NEEDED
642*bd41d0a8Snordmark  * is set.
643*bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
644*bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
645*bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
64623f4867fSnordmark  *
647*bd41d0a8Snordmark  * When we call the create function, we temporarily drop the netstack_lock
648*bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
649*bd41d0a8Snordmark  * re-evalute the state.
650f4b3ec61Sdh155122  */
651*bd41d0a8Snordmark static boolean_t
652*bd41d0a8Snordmark netstack_apply_create(kmutex_t *lockp, netstack_t *ns, int moduleid)
653f4b3ec61Sdh155122 {
654*bd41d0a8Snordmark 	void *result;
655*bd41d0a8Snordmark 	netstackid_t stackid;
656*bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
657*bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
658*bd41d0a8Snordmark 
659*bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
660*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
661*bd41d0a8Snordmark 
662*bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
663*bd41d0a8Snordmark 		dropped = B_TRUE;
664*bd41d0a8Snordmark 
665*bd41d0a8Snordmark 	if (nms->nms_flags & NSS_CREATE_NEEDED) {
666*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_CREATE_NEEDED;
667*bd41d0a8Snordmark 		nms->nms_flags |= NSS_CREATE_INPROGRESS;
668*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__inprogress,
669*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
670*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
671*bd41d0a8Snordmark 		mutex_exit(lockp);
672*bd41d0a8Snordmark 		dropped = B_TRUE;
673*bd41d0a8Snordmark 
674*bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_create != NULL);
675*bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
676*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__start,
677*bd41d0a8Snordmark 		    netstackid_t, stackid,
678*bd41d0a8Snordmark 		    netstack_t *, ns);
679*bd41d0a8Snordmark 		result = (ns_reg[moduleid].nr_create)(stackid, ns);
680*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__end,
681*bd41d0a8Snordmark 		    void *, result, netstack_t *, ns);
682*bd41d0a8Snordmark 
683*bd41d0a8Snordmark 		ASSERT(result != NULL);
684*bd41d0a8Snordmark 		mutex_enter(lockp);
685*bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
686*bd41d0a8Snordmark 		ns->netstack_modules[moduleid] = result;
687*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_CREATE_INPROGRESS;
688*bd41d0a8Snordmark 		nms->nms_flags |= NSS_CREATE_COMPLETED;
689*bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
690*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__create__completed,
691*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
692*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
693*bd41d0a8Snordmark 		return (dropped);
69423f4867fSnordmark 	} else {
695*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
696*bd41d0a8Snordmark 		return (dropped);
69723f4867fSnordmark 	}
698f4b3ec61Sdh155122 }
699f4b3ec61Sdh155122 
700f4b3ec61Sdh155122 /*
701*bd41d0a8Snordmark  * Call the shutdown function for the ns and moduleid if SHUTDOWN_NEEDED
702*bd41d0a8Snordmark  * is set.
703*bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
704*bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
705*bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
706f4b3ec61Sdh155122  *
707*bd41d0a8Snordmark  * When we call the shutdown function, we temporarily drop the netstack_lock
708*bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
709*bd41d0a8Snordmark  * re-evalute the state.
710f4b3ec61Sdh155122  */
711*bd41d0a8Snordmark static boolean_t
712*bd41d0a8Snordmark netstack_apply_shutdown(kmutex_t *lockp, netstack_t *ns, int moduleid)
713f4b3ec61Sdh155122 {
714*bd41d0a8Snordmark 	netstackid_t stackid;
715*bd41d0a8Snordmark 	void * netstack_module;
716*bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
717*bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
718*bd41d0a8Snordmark 
719*bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
720*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
721*bd41d0a8Snordmark 
722*bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
723*bd41d0a8Snordmark 		dropped = B_TRUE;
724*bd41d0a8Snordmark 
725*bd41d0a8Snordmark 	if (nms->nms_flags & NSS_SHUTDOWN_NEEDED) {
726*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_SHUTDOWN_NEEDED;
727*bd41d0a8Snordmark 		nms->nms_flags |= NSS_SHUTDOWN_INPROGRESS;
728*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__inprogress,
729*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
730*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
731*bd41d0a8Snordmark 		mutex_exit(lockp);
732*bd41d0a8Snordmark 		dropped = B_TRUE;
733*bd41d0a8Snordmark 
734*bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_shutdown != NULL);
735*bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
736*bd41d0a8Snordmark 		netstack_module = ns->netstack_modules[moduleid];
737*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__start,
738*bd41d0a8Snordmark 		    netstackid_t, stackid,
739*bd41d0a8Snordmark 		    void *, netstack_module);
740*bd41d0a8Snordmark 		(ns_reg[moduleid].nr_shutdown)(stackid, netstack_module);
741*bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__shutdown__end,
742*bd41d0a8Snordmark 		    netstack_t *, ns);
743*bd41d0a8Snordmark 
744*bd41d0a8Snordmark 		mutex_enter(lockp);
745*bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
746*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_SHUTDOWN_INPROGRESS;
747*bd41d0a8Snordmark 		nms->nms_flags |= NSS_SHUTDOWN_COMPLETED;
748*bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
749*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__shutdown__completed,
750*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
751*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
752*bd41d0a8Snordmark 		return (dropped);
753*bd41d0a8Snordmark 	} else {
754*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
755*bd41d0a8Snordmark 		return (dropped);
756*bd41d0a8Snordmark 	}
757f4b3ec61Sdh155122 }
758f4b3ec61Sdh155122 
759f4b3ec61Sdh155122 /*
760*bd41d0a8Snordmark  * Call the destroy function for the ns and moduleid if DESTROY_NEEDED
761*bd41d0a8Snordmark  * is set.
762*bd41d0a8Snordmark  * If some other thread gets here first and sets *_INPROGRESS, then
763*bd41d0a8Snordmark  * we wait for that thread to complete so that we can ensure that
764*bd41d0a8Snordmark  * all the callbacks are done when we've looped over all netstacks/moduleids.
765f4b3ec61Sdh155122  *
766*bd41d0a8Snordmark  * When we call the destroy function, we temporarily drop the netstack_lock
767*bd41d0a8Snordmark  * held by the caller, and return true to tell the caller it needs to
768*bd41d0a8Snordmark  * re-evalute the state.
769f4b3ec61Sdh155122  */
770*bd41d0a8Snordmark static boolean_t
771*bd41d0a8Snordmark netstack_apply_destroy(kmutex_t *lockp, netstack_t *ns, int moduleid)
772f4b3ec61Sdh155122 {
773*bd41d0a8Snordmark 	netstackid_t stackid;
774*bd41d0a8Snordmark 	void * netstack_module;
775*bd41d0a8Snordmark 	nm_state_t *nms = &ns->netstack_m_state[moduleid];
776*bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
777*bd41d0a8Snordmark 
778*bd41d0a8Snordmark 	ASSERT(MUTEX_HELD(lockp));
779*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
780*bd41d0a8Snordmark 
781*bd41d0a8Snordmark 	if (wait_for_nms_inprogress(ns, nms, lockp))
782*bd41d0a8Snordmark 		dropped = B_TRUE;
783*bd41d0a8Snordmark 
784*bd41d0a8Snordmark 	if (nms->nms_flags & NSS_DESTROY_NEEDED) {
785*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_DESTROY_NEEDED;
786*bd41d0a8Snordmark 		nms->nms_flags |= NSS_DESTROY_INPROGRESS;
787*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__inprogress,
788*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
789*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
790*bd41d0a8Snordmark 		mutex_exit(lockp);
791*bd41d0a8Snordmark 		dropped = B_TRUE;
792*bd41d0a8Snordmark 
793*bd41d0a8Snordmark 		ASSERT(ns_reg[moduleid].nr_destroy != NULL);
794*bd41d0a8Snordmark 		stackid = ns->netstack_stackid;
795*bd41d0a8Snordmark 		netstack_module = ns->netstack_modules[moduleid];
796*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__start,
797*bd41d0a8Snordmark 		    netstackid_t, stackid,
798*bd41d0a8Snordmark 		    void *, netstack_module);
799*bd41d0a8Snordmark 		(ns_reg[moduleid].nr_destroy)(stackid, netstack_module);
800*bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__destroy__end,
801*bd41d0a8Snordmark 		    netstack_t *, ns);
802*bd41d0a8Snordmark 
803*bd41d0a8Snordmark 		mutex_enter(lockp);
804*bd41d0a8Snordmark 		mutex_enter(&ns->netstack_lock);
805*bd41d0a8Snordmark 		ns->netstack_modules[moduleid] = NULL;
806*bd41d0a8Snordmark 		nms->nms_flags &= ~NSS_DESTROY_INPROGRESS;
807*bd41d0a8Snordmark 		nms->nms_flags |= NSS_DESTROY_COMPLETED;
808*bd41d0a8Snordmark 		cv_broadcast(&nms->nms_cv);
809*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__destroy__completed,
810*bd41d0a8Snordmark 		    netstack_t *, ns, int, moduleid);
811*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
812*bd41d0a8Snordmark 		return (dropped);
813*bd41d0a8Snordmark 	} else {
814*bd41d0a8Snordmark 		mutex_exit(&ns->netstack_lock);
815*bd41d0a8Snordmark 		return (dropped);
816*bd41d0a8Snordmark 	}
817f4b3ec61Sdh155122 }
818f4b3ec61Sdh155122 
819f4b3ec61Sdh155122 /*
820*bd41d0a8Snordmark  * If somebody  is creating the netstack (due to a new zone being created)
821*bd41d0a8Snordmark  * then we wait for them to complete. This ensures that any additional
822*bd41d0a8Snordmark  * netstack_register() doesn't cause the create functions to run out of
823*bd41d0a8Snordmark  * order.
824*bd41d0a8Snordmark  * Note that we do not need such a global wait in the case of the shutdown
825*bd41d0a8Snordmark  * and destroy callbacks, since in that case it is sufficient for both
826*bd41d0a8Snordmark  * threads to set NEEDED and wait for INPROGRESS to ensure ordering.
827*bd41d0a8Snordmark  * Returns true if lockp was temporarily dropped while waiting.
828f4b3ec61Sdh155122  */
829*bd41d0a8Snordmark static boolean_t
830*bd41d0a8Snordmark wait_for_zone_creator(netstack_t *ns, kmutex_t *lockp)
831f4b3ec61Sdh155122 {
832*bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
833*bd41d0a8Snordmark 
834*bd41d0a8Snordmark 	mutex_enter(&ns->netstack_lock);
835*bd41d0a8Snordmark 	while (ns->netstack_flags & NSF_ZONE_CREATE) {
836*bd41d0a8Snordmark 		DTRACE_PROBE1(netstack__wait__zone__inprogress,
837*bd41d0a8Snordmark 		    netstack_t *, ns);
838*bd41d0a8Snordmark 		if (lockp != NULL) {
839*bd41d0a8Snordmark 			dropped = B_TRUE;
840*bd41d0a8Snordmark 			mutex_exit(lockp);
841*bd41d0a8Snordmark 		}
842*bd41d0a8Snordmark 		cv_wait(&ns->netstack_cv, &ns->netstack_lock);
843*bd41d0a8Snordmark 		if (lockp != NULL) {
844*bd41d0a8Snordmark 			/* First drop netstack_lock to preserve order */
845*bd41d0a8Snordmark 			mutex_exit(&ns->netstack_lock);
846*bd41d0a8Snordmark 			mutex_enter(lockp);
847*bd41d0a8Snordmark 			mutex_enter(&ns->netstack_lock);
848*bd41d0a8Snordmark 		}
849*bd41d0a8Snordmark 	}
850*bd41d0a8Snordmark 	mutex_exit(&ns->netstack_lock);
851*bd41d0a8Snordmark 	return (dropped);
852*bd41d0a8Snordmark }
853*bd41d0a8Snordmark 
854f4b3ec61Sdh155122 /*
855*bd41d0a8Snordmark  * Wait for any INPROGRESS flag to be cleared for the netstack/moduleid
856*bd41d0a8Snordmark  * combination.
857*bd41d0a8Snordmark  * Returns true if lockp was temporarily dropped while waiting.
858f4b3ec61Sdh155122  */
859*bd41d0a8Snordmark static boolean_t
860*bd41d0a8Snordmark wait_for_nms_inprogress(netstack_t *ns, nm_state_t *nms, kmutex_t *lockp)
861*bd41d0a8Snordmark {
862*bd41d0a8Snordmark 	boolean_t dropped = B_FALSE;
863*bd41d0a8Snordmark 
864*bd41d0a8Snordmark 	while (nms->nms_flags & NSS_ALL_INPROGRESS) {
865*bd41d0a8Snordmark 		DTRACE_PROBE2(netstack__wait__nms__inprogress,
866*bd41d0a8Snordmark 		    netstack_t *, ns, nm_state_t *, nms);
867*bd41d0a8Snordmark 		if (lockp != NULL) {
868*bd41d0a8Snordmark 			dropped = B_TRUE;
869*bd41d0a8Snordmark 			mutex_exit(lockp);
870*bd41d0a8Snordmark 		}
871*bd41d0a8Snordmark 		cv_wait(&nms->nms_cv, &ns->netstack_lock);
872*bd41d0a8Snordmark 		if (lockp != NULL) {
873*bd41d0a8Snordmark 			/* First drop netstack_lock to preserve order */
874*bd41d0a8Snordmark 			mutex_exit(&ns->netstack_lock);
875*bd41d0a8Snordmark 			mutex_enter(lockp);
876*bd41d0a8Snordmark 			mutex_enter(&ns->netstack_lock);
877*bd41d0a8Snordmark 		}
878*bd41d0a8Snordmark 	}
879*bd41d0a8Snordmark 	return (dropped);
880f4b3ec61Sdh155122 }
881f4b3ec61Sdh155122 
882f4b3ec61Sdh155122 /*
883f4b3ec61Sdh155122  * Get the stack instance used in caller's zone.
884f4b3ec61Sdh155122  * Increases the reference count, caller must do a netstack_rele.
885f4b3ec61Sdh155122  * It can't be called after zone_destroy() has started.
886f4b3ec61Sdh155122  */
887fd006805Snordmark netstack_t *
888f4b3ec61Sdh155122 netstack_get_current(void)
889f4b3ec61Sdh155122 {
890f4b3ec61Sdh155122 	netstack_t *ns;
891f4b3ec61Sdh155122 
892f4b3ec61Sdh155122 	ns = curproc->p_zone->zone_netstack;
893f4b3ec61Sdh155122 	ASSERT(ns != NULL);
894f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
895f4b3ec61Sdh155122 		return (NULL);
896f4b3ec61Sdh155122 
897f4b3ec61Sdh155122 	netstack_hold(ns);
898f4b3ec61Sdh155122 
899f4b3ec61Sdh155122 	return (ns);
900f4b3ec61Sdh155122 }
901f4b3ec61Sdh155122 
902f4b3ec61Sdh155122 /*
903f4b3ec61Sdh155122  * Find a stack instance given the cred.
904f4b3ec61Sdh155122  * This is used by the modules to potentially allow for a future when
905f4b3ec61Sdh155122  * something other than the zoneid is used to determine the stack.
906f4b3ec61Sdh155122  */
907f4b3ec61Sdh155122 netstack_t *
908f4b3ec61Sdh155122 netstack_find_by_cred(const cred_t *cr)
909f4b3ec61Sdh155122 {
910f4b3ec61Sdh155122 	zoneid_t zoneid = crgetzoneid(cr);
911f4b3ec61Sdh155122 
912f4b3ec61Sdh155122 	/* Handle the case when cr_zone is NULL */
913f4b3ec61Sdh155122 	if (zoneid == (zoneid_t)-1)
914f4b3ec61Sdh155122 		zoneid = GLOBAL_ZONEID;
915f4b3ec61Sdh155122 
916f4b3ec61Sdh155122 	/* For performance ... */
917f4b3ec61Sdh155122 	if (curproc->p_zone->zone_id == zoneid)
918f4b3ec61Sdh155122 		return (netstack_get_current());
919f4b3ec61Sdh155122 	else
920f4b3ec61Sdh155122 		return (netstack_find_by_zoneid(zoneid));
921f4b3ec61Sdh155122 }
922f4b3ec61Sdh155122 
923f4b3ec61Sdh155122 /*
924f4b3ec61Sdh155122  * Find a stack instance given the zoneid.
925f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
926f4b3ec61Sdh155122  * netstack_rele().
927f4b3ec61Sdh155122  *
928f4b3ec61Sdh155122  * If there is no exact match then assume the shared stack instance
929f4b3ec61Sdh155122  * matches.
930f4b3ec61Sdh155122  *
931f4b3ec61Sdh155122  * Skip the unitialized ones.
932f4b3ec61Sdh155122  */
933f4b3ec61Sdh155122 netstack_t *
934f4b3ec61Sdh155122 netstack_find_by_zoneid(zoneid_t zoneid)
935f4b3ec61Sdh155122 {
936f4b3ec61Sdh155122 	netstack_t *ns;
937f4b3ec61Sdh155122 	zone_t *zone;
938f4b3ec61Sdh155122 
939f4b3ec61Sdh155122 	zone = zone_find_by_id(zoneid);
940f4b3ec61Sdh155122 
941f4b3ec61Sdh155122 	if (zone == NULL)
942f4b3ec61Sdh155122 		return (NULL);
943f4b3ec61Sdh155122 
944f4b3ec61Sdh155122 	ns = zone->zone_netstack;
945f4b3ec61Sdh155122 	ASSERT(ns != NULL);
946f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
947f4b3ec61Sdh155122 		ns = NULL;
948f4b3ec61Sdh155122 	else
949f4b3ec61Sdh155122 		netstack_hold(ns);
950f4b3ec61Sdh155122 
951f4b3ec61Sdh155122 	zone_rele(zone);
952f4b3ec61Sdh155122 	return (ns);
953f4b3ec61Sdh155122 }
954f4b3ec61Sdh155122 
955f4b3ec61Sdh155122 /*
956*bd41d0a8Snordmark  * Find a stack instance given the zoneid. Can only be called from
957*bd41d0a8Snordmark  * the create callback. See the comments in zone_find_by_id_nolock why
958*bd41d0a8Snordmark  * that limitation exists.
959*bd41d0a8Snordmark  *
960f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
961f4b3ec61Sdh155122  * netstack_rele().
962f4b3ec61Sdh155122  *
963f4b3ec61Sdh155122  * If there is no exact match then assume the shared stack instance
964f4b3ec61Sdh155122  * matches.
965f4b3ec61Sdh155122  *
966f4b3ec61Sdh155122  * Skip the unitialized ones.
967f4b3ec61Sdh155122  */
968f4b3ec61Sdh155122 netstack_t *
969f4b3ec61Sdh155122 netstack_find_by_zoneid_nolock(zoneid_t zoneid)
970f4b3ec61Sdh155122 {
971f4b3ec61Sdh155122 	netstack_t *ns;
972f4b3ec61Sdh155122 	zone_t *zone;
973f4b3ec61Sdh155122 
974f4b3ec61Sdh155122 	zone = zone_find_by_id_nolock(zoneid);
975f4b3ec61Sdh155122 
976f4b3ec61Sdh155122 	if (zone == NULL)
977f4b3ec61Sdh155122 		return (NULL);
978f4b3ec61Sdh155122 
979f4b3ec61Sdh155122 	ns = zone->zone_netstack;
980f4b3ec61Sdh155122 	ASSERT(ns != NULL);
981f4b3ec61Sdh155122 
982f4b3ec61Sdh155122 	if (ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))
983f4b3ec61Sdh155122 		ns = NULL;
984f4b3ec61Sdh155122 	else
985f4b3ec61Sdh155122 		netstack_hold(ns);
986f4b3ec61Sdh155122 
987*bd41d0a8Snordmark 	/* zone_find_by_id_nolock does not have a hold on the zone */
988f4b3ec61Sdh155122 	return (ns);
989f4b3ec61Sdh155122 }
990f4b3ec61Sdh155122 
991f4b3ec61Sdh155122 /*
992f4b3ec61Sdh155122  * Find a stack instance given the stackid with exact match?
993f4b3ec61Sdh155122  * Increases the reference count if found; caller must do a
994f4b3ec61Sdh155122  * netstack_rele().
995f4b3ec61Sdh155122  *
996f4b3ec61Sdh155122  * Skip the unitialized ones.
997f4b3ec61Sdh155122  */
998f4b3ec61Sdh155122 netstack_t *
999f4b3ec61Sdh155122 netstack_find_by_stackid(netstackid_t stackid)
1000f4b3ec61Sdh155122 {
1001f4b3ec61Sdh155122 	netstack_t *ns;
1002f4b3ec61Sdh155122 
1003f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
1004f4b3ec61Sdh155122 	for (ns = netstack_head; ns != NULL; ns = ns->netstack_next) {
1005f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
1006f4b3ec61Sdh155122 		if (ns->netstack_stackid == stackid &&
1007f4b3ec61Sdh155122 		    !(ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING))) {
1008f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
1009f4b3ec61Sdh155122 			netstack_hold(ns);
1010f4b3ec61Sdh155122 			mutex_exit(&netstack_g_lock);
1011f4b3ec61Sdh155122 			return (ns);
1012f4b3ec61Sdh155122 		}
1013f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
1014f4b3ec61Sdh155122 	}
1015f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
1016f4b3ec61Sdh155122 	return (NULL);
1017f4b3ec61Sdh155122 }
1018f4b3ec61Sdh155122 
1019f4b3ec61Sdh155122 void
1020f4b3ec61Sdh155122 netstack_rele(netstack_t *ns)
1021f4b3ec61Sdh155122 {
1022f4b3ec61Sdh155122 	netstack_t **nsp;
1023f4b3ec61Sdh155122 	boolean_t found;
1024f4b3ec61Sdh155122 	int refcnt, numzones;
1025*bd41d0a8Snordmark 	int i;
1026f4b3ec61Sdh155122 
1027f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
1028f4b3ec61Sdh155122 	ASSERT(ns->netstack_refcnt > 0);
1029f4b3ec61Sdh155122 	ns->netstack_refcnt--;
1030f4b3ec61Sdh155122 	/*
1031f4b3ec61Sdh155122 	 * As we drop the lock additional netstack_rele()s can come in
1032f4b3ec61Sdh155122 	 * and decrement the refcnt to zero and free the netstack_t.
1033f4b3ec61Sdh155122 	 * Store pointers in local variables and if we were not the last
1034f4b3ec61Sdh155122 	 * then don't reference the netstack_t after that.
1035f4b3ec61Sdh155122 	 */
1036f4b3ec61Sdh155122 	refcnt = ns->netstack_refcnt;
1037f4b3ec61Sdh155122 	numzones = ns->netstack_numzones;
1038f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__dec__ref, netstack_t *, ns);
1039f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
1040f4b3ec61Sdh155122 
1041f4b3ec61Sdh155122 	if (refcnt == 0 && numzones == 0) {
1042f4b3ec61Sdh155122 		/*
1043f4b3ec61Sdh155122 		 * Time to call the destroy functions and free up
1044f4b3ec61Sdh155122 		 * the structure
1045f4b3ec61Sdh155122 		 */
1046f4b3ec61Sdh155122 		netstack_stack_inactive(ns);
1047f4b3ec61Sdh155122 
104823f4867fSnordmark 		/* Make sure nothing increased the references */
104923f4867fSnordmark 		ASSERT(ns->netstack_refcnt == 0);
105023f4867fSnordmark 		ASSERT(ns->netstack_numzones == 0);
105123f4867fSnordmark 
1052f4b3ec61Sdh155122 		/* Finally remove from list of netstacks */
1053f4b3ec61Sdh155122 		mutex_enter(&netstack_g_lock);
1054f4b3ec61Sdh155122 		found = B_FALSE;
1055f4b3ec61Sdh155122 		for (nsp = &netstack_head; *nsp != NULL;
1056f4b3ec61Sdh155122 		    nsp = &(*nsp)->netstack_next) {
1057f4b3ec61Sdh155122 			if (*nsp == ns) {
1058f4b3ec61Sdh155122 				*nsp = ns->netstack_next;
1059f4b3ec61Sdh155122 				ns->netstack_next = NULL;
1060f4b3ec61Sdh155122 				found = B_TRUE;
1061f4b3ec61Sdh155122 				break;
1062f4b3ec61Sdh155122 			}
1063f4b3ec61Sdh155122 		}
1064f4b3ec61Sdh155122 		ASSERT(found);
1065f4b3ec61Sdh155122 		mutex_exit(&netstack_g_lock);
1066f4b3ec61Sdh155122 
106723f4867fSnordmark 		/* Make sure nothing increased the references */
106823f4867fSnordmark 		ASSERT(ns->netstack_refcnt == 0);
106923f4867fSnordmark 		ASSERT(ns->netstack_numzones == 0);
107023f4867fSnordmark 
1071f4b3ec61Sdh155122 		ASSERT(ns->netstack_flags & NSF_CLOSING);
1072*bd41d0a8Snordmark 
1073*bd41d0a8Snordmark 		for (i = 0; i < NS_MAX; i++) {
1074*bd41d0a8Snordmark 			nm_state_t *nms = &ns->netstack_m_state[i];
1075*bd41d0a8Snordmark 
1076*bd41d0a8Snordmark 			cv_destroy(&nms->nms_cv);
1077*bd41d0a8Snordmark 		}
1078*bd41d0a8Snordmark 		mutex_destroy(&ns->netstack_lock);
1079*bd41d0a8Snordmark 		cv_destroy(&ns->netstack_cv);
1080f4b3ec61Sdh155122 		kmem_free(ns, sizeof (*ns));
1081f4b3ec61Sdh155122 	}
1082f4b3ec61Sdh155122 }
1083f4b3ec61Sdh155122 
1084f4b3ec61Sdh155122 void
1085f4b3ec61Sdh155122 netstack_hold(netstack_t *ns)
1086f4b3ec61Sdh155122 {
1087f4b3ec61Sdh155122 	mutex_enter(&ns->netstack_lock);
1088f4b3ec61Sdh155122 	ns->netstack_refcnt++;
1089f4b3ec61Sdh155122 	ASSERT(ns->netstack_refcnt > 0);
1090f4b3ec61Sdh155122 	mutex_exit(&ns->netstack_lock);
1091f4b3ec61Sdh155122 	DTRACE_PROBE1(netstack__inc__ref, netstack_t *, ns);
1092f4b3ec61Sdh155122 }
1093f4b3ec61Sdh155122 
1094f4b3ec61Sdh155122 /*
1095f4b3ec61Sdh155122  * To support kstat_create_netstack() using kstat_zone_add we need
1096f4b3ec61Sdh155122  * to track both
1097f4b3ec61Sdh155122  *  - all zoneids that use the global/shared stack
1098f4b3ec61Sdh155122  *  - all kstats that have been added for the shared stack
1099f4b3ec61Sdh155122  */
1100f4b3ec61Sdh155122 kstat_t *
1101f4b3ec61Sdh155122 kstat_create_netstack(char *ks_module, int ks_instance, char *ks_name,
1102f4b3ec61Sdh155122     char *ks_class, uchar_t ks_type, uint_t ks_ndata, uchar_t ks_flags,
1103f4b3ec61Sdh155122     netstackid_t ks_netstackid)
1104f4b3ec61Sdh155122 {
1105f4b3ec61Sdh155122 	kstat_t *ks;
1106f4b3ec61Sdh155122 
1107f4b3ec61Sdh155122 	if (ks_netstackid == GLOBAL_NETSTACKID) {
1108f4b3ec61Sdh155122 		ks = kstat_create_zone(ks_module, ks_instance, ks_name,
1109f4b3ec61Sdh155122 		    ks_class, ks_type, ks_ndata, ks_flags, GLOBAL_ZONEID);
1110f4b3ec61Sdh155122 		if (ks != NULL)
1111f4b3ec61Sdh155122 			netstack_shared_kstat_add(ks);
1112f4b3ec61Sdh155122 		return (ks);
1113f4b3ec61Sdh155122 	} else {
1114f4b3ec61Sdh155122 		zoneid_t zoneid = ks_netstackid;
1115f4b3ec61Sdh155122 
1116f4b3ec61Sdh155122 		return (kstat_create_zone(ks_module, ks_instance, ks_name,
1117f4b3ec61Sdh155122 		    ks_class, ks_type, ks_ndata, ks_flags, zoneid));
1118f4b3ec61Sdh155122 	}
1119f4b3ec61Sdh155122 }
1120f4b3ec61Sdh155122 
1121f4b3ec61Sdh155122 void
1122f4b3ec61Sdh155122 kstat_delete_netstack(kstat_t *ks, netstackid_t ks_netstackid)
1123f4b3ec61Sdh155122 {
1124f4b3ec61Sdh155122 	if (ks_netstackid == GLOBAL_NETSTACKID) {
1125f4b3ec61Sdh155122 		netstack_shared_kstat_remove(ks);
1126f4b3ec61Sdh155122 	}
1127f4b3ec61Sdh155122 	kstat_delete(ks);
1128f4b3ec61Sdh155122 }
1129f4b3ec61Sdh155122 
1130f4b3ec61Sdh155122 static void
1131f4b3ec61Sdh155122 netstack_shared_zone_add(zoneid_t zoneid)
1132f4b3ec61Sdh155122 {
1133f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1134f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1135f4b3ec61Sdh155122 
1136f4b3ec61Sdh155122 	sz = (struct shared_zone_list *)kmem_zalloc(sizeof (*sz), KM_SLEEP);
1137f4b3ec61Sdh155122 	sz->sz_zoneid = zoneid;
1138f4b3ec61Sdh155122 
1139f4b3ec61Sdh155122 	/* Insert in list */
1140f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1141f4b3ec61Sdh155122 	sz->sz_next = netstack_shared_zones;
1142f4b3ec61Sdh155122 	netstack_shared_zones = sz;
1143f4b3ec61Sdh155122 
1144f4b3ec61Sdh155122 	/*
1145f4b3ec61Sdh155122 	 * Perform kstat_zone_add for each existing shared stack kstat.
1146f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock lock across kstat_zone_add.
1147f4b3ec61Sdh155122 	 */
1148f4b3ec61Sdh155122 	for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
1149f4b3ec61Sdh155122 		kstat_zone_add(sk->sk_kstat, zoneid);
1150f4b3ec61Sdh155122 	}
1151f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1152f4b3ec61Sdh155122 }
1153f4b3ec61Sdh155122 
1154f4b3ec61Sdh155122 static void
1155f4b3ec61Sdh155122 netstack_shared_zone_remove(zoneid_t zoneid)
1156f4b3ec61Sdh155122 {
1157f4b3ec61Sdh155122 	struct shared_zone_list **szp, *sz;
1158f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1159f4b3ec61Sdh155122 
1160f4b3ec61Sdh155122 	/* Find in list */
1161f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1162f4b3ec61Sdh155122 	sz = NULL;
1163f4b3ec61Sdh155122 	for (szp = &netstack_shared_zones; *szp != NULL;
1164f4b3ec61Sdh155122 	    szp = &((*szp)->sz_next)) {
1165f4b3ec61Sdh155122 		if ((*szp)->sz_zoneid == zoneid) {
1166f4b3ec61Sdh155122 			sz = *szp;
1167f4b3ec61Sdh155122 			break;
1168f4b3ec61Sdh155122 		}
1169f4b3ec61Sdh155122 	}
1170f4b3ec61Sdh155122 	/* We must find it */
1171f4b3ec61Sdh155122 	ASSERT(sz != NULL);
1172f4b3ec61Sdh155122 	*szp = sz->sz_next;
1173f4b3ec61Sdh155122 	sz->sz_next = NULL;
1174f4b3ec61Sdh155122 
1175f4b3ec61Sdh155122 	/*
1176f4b3ec61Sdh155122 	 * Perform kstat_zone_remove for each existing shared stack kstat.
1177f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock lock across kstat_zone_remove.
1178f4b3ec61Sdh155122 	 */
1179f4b3ec61Sdh155122 	for (sk = netstack_shared_kstats; sk != NULL; sk = sk->sk_next) {
1180f4b3ec61Sdh155122 		kstat_zone_remove(sk->sk_kstat, zoneid);
1181f4b3ec61Sdh155122 	}
1182f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1183f4b3ec61Sdh155122 
1184f4b3ec61Sdh155122 	kmem_free(sz, sizeof (*sz));
1185f4b3ec61Sdh155122 }
1186f4b3ec61Sdh155122 
1187f4b3ec61Sdh155122 static void
1188f4b3ec61Sdh155122 netstack_shared_kstat_add(kstat_t *ks)
1189f4b3ec61Sdh155122 {
1190f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1191f4b3ec61Sdh155122 	struct shared_kstat_list *sk;
1192f4b3ec61Sdh155122 
1193f4b3ec61Sdh155122 	sk = (struct shared_kstat_list *)kmem_zalloc(sizeof (*sk), KM_SLEEP);
1194f4b3ec61Sdh155122 	sk->sk_kstat = ks;
1195f4b3ec61Sdh155122 
1196f4b3ec61Sdh155122 	/* Insert in list */
1197f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1198f4b3ec61Sdh155122 	sk->sk_next = netstack_shared_kstats;
1199f4b3ec61Sdh155122 	netstack_shared_kstats = sk;
1200f4b3ec61Sdh155122 
1201f4b3ec61Sdh155122 	/*
1202f4b3ec61Sdh155122 	 * Perform kstat_zone_add for each existing shared stack zone.
1203f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock lock across kstat_zone_add.
1204f4b3ec61Sdh155122 	 */
1205f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1206f4b3ec61Sdh155122 		kstat_zone_add(ks, sz->sz_zoneid);
1207f4b3ec61Sdh155122 	}
1208f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1209f4b3ec61Sdh155122 }
1210f4b3ec61Sdh155122 
1211f4b3ec61Sdh155122 static void
1212f4b3ec61Sdh155122 netstack_shared_kstat_remove(kstat_t *ks)
1213f4b3ec61Sdh155122 {
1214f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1215f4b3ec61Sdh155122 	struct shared_kstat_list **skp, *sk;
1216f4b3ec61Sdh155122 
1217f4b3ec61Sdh155122 	/* Find in list */
1218f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1219f4b3ec61Sdh155122 	sk = NULL;
1220f4b3ec61Sdh155122 	for (skp = &netstack_shared_kstats; *skp != NULL;
1221f4b3ec61Sdh155122 	    skp = &((*skp)->sk_next)) {
1222f4b3ec61Sdh155122 		if ((*skp)->sk_kstat == ks) {
1223f4b3ec61Sdh155122 			sk = *skp;
1224f4b3ec61Sdh155122 			break;
1225f4b3ec61Sdh155122 		}
1226f4b3ec61Sdh155122 	}
1227f4b3ec61Sdh155122 	/* Must find it */
1228f4b3ec61Sdh155122 	ASSERT(sk != NULL);
1229f4b3ec61Sdh155122 	*skp = sk->sk_next;
1230f4b3ec61Sdh155122 	sk->sk_next = NULL;
1231f4b3ec61Sdh155122 
1232f4b3ec61Sdh155122 	/*
1233f4b3ec61Sdh155122 	 * Perform kstat_zone_remove for each existing shared stack kstat.
1234f4b3ec61Sdh155122 	 * Note: Holds netstack_shared_lock lock across kstat_zone_remove.
1235f4b3ec61Sdh155122 	 */
1236f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1237f4b3ec61Sdh155122 		kstat_zone_remove(ks, sz->sz_zoneid);
1238f4b3ec61Sdh155122 	}
1239f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1240f4b3ec61Sdh155122 	kmem_free(sk, sizeof (*sk));
1241f4b3ec61Sdh155122 }
1242f4b3ec61Sdh155122 
1243f4b3ec61Sdh155122 /*
1244f4b3ec61Sdh155122  * If a zoneid is part of the shared zone, return true
1245f4b3ec61Sdh155122  */
1246f4b3ec61Sdh155122 static boolean_t
1247f4b3ec61Sdh155122 netstack_find_shared_zoneid(zoneid_t zoneid)
1248f4b3ec61Sdh155122 {
1249f4b3ec61Sdh155122 	struct shared_zone_list *sz;
1250f4b3ec61Sdh155122 
1251f4b3ec61Sdh155122 	mutex_enter(&netstack_shared_lock);
1252f4b3ec61Sdh155122 	for (sz = netstack_shared_zones; sz != NULL; sz = sz->sz_next) {
1253f4b3ec61Sdh155122 		if (sz->sz_zoneid == zoneid) {
1254f4b3ec61Sdh155122 			mutex_exit(&netstack_shared_lock);
1255f4b3ec61Sdh155122 			return (B_TRUE);
1256f4b3ec61Sdh155122 		}
1257f4b3ec61Sdh155122 	}
1258f4b3ec61Sdh155122 	mutex_exit(&netstack_shared_lock);
1259f4b3ec61Sdh155122 	return (B_FALSE);
1260f4b3ec61Sdh155122 }
1261f4b3ec61Sdh155122 
1262f4b3ec61Sdh155122 /*
1263f4b3ec61Sdh155122  * Hide the fact that zoneids and netstackids are allocated from
1264f4b3ec61Sdh155122  * the same space in the current implementation.
1265*bd41d0a8Snordmark  * We currently do not check that the stackid/zoneids are valid, since there
1266*bd41d0a8Snordmark  * is no need for that. But this should only be done for ids that are
1267*bd41d0a8Snordmark  * valid.
1268f4b3ec61Sdh155122  */
1269f4b3ec61Sdh155122 zoneid_t
1270f4b3ec61Sdh155122 netstackid_to_zoneid(netstackid_t stackid)
1271f4b3ec61Sdh155122 {
1272f4b3ec61Sdh155122 	return (stackid);
1273f4b3ec61Sdh155122 }
1274f4b3ec61Sdh155122 
1275f4b3ec61Sdh155122 netstackid_t
1276f4b3ec61Sdh155122 zoneid_to_netstackid(zoneid_t zoneid)
1277f4b3ec61Sdh155122 {
1278f4b3ec61Sdh155122 	if (netstack_find_shared_zoneid(zoneid))
1279f4b3ec61Sdh155122 		return (GLOBAL_ZONEID);
1280f4b3ec61Sdh155122 	else
1281f4b3ec61Sdh155122 		return (zoneid);
1282f4b3ec61Sdh155122 }
1283f4b3ec61Sdh155122 
1284f4b3ec61Sdh155122 /*
1285f4b3ec61Sdh155122  * Simplistic support for walking all the handles.
1286f4b3ec61Sdh155122  * Example usage:
1287f4b3ec61Sdh155122  *	netstack_handle_t nh;
1288f4b3ec61Sdh155122  *	netstack_t *ns;
1289f4b3ec61Sdh155122  *
1290f4b3ec61Sdh155122  *	netstack_next_init(&nh);
1291f4b3ec61Sdh155122  *	while ((ns = netstack_next(&nh)) != NULL) {
1292f4b3ec61Sdh155122  *		do something;
1293f4b3ec61Sdh155122  *		netstack_rele(ns);
1294f4b3ec61Sdh155122  *	}
1295f4b3ec61Sdh155122  *	netstack_next_fini(&nh);
1296f4b3ec61Sdh155122  */
1297f4b3ec61Sdh155122 void
1298f4b3ec61Sdh155122 netstack_next_init(netstack_handle_t *handle)
1299f4b3ec61Sdh155122 {
1300f4b3ec61Sdh155122 	*handle = 0;
1301f4b3ec61Sdh155122 }
1302f4b3ec61Sdh155122 
1303f4b3ec61Sdh155122 /* ARGSUSED */
1304f4b3ec61Sdh155122 void
1305f4b3ec61Sdh155122 netstack_next_fini(netstack_handle_t *handle)
1306f4b3ec61Sdh155122 {
1307f4b3ec61Sdh155122 }
1308f4b3ec61Sdh155122 
1309f4b3ec61Sdh155122 netstack_t *
1310f4b3ec61Sdh155122 netstack_next(netstack_handle_t *handle)
1311f4b3ec61Sdh155122 {
1312f4b3ec61Sdh155122 	netstack_t *ns;
1313f4b3ec61Sdh155122 	int i, end;
1314f4b3ec61Sdh155122 
1315f4b3ec61Sdh155122 	end = *handle;
1316f4b3ec61Sdh155122 	/* Walk skipping *handle number of instances */
1317f4b3ec61Sdh155122 
1318f4b3ec61Sdh155122 	/* Look if there is a matching stack instance */
1319f4b3ec61Sdh155122 	mutex_enter(&netstack_g_lock);
1320f4b3ec61Sdh155122 	ns = netstack_head;
1321f4b3ec61Sdh155122 	for (i = 0; i < end; i++) {
1322f4b3ec61Sdh155122 		if (ns == NULL)
1323f4b3ec61Sdh155122 			break;
1324f4b3ec61Sdh155122 		ns = ns->netstack_next;
1325f4b3ec61Sdh155122 	}
1326f4b3ec61Sdh155122 	/* skip those with that aren't really here */
1327f4b3ec61Sdh155122 	while (ns != NULL) {
1328f4b3ec61Sdh155122 		mutex_enter(&ns->netstack_lock);
1329f4b3ec61Sdh155122 		if ((ns->netstack_flags & (NSF_UNINIT|NSF_CLOSING)) == 0) {
1330f4b3ec61Sdh155122 			mutex_exit(&ns->netstack_lock);
1331f4b3ec61Sdh155122 			break;
1332f4b3ec61Sdh155122 		}
1333f4b3ec61Sdh155122 		mutex_exit(&ns->netstack_lock);
1334f4b3ec61Sdh155122 		end++;
1335f4b3ec61Sdh155122 		ns = ns->netstack_next;
1336f4b3ec61Sdh155122 	}
1337f4b3ec61Sdh155122 	if (ns != NULL) {
1338f4b3ec61Sdh155122 		*handle = end + 1;
1339f4b3ec61Sdh155122 		netstack_hold(ns);
1340f4b3ec61Sdh155122 	}
1341f4b3ec61Sdh155122 	mutex_exit(&netstack_g_lock);
1342f4b3ec61Sdh155122 	return (ns);
1343f4b3ec61Sdh155122 }
1344