1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/atomic.h>
28 #include <sys/kmem.h>
29 #include <sys/rwlock.h>
30 #include <sys/errno.h>
31 #include <sys/queue.h>
32 #include <sys/sunddi.h>
33 #include <inet/common.h>
34 #include <inet/led.h>
35 #include <inet/ip.h>
36 #include <sys/neti.h>
37 #include <sys/zone.h>
38 #include <sys/sdt.h>
39 
40 
41 typedef boolean_t napplyfn_t(kmutex_t *, neti_stack_t *, void *);
42 
43 static void *neti_stack_init(netstackid_t stackid, netstack_t *ns);
44 static void neti_stack_fini(netstackid_t stackid, void *arg);
45 static net_instance_int_t *net_instance_int_create(net_instance_t *nin,
46     net_instance_int_t *parent);
47 static void neti_stack_shutdown(netstackid_t stackid, void *arg);
48 static void net_instance_int_free(net_instance_int_t *nini);
49 
50 static boolean_t neti_stack_apply_create(kmutex_t *, neti_stack_t *, void *);
51 static boolean_t neti_stack_apply_destroy(kmutex_t *, neti_stack_t *, void *);
52 static boolean_t neti_stack_apply_shutdown(kmutex_t *, neti_stack_t *, void *);
53 static void neti_apply_all_instances(neti_stack_t *, napplyfn_t *);
54 static void neti_apply_all_stacks(void *, napplyfn_t *);
55 static boolean_t wait_for_nini_inprogress(neti_stack_t *, kmutex_t *,
56     net_instance_int_t *, uint32_t);
57 
58 static nini_head_t neti_instance_list;
59 static neti_stack_head_t neti_stack_list;
60 static kmutex_t neti_stack_lock;
61 
62 void
63 neti_init()
64 {
65 	mutex_init(&neti_stack_lock, NULL, MUTEX_DRIVER, NULL);
66 
67 	LIST_INIT(&neti_instance_list);
68 	LIST_INIT(&neti_stack_list);
69 	/*
70 	 * We want to be informed each time a netstack is created or
71 	 * destroyed in the kernel.
72 	 */
73 	netstack_register(NS_NETI, neti_stack_init, neti_stack_shutdown,
74 	    neti_stack_fini);
75 }
76 
77 void
78 neti_fini()
79 {
80 	ASSERT(LIST_EMPTY(&neti_instance_list));
81 	ASSERT(LIST_EMPTY(&neti_stack_list));
82 
83 	netstack_unregister(NS_NETI);
84 
85 	mutex_destroy(&neti_stack_lock);
86 }
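
/*
 * Overview of the data structures used in this file:
 *
 * neti_instance_list is the global list of callbacks registered via
 * net_instance_register().  neti_stack_list holds one neti_stack_t for
 * each active netstack.  Every neti_stack_t in turn keeps its own list
 * of net_instance_int_t's (nts_instances); those per-stack records
 * point back to their "parent" record on the global list and carry the
 * NSS_* flags that track how far create/shutdown/destroy has
 * progressed for that stack.  The two global lists are protected by
 * neti_stack_lock, while the per-stack list and flags are protected by
 * that stack's nts_lock.
 */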
87 
88 /*
89  * Initialize the neti stack instance.  Because this is called out of the
90  * netstack framework, it is not possible for it to be called twice with
91  * the same values for (stackid,ns).  The same also applies to the other
92  * two functions used with netstack_register: neti_stack_shutdown and
93  * neti_stack_fini.
94  */
95 static void *
96 neti_stack_init(netstackid_t stackid, netstack_t *ns)
97 {
98 	net_instance_int_t *dup;
99 	net_instance_int_t *n;
100 	neti_stack_t *nts;
101 
102 	nts = kmem_zalloc(sizeof (*nts), KM_SLEEP);
103 	LIST_INIT(&nts->nts_instances);
104 	nts->nts_id = (netid_t)stackid;
105 	nts->nts_stackid = stackid;
106 	nts->nts_netstack = ns;
107 	nts->nts_zoneid = netstackid_to_zoneid(stackid);
108 	nts->nts_flags = NSF_ZONE_CREATE;
109 	cv_init(&nts->nts_cv, NULL, CV_DRIVER, NULL);
110 	mutex_init(&nts->nts_lock, NULL, MUTEX_DRIVER, NULL);
111 
112 	mutex_enter(&neti_stack_lock);
113 	LIST_INSERT_HEAD(&neti_stack_list, nts, nts_next);
114 
115 	LIST_FOREACH(n, &neti_instance_list, nini_next) {
116 		/*
117 		 * This function returns with the NSS_CREATE_NEEDED flag
118 		 * set in "dup", so it is adequately prepared for the
119 		 * upcoming apply.
120 		 */
121 		dup = net_instance_int_create(n->nini_instance, n);
122 
123 		mutex_enter(&nts->nts_lock);
124 		LIST_INSERT_HEAD(&nts->nts_instances, dup, nini_next);
125 		mutex_exit(&nts->nts_lock);
126 	}
127 
128 	neti_apply_all_instances(nts, neti_stack_apply_create);
129 
130 	mutex_enter(&nts->nts_lock);
131 	nts->nts_flags &= ~NSF_ZONE_CREATE;
132 	cv_signal(&nts->nts_cv);
133 	mutex_exit(&nts->nts_lock);
134 
135 	mutex_exit(&neti_stack_lock);
136 
137 	return (nts);
138 }
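
/*
 * In outline, each per-stack net_instance_int_t moves through the same
 * three-stage progression, driven by the apply functions further down:
 *
 *	NSS_CREATE_NEEDED   -> NSS_CREATE_INPROGRESS   -> NSS_CREATE_COMPLETED
 *	NSS_SHUTDOWN_NEEDED -> NSS_SHUTDOWN_INPROGRESS -> NSS_SHUTDOWN_COMPLETED
 *	NSS_DESTROY_NEEDED  -> NSS_DESTROY_INPROGRESS  -> NSS_DESTROY_COMPLETED
 *
 * A _NEEDED flag is set by whichever event wants the work done (stack
 * creation, zone shutdown or destroy, instance registration or
 * unregistration); the matching apply function then clears it, runs the
 * callback with the locks dropped and finally sets _COMPLETED.
 */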
139 
140 /*
141  * Run the shutdown for all of the hooks.
142  */
143 /*ARGSUSED*/
144 static void
145 neti_stack_shutdown(netstackid_t stackid, void *arg)
146 {
147 	neti_stack_t *nts = arg;
148 	net_instance_int_t *n;
149 	struct net_data *nd;
150 
151 	ASSERT(nts != NULL);
152 
153 	mutex_enter(&neti_stack_lock);
154 	mutex_enter(&nts->nts_lock);
155 	/*
156 	 * Walk through all of the protocol stacks and mark them as shutting
157 	 * down.
158 	 */
159 	LIST_FOREACH(nd, &nts->nts_netd_head, netd_list) {
160 		nd->netd_condemned = 1;
161 	}
162 
163 	/*
164 	 * Now proceed to see which callbacks are waiting to hear about the
165 	 * impending shutdown...
166 	 */
167 	LIST_FOREACH(n, &nts->nts_instances, nini_next) {
168 		if (n->nini_instance->nin_shutdown == NULL) {
169 			/*
170 			 * If there is no shutdown function registered,
171 			 * fake that we have completed it.
172 			 */
173 			n->nini_flags |= NSS_SHUTDOWN_COMPLETED;
174 			continue;
175 		}
176 
177 		/*
178 		 * We need to ensure that we don't try to shut down something
179 		 * that is already in the process of being shut down or
180 		 * destroyed.  If it is still being created, that's ok; the
181 		 * shutdown flag is added to the mix of things to do.
182 		 */
183 		if ((n->nini_flags & (NSS_DESTROY_ALL|NSS_SHUTDOWN_ALL)) == 0)
184 			n->nini_flags |= NSS_SHUTDOWN_NEEDED;
185 	}
186 	nts->nts_flags |= NSF_ZONE_SHUTDOWN;
187 	mutex_exit(&nts->nts_lock);
188 
189 	neti_apply_all_instances(nts, neti_stack_apply_shutdown);
190 
191 	mutex_enter(&nts->nts_lock);
192 
193 	nts->nts_netstack = NULL;
194 	mutex_exit(&nts->nts_lock);
195 
196 	mutex_exit(&neti_stack_lock);
197 	ASSERT(nts != NULL);
198 }
199 
200 /*
201  * Free the neti stack instance.
202  * This function relies on the netstack framework only calling the _destroy
203  * callback once for each stackid.  The netstack framework also provides us
204  * with assurance that nobody else will be doing any work (_create, _shutdown)
205  * on it, so there is no need to set and use flags to guard against
206  * simultaneous execution (i.e. no need to set NSF_CLOSING).
207  * What is required, however, is to make sure that we don't corrupt the
208  * list of neti_stack_t's for other code that walks it.
209  */
210 /*ARGSUSED*/
211 static void
212 neti_stack_fini(netstackid_t stackid, void *arg)
213 {
214 	neti_stack_t *nts = arg;
215 	net_instance_int_t *n;
216 	struct net_data *nd;
217 
218 	mutex_enter(&neti_stack_lock);
219 	LIST_REMOVE(nts, nts_next);
220 
221 	mutex_enter(&nts->nts_lock);
222 	nts->nts_flags |= NSF_ZONE_DESTROY;
223 	/*
224 	 * Walk through all of the protocol stacks and mark them as being
225 	 * destroyed.
226 	 */
227 	LIST_FOREACH(nd, &nts->nts_netd_head, netd_list) {
228 		nd->netd_condemned = 2;
229 	}
230 
231 	LIST_FOREACH(n, &nts->nts_instances, nini_next) {
232 		ASSERT((n->nini_flags & NSS_SHUTDOWN_ALL) != 0);
233 		if (n->nini_instance->nin_shutdown == NULL)
234 			continue;
235 		if ((n->nini_flags & NSS_DESTROY_ALL) == 0)
236 			n->nini_flags |= NSS_DESTROY_NEEDED;
237 	}
238 	mutex_exit(&nts->nts_lock);
239 
240 	neti_apply_all_instances(nts, neti_stack_apply_destroy);
241 	mutex_exit(&neti_stack_lock);
242 
243 	while (!LIST_EMPTY(&nts->nts_instances)) {
244 		n = LIST_FIRST(&nts->nts_instances);
245 		LIST_REMOVE(n, nini_next);
246 
247 		net_instance_int_free(n);
248 	}
249 
250 	ASSERT(LIST_EMPTY(&nts->nts_netd_head));
251 
252 	mutex_destroy(&nts->nts_lock);
253 	cv_destroy(&nts->nts_cv);
254 
255 	kmem_free(nts, sizeof (*nts));
256 }
257 
258 static net_instance_int_t *
259 net_instance_int_create(net_instance_t *nin, net_instance_int_t *parent)
260 {
261 	net_instance_int_t *nini;
262 
263 	nini = kmem_zalloc(sizeof (net_instance_int_t), KM_SLEEP);
264 	nini->nini_instance = nin;
265 	nini->nini_parent = parent;
266 	if (parent != NULL) {
267 		/*
268 		 * If the parent pointer is non-NULL then we take that as
269 		 * an indication that the net_instance_int_t is being
270 		 * created for an active instance, and we expect the
271 		 * create function to be called.  In contrast, if
272 		 * parent is NULL then this code assumes the object is
273 		 * being prepared for insertion onto the master list of
274 		 * callbacks to be called when an instance is created, etc.
275 		 */
276 		parent->nini_ref++;
277 		nini->nini_flags |= NSS_CREATE_NEEDED;
278 	}
279 
280 	cv_init(&nini->nini_cv, NULL, CV_DRIVER, NULL);
281 
282 	return (nini);
283 }
284 
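/*
 * Release a net_instance_int_t.  If this is a per-stack copy, drop the
 * reference it holds on its parent.  The caller is expected to have
 * already removed the structure from whatever list it was on.
 */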
285 static void
286 net_instance_int_free(net_instance_int_t *nini)
287 {
288 
289 	cv_destroy(&nini->nini_cv);
290 
291 	if (nini->nini_parent != NULL)
292 		nini->nini_parent->nini_ref--;
293 
294 	ASSERT(nini->nini_ref == 0);
295 	kmem_free(nini, sizeof (*nini));
296 }
297 
298 net_instance_t *
299 net_instance_alloc(const int version)
300 {
301 	net_instance_t *nin;
302 
303 	if (version != NETINFO_VERSION)
304 		return (NULL);
305 
306 	nin = kmem_zalloc(sizeof (net_instance_t), KM_SLEEP);
307 	nin->nin_version = version;
308 
309 	return (nin);
310 }
311 
312 void
313 net_instance_free(net_instance_t *nin)
314 {
315 	kmem_free(nin, sizeof (*nin));
316 }
317 
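/*
 * Registration interface used by consumers of this framework.  The
 * sketch below is illustrative only: the authoritative definition of
 * net_instance_t and its callback types is in <sys/neti.h>, so the
 * signatures shown here are assumptions inferred from how the
 * callbacks are invoked in this file.  nin_create and nin_destroy are
 * mandatory, nin_shutdown may be left NULL, and nin_create is expected
 * to return a non-NULL per-stack token:
 *
 *	static void *foo_create(netid_t id)		{ ... return token; }
 *	static void foo_shutdown(netid_t id, void *t)	{ ... }
 *	static void foo_destroy(netid_t id, void *t)	{ ... free token ... }
 *
 *	net_instance_t *nin = net_instance_alloc(NETINFO_VERSION);
 *	nin->nin_name = "foo";
 *	nin->nin_create = foo_create;
 *	nin->nin_shutdown = foo_shutdown;
 *	nin->nin_destroy = foo_destroy;
 *	if (net_instance_register(nin) != DDI_SUCCESS)
 *		net_instance_free(nin);
 *
 *	... and when the consumer is done ...
 *
 *	(void) net_instance_unregister(nin);
 *	net_instance_free(nin);
 */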
318 int
319 net_instance_register(net_instance_t *nin)
320 {
321 	net_instance_int_t *parent;
322 	net_instance_int_t *tmp;
323 	neti_stack_t *nts;
324 
325 	ASSERT(nin->nin_name != NULL);
326 
327 	if (nin->nin_create == NULL || nin->nin_destroy == NULL)
328 		return (DDI_FAILURE);
329 
330 	mutex_enter(&neti_stack_lock);
331 	/*
332 	 * Search for duplicate, either on the global list or on any
333 	 * of the known instances.
334 	 */
335 	LIST_FOREACH(tmp, &neti_instance_list, nini_next) {
336 		if (strcmp(nin->nin_name, tmp->nini_instance->nin_name) == 0) {
337 			mutex_exit(&neti_stack_lock);
338 			return (DDI_FAILURE);
339 		}
340 	}
341 
342 	/*
343 	 * Now insert and activate.
344 	 */
345 	parent = net_instance_int_create(nin, NULL);
346 	ASSERT(parent != NULL);
347 	LIST_INSERT_HEAD(&neti_instance_list, parent, nini_next);
348 
349 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
350 		mutex_enter(&nts->nts_lock);
351 		/*
352 		 * If shutdown of the zone has begun then do not add a new
353 		 * instance of the object being registered.
354 		 */
355 		if ((nts->nts_flags & NSF_ZONE_SHUTDOWN) ||
356 		    (nts->nts_netstack == NULL)) {
357 			mutex_exit(&nts->nts_lock);
358 			continue;
359 		}
360 		/*
361 		 * This function returns with the NSS_CREATE_NEEDED flag
362 		 * set in "tmp", so it is adequately prepared for the
363 		 * upcoming apply.
364 		 */
365 		tmp = net_instance_int_create(nin, parent);
366 		ASSERT(tmp != NULL);
367 		LIST_INSERT_HEAD(&nts->nts_instances, tmp, nini_next);
368 		mutex_exit(&nts->nts_lock);
369 
370 	}
371 
372 	neti_apply_all_stacks(parent, neti_stack_apply_create);
373 	mutex_exit(&neti_stack_lock);
374 
375 	return (DDI_SUCCESS);
376 }
377 
378 /*
379  * While net_instance_register() isn't likely to be racing against itself,
380  * net_instance_unregister() can be entered from various directions that
381  * can compete: shutdown of a zone, unloading of a module (which calls
382  * _unregister() as part of that process), and the module doing an
383  * _unregister() anyway.
384  */
385 int
386 net_instance_unregister(net_instance_t *nin)
387 {
388 	net_instance_int_t *parent;
389 	net_instance_int_t *tmp;
390 	neti_stack_t *nts;
391 
392 	mutex_enter(&neti_stack_lock);
393 
394 	LIST_FOREACH(tmp, &neti_instance_list, nini_next) {
395 		if (strcmp(tmp->nini_instance->nin_name, nin->nin_name) == 0) {
396 			LIST_REMOVE(tmp, nini_next);
397 			break;
398 		}
399 	}
400 
401 	if (tmp == NULL) {
402 		mutex_exit(&neti_stack_lock);
403 		return (DDI_FAILURE);
404 	}
405 	parent = tmp;
406 
407 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
408 		mutex_enter(&nts->nts_lock);
409 		LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
410 			if (tmp->nini_parent != parent)
411 				continue;
412 			/*
413 			 * Netstack difference:
414 			 * In netstack.c, there is a check for
415 			 * NSS_CREATE_COMPLETED before setting the other
416 			 * _NEEDED flags.  Here we instead rely on a list
417 			 * member always having at least the _CREATE_NEEDED
418 			 * flag set, and on wait_for_nini_inprogress also
419 			 * waiting for that flag to be cleared in both the
420 			 * shutdown and destroy apply functions.
421 			 *
422 			 * It would be possible to optimize the case where
423 			 * all three _NEEDED flags are set by pretending
424 			 * everything has been done and just setting all
425 			 * three _COMPLETED flags.  That would create a
426 			 * special case that we would then need to consider
427 			 * in other locations, so for the sake of simplicity,
428 			 * we leave it as it is.
429 			 */
430 			if ((tmp->nini_flags & NSS_SHUTDOWN_ALL) == 0)
431 				tmp->nini_flags |= NSS_SHUTDOWN_NEEDED;
432 			if ((tmp->nini_flags & NSS_DESTROY_ALL) == 0)
433 				tmp->nini_flags |= NSS_DESTROY_NEEDED;
434 		}
435 		mutex_exit(&nts->nts_lock);
436 	}
437 
438 	/*
439 	 * Each of these apply functions ensures that the requisite prior
440 	 * _COMPLETED flag is present before invoking the callback.  So we
441 	 * are guaranteed to have NSS_CREATE_COMPLETED and
442 	 * NSS_SHUTDOWN_COMPLETED both set after the first call here, and
443 	 * when the second completes, NSS_DESTROY_COMPLETED is also set.
444 	 */
445 	neti_apply_all_stacks(parent, neti_stack_apply_shutdown);
446 	neti_apply_all_stacks(parent, neti_stack_apply_destroy);
447 
448 	/*
449 	 * Remove the instance callback information from each stack.
450 	 */
451 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
452 		mutex_enter(&nts->nts_lock);
453 		LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
454 			if ((tmp->nini_parent == parent) &&
455 			    (tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) &&
456 			    (tmp->nini_flags & NSS_DESTROY_COMPLETED)) {
457 				/*
458 				 * There should only be one entry that has a
459 				 * matching nini_parent so there is no need to
460 				 * worry about continuing a loop where we are
461 				 * freeing the structure holding the 'next'
462 				 * pointer.
463 				 */
464 				LIST_REMOVE(tmp, nini_next);
465 				net_instance_int_free(tmp);
466 				break;
467 			}
468 		}
469 		mutex_exit(&nts->nts_lock);
470 	}
471 	mutex_exit(&neti_stack_lock);
472 
473 	net_instance_int_free(parent);
474 
475 	return (DDI_SUCCESS);
476 }
477 
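/*
 * Call "applyfn" for every instance known to this stack.  If the apply
 * function reports that it dropped neti_stack_lock, the instance list
 * may have changed underneath us, so the walk is restarted from the
 * head; entries that have already been handled are skipped naturally
 * because their _NEEDED flags have been cleared.
 */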
478 static void
479 neti_apply_all_instances(neti_stack_t *nts, napplyfn_t *applyfn)
480 {
481 	net_instance_int_t *n;
482 
483 	ASSERT(mutex_owned(&neti_stack_lock));
484 
485 	n = LIST_FIRST(&nts->nts_instances);
486 	while (n != NULL) {
487 		if ((applyfn)(&neti_stack_lock, nts, n->nini_parent)) {
488 			/* Lock dropped - restart at head */
489 			n = LIST_FIRST(&nts->nts_instances);
490 		} else {
491 			n = LIST_NEXT(n, nini_next);
492 		}
493 	}
494 }
495 
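/*
 * Call "applyfn" on behalf of the registered instance identified by
 * "parent" in every known neti stack, restarting the walk from the
 * head of neti_stack_list whenever the lock is reported dropped.
 */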
496 static void
497 neti_apply_all_stacks(void *parent, napplyfn_t *applyfn)
498 {
499 	neti_stack_t *nts;
500 
501 	ASSERT(mutex_owned(&neti_stack_lock));
502 
503 	nts = LIST_FIRST(&neti_stack_list);
504 	while (nts != NULL) {
505 		/*
506 		 * This function differs from the zsd/netstack code in that
507 		 * it doesn't have a "wait_creator" call.  The waiting is
508 		 * pushed into the apply functions, which have it done in
509 		 * wait_for_nini_inprogress with the cmask that is passed
510 		 * in.
511 		 */
512 		if ((applyfn)(&neti_stack_lock, nts, parent)) {
513 			/* Lock dropped - restart at head */
514 			nts = LIST_FIRST(&neti_stack_list);
515 		} else {
516 			nts = LIST_NEXT(nts, nts_next);
517 		}
518 	}
519 }
520 
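/*
 * Run the create callback for the instance in "nts" whose per-stack
 * record has "parent" as its parent, but only if NSS_CREATE_NEEDED is
 * set.  Both neti_stack_lock and nts_lock are dropped around the
 * callback itself.  Returns B_TRUE if neti_stack_lock was dropped at
 * any point, which tells the caller to restart its list walk.  The
 * shutdown and destroy variants below follow the same structure.
 */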
521 static boolean_t
522 neti_stack_apply_create(kmutex_t *lockp, neti_stack_t *nts, void *parent)
523 {
524 	void *result;
525 	boolean_t dropped = B_FALSE;
526 	net_instance_int_t *tmp;
527 	net_instance_t *nin;
528 
529 	ASSERT(parent != NULL);
530 	ASSERT(lockp != NULL);
531 	ASSERT(mutex_owned(lockp));
532 
533 	mutex_enter(&nts->nts_lock);
534 
535 	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
536 		if (tmp->nini_parent == parent)
537 			break;
538 	}
539 	if (tmp == NULL) {
540 		mutex_exit(&nts->nts_lock);
541 		return (dropped);
542 	}
543 
544 	if (wait_for_nini_inprogress(nts, lockp, tmp, 0))
545 		dropped = B_TRUE;
546 
547 	if (tmp->nini_flags & NSS_CREATE_NEEDED) {
548 		nin = tmp->nini_instance;
549 		tmp->nini_flags &= ~NSS_CREATE_NEEDED;
550 		tmp->nini_flags |= NSS_CREATE_INPROGRESS;
551 		DTRACE_PROBE2(neti__stack__create__inprogress,
552 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
553 		mutex_exit(&nts->nts_lock);
554 		mutex_exit(lockp);
555 		dropped = B_TRUE;
556 
557 		ASSERT(tmp->nini_created == NULL);
558 		ASSERT(nin->nin_create != NULL);
559 		DTRACE_PROBE2(neti__stack__create__start,
560 		    netstackid_t, nts->nts_id,
561 		    neti_stack_t *, nts);
562 		result = (nin->nin_create)(nts->nts_id);
563 		DTRACE_PROBE2(neti__stack__create__end,
564 		    void *, result, neti_stack_t *, nts);
565 
566 		ASSERT(result != NULL);
567 		mutex_enter(lockp);
568 		mutex_enter(&nts->nts_lock);
569 		tmp->nini_created = result;
570 		tmp->nini_flags &= ~NSS_CREATE_INPROGRESS;
571 		tmp->nini_flags |= NSS_CREATE_COMPLETED;
572 		cv_broadcast(&tmp->nini_cv);
573 		DTRACE_PROBE2(neti__stack__create__completed,
574 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
575 	}
576 	mutex_exit(&nts->nts_lock);
577 	return (dropped);
578 }
579 
580 
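/*
 * As above, but for the shutdown callback: wait for any pending create
 * work to finish first, and if the instance registered no shutdown
 * function simply mark the stage as completed.
 */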
581 static boolean_t
582 neti_stack_apply_shutdown(kmutex_t *lockp, neti_stack_t *nts, void *parent)
583 {
584 	boolean_t dropped = B_FALSE;
585 	net_instance_int_t *tmp;
586 	net_instance_t *nin;
587 
588 	ASSERT(parent != NULL);
589 	ASSERT(lockp != NULL);
590 	ASSERT(mutex_owned(lockp));
591 
592 	mutex_enter(&nts->nts_lock);
593 
594 	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
595 		if (tmp->nini_parent == parent)
596 			break;
597 	}
598 	if (tmp == NULL) {
599 		mutex_exit(&nts->nts_lock);
600 		return (dropped);
601 	}
602 
603 	if (wait_for_nini_inprogress(nts, lockp, tmp, NSS_CREATE_NEEDED))
604 		dropped = B_TRUE;
605 
606 	nin = tmp->nini_instance;
607 	if (nin->nin_shutdown == NULL) {
608 		/*
609 		 * If there is no shutdown function, fake having completed it.
610 		 */
611 		if (tmp->nini_flags & NSS_SHUTDOWN_NEEDED) {
612 			tmp->nini_flags &= ~NSS_SHUTDOWN_NEEDED;
613 			tmp->nini_flags |= NSS_SHUTDOWN_COMPLETED;
614 		}
615 
616 		mutex_exit(&nts->nts_lock);
617 		return (dropped);
618 	}
619 
620 	if (tmp->nini_flags & NSS_SHUTDOWN_NEEDED) {
621 		ASSERT((tmp->nini_flags & NSS_CREATE_COMPLETED) != 0);
622 		tmp->nini_flags &= ~NSS_SHUTDOWN_NEEDED;
623 		tmp->nini_flags |= NSS_SHUTDOWN_INPROGRESS;
624 		DTRACE_PROBE2(neti__stack__shutdown__inprogress,
625 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
626 		mutex_exit(&nts->nts_lock);
627 		mutex_exit(lockp);
628 		dropped = B_TRUE;
629 
630 		ASSERT(nin->nin_shutdown != NULL);
631 		DTRACE_PROBE2(neti__stack__shutdown__start,
632 		    netstackid_t, nts->nts_id,
633 		    neti_stack_t *, nts);
634 		(nin->nin_shutdown)(nts->nts_id, tmp->nini_created);
635 		DTRACE_PROBE1(neti__stack__shutdown__end,
636 		    neti_stack_t *, nts);
637 
638 		mutex_enter(lockp);
639 		mutex_enter(&nts->nts_lock);
640 		tmp->nini_flags &= ~NSS_SHUTDOWN_INPROGRESS;
641 		tmp->nini_flags |= NSS_SHUTDOWN_COMPLETED;
642 		cv_broadcast(&tmp->nini_cv);
643 		DTRACE_PROBE2(neti__stack__shutdown__completed,
644 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
645 	}
646 	ASSERT((tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) != 0);
647 	mutex_exit(&nts->nts_lock);
648 	return (dropped);
649 }
650 
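/*
 * As above, but for the destroy callback: wait for any pending create
 * or shutdown work to finish, so that by the time nin_destroy runs we
 * are the only ones doing anything active with this instance.
 */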
651 static boolean_t
652 neti_stack_apply_destroy(kmutex_t *lockp, neti_stack_t *nts, void *parent)
653 {
654 	boolean_t dropped = B_FALSE;
655 	net_instance_int_t *tmp;
656 	net_instance_t *nin;
657 
658 	ASSERT(parent != NULL);
659 	ASSERT(lockp != NULL);
660 	ASSERT(mutex_owned(lockp));
661 
662 	mutex_enter(&nts->nts_lock);
663 
664 	LIST_FOREACH(tmp, &nts->nts_instances, nini_next) {
665 		if (tmp->nini_parent == parent)
666 			break;
667 	}
668 	if (tmp == NULL) {
669 		mutex_exit(&nts->nts_lock);
670 		return (dropped);
671 	}
672 
673 	/*
674 	 * We pause here so that when we continue we know that we're the
675 	 * only one doing anything active with this node.
676 	 */
677 	if (wait_for_nini_inprogress(nts, lockp, tmp,
678 	    NSS_CREATE_NEEDED|NSS_SHUTDOWN_NEEDED))
679 		dropped = B_TRUE;
680 
681 	if (tmp->nini_flags & NSS_DESTROY_NEEDED) {
682 		ASSERT((tmp->nini_flags & NSS_SHUTDOWN_COMPLETED) != 0);
683 		nin = tmp->nini_instance;
684 		tmp->nini_flags &= ~NSS_DESTROY_NEEDED;
685 		tmp->nini_flags |= NSS_DESTROY_INPROGRESS;
686 		DTRACE_PROBE2(neti__stack__destroy__inprogress,
687 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
688 		mutex_exit(&nts->nts_lock);
689 		mutex_exit(lockp);
690 		dropped = B_TRUE;
691 
692 		ASSERT(nin->nin_destroy != NULL);
693 		DTRACE_PROBE2(neti__stack__destroy__start,
694 		    netstackid_t, nts->nts_id,
695 		    neti_stack_t *, nts);
696 		(nin->nin_destroy)(nts->nts_id, tmp->nini_created);
697 		DTRACE_PROBE1(neti__stack__destroy__end,
698 		    neti_stack_t *, nts);
699 
700 		mutex_enter(lockp);
701 		mutex_enter(&nts->nts_lock);
702 		tmp->nini_flags &= ~NSS_DESTROY_INPROGRESS;
703 		tmp->nini_flags |= NSS_DESTROY_COMPLETED;
704 		cv_broadcast(&tmp->nini_cv);
705 		DTRACE_PROBE2(neti__stack__destroy__completed,
706 		    neti_stack_t *, nts, net_instance_int_t *, tmp);
707 	}
708 	mutex_exit(&nts->nts_lock);
709 	return (dropped);
710 }
711 
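/*
 * Wait until this instance has no callback in progress and none of the
 * stages named in "cmask" still pending (_NEEDED).  Returns B_TRUE if
 * lockp had to be dropped while waiting, so the caller knows that any
 * list it was walking may have changed.
 */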
712 static boolean_t
713 wait_for_nini_inprogress(neti_stack_t *nts, kmutex_t *lockp,
714     net_instance_int_t *nini, uint32_t cmask)
715 {
716 	boolean_t dropped = B_FALSE;
717 
718 	ASSERT(lockp != NULL);
719 	ASSERT(mutex_owned(lockp));
720 
721 	while (nini->nini_flags & (NSS_ALL_INPROGRESS|cmask)) {
722 		DTRACE_PROBE2(netstack__wait__nms__inprogress,
723 		    neti_stack_t *, nts, net_instance_int_t *, nini);
724 		dropped = B_TRUE;
725 		mutex_exit(lockp);
726 
727 		cv_wait(&nini->nini_cv, &nts->nts_lock);
728 
729 		/* First drop nts_lock to preserve lock ordering */
730 		mutex_exit(&nts->nts_lock);
731 		mutex_enter(lockp);
732 		mutex_enter(&nts->nts_lock);
733 	}
734 	return (dropped);
735 }
736 
737 /* ======================================================================= */
738 
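/*
 * Conversion routines between the identifiers in play here: zoneid_t,
 * netid_t and netstackid_t.  Each does a linear search of
 * neti_stack_list under neti_stack_lock; -1 (or NULL in the case of
 * net_getnetistackbyid) indicates that no matching stack was found.
 */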
739 netid_t
740 net_zoneidtonetid(zoneid_t zoneid)
741 {
742 
743 	neti_stack_t *nts;
744 
745 	mutex_enter(&neti_stack_lock);
746 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
747 		if (nts->nts_zoneid == zoneid) {
748 			mutex_exit(&neti_stack_lock);
749 			return (nts->nts_id);
750 		}
751 	}
752 	mutex_exit(&neti_stack_lock);
753 
754 	return (-1);
755 }
756 
757 zoneid_t
758 net_getzoneidbynetid(netid_t netid)
759 {
760 	neti_stack_t *nts;
761 
762 	mutex_enter(&neti_stack_lock);
763 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
764 		if (nts->nts_id == netid) {
765 			mutex_exit(&neti_stack_lock);
766 			return (nts->nts_zoneid);
767 		}
768 	}
769 	mutex_exit(&neti_stack_lock);
770 
771 	return (-1);
772 }
773 
774 netstackid_t
775 net_getnetstackidbynetid(netid_t netid)
776 {
777 	neti_stack_t *nts;
778 
779 	mutex_enter(&neti_stack_lock);
780 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
781 		if (nts->nts_id == netid) {
782 			mutex_exit(&neti_stack_lock);
783 			return (nts->nts_stackid);
784 		}
785 	}
786 	mutex_exit(&neti_stack_lock);
787 
788 	return (-1);
789 }
790 
791 netid_t
792 net_getnetidbynetstackid(netstackid_t netstackid)
793 {
794 	neti_stack_t *nts;
795 
796 	mutex_enter(&neti_stack_lock);
797 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
798 		if (nts->nts_stackid == netstackid) {
799 			mutex_exit(&neti_stack_lock);
800 			return (nts->nts_id);
801 		}
802 	}
803 	mutex_exit(&neti_stack_lock);
804 
805 	return (-1);
806 }
807 
808 neti_stack_t *
809 net_getnetistackbyid(netid_t netid)
810 {
811 	neti_stack_t *nts;
812 
813 	mutex_enter(&neti_stack_lock);
814 	LIST_FOREACH(nts, &neti_stack_list, nts_next) {
815 		if (nts->nts_id == netid) {
816 			mutex_exit(&neti_stack_lock);
817 			return (nts);
818 		}
819 	}
820 	mutex_exit(&neti_stack_lock);
821 
822 	return (NULL);
823 }
824 
825 int
826 net_instance_notify_register(netid_t netid, hook_notify_fn_t callback,
827     void *arg)
828 {
829 
830 	return (hook_stack_notify_register(net_getnetstackidbynetid(netid),
831 	    callback, arg));
832 }
833 
834 int
835 net_instance_notify_unregister(netid_t netid, hook_notify_fn_t callback)
836 {
837 
838 	return (hook_stack_notify_unregister(net_getnetstackidbynetid(netid),
839 	    callback));
840 }
841