xref: /titanic_41/usr/src/uts/common/inet/ip/ipmp.c (revision 5819f75e225cf93d9c11f52e04ee71c2dcd0eca9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
22  */
23 
24 #include <inet/ip.h>
25 #include <inet/ip6.h>
26 #include <inet/ip_if.h>
27 #include <inet/ip_ire.h>
28 #include <inet/ip_multi.h>
29 #include <inet/ip_ndp.h>
30 #include <inet/ip_rts.h>
31 #include <inet/mi.h>
32 #include <net/if_types.h>
33 #include <sys/dlpi.h>
34 #include <sys/kmem.h>
35 #include <sys/modhash.h>
36 #include <sys/sdt.h>
37 #include <sys/strsun.h>
38 #include <sys/sunddi.h>
39 #include <sys/types.h>
40 
41 /*
42  * Convenience macros for getting the ip_stack_t associated with an
43  * ipmp_illgrp_t or ipmp_grp_t.
44  */
45 #define	IPMP_GRP_TO_IPST(grp)		PHYINT_TO_IPST((grp)->gr_phyint)
46 #define	IPMP_ILLGRP_TO_IPST(illg)	((illg)->ig_ipmp_ill->ill_ipst)
47 
48 /*
49  * Assorted constants that aren't important enough to be tunable.
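 * IPMP_ILL_REFRESH_TIMEOUT is the delay before retrying an activate or
 * deactivate refresh that could not be completed (see
 * ipmp_ill_refresh_active_timer_start()).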
50  */
51 #define	IPMP_GRP_HASH_SIZE		64
52 #define	IPMP_ILL_REFRESH_TIMEOUT	120	/* seconds */
53 
54 /*
55  * IPMP meta-interface kstats (based on those in PSARC/1997/198).
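 * Each group exports these as a named kstat created by
 * ipmp_grp_create_kstats() (module "ipmp", instance 0, name gr_ifname,
 * class "net").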
56  */
57 static const kstat_named_t ipmp_kstats[IPMP_KSTAT_MAX] = {
58 	{ "obytes",	KSTAT_DATA_UINT32 },
59 	{ "obytes64",	KSTAT_DATA_UINT64 },
60 	{ "rbytes",	KSTAT_DATA_UINT32 },
61 	{ "rbytes64",	KSTAT_DATA_UINT64 },
62 	{ "opackets",	KSTAT_DATA_UINT32 },
63 	{ "opackets64",	KSTAT_DATA_UINT64 },
64 	{ "oerrors",	KSTAT_DATA_UINT32 },
65 	{ "ipackets",	KSTAT_DATA_UINT32 },
66 	{ "ipackets64",	KSTAT_DATA_UINT64 },
67 	{ "ierrors",	KSTAT_DATA_UINT32 },
68 	{ "multircv",	KSTAT_DATA_UINT32 },
69 	{ "multixmt",	KSTAT_DATA_UINT32 },
70 	{ "brdcstrcv",	KSTAT_DATA_UINT32 },
71 	{ "brdcstxmt",	KSTAT_DATA_UINT32 },
72 	{ "link_up",	KSTAT_DATA_UINT32 }
73 };
74 
75 static void	ipmp_grp_insert(ipmp_grp_t *, mod_hash_hndl_t);
76 static int	ipmp_grp_create_kstats(ipmp_grp_t *);
77 static int	ipmp_grp_update_kstats(kstat_t *, int);
78 static void	ipmp_grp_destroy_kstats(ipmp_grp_t *);
79 static ill_t	*ipmp_illgrp_min_ill(ipmp_illgrp_t *);
80 static ill_t	*ipmp_illgrp_max_ill(ipmp_illgrp_t *);
81 static void	ipmp_illgrp_set_cast(ipmp_illgrp_t *, ill_t *);
82 static void	ipmp_illgrp_set_mtu(ipmp_illgrp_t *, uint_t);
83 static boolean_t ipmp_ill_activate(ill_t *);
84 static void	ipmp_ill_deactivate(ill_t *);
85 static void	ipmp_ill_ire_mark_testhidden(ire_t *, char *);
86 static void	ipmp_ill_ire_clear_testhidden(ire_t *, char *);
87 static void	ipmp_ill_refresh_active_timer_start(ill_t *);
88 static void	ipmp_ill_rtsaddrmsg(ill_t *, int);
89 static void	ipmp_ill_bind_ipif(ill_t *, ipif_t *, enum ip_resolver_action);
90 static ipif_t	*ipmp_ill_unbind_ipif(ill_t *, ipif_t *, boolean_t);
91 static void	ipmp_phyint_get_kstats(phyint_t *, uint64_t *);
92 static boolean_t ipmp_ipif_is_up_dataaddr(const ipif_t *);
93 static void	ipmp_ncec_delete_nonlocal(ncec_t *, uchar_t *);
94 
95 /*
96  * Initialize IPMP state for IP stack `ipst'; called from ip_stack_init().
97  */
98 void
99 ipmp_init(ip_stack_t *ipst)
100 {
101 	ipst->ips_ipmp_grp_hash = mod_hash_create_extended("ipmp_grp_hash",
102 	    IPMP_GRP_HASH_SIZE, mod_hash_null_keydtor, mod_hash_null_valdtor,
103 	    mod_hash_bystr, NULL, mod_hash_strkey_cmp, KM_SLEEP);
104 	rw_init(&ipst->ips_ipmp_lock, NULL, RW_DEFAULT, 0);
105 }
106 
107 /*
108  * Destroy IPMP state for IP stack `ipst'; called from ip_stack_fini().
109  */
110 void
111 ipmp_destroy(ip_stack_t *ipst)
112 {
113 	mod_hash_destroy_hash(ipst->ips_ipmp_grp_hash);
114 	rw_destroy(&ipst->ips_ipmp_lock);
115 }
116 
117 /*
118  * Create an IPMP group named `grname', associate it with IPMP phyint `phyi',
119  * and add it to the hash.  On success, return a pointer to the created group.
120  * Caller must ensure `grname' is not yet in the hash.  Assumes that the IPMP
121  * meta-interface associated with the group also has the same name (but they
122  * may differ later via ipmp_grp_rename()).
123  */
124 ipmp_grp_t *
125 ipmp_grp_create(const char *grname, phyint_t *phyi)
126 {
127 	ipmp_grp_t *grp;
128 	ip_stack_t *ipst = PHYINT_TO_IPST(phyi);
129 	mod_hash_hndl_t mh;
130 
131 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
132 
133 	if ((grp = kmem_zalloc(sizeof (ipmp_grp_t), KM_NOSLEEP)) == NULL)
134 		return (NULL);
135 
136 	(void) strlcpy(grp->gr_name, grname, sizeof (grp->gr_name));
137 	(void) strlcpy(grp->gr_ifname, grname, sizeof (grp->gr_ifname));
138 
139 	/*
140 	 * Cache the group's phyint.  This is safe since a phyint_t will
141 	 * outlive its ipmp_grp_t.
142 	 */
143 	grp->gr_phyint = phyi;
144 
145 	/*
146 	 * Create IPMP group kstats.
147 	 */
148 	if (ipmp_grp_create_kstats(grp) != 0) {
149 		kmem_free(grp, sizeof (ipmp_grp_t));
150 		return (NULL);
151 	}
152 
153 	/*
154 	 * Insert the group into the hash.
155 	 */
156 	if (mod_hash_reserve_nosleep(ipst->ips_ipmp_grp_hash, &mh) != 0) {
157 		ipmp_grp_destroy_kstats(grp);
158 		kmem_free(grp, sizeof (ipmp_grp_t));
159 		return (NULL);
160 	}
161 	ipmp_grp_insert(grp, mh);
162 
163 	return (grp);
164 }
165 
166 /*
167  * Create IPMP kstat structures for `grp'.  Return an errno upon failure.
168  */
169 static int
170 ipmp_grp_create_kstats(ipmp_grp_t *grp)
171 {
172 	kstat_t *ksp;
173 	netstackid_t id = IPMP_GRP_TO_IPST(grp)->ips_netstack->netstack_stackid;
174 
175 	ksp = kstat_create_netstack("ipmp", 0, grp->gr_ifname, "net",
176 	    KSTAT_TYPE_NAMED, IPMP_KSTAT_MAX, 0, id);
177 	if (ksp == NULL)
178 		return (ENOMEM);
179 
180 	ksp->ks_update = ipmp_grp_update_kstats;
181 	ksp->ks_private = grp;
182 	bcopy(ipmp_kstats, ksp->ks_data, sizeof (ipmp_kstats));
183 
184 	kstat_install(ksp);
185 	grp->gr_ksp = ksp;
186 	return (0);
187 }
188 
189 /*
190  * Update the IPMP kstats tracked by `ksp'; called by the kstats framework.
191  */
192 static int
193 ipmp_grp_update_kstats(kstat_t *ksp, int rw)
194 {
195 	uint_t		i;
196 	kstat_named_t	*kn = KSTAT_NAMED_PTR(ksp);
197 	ipmp_grp_t	*grp = ksp->ks_private;
198 	ip_stack_t	*ipst = IPMP_GRP_TO_IPST(grp);
199 	ipsq_t		*ipsq, *grp_ipsq = grp->gr_phyint->phyint_ipsq;
200 	phyint_t	*phyi;
201 	uint64_t	phyi_kstats[IPMP_KSTAT_MAX];
202 
203 	if (rw == KSTAT_WRITE)
204 		return (EACCES);
205 
206 	/*
207 	 * Start with the group's baseline values.
208 	 */
209 	for (i = 0; i < IPMP_KSTAT_MAX; i++) {
210 		if (kn[i].data_type == KSTAT_DATA_UINT32) {
211 			kn[i].value.ui32 = grp->gr_kstats0[i];
212 		} else {
213 			ASSERT(kn[i].data_type == KSTAT_DATA_UINT64);
214 			kn[i].value.ui64 = grp->gr_kstats0[i];
215 		}
216 	}
217 
218 	/*
219 	 * Add in the stats of each phyint currently in the group.  Since we
220 	 * don't directly track the phyints in a group, we cheat by walking
221 	 * the IPSQ set under ill_g_lock.  (The IPSQ list cannot change while
222 	 * ill_g_lock is held.)
223 	 */
224 	rw_enter(&ipst->ips_ill_g_lock, RW_READER);
225 	ipsq = grp_ipsq->ipsq_next;
226 	for (; ipsq != grp_ipsq; ipsq = ipsq->ipsq_next) {
227 		phyi = ipsq->ipsq_phyint;
228 
229 		/*
230 		 * If a phyint in a group is being unplumbed, it's possible
231 		 * that ill_glist_delete() -> phyint_free() already freed the
232 		 * phyint (and set ipsq_phyint to NULL), but the unplumb
233 		 * operation has yet to complete (and thus ipsq_dq() has yet
234 		 * to remove the phyint's IPSQ from the group IPSQ's phyint
235 		 * list).  We skip those phyints here (note that their kstats
236 		 * have already been added to gr_kstats0[]).
237 		 */
238 		if (phyi == NULL)
239 			continue;
240 
241 		ipmp_phyint_get_kstats(phyi, phyi_kstats);
242 
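		/*
		 * Add only the counter deltas accrued since the baseline in
		 * phyint_kstats0[] was snapshotted.
		 */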
243 		for (i = 0; i < IPMP_KSTAT_MAX; i++) {
244 			phyi_kstats[i] -= phyi->phyint_kstats0[i];
245 			if (kn[i].data_type == KSTAT_DATA_UINT32)
246 				kn[i].value.ui32 += phyi_kstats[i];
247 			else
248 				kn[i].value.ui64 += phyi_kstats[i];
249 		}
250 	}
251 
252 	kn[IPMP_KSTAT_LINK_UP].value.ui32 =
253 	    (grp->gr_phyint->phyint_flags & PHYI_RUNNING) != 0;
254 
255 	rw_exit(&ipst->ips_ill_g_lock);
256 	return (0);
257 }
258 
259 /*
260  * Destroy IPMP kstat structures for `grp'.
261  */
262 static void
263 ipmp_grp_destroy_kstats(ipmp_grp_t *grp)
264 {
265 	netstackid_t id = IPMP_GRP_TO_IPST(grp)->ips_netstack->netstack_stackid;
266 
267 	kstat_delete_netstack(grp->gr_ksp, id);
268 	bzero(grp->gr_kstats0, sizeof (grp->gr_kstats0));
269 	grp->gr_ksp = NULL;
270 }
271 
272 /*
273  * Look up an IPMP group named `grname' on IP stack `ipst'.  Return NULL if it
274  * does not exist.
275  */
276 ipmp_grp_t *
277 ipmp_grp_lookup(const char *grname, ip_stack_t *ipst)
278 {
279 	ipmp_grp_t *grp;
280 
281 	ASSERT(RW_LOCK_HELD(&ipst->ips_ipmp_lock));
282 
283 	if (mod_hash_find(ipst->ips_ipmp_grp_hash, (mod_hash_key_t)grname,
284 	    (mod_hash_val_t *)&grp) == 0)
285 		return (grp);
286 
287 	return (NULL);
288 }
289 
290 /*
291  * Place information about group `grp' into `lifgr'.
292  */
293 void
294 ipmp_grp_info(const ipmp_grp_t *grp, lifgroupinfo_t *lifgr)
295 {
296 	ill_t *ill;
297 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
298 
299 	ASSERT(RW_LOCK_HELD(&ipst->ips_ipmp_lock));
300 
301 	lifgr->gi_v4 = (grp->gr_v4 != NULL);
302 	lifgr->gi_v6 = (grp->gr_v6 != NULL);
303 	lifgr->gi_nv4 = grp->gr_nv4 + grp->gr_pendv4;
304 	lifgr->gi_nv6 = grp->gr_nv6 + grp->gr_pendv6;
305 	lifgr->gi_mactype = grp->gr_nif > 0 ? grp->gr_mactype : SUNW_DL_IPMP;
306 	(void) strlcpy(lifgr->gi_grifname, grp->gr_ifname, LIFNAMSIZ);
307 	lifgr->gi_m4ifname[0] = '\0';
308 	lifgr->gi_m6ifname[0] = '\0';
309 	lifgr->gi_bcifname[0] = '\0';
310 
311 	if (grp->gr_v4 != NULL && (ill = grp->gr_v4->ig_cast_ill) != NULL) {
312 		(void) strlcpy(lifgr->gi_m4ifname, ill->ill_name, LIFNAMSIZ);
313 		(void) strlcpy(lifgr->gi_bcifname, ill->ill_name, LIFNAMSIZ);
314 	}
315 
316 	if (grp->gr_v6 != NULL && (ill = grp->gr_v6->ig_cast_ill) != NULL)
317 		(void) strlcpy(lifgr->gi_m6ifname, ill->ill_name, LIFNAMSIZ);
318 }
319 
320 /*
321  * Insert `grp' into the hash using the reserved hash entry `mh'.
322  * Caller must ensure `grp' is not yet in the hash.
323  */
324 static void
325 ipmp_grp_insert(ipmp_grp_t *grp, mod_hash_hndl_t mh)
326 {
327 	int err;
328 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
329 
330 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
331 
332 	/*
333 	 * Since grp->gr_name will exist at least as long as `grp' is in the
334 	 * hash, we use it directly as the key.
335 	 */
336 	err = mod_hash_insert_reserve(ipst->ips_ipmp_grp_hash,
337 	    (mod_hash_key_t)grp->gr_name, (mod_hash_val_t)grp, mh);
338 	if (err != 0) {
339 		/*
340 		 * This should never happen since `mh' was preallocated.
341 		 */
342 		panic("cannot insert IPMP group \"%s\" (err %d)",
343 		    grp->gr_name, err);
344 	}
345 }
346 
347 /*
348  * Remove `grp' from the hash.  Caller must ensure `grp' is in it.
349  */
350 static void
351 ipmp_grp_remove(ipmp_grp_t *grp)
352 {
353 	int err;
354 	mod_hash_val_t val;
355 	mod_hash_key_t key = (mod_hash_key_t)grp->gr_name;
356 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
357 
358 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
359 
360 	err = mod_hash_remove(ipst->ips_ipmp_grp_hash, key, &val);
361 	if (err != 0 || val != grp) {
362 		panic("cannot remove IPMP group \"%s\" (err %d)",
363 		    grp->gr_name, err);
364 	}
365 }
366 
367 /*
368  * Attempt to rename `grp' to new name `grname'.  Return an errno if the new
369  * group name already exists or is invalid, or if there isn't enough memory.
370  */
371 int
372 ipmp_grp_rename(ipmp_grp_t *grp, const char *grname)
373 {
374 	mod_hash_hndl_t mh;
375 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
376 
377 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
378 
379 	if (grname[0] == '\0')
380 		return (EINVAL);
381 
382 	if (mod_hash_find(ipst->ips_ipmp_grp_hash, (mod_hash_key_t)grname,
383 	    (mod_hash_val_t *)&grp) != MH_ERR_NOTFOUND)
384 		return (EEXIST);
385 
386 	/*
387 	 * Before we remove the group from the hash, ensure we'll be able to
388 	 * re-insert it by reserving space.
389 	 */
390 	if (mod_hash_reserve_nosleep(ipst->ips_ipmp_grp_hash, &mh) != 0)
391 		return (ENOMEM);
392 
393 	ipmp_grp_remove(grp);
394 	(void) strlcpy(grp->gr_name, grname, sizeof (grp->gr_name));
395 	ipmp_grp_insert(grp, mh);
396 
397 	return (0);
398 }
399 
400 /*
401  * Destroy `grp' and remove it from the hash.  Caller must ensure `grp' is in
402  * the hash, and that there are no interfaces on it.
403  */
404 void
405 ipmp_grp_destroy(ipmp_grp_t *grp)
406 {
407 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
408 
409 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
410 
411 	/*
412 	 * If there are still interfaces using this group, panic before things
413 	 * go really off the rails.
414 	 */
415 	if (grp->gr_nif != 0)
416 		panic("cannot destroy IPMP group \"%s\": in use", grp->gr_name);
417 
418 	ipmp_grp_remove(grp);
419 	ipmp_grp_destroy_kstats(grp);
420 
421 	ASSERT(grp->gr_v4 == NULL);
422 	ASSERT(grp->gr_v6 == NULL);
423 	ASSERT(grp->gr_nv4 == 0);
424 	ASSERT(grp->gr_nv6 == 0);
425 	ASSERT(grp->gr_nactif == 0);
426 	ASSERT(grp->gr_linkdownmp == NULL);
427 	grp->gr_phyint = NULL;
428 
429 	kmem_free(grp, sizeof (ipmp_grp_t));
430 }
431 
432 /*
433  * Check whether `ill' is suitable for inclusion into `grp', and return an
434  * errno describing the problem (if any).  NOTE: many of these errno values
435  * are interpreted by ifconfig, which will take corrective action and retry
436  * the SIOCSLIFGROUPNAME, so please exercise care when changing them.
437  */
438 static int
439 ipmp_grp_vet_ill(ipmp_grp_t *grp, ill_t *ill)
440 {
441 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
442 
443 	ASSERT(IAM_WRITER_ILL(ill));
444 	ASSERT(RW_LOCK_HELD(&ipst->ips_ipmp_lock));
445 
446 	/*
447 	 * To sidestep complicated address migration logic in the kernel and
448 	 * to force the kernel's all-hosts multicast memberships to be blown
449 	 * away, all addresses that had been brought up must be brought back
450 	 * down prior to adding an interface to a group.  (This includes
451 	 * addresses currently down due to DAD.)  Once the interface has been
452 	 * added to the group, its addresses can then be brought back up, at
453 	 * which point they will be moved to the IPMP meta-interface.
454 	 * NOTE: we do this before ill_appaddr_cnt() since bringing down the
455 	 * link-local causes in.ndpd to remove its ADDRCONF'd addresses.
456 	 */
457 	if (ill->ill_ipif_up_count + ill->ill_ipif_dup_count > 0)
458 		return (EADDRINUSE);
459 
460 	/*
461 	 * To avoid confusing applications by changing addresses that are
462 	 * under their control, all such control must be removed prior to
463 	 * adding an interface into a group.
464 	 */
465 	if (ill_appaddr_cnt(ill) != 0)
466 		return (EADDRNOTAVAIL);
467 
468 	/*
469 	 * Since PTP addresses do not share the same broadcast domain, they
470 	 * are not allowed to be in an IPMP group.
471 	 */
472 	if (ill_ptpaddr_cnt(ill) != 0)
473 		return (EINVAL);
474 
475 	/*
476 	 * An ill must support multicast to be allowed into a group.
477 	 */
478 	if (!(ill->ill_flags & ILLF_MULTICAST))
479 		return (ENOTSUP);
480 
481 	/*
482 	 * An ill must strictly be using ARP and/or ND for address
483 	 * resolution for it to be allowed into a group.
484 	 */
485 	if (ill->ill_flags & (ILLF_NONUD | ILLF_NOARP))
486 		return (ENOTSUP);
487 
488 	/*
489 	 * An ill cannot also be using usesrc groups.  (Although usesrc uses
490 	 * ill_g_usesrc_lock, we don't need to grab it since usesrc also does
491 	 * all its modifications as writer.)
492 	 */
493 	if (IS_USESRC_ILL(ill) || IS_USESRC_CLI_ILL(ill))
494 		return (ENOTSUP);
495 
496 	/*
497 	 * All ills in a group must be the same mactype.
498 	 */
499 	if (grp->gr_nif > 0 && grp->gr_mactype != ill->ill_mactype)
500 		return (EINVAL);
501 
502 	return (0);
503 }
504 
505 /*
506  * Check whether `phyi' is suitable for inclusion into `grp', and return an
507  * errno describing the problem (if any).  See comment above ipmp_grp_vet_ill()
508  * regarding errno values.
509  */
510 int
511 ipmp_grp_vet_phyint(ipmp_grp_t *grp, phyint_t *phyi)
512 {
513 	int err = 0;
514 	ip_stack_t *ipst = IPMP_GRP_TO_IPST(grp);
515 
516 	ASSERT(IAM_WRITER_IPSQ(phyi->phyint_ipsq));
517 	ASSERT(RW_LOCK_HELD(&ipst->ips_ipmp_lock));
518 
519 	/*
520 	 * An interface cannot have address families plumbed that are not
521 	 * configured in the group.
522 	 */
523 	if ((phyi->phyint_illv4 != NULL && grp->gr_v4 == NULL) ||
524 	    (phyi->phyint_illv6 != NULL && grp->gr_v6 == NULL))
525 		return (EAFNOSUPPORT);
526 
527 	if (phyi->phyint_illv4 != NULL)
528 		err = ipmp_grp_vet_ill(grp, phyi->phyint_illv4);
529 	if (err == 0 && phyi->phyint_illv6 != NULL)
530 		err = ipmp_grp_vet_ill(grp, phyi->phyint_illv6);
531 
532 	return (err);
533 }
534 
535 /*
536  * Create a new illgrp on IPMP meta-interface `ill'.
537  */
538 ipmp_illgrp_t *
539 ipmp_illgrp_create(ill_t *ill)
540 {
541 	uint_t mtu = ill->ill_isv6 ? IPV6_MIN_MTU : IP_MIN_MTU;
542 	ipmp_illgrp_t *illg;
543 
544 	ASSERT(IAM_WRITER_ILL(ill));
545 	ASSERT(IS_IPMP(ill));
546 	ASSERT(ill->ill_grp == NULL);
547 
548 	if ((illg = kmem_zalloc(sizeof (ipmp_illgrp_t), KM_NOSLEEP)) == NULL)
549 		return (NULL);
550 
551 	list_create(&illg->ig_if, sizeof (ill_t), offsetof(ill_t, ill_grpnode));
552 	list_create(&illg->ig_actif, sizeof (ill_t),
553 	    offsetof(ill_t, ill_actnode));
554 	list_create(&illg->ig_arpent, sizeof (ipmp_arpent_t),
555 	    offsetof(ipmp_arpent_t, ia_node));
556 
557 	illg->ig_ipmp_ill = ill;
558 	ill->ill_grp = illg;
559 	ipmp_illgrp_set_mtu(illg, mtu);
560 
561 	return (illg);
562 }
563 
564 /*
565  * Destroy illgrp `illg', and disconnect it from its IPMP meta-interface.
566  */
567 void
568 ipmp_illgrp_destroy(ipmp_illgrp_t *illg)
569 {
570 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
571 	ASSERT(IS_IPMP(illg->ig_ipmp_ill));
572 
573 	/*
574 	 * Verify `illg' is empty.
575 	 */
576 	ASSERT(illg->ig_next_ill == NULL);
577 	ASSERT(illg->ig_cast_ill == NULL);
578 	ASSERT(list_is_empty(&illg->ig_arpent));
579 	ASSERT(list_is_empty(&illg->ig_if));
580 	ASSERT(list_is_empty(&illg->ig_actif));
581 	ASSERT(illg->ig_nactif == 0);
582 
583 	/*
584 	 * Destroy `illg'.
585 	 */
586 	illg->ig_ipmp_ill->ill_grp = NULL;
587 	illg->ig_ipmp_ill = NULL;
588 	list_destroy(&illg->ig_if);
589 	list_destroy(&illg->ig_actif);
590 	list_destroy(&illg->ig_arpent);
591 	kmem_free(illg, sizeof (ipmp_illgrp_t));
592 }
593 
594 /*
595  * Add `ipif' to the pool of usable data addresses on `illg' and attempt to
596  * bind it to an underlying ill, while keeping an even address distribution.
597  * If the bind is successful, return a pointer to the bound ill.
598  */
599 ill_t *
600 ipmp_illgrp_add_ipif(ipmp_illgrp_t *illg, ipif_t *ipif)
601 {
602 	ill_t *minill;
603 	ipmp_arpent_t *entp;
604 
605 	ASSERT(IAM_WRITER_IPIF(ipif));
606 	ASSERT(ipmp_ipif_is_dataaddr(ipif));
607 
608 	/*
609 	 * IPMP data address mappings are internally managed by IP itself, so
610 	 * delete any existing ARP entries associated with the address.
611 	 */
612 	if (!ipif->ipif_isv6) {
613 		entp = ipmp_illgrp_lookup_arpent(illg, &ipif->ipif_lcl_addr);
614 		if (entp != NULL)
615 			ipmp_illgrp_destroy_arpent(illg, entp);
616 	}
617 
618 	if ((minill = ipmp_illgrp_min_ill(illg)) != NULL)
619 		ipmp_ill_bind_ipif(minill, ipif, Res_act_none);
620 
621 	return (ipif->ipif_bound ? ipif->ipif_bound_ill : NULL);
622 }
623 
624 /*
625  * Delete `ipif' from the pool of usable data addresses on `illg'.  If it's
626  * bound, unbind it from the underlying ill while keeping an even address
627  * distribution.
628  */
629 void
630 ipmp_illgrp_del_ipif(ipmp_illgrp_t *illg, ipif_t *ipif)
631 {
632 	ill_t *maxill, *boundill = ipif->ipif_bound_ill;
633 
634 	ASSERT(IAM_WRITER_IPIF(ipif));
635 
636 	if (boundill != NULL) {
637 		(void) ipmp_ill_unbind_ipif(boundill, ipif, B_FALSE);
638 
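		/*
		 * If the busiest ill now has at least two more bound
		 * addresses than `boundill', move one of them over to keep
		 * the distribution even.
		 */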
639 		maxill = ipmp_illgrp_max_ill(illg);
640 		if (maxill->ill_bound_cnt > boundill->ill_bound_cnt + 1) {
641 			ipif = ipmp_ill_unbind_ipif(maxill, NULL, B_TRUE);
642 			ipmp_ill_bind_ipif(boundill, ipif, Res_act_rebind);
643 		}
644 	}
645 }
646 
647 /*
648  * Return the active ill with the greatest number of data addresses in `illg'.
649  */
650 static ill_t *
651 ipmp_illgrp_max_ill(ipmp_illgrp_t *illg)
652 {
653 	ill_t *ill, *bestill = NULL;
654 
655 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
656 
657 	ill = list_head(&illg->ig_actif);
658 	for (; ill != NULL; ill = list_next(&illg->ig_actif, ill)) {
659 		if (bestill == NULL ||
660 		    ill->ill_bound_cnt > bestill->ill_bound_cnt) {
661 			bestill = ill;
662 		}
663 	}
664 	return (bestill);
665 }
666 
667 /*
668  * Return the active ill with the fewest data addresses in `illg'.
669  */
670 static ill_t *
671 ipmp_illgrp_min_ill(ipmp_illgrp_t *illg)
672 {
673 	ill_t *ill, *bestill = NULL;
674 
675 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
676 
677 	ill = list_head(&illg->ig_actif);
678 	for (; ill != NULL; ill = list_next(&illg->ig_actif, ill)) {
679 		if (bestill == NULL ||
680 		    ill->ill_bound_cnt < bestill->ill_bound_cnt) {
681 			if (ill->ill_bound_cnt == 0)
682 				return (ill);	 /* can't get better */
683 			bestill = ill;
684 		}
685 	}
686 	return (bestill);
687 }
688 
689 /*
690  * Return a pointer to IPMP meta-interface for `illg' (which must exist).
691  * Since ig_ipmp_ill never changes for a given illg, no locks are needed.
692  */
693 ill_t *
694 ipmp_illgrp_ipmp_ill(ipmp_illgrp_t *illg)
695 {
696 	return (illg->ig_ipmp_ill);
697 }
698 
699 /*
700  * Return a pointer to the next available underlying ill in `illg', or NULL if
701  * one doesn't exist.  Caller must be inside the IPSQ.
702  */
703 ill_t *
704 ipmp_illgrp_next_ill(ipmp_illgrp_t *illg)
705 {
706 	ill_t *ill;
707 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
708 
709 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
710 
711 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
712 	if ((ill = illg->ig_next_ill) != NULL) {
713 		illg->ig_next_ill = list_next(&illg->ig_actif, ill);
714 		if (illg->ig_next_ill == NULL)
715 			illg->ig_next_ill = list_head(&illg->ig_actif);
716 	}
717 	rw_exit(&ipst->ips_ipmp_lock);
718 
719 	return (ill);
720 }
721 
722 /*
723  * Return a held pointer to the next available underlying ill in `illg', or
724  * NULL if one doesn't exist.  Caller need not be inside the IPSQ.
725  */
726 ill_t *
727 ipmp_illgrp_hold_next_ill(ipmp_illgrp_t *illg)
728 {
729 	ill_t *ill;
730 	uint_t i;
731 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
732 
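	/*
	 * Round-robin through the active list, starting at ig_next_ill and
	 * making at most one full pass, until we find an ill that can be
	 * refheld.
	 */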
733 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
734 	for (i = 0; i < illg->ig_nactif; i++) {
735 		ill = illg->ig_next_ill;
736 		illg->ig_next_ill = list_next(&illg->ig_actif, ill);
737 		if (illg->ig_next_ill == NULL)
738 			illg->ig_next_ill = list_head(&illg->ig_actif);
739 
740 		if (ill_check_and_refhold(ill)) {
741 			rw_exit(&ipst->ips_ipmp_lock);
742 			return (ill);
743 		}
744 	}
745 	rw_exit(&ipst->ips_ipmp_lock);
746 
747 	return (NULL);
748 }
749 
750 /*
751  * Return a held pointer to the nominated multicast ill in `illg', or NULL if
752  * one doesn't exist.  Caller need not be inside the IPSQ.
753  */
754 ill_t *
755 ipmp_illgrp_hold_cast_ill(ipmp_illgrp_t *illg)
756 {
757 	ill_t *castill;
758 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
759 
760 	rw_enter(&ipst->ips_ipmp_lock, RW_READER);
761 	castill = illg->ig_cast_ill;
762 	if (castill != NULL && ill_check_and_refhold(castill)) {
763 		rw_exit(&ipst->ips_ipmp_lock);
764 		return (castill);
765 	}
766 	rw_exit(&ipst->ips_ipmp_lock);
767 	return (NULL);
768 }
769 
770 /*
771  * Set the nominated cast ill on `illg' to `castill'.  If `castill' is NULL,
772  * any existing nomination is removed.  Caller must be inside the IPSQ.
773  */
774 static void
775 ipmp_illgrp_set_cast(ipmp_illgrp_t *illg, ill_t *castill)
776 {
777 	ill_t *ocastill = illg->ig_cast_ill;
778 	ill_t *ipmp_ill = illg->ig_ipmp_ill;
779 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
780 
781 	ASSERT(IAM_WRITER_ILL(ipmp_ill));
782 
783 	/*
784 	 * Disable old nominated ill (if any).
785 	 */
786 	if (ocastill != NULL) {
787 		DTRACE_PROBE2(ipmp__illgrp__cast__disable, ipmp_illgrp_t *,
788 		    illg, ill_t *, ocastill);
789 		ASSERT(ocastill->ill_nom_cast);
790 		ocastill->ill_nom_cast = B_FALSE;
791 		/*
792 		 * If the IPMP meta-interface is down, we never did the join,
793 		 * so we must not try to leave.
794 		 */
795 		if (ipmp_ill->ill_dl_up)
796 			ill_leave_multicast(ipmp_ill);
797 
798 		/*
799 		 * Delete any NCEs tied to the old nomination.  We must do this
800 		 * last since ill_leave_multicast() may trigger IREs to be
801 		 * built using ig_cast_ill.
802 		 */
803 		ncec_walk(ocastill, (pfi_t)ipmp_ncec_delete_nonlocal, ocastill,
804 		    ocastill->ill_ipst);
805 	}
806 
807 	/*
808 	 * Set new nomination.
809 	 */
810 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
811 	illg->ig_cast_ill = castill;
812 	rw_exit(&ipst->ips_ipmp_lock);
813 
814 	/*
815 	 * Enable new nominated ill (if any).
816 	 */
817 	if (castill != NULL) {
818 		DTRACE_PROBE2(ipmp__illgrp__cast__enable, ipmp_illgrp_t *,
819 		    illg, ill_t *, castill);
820 		ASSERT(!castill->ill_nom_cast);
821 		castill->ill_nom_cast = B_TRUE;
822 		/*
823 		 * If the IPMP meta-interface is down, the attempt to recover
824 		 * will silently fail but ill_need_recover_multicast will be
825 		 * erroneously cleared -- so check first.
826 		 */
827 		if (ipmp_ill->ill_dl_up)
828 			ill_recover_multicast(ipmp_ill);
829 	}
830 }
831 
832 /*
833  * Create an IPMP ARP entry and add it to the set tracked on `illg'.  If an
834  * entry for the same IP address already exists, destroy it first.  Return the
835  * created IPMP ARP entry, or NULL on failure.
836  */
837 ipmp_arpent_t *
838 ipmp_illgrp_create_arpent(ipmp_illgrp_t *illg, boolean_t proxyarp,
839     ipaddr_t ipaddr, uchar_t *lladdr, size_t lladdr_len, uint16_t flags)
840 {
841 	ipmp_arpent_t *entp, *oentp;
842 
843 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
844 
845 	if ((entp = kmem_alloc(sizeof (ipmp_arpent_t) + lladdr_len,
846 	    KM_NOSLEEP)) == NULL)
847 		return (NULL);
848 
849 	/*
850 	 * Delete any existing ARP entry for this address.
851 	 */
852 	if ((oentp = ipmp_illgrp_lookup_arpent(illg, &ipaddr)) != NULL)
853 		ipmp_illgrp_destroy_arpent(illg, oentp);
854 
855 	/*
856 	 * Prepend the new entry.
857 	 */
858 	entp->ia_ipaddr = ipaddr;
859 	entp->ia_flags = flags;
860 	entp->ia_lladdr_len = lladdr_len;
861 	entp->ia_lladdr = (uchar_t *)&entp[1];
862 	bcopy(lladdr, entp->ia_lladdr, lladdr_len);
863 	entp->ia_proxyarp = proxyarp;
864 	entp->ia_notified = B_TRUE;
865 	list_insert_head(&illg->ig_arpent, entp);
866 	return (entp);
867 }
868 
869 /*
870  * Remove IPMP ARP entry `entp' from the set tracked on `illg' and destroy it.
871  */
872 void
873 ipmp_illgrp_destroy_arpent(ipmp_illgrp_t *illg, ipmp_arpent_t *entp)
874 {
875 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
876 
877 	list_remove(&illg->ig_arpent, entp);
878 	kmem_free(entp, sizeof (ipmp_arpent_t) + entp->ia_lladdr_len);
879 }
880 
881 /*
882  * Mark that ARP has been notified about the IP address on `entp'; `illg' is
883  * taken as a debugging aid for DTrace FBT probes.
884  */
885 /* ARGSUSED */
886 void
887 ipmp_illgrp_mark_arpent(ipmp_illgrp_t *illg, ipmp_arpent_t *entp)
888 {
889 	entp->ia_notified = B_TRUE;
890 }
891 
892 /*
893  * Look up the IPMP ARP entry for IP address `addrp' on `illg'; if `addrp' is
894  * NULL, any IPMP ARP entry is requested.  Return NULL if it does not exist.
895  */
896 ipmp_arpent_t *
897 ipmp_illgrp_lookup_arpent(ipmp_illgrp_t *illg, ipaddr_t *addrp)
898 {
899 	ipmp_arpent_t *entp = list_head(&illg->ig_arpent);
900 
901 	ASSERT(IAM_WRITER_ILL(illg->ig_ipmp_ill));
902 
903 	if (addrp == NULL)
904 		return (entp);
905 
906 	for (; entp != NULL; entp = list_next(&illg->ig_arpent, entp))
907 		if (entp->ia_ipaddr == *addrp)
908 			break;
909 	return (entp);
910 }
911 
912 /*
913  * Refresh ARP entries on `illg' to be distributed across its active
914  * interfaces.  Entries that cannot be refreshed (e.g., because there are no
915  * active interfaces) are marked so that subsequent calls can try again.
916  */
917 void
918 ipmp_illgrp_refresh_arpent(ipmp_illgrp_t *illg)
919 {
920 	ill_t *ill, *ipmp_ill = illg->ig_ipmp_ill;
921 	uint_t paddrlen = ipmp_ill->ill_phys_addr_length;
922 	ipmp_arpent_t *entp;
923 	ncec_t *ncec;
924 	nce_t  *nce;
925 
926 	ASSERT(IAM_WRITER_ILL(ipmp_ill));
927 	ASSERT(!ipmp_ill->ill_isv6);
928 
929 	ill = list_head(&illg->ig_actif);
930 	entp = list_head(&illg->ig_arpent);
931 	for (; entp != NULL; entp = list_next(&illg->ig_arpent, entp)) {
932 		if (ill == NULL || ipmp_ill->ill_ipif_up_count == 0) {
933 			entp->ia_notified = B_FALSE;
934 			continue;
935 		}
936 
937 		ASSERT(paddrlen == ill->ill_phys_addr_length);
938 
939 		/*
940 		 * If this is a proxy ARP entry, we can skip notifying ARP if
941 		 * the entry is already up-to-date.  If it has changed, we
942 		 * update the entry's hardware address before notifying ARP.
943 		 */
944 		if (entp->ia_proxyarp) {
945 			if (bcmp(ill->ill_phys_addr, entp->ia_lladdr,
946 			    paddrlen) == 0 && entp->ia_notified)
947 				continue;
948 			bcopy(ill->ill_phys_addr, entp->ia_lladdr, paddrlen);
949 		}
950 
951 		(void) nce_lookup_then_add_v4(ipmp_ill, entp->ia_lladdr,
952 		    paddrlen, &entp->ia_ipaddr, entp->ia_flags, ND_UNCHANGED,
953 		    &nce);
954 		if (nce == NULL || !entp->ia_proxyarp) {
955 			if (nce != NULL)
956 				nce_refrele(nce);
957 			continue;
958 		}
959 		ncec = nce->nce_common;
960 		mutex_enter(&ncec->ncec_lock);
961 		nce_update(ncec, ND_UNCHANGED, ill->ill_phys_addr);
962 		mutex_exit(&ncec->ncec_lock);
963 		nce_refrele(nce);
964 		ipmp_illgrp_mark_arpent(illg, entp);
965 
966 		if ((ill = list_next(&illg->ig_actif, ill)) == NULL)
967 			ill = list_head(&illg->ig_actif);
968 	}
969 }
970 
971 /*
972  * Return an interface in `illg' with the specified `physaddr', or NULL if one
973  * doesn't exist.  Caller must hold ill_g_lock if it's not inside the IPSQ.
974  */
975 ill_t *
976 ipmp_illgrp_find_ill(ipmp_illgrp_t *illg, uchar_t *physaddr, uint_t paddrlen)
977 {
978 	ill_t *ill;
979 	ill_t *ipmp_ill = illg->ig_ipmp_ill;
980 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
981 
982 	ASSERT(IAM_WRITER_ILL(ipmp_ill) || RW_LOCK_HELD(&ipst->ips_ill_g_lock));
983 
984 	ill = list_head(&illg->ig_if);
985 	for (; ill != NULL; ill = list_next(&illg->ig_if, ill)) {
986 		if (ill->ill_phys_addr_length == paddrlen &&
987 		    bcmp(ill->ill_phys_addr, physaddr, paddrlen) == 0)
988 			return (ill);
989 	}
990 	return (NULL);
991 }
992 
993 /*
994  * Asynchronously update the MTU for an IPMP ill by injecting a DL_NOTIFY_IND.
995  * Caller must be inside the IPSQ unless this is initialization.
996  */
997 static void
998 ipmp_illgrp_set_mtu(ipmp_illgrp_t *illg, uint_t mtu)
999 {
1000 	ill_t *ill = illg->ig_ipmp_ill;
1001 	mblk_t *mp;
1002 
1003 	ASSERT(illg->ig_mtu == 0 || IAM_WRITER_ILL(ill));
1004 
1005 	/*
1006 	 * If allocation fails, we have bigger problems than MTU.
1007 	 */
1008 	if ((mp = ip_dlnotify_alloc(DL_NOTE_SDU_SIZE, mtu)) != NULL) {
1009 		illg->ig_mtu = mtu;
1010 		put(ill->ill_rq, mp);
1011 	}
1012 }
1013 
1014 /*
1015  * Recalculate the IPMP group MTU for `illg', and update its associated IPMP
1016  * ill MTU if necessary.
1017  */
1018 void
1019 ipmp_illgrp_refresh_mtu(ipmp_illgrp_t *illg)
1020 {
1021 	ill_t *ill;
1022 	ill_t *ipmp_ill = illg->ig_ipmp_ill;
1023 	uint_t mtu = 0;
1024 
1025 	ASSERT(IAM_WRITER_ILL(ipmp_ill));
1026 
1027 	/*
1028 	 * Since ill_mtu can only change under ill_lock, we hold ill_lock
1029 	 * for each ill as we iterate through the list.  Any changes to the
1030 	 * ill_mtu will also trigger an update, so even if we missed it
1031 	 * this time around, the update will catch it.
1032 	 */
1033 	ill = list_head(&illg->ig_if);
1034 	for (; ill != NULL; ill = list_next(&illg->ig_if, ill)) {
1035 		mutex_enter(&ill->ill_lock);
1036 		if (mtu == 0 || ill->ill_mtu < mtu)
1037 			mtu = ill->ill_mtu;
1038 		mutex_exit(&ill->ill_lock);
1039 	}
1040 
1041 	/*
1042 	 * MTU must be at least the minimum MTU.
1043 	 */
1044 	mtu = MAX(mtu, ipmp_ill->ill_isv6 ? IPV6_MIN_MTU : IP_MIN_MTU);
1045 
1046 	if (illg->ig_mtu != mtu)
1047 		ipmp_illgrp_set_mtu(illg, mtu);
1048 }
1049 
1050 /*
1051  * Link illgrp `illg' to IPMP group `grp'.  To simplify the caller, silently
1052  * allow the same link to be established more than once.
1053  */
1054 void
1055 ipmp_illgrp_link_grp(ipmp_illgrp_t *illg, ipmp_grp_t *grp)
1056 {
1057 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
1058 
1059 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
1060 
1061 	if (illg->ig_ipmp_ill->ill_isv6) {
1062 		ASSERT(grp->gr_v6 == NULL || grp->gr_v6 == illg);
1063 		grp->gr_v6 = illg;
1064 	} else {
1065 		ASSERT(grp->gr_v4 == NULL || grp->gr_v4 == illg);
1066 		grp->gr_v4 = illg;
1067 	}
1068 }
1069 
1070 /*
1071  * Unlink illgrp `illg' from its IPMP group.  Return an errno if the illgrp
1072  * cannot be unlinked (e.g., because there are still interfaces using it).
1073  */
1074 int
1075 ipmp_illgrp_unlink_grp(ipmp_illgrp_t *illg)
1076 {
1077 	ipmp_grp_t *grp = illg->ig_ipmp_ill->ill_phyint->phyint_grp;
1078 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
1079 
1080 	ASSERT(RW_WRITE_HELD(&ipst->ips_ipmp_lock));
1081 
1082 	if (illg->ig_ipmp_ill->ill_isv6) {
1083 		if (grp->gr_nv6 + grp->gr_pendv6 != 0)
1084 			return (EBUSY);
1085 		grp->gr_v6 = NULL;
1086 	} else {
1087 		if (grp->gr_nv4 + grp->gr_pendv4 != 0)
1088 			return (EBUSY);
1089 		grp->gr_v4 = NULL;
1090 	}
1091 	return (0);
1092 }
1093 
1094 /*
1095  * Place `ill' into `illg', and rebalance the data addresses on `illg'
1096  * to be spread evenly across the ills now in it.  Also, adjust the IPMP
1097  * ill as necessary to account for `ill' (e.g., MTU).
1098  */
1099 void
1100 ipmp_ill_join_illgrp(ill_t *ill, ipmp_illgrp_t *illg)
1101 {
1102 	ill_t *ipmp_ill;
1103 	ipif_t *ipif;
1104 	ip_stack_t *ipst = ill->ill_ipst;
1105 
1106 	/* IS_UNDER_IPMP() requires ill_grp to be non-NULL */
1107 	ASSERT(!IS_IPMP(ill) && ill->ill_phyint->phyint_grp != NULL);
1108 	ASSERT(IAM_WRITER_ILL(ill));
1109 	ASSERT(ill->ill_grp == NULL);
1110 
1111 	ipmp_ill = illg->ig_ipmp_ill;
1112 
1113 	/*
1114 	 * Account for `ill' joining the illgrp.
1115 	 */
1116 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1117 	if (ill->ill_isv6)
1118 		ill->ill_phyint->phyint_grp->gr_nv6++;
1119 	else
1120 		ill->ill_phyint->phyint_grp->gr_nv4++;
1121 	rw_exit(&ipst->ips_ipmp_lock);
1122 
1123 	/*
1124 	 * Ensure the ILLF_ROUTER flag remains consistent across the group.
1125 	 */
1126 	mutex_enter(&ill->ill_lock);
1127 	if (ipmp_ill->ill_flags & ILLF_ROUTER)
1128 		ill->ill_flags |= ILLF_ROUTER;
1129 	else
1130 		ill->ill_flags &= ~ILLF_ROUTER;
1131 	mutex_exit(&ill->ill_lock);
1132 
1133 	/*
1134 	 * Blow away all multicast memberships that currently exist on `ill'.
1135 	 * This may seem odd, but it's consistent with the application view
1136 	 * that `ill' no longer exists (e.g., due to ipmp_ill_rtsaddrmsg()).
1137 	 * The ill_grp_pending bit prevents multicast group joins after
1138 	 * update_conn_ill() and before ill_grp assignment.
1139 	 */
1140 	mutex_enter(&ill->ill_mcast_serializer);
1141 	ill->ill_grp_pending = 1;
1142 	mutex_exit(&ill->ill_mcast_serializer);
1143 	update_conn_ill(ill, ill->ill_ipst);
1144 	if (ill->ill_isv6) {
1145 		reset_mrt_ill(ill);
1146 	} else {
1147 		ipif = ill->ill_ipif;
1148 		for (; ipif != NULL; ipif = ipif->ipif_next) {
1149 			reset_mrt_vif_ipif(ipif);
1150 		}
1151 	}
1152 	ip_purge_allmulti(ill);
1153 
1154 	/*
1155 	 * Borrow the first ill's ill_phys_addr_length value for the illgrp's
1156 	 * physical address length.  All other ills must have the same value,
1157 	 * since they are required to all be the same mactype.  Also update
1158 	 * the IPMP ill's MTU and CoS marking, if necessary.
1159 	 */
1160 	if (list_is_empty(&illg->ig_if)) {
1161 		ASSERT(ipmp_ill->ill_phys_addr_length == 0);
1162 		/*
1163 		 * NOTE: we leave ill_phys_addr NULL since the IPMP group
1164 		 * doesn't have a physical address.  This means that code must
1165 		 * not assume that ill_phys_addr is non-NULL just because
1166 		 * ill_phys_addr_length is non-zero.  Likewise for ill_nd_lla.
1167 		 */
1168 		ipmp_ill->ill_phys_addr_length = ill->ill_phys_addr_length;
1169 		ipmp_ill->ill_nd_lla_len = ill->ill_phys_addr_length;
1170 		ipmp_ill->ill_type = ill->ill_type;
1171 
1172 		if (ill->ill_flags & ILLF_COS_ENABLED) {
1173 			mutex_enter(&ipmp_ill->ill_lock);
1174 			ipmp_ill->ill_flags |= ILLF_COS_ENABLED;
1175 			mutex_exit(&ipmp_ill->ill_lock);
1176 		}
1177 		ipmp_illgrp_set_mtu(illg, ill->ill_mtu);
1178 	} else {
1179 		ASSERT(ipmp_ill->ill_phys_addr_length ==
1180 		    ill->ill_phys_addr_length);
1181 		ASSERT(ipmp_ill->ill_type == ill->ill_type);
1182 
1183 		if (!(ill->ill_flags & ILLF_COS_ENABLED)) {
1184 			mutex_enter(&ipmp_ill->ill_lock);
1185 			ipmp_ill->ill_flags &= ~ILLF_COS_ENABLED;
1186 			mutex_exit(&ipmp_ill->ill_lock);
1187 		}
1188 		if (illg->ig_mtu > ill->ill_mtu)
1189 			ipmp_illgrp_set_mtu(illg, ill->ill_mtu);
1190 	}
1191 
1192 	rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
1193 	list_insert_tail(&illg->ig_if, ill);
1194 	ill->ill_grp = illg;
1195 	rw_exit(&ipst->ips_ill_g_lock);
1196 
1197 	mutex_enter(&ill->ill_mcast_serializer);
1198 	ill->ill_grp_pending = 0;
1199 	mutex_exit(&ill->ill_mcast_serializer);
1200 
1201 	/*
1202 	 * Hide the IREs on `ill' so that we don't accidentally find them when
1203 	 * sending data traffic.
1204 	 */
1205 	ire_walk_ill(MATCH_IRE_ILL, 0, ipmp_ill_ire_mark_testhidden, ill, ill);
1206 
1207 	ipmp_ill_refresh_active(ill);
1208 }
1209 
1210 /*
1211  * Remove `ill' from its illgrp, and rebalance the data addresses in that
1212  * illgrp to be spread evenly across the remaining ills.  Also, adjust the
1213  * IPMP ill as necessary now that `ill' is removed (e.g., MTU).
1214  */
1215 void
1216 ipmp_ill_leave_illgrp(ill_t *ill)
1217 {
1218 	ill_t *ipmp_ill;
1219 	ipif_t *ipif;
1220 	ipmp_arpent_t *entp;
1221 	ipmp_illgrp_t *illg = ill->ill_grp;
1222 	ip_stack_t *ipst = IPMP_ILLGRP_TO_IPST(illg);
1223 
1224 	ASSERT(IS_UNDER_IPMP(ill));
1225 	ASSERT(IAM_WRITER_ILL(ill));
1226 	ASSERT(illg != NULL);
1227 
1228 	ipmp_ill = illg->ig_ipmp_ill;
1229 
1230 	/*
1231 	 * Cancel IPMP-specific ill timeouts.
1232 	 */
1233 	(void) untimeout(ill->ill_refresh_tid);
1234 
1235 	/*
1236 	 * Expose any previously-hidden IREs on `ill'.
1237 	 */
1238 	ire_walk_ill(MATCH_IRE_ILL, 0, ipmp_ill_ire_clear_testhidden, ill, ill);
1239 
1240 	/*
1241 	 * Ensure the multicast state for each ipif on `ill' is down so that
1242 	 * our ipif_multicast_up() (once `ill' leaves the group) will rejoin
1243 	 * all eligible groups.
1244 	 */
1245 	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
1246 		if (ipif->ipif_flags & IPIF_UP)
1247 			ipif_multicast_down(ipif);
1248 
1249 	/*
1250 	 * Account for `ill' leaving the illgrp.
1251 	 */
1252 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1253 	if (ill->ill_isv6)
1254 		ill->ill_phyint->phyint_grp->gr_nv6--;
1255 	else
1256 		ill->ill_phyint->phyint_grp->gr_nv4--;
1257 	rw_exit(&ipst->ips_ipmp_lock);
1258 
1259 	/*
1260 	 * Pull `ill' out of the interface lists.
1261 	 */
1262 	if (list_link_active(&ill->ill_actnode))
1263 		ipmp_ill_deactivate(ill);
1264 	rw_enter(&ipst->ips_ill_g_lock, RW_WRITER);
1265 	list_remove(&illg->ig_if, ill);
1266 	ill->ill_grp = NULL;
1267 	rw_exit(&ipst->ips_ill_g_lock);
1268 
1269 	/*
1270 	 * Re-establish multicast memberships that were previously being
1271 	 * handled by the IPMP meta-interface.
1272 	 */
1273 	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
1274 		if (ipif->ipif_flags & IPIF_UP)
1275 			ipif_multicast_up(ipif);
1276 
1277 	/*
1278 	 * Refresh the group MTU based on the new interface list.
1279 	 */
1280 	ipmp_illgrp_refresh_mtu(illg);
1281 
1282 	if (list_is_empty(&illg->ig_if)) {
1283 		/*
1284 		 * No ills left in the illgrp; we no longer have a physical
1285 		 * address length, nor can we support ARP, CoS, or anything
1286 		 * else that depends on knowing the link layer type.
1287 		 */
1288 		while ((entp = ipmp_illgrp_lookup_arpent(illg, NULL)) != NULL)
1289 			ipmp_illgrp_destroy_arpent(illg, entp);
1290 
1291 		ipmp_ill->ill_phys_addr_length = 0;
1292 		ipmp_ill->ill_nd_lla_len = 0;
1293 		ipmp_ill->ill_type = IFT_OTHER;
1294 		mutex_enter(&ipmp_ill->ill_lock);
1295 		ipmp_ill->ill_flags &= ~ILLF_COS_ENABLED;
1296 		mutex_exit(&ipmp_ill->ill_lock);
1297 	} else {
1298 		/*
1299 		 * If `ill' didn't support CoS, see if it can now be enabled.
1300 		 */
1301 		if (!(ill->ill_flags & ILLF_COS_ENABLED)) {
1302 			ASSERT(!(ipmp_ill->ill_flags & ILLF_COS_ENABLED));
1303 
1304 			ill = list_head(&illg->ig_if);
1305 			do {
1306 				if (!(ill->ill_flags & ILLF_COS_ENABLED))
1307 					break;
1308 			} while ((ill = list_next(&illg->ig_if, ill)) != NULL);
1309 
1310 			if (ill == NULL) {
1311 				mutex_enter(&ipmp_ill->ill_lock);
1312 				ipmp_ill->ill_flags |= ILLF_COS_ENABLED;
1313 				mutex_exit(&ipmp_ill->ill_lock);
1314 			}
1315 		}
1316 	}
1317 }
1318 
1319 /*
1320  * Check if `ill' should be active, and activate or deactivate if need be.
1321  * Return B_FALSE if a refresh was necessary but could not be performed.
1322  */
1323 static boolean_t
1324 ipmp_ill_try_refresh_active(ill_t *ill)
1325 {
1326 	boolean_t refreshed = B_TRUE;
1327 
1328 	ASSERT(IAM_WRITER_ILL(ill));
1329 	ASSERT(IS_UNDER_IPMP(ill));
1330 
1331 	if (ipmp_ill_is_active(ill)) {
1332 		if (!list_link_active(&ill->ill_actnode))
1333 			refreshed = ipmp_ill_activate(ill);
1334 	} else {
1335 		if (list_link_active(&ill->ill_actnode))
1336 			ipmp_ill_deactivate(ill);
1337 	}
1338 
1339 	return (refreshed);
1340 }
1341 
1342 /*
1343  * Check if `ill' should be active, and activate or deactivate if need be.
1344  * If the refresh fails, schedule a timer to try again later.
1345  */
1346 void
1347 ipmp_ill_refresh_active(ill_t *ill)
1348 {
1349 	if (!ipmp_ill_try_refresh_active(ill))
1350 		ipmp_ill_refresh_active_timer_start(ill);
1351 }
1352 
1353 /*
1354  * Retry ipmp_ill_try_refresh_active() on the ill named by `ill_arg'.
1355  */
1356 static void
1357 ipmp_ill_refresh_active_timer(void *ill_arg)
1358 {
1359 	ill_t *ill = ill_arg;
1360 	boolean_t refreshed = B_FALSE;
1361 
1362 	/*
1363 	 * Clear ill_refresh_tid to indicate that no timeout is pending
1364 	 * (another thread could schedule a new timeout while we're still
1365 	 * running, but that's harmless).  If the ill is going away, bail.
1366 	 */
1367 	mutex_enter(&ill->ill_lock);
1368 	ill->ill_refresh_tid = 0;
1369 	if (ill->ill_state_flags & ILL_CONDEMNED) {
1370 		mutex_exit(&ill->ill_lock);
1371 		return;
1372 	}
1373 	mutex_exit(&ill->ill_lock);
1374 
1375 	if (ipsq_try_enter(NULL, ill, NULL, NULL, NULL, NEW_OP, B_FALSE)) {
1376 		refreshed = ipmp_ill_try_refresh_active(ill);
1377 		ipsq_exit(ill->ill_phyint->phyint_ipsq);
1378 	}
1379 
1380 	/*
1381 	 * If the refresh failed, schedule another attempt.
1382 	 */
1383 	if (!refreshed)
1384 		ipmp_ill_refresh_active_timer_start(ill);
1385 }
1386 
1387 /*
1388  * Schedule a timer to retry ipmp_ill_try_refresh_active() on `ill'.
1389  */
1390 static void
1391 ipmp_ill_refresh_active_timer_start(ill_t *ill)
1392 {
1393 	mutex_enter(&ill->ill_lock);
1394 
1395 	/*
1396 	 * If the ill is going away or a refresh is already scheduled, bail.
1397 	 */
1398 	if (ill->ill_refresh_tid != 0 ||
1399 	    (ill->ill_state_flags & ILL_CONDEMNED)) {
1400 		mutex_exit(&ill->ill_lock);
1401 		return;
1402 	}
1403 
1404 	ill->ill_refresh_tid = timeout(ipmp_ill_refresh_active_timer, ill,
1405 	    SEC_TO_TICK(IPMP_ILL_REFRESH_TIMEOUT));
1406 
1407 	mutex_exit(&ill->ill_lock);
1408 }
1409 
1410 /*
1411  * Activate `ill' so it will be used to send and receive data traffic.  Return
1412  * B_FALSE if `ill' cannot be activated.  Note that we allocate any messages
1413  * needed to deactivate `ill' here as well so that deactivation cannot fail.
1414  */
1415 static boolean_t
1416 ipmp_ill_activate(ill_t *ill)
1417 {
1418 	ipif_t		*ipif;
1419 	mblk_t		*linkupmp = NULL, *linkdownmp = NULL;
1420 	ipmp_grp_t	*grp = ill->ill_phyint->phyint_grp;
1421 	ipmp_illgrp_t	*illg = ill->ill_grp;
1422 	ill_t		*maxill;
1423 	ip_stack_t	*ipst = IPMP_ILLGRP_TO_IPST(illg);
1424 
1425 	ASSERT(IAM_WRITER_ILL(ill));
1426 	ASSERT(IS_UNDER_IPMP(ill));
1427 
1428 	/*
1429 	 * If this will be the first active interface in the group, allocate
1430 	 * the link-up and link-down messages.
1431 	 */
1432 	if (grp->gr_nactif == 0) {
1433 		linkupmp = ip_dlnotify_alloc(DL_NOTE_LINK_UP, 0);
1434 		linkdownmp = ip_dlnotify_alloc(DL_NOTE_LINK_DOWN, 0);
1435 		if (linkupmp == NULL || linkdownmp == NULL)
1436 			goto fail;
1437 	}
1438 
1439 	if (list_is_empty(&illg->ig_actif)) {
1440 		/*
1441 		 * Now that we have an active ill, nominate it for multicast
1442 		 * and broadcast duties.  Do this before ipmp_ill_bind_ipif()
1443 		 * since that may need to send multicast packets (e.g., IPv6
1444 		 * neighbor discovery probes).
1445 		 */
1446 		ipmp_illgrp_set_cast(illg, ill);
1447 
1448 		/*
1449 		 * This is the first active ill in the illgrp -- add 'em all.
1450 		 * We can access/walk ig_ipmp_ill's ipif list since we're
1451 		 * writer on its IPSQ as well.
1452 		 */
1453 		ipif = illg->ig_ipmp_ill->ill_ipif;
1454 		for (; ipif != NULL; ipif = ipif->ipif_next)
1455 			if (ipmp_ipif_is_up_dataaddr(ipif))
1456 				ipmp_ill_bind_ipif(ill, ipif, Res_act_initial);
1457 	} else {
1458 		/*
1459 		 * Redistribute the addresses by moving them from the ill with
1460 		 * the most addresses until the ill being activated is at the
1461 		 * same level as the rest of the ills.
1462 		 */
1463 		for (;;) {
1464 			maxill = ipmp_illgrp_max_ill(illg);
1465 			ASSERT(maxill != NULL);
1466 			if (ill->ill_bound_cnt + 1 >= maxill->ill_bound_cnt)
1467 				break;
1468 			ipif = ipmp_ill_unbind_ipif(maxill, NULL, B_TRUE);
1469 			ipmp_ill_bind_ipif(ill, ipif, Res_act_rebind);
1470 		}
1471 	}
1472 
1473 	/*
1474 	 * Put the interface in the active list.
1475 	 */
1476 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1477 	list_insert_tail(&illg->ig_actif, ill);
1478 	illg->ig_nactif++;
1479 	illg->ig_next_ill = ill;
1480 	rw_exit(&ipst->ips_ipmp_lock);
1481 
1482 	/*
1483 	 * Refresh static/proxy ARP entries to use `ill', if need be.
1484 	 */
1485 	if (!ill->ill_isv6)
1486 		ipmp_illgrp_refresh_arpent(illg);
1487 
1488 	/*
1489 	 * Finally, mark the group link up, if necessary.
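	 * We also stash the preallocated link-down message in gr_linkdownmp
	 * so that a later ipmp_ill_deactivate() can post it without having
	 * to allocate.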
1490 	 */
1491 	if (grp->gr_nactif++ == 0) {
1492 		ASSERT(grp->gr_linkdownmp == NULL);
1493 		grp->gr_linkdownmp = linkdownmp;
1494 		put(illg->ig_ipmp_ill->ill_rq, linkupmp);
1495 	}
1496 	return (B_TRUE);
1497 fail:
1498 	freemsg(linkupmp);
1499 	freemsg(linkdownmp);
1500 	return (B_FALSE);
1501 }
1502 
1503 /*
1504  * Deactivate `ill' so it will not be used to send or receive data traffic.
1505  */
1506 static void
1507 ipmp_ill_deactivate(ill_t *ill)
1508 {
1509 	ill_t		*minill, *ipmp_ill;
1510 	ipif_t		*ipif, *ubnextipif, *ubheadipif = NULL;
1511 	mblk_t		*mp;
1512 	ipmp_grp_t	*grp = ill->ill_phyint->phyint_grp;
1513 	ipmp_illgrp_t	*illg = ill->ill_grp;
1514 	ip_stack_t	*ipst = IPMP_ILLGRP_TO_IPST(illg);
1515 
1516 	ASSERT(IAM_WRITER_ILL(ill));
1517 	ASSERT(IS_UNDER_IPMP(ill));
1518 
1519 	ipmp_ill = illg->ig_ipmp_ill;
1520 
1521 	/*
1522 	 * Pull the interface out of the active list.
1523 	 */
1524 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1525 	list_remove(&illg->ig_actif, ill);
1526 	illg->ig_nactif--;
1527 	illg->ig_next_ill = list_head(&illg->ig_actif);
1528 	rw_exit(&ipst->ips_ipmp_lock);
1529 
1530 	/*
1531 	 * If the ill that's being deactivated had been nominated for
1532 	 * multicast/broadcast, nominate a new one.
1533 	 */
1534 	if (ill == illg->ig_cast_ill)
1535 		ipmp_illgrp_set_cast(illg, list_head(&illg->ig_actif));
1536 
1537 	/*
1538 	 * Delete all nce_t entries using this ill, so that the next attempt
1539 	 * to send data traffic will revalidate cached nce's.
1540 	 */
1541 	nce_flush(ill, B_TRUE);
1542 
1543 	/*
1544 	 * Unbind all of the ipifs bound to this ill, and save 'em in a list;
1545 	 * we'll rebind them after we tell the resolver the ill is no longer
1546 	 * active.  We must do things in this order or the resolver could
1547 	 * accidentally rebind to the ill we're trying to remove if multiple
1548 	 * ills in the group have the same hardware address (which is
1549 	 * unsupported, but shouldn't lead to a wedged machine).
1550 	 */
1551 	while ((ipif = ipmp_ill_unbind_ipif(ill, NULL, B_TRUE)) != NULL) {
1552 		ipif->ipif_bound_next = ubheadipif;
1553 		ubheadipif = ipif;
1554 	}
1555 
1556 	if (!ill->ill_isv6) {
1557 		/*
1558 		 * Refresh static/proxy ARP entries that had been using `ill'.
1559 		 */
1560 		ipmp_illgrp_refresh_arpent(illg);
1561 	}
1562 
1563 	/*
1564 	 * Rebind each ipif from the deactivated ill to the active ill with
1565 	 * the fewest ipifs.  If there are no active ills, the ipifs will
1566 	 * remain unbound.
1567 	 */
1568 	for (ipif = ubheadipif; ipif != NULL; ipif = ubnextipif) {
1569 		ubnextipif = ipif->ipif_bound_next;
1570 		ipif->ipif_bound_next = NULL;
1571 
1572 		if ((minill = ipmp_illgrp_min_ill(illg)) != NULL)
1573 			ipmp_ill_bind_ipif(minill, ipif, Res_act_rebind);
1574 	}
1575 
1576 	/*
1577 	 * Remove any IRE_IF_CLONEs for this ill since they might have an
1578 	 * ire_nce_cache/nce_common which refers to another ill in the group.
1579 	 */
1580 	ire_walk_ill(MATCH_IRE_TYPE, IRE_IF_CLONE, ill_downi_if_clone, ill,
1581 	    ill);
1582 
1583 	/*
1584 	 * Finally, if there are no longer any active interfaces, then delete
1585 	 * any NCECs associated with the group and mark the group link down.
1586 	 */
1587 	if (--grp->gr_nactif == 0) {
1588 		ncec_walk(ipmp_ill, (pfi_t)ncec_delete_per_ill, ipmp_ill, ipst);
1589 		mp = grp->gr_linkdownmp;
1590 		grp->gr_linkdownmp = NULL;
1591 		ASSERT(mp != NULL);
1592 		put(ipmp_ill->ill_rq, mp);
1593 	}
1594 }
1595 
1596 /*
1597  * Send the routing socket messages needed to make `ill' "appear" (RTM_ADD)
1598  * or "disappear" (RTM_DELETE) to non-IPMP-aware routing socket listeners.
1599  */
1600 static void
1601 ipmp_ill_rtsaddrmsg(ill_t *ill, int cmd)
1602 {
1603 	ipif_t *ipif;
1604 
1605 	ASSERT(IAM_WRITER_ILL(ill));
1606 	ASSERT(cmd == RTM_ADD || cmd == RTM_DELETE);
1607 
1608 	/*
1609 	 * If `ill' is truly down, there are no messages to generate since:
1610 	 *
1611 	 * 1. If cmd == RTM_DELETE, then we're supposed to hide the interface
1612 	 *    and its addresses by bringing them down.  But that's already
1613 	 *    true, so there's nothing to hide.
1614 	 *
1615 	 * 2. If cmd == RTM_ADD, then we're supposed to generate messages
1616 	 *    indicating that any previously-hidden up addresses are again
1617 	 *    back up (along with the interface).  But they aren't, so
1618 	 *    there's nothing to expose.
1619 	 */
1620 	if (ill->ill_ipif_up_count == 0)
1621 		return;
1622 
1623 	if (cmd == RTM_ADD)
1624 		ip_rts_xifmsg(ill->ill_ipif, IPIF_UP, 0, RTSQ_NORMAL);
1625 
1626 	for (ipif = ill->ill_ipif; ipif != NULL; ipif = ipif->ipif_next)
1627 		if (ipif->ipif_flags & IPIF_UP)
1628 			ip_rts_newaddrmsg(cmd, 0, ipif, RTSQ_NORMAL);
1629 
1630 	if (cmd == RTM_DELETE)
1631 		ip_rts_xifmsg(ill->ill_ipif, 0, IPIF_UP, RTSQ_NORMAL);
1632 }
1633 
1634 /*
1635  * Bind the address named by `ipif' to the underlying ill named by `ill'.
1636  * If `act' is Res_act_none, don't notify the resolver.  Otherwise, `act'
1637  * will indicate to the resolver whether this is an initial bringup of
1638  * `ipif', or just a rebind to another ill.
1639  */
1640 static void
1641 ipmp_ill_bind_ipif(ill_t *ill, ipif_t *ipif, enum ip_resolver_action act)
1642 {
1643 	int err = 0;
1644 	ip_stack_t *ipst = ill->ill_ipst;
1645 
1646 	ASSERT(IAM_WRITER_ILL(ill) && IAM_WRITER_IPIF(ipif));
1647 	ASSERT(IS_UNDER_IPMP(ill) && IS_IPMP(ipif->ipif_ill));
1648 	ASSERT(act == Res_act_none || ipmp_ipif_is_up_dataaddr(ipif));
1649 	ASSERT(ipif->ipif_bound_ill == NULL);
1650 	ASSERT(ipif->ipif_bound_next == NULL);
1651 
1652 	ipif->ipif_bound_next = ill->ill_bound_ipif;
1653 	ill->ill_bound_ipif = ipif;
1654 	ill->ill_bound_cnt++;
1655 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1656 	ipif->ipif_bound_ill = ill;
1657 	rw_exit(&ipst->ips_ipmp_lock);
1658 
1659 	/*
1660 	 * If necessary, tell ARP/NDP about the new mapping.  Note that
1661 	 * ipif_resolver_up() cannot fail for IPv6 ills.
1662 	 */
1663 	if (act != Res_act_none) {
1664 		if (ill->ill_isv6) {
1665 			VERIFY(ipif_resolver_up(ipif, act) == 0);
1666 			err = ipif_ndp_up(ipif, act == Res_act_initial);
1667 		} else {
1668 			err = ipif_resolver_up(ipif, act);
1669 		}
1670 
1671 		/*
1672 		 * Since ipif_ndp_up() never returns EINPROGRESS and
1673 		 * ipif_resolver_up() only returns EINPROGRESS when the
1674 		 * associated ill is not up, we should never be here with
1675 		 * EINPROGRESS.  We rely on this to simplify the design.
1676 		 */
1677 		ASSERT(err != EINPROGRESS);
1678 	}
1679 	/* TODO: retry binding on failure? when? */
1680 	ipif->ipif_bound = (err == 0);
1681 }
1682 
1683 /*
1684  * Unbind the address named by `ipif' from the underlying ill named by `ill'.
1685  * If `ipif' is NULL, then an arbitrary ipif on `ill' is unbound and returned.
1686  * If no ipifs are bound to `ill', NULL is returned.  If `notifyres' is
1687  * B_TRUE, notify the resolver about the change.
1688  */
1689 static ipif_t *
1690 ipmp_ill_unbind_ipif(ill_t *ill, ipif_t *ipif, boolean_t notifyres)
1691 {
1692 	ipif_t *previpif;
1693 	ip_stack_t *ipst = ill->ill_ipst;
1694 
1695 	ASSERT(IAM_WRITER_ILL(ill));
1696 	ASSERT(IS_UNDER_IPMP(ill));
1697 
1698 	/*
1699 	 * If necessary, find an ipif to unbind.
1700 	 */
1701 	if (ipif == NULL) {
1702 		if ((ipif = ill->ill_bound_ipif) == NULL) {
1703 			ASSERT(ill->ill_bound_cnt == 0);
1704 			return (NULL);
1705 		}
1706 	}
1707 
1708 	ASSERT(IAM_WRITER_IPIF(ipif));
1709 	ASSERT(IS_IPMP(ipif->ipif_ill));
1710 	ASSERT(ipif->ipif_bound_ill == ill);
1711 	ASSERT(ill->ill_bound_cnt > 0);
1712 
1713 	/*
1714 	 * Unbind it.
1715 	 */
1716 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1717 	ipif->ipif_bound_ill = NULL;
1718 	rw_exit(&ipst->ips_ipmp_lock);
1719 	ill->ill_bound_cnt--;
1720 
1721 	if (ill->ill_bound_ipif == ipif) {
1722 		ill->ill_bound_ipif = ipif->ipif_bound_next;
1723 	} else {
1724 		previpif = ill->ill_bound_ipif;
1725 		while (previpif->ipif_bound_next != ipif)
1726 			previpif = previpif->ipif_bound_next;
1727 
1728 		previpif->ipif_bound_next = ipif->ipif_bound_next;
1729 	}
1730 	ipif->ipif_bound_next = NULL;
1731 
1732 	/*
1733 	 * If requested, notify the resolvers (provided we're bound).
1734 	 */
1735 	if (notifyres && ipif->ipif_bound) {
1736 		if (ill->ill_isv6)
1737 			ipif_ndp_down(ipif);
1738 		else
1739 			(void) ipif_arp_down(ipif);
1740 	}
1741 	ipif->ipif_bound = B_FALSE;
1742 
1743 	return (ipif);
1744 }
1745 
1746 /*
1747  * Check if `ill' is active.  Caller must hold ill_lock and phyint_lock if
1748  * it's not inside the IPSQ.  Since ipmp_ill_try_refresh_active() calls this
1749  * to determine whether an ill should be considered active, other consumers
1750  * may race and learn about an ill that should be deactivated/activated before
1751  * IPMP has performed the activation/deactivation.  This should be safe though
1752  * since at worst e.g. ire_atomic_start() will prematurely delete an IRE that
1753  * would've been cleaned up by ipmp_ill_deactivate().
1754  */
1755 boolean_t
1756 ipmp_ill_is_active(ill_t *ill)
1757 {
1758 	phyint_t *phyi = ill->ill_phyint;
1759 
1760 	ASSERT(IS_UNDER_IPMP(ill));
1761 	ASSERT(IAM_WRITER_ILL(ill) ||
1762 	    (MUTEX_HELD(&ill->ill_lock) && MUTEX_HELD(&phyi->phyint_lock)));
1763 
1764 	/*
1765 	 * Note that PHYI_RUNNING isn't checked since we rely on in.mpathd to
1766 	 * set PHYI_FAILED whenever PHYI_RUNNING is cleared.  This allows the
1767 	 * link flapping logic to be just in in.mpathd and allows us to ignore
1768 	 * changes to PHYI_RUNNING.
1769 	 */
1770 	return (!(ill->ill_ipif_up_count == 0 ||
1771 	    (phyi->phyint_flags & (PHYI_OFFLINE|PHYI_INACTIVE|PHYI_FAILED))));
1772 }
1773 
1774 /*
1775  * IRE walker callback: set ire_testhidden on IRE_HIDDEN_TYPE IREs associated
1776  * with `ill_arg'.
1777  */
1778 static void
1779 ipmp_ill_ire_mark_testhidden(ire_t *ire, char *ill_arg)
1780 {
1781 	ill_t *ill = (ill_t *)ill_arg;
1782 
1783 	ASSERT(IAM_WRITER_ILL(ill));
1784 	ASSERT(!IS_IPMP(ill));
1785 
1786 	if (ire->ire_ill != ill)
1787 		return;
1788 
1789 	if (IRE_HIDDEN_TYPE(ire->ire_type)) {
1790 		DTRACE_PROBE1(ipmp__mark__testhidden, ire_t *, ire);
1791 		ire->ire_testhidden = B_TRUE;
1792 	}
1793 }
1794 
1795 /*
1796  * IRE walker callback: clear ire_testhidden if the IRE has a source address
1797  * on `ill_arg'.
1798  */
1799 static void
1800 ipmp_ill_ire_clear_testhidden(ire_t *ire, char *ill_arg)
1801 {
1802 	ill_t *ill = (ill_t *)ill_arg;
1803 
1804 	ASSERT(IAM_WRITER_ILL(ill));
1805 	ASSERT(!IS_IPMP(ill));
1806 
1807 	if (ire->ire_ill == ill) {
1808 		DTRACE_PROBE1(ipmp__clear__testhidden, ire_t *, ire);
1809 		ire->ire_testhidden = B_FALSE;
1810 	}
1811 }
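
/*
 * These two callbacks are presumably handed to the generic IRE walker
 * (e.g. ire_walk_ill()) from ipmp_ill_activate()/ipmp_ill_deactivate(), so
 * that IREs tied to an underlying ill can be hidden from, or exposed again
 * to, normal IRE lookups as the ill's role in the group changes.
 */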
1812 
1813 /*
1814  * Return a held pointer to the IPMP ill for underlying interface `ill', or
1815  * NULL if one doesn't exist.  (Unfortunately, this function needs to take an
1816  * underlying ill rather than an ipmp_illgrp_t because an underlying ill's
1817  * ill_grp pointer may become stale when not inside an IPSQ and not holding
1818  * ipmp_lock.)  Caller need not be inside the IPSQ.
1819  */
1820 ill_t *
1821 ipmp_ill_hold_ipmp_ill(ill_t *ill)
1822 {
1823 	ip_stack_t *ipst = ill->ill_ipst;
1824 	ipmp_illgrp_t *illg;
1825 
1826 	ASSERT(!IS_IPMP(ill));
1827 
1828 	rw_enter(&ipst->ips_ipmp_lock, RW_READER);
1829 	illg = ill->ill_grp;
1830 	if (illg != NULL && ill_check_and_refhold(illg->ig_ipmp_ill)) {
1831 		rw_exit(&ipst->ips_ipmp_lock);
1832 		return (illg->ig_ipmp_ill);
1833 	}
1834 	/*
1835 	 * Assume `ill' was removed from the illgrp in the meantime.
1836 	 */
1837 	rw_exit(&ipst->ips_ipmp_lock);
1838 	return (NULL);
1839 }
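
/*
 * Since the returned ill is held, callers are expected to drop the
 * reference with ill_refrele() when finished, e.g. (sketch):
 *
 *	ill_t *ipmp_ill;
 *
 *	if ((ipmp_ill = ipmp_ill_hold_ipmp_ill(ill)) != NULL) {
 *		(use ipmp_ill)
 *		ill_refrele(ipmp_ill);
 *	}
 */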
1840 
1841 /*
1842  * Return a held pointer to the appropriate underlying ill for sending the
1843  * specified type of packet.  (Unfortunately, this function needs to take an
1844  * underlying ill rather than an ipmp_illgrp_t because an underlying ill's
1845  * ill_grp pointer may become stale when not inside an IPSQ and not holding
1846  * ipmp_lock.)  Caller need not be inside the IPSQ.
1847  */
1848 ill_t *
1849 ipmp_ill_hold_xmit_ill(ill_t *ill, boolean_t is_unicast)
1850 {
1851 	ill_t *xmit_ill;
1852 	ip_stack_t *ipst = ill->ill_ipst;
1853 
1854 	rw_enter(&ipst->ips_ill_g_lock, RW_READER);
1855 	if (ill->ill_grp == NULL) {
1856 		/*
1857 		 * The ill was taken out of the group, so just send on it.
1858 		 */
1859 		rw_exit(&ipst->ips_ill_g_lock);
1860 		ill_refhold(ill);
1861 		return (ill);
1862 	}
1863 	if (is_unicast)
1864 		xmit_ill = ipmp_illgrp_hold_next_ill(ill->ill_grp);
1865 	else
1866 		xmit_ill = ipmp_illgrp_hold_cast_ill(ill->ill_grp);
1867 	rw_exit(&ipst->ips_ill_g_lock);
1868 
1869 	return (xmit_ill);
1870 }
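
/*
 * For unicast, ipmp_illgrp_hold_next_ill() selects the next active ill
 * (presumably spreading outbound unicast load across the group); for
 * multicast and broadcast, ipmp_illgrp_hold_cast_ill() returns the group's
 * designated cast ill.  In both cases the result is held, so the caller
 * must ill_refrele() any non-NULL ill it gets back.
 */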
1871 
1872 /*
1873  * Return the interface index for the IPMP ill tied to underlying interface
1874  * `ill', or zero if one doesn't exist.  Caller need not be inside the IPSQ.
1875  */
1876 uint_t
1877 ipmp_ill_get_ipmp_ifindex(const ill_t *ill)
1878 {
1879 	uint_t ifindex = 0;
1880 	ip_stack_t *ipst = ill->ill_ipst;
1881 	ipmp_grp_t *grp;
1882 
1883 	ASSERT(!IS_IPMP(ill));
1884 
1885 	rw_enter(&ipst->ips_ipmp_lock, RW_READER);
1886 	if ((grp = ill->ill_phyint->phyint_grp) != NULL)
1887 		ifindex = grp->gr_phyint->phyint_ifindex;
1888 	rw_exit(&ipst->ips_ipmp_lock);
1889 	return (ifindex);
1890 }
1891 
1892 /*
1893  * Place phyint `phyi' into IPMP group `grp'.
1894  */
1895 void
1896 ipmp_phyint_join_grp(phyint_t *phyi, ipmp_grp_t *grp)
1897 {
1898 	ill_t *ill;
1899 	ipsq_t *ipsq = phyi->phyint_ipsq;
1900 	ipsq_t *grp_ipsq = grp->gr_phyint->phyint_ipsq;
1901 	ip_stack_t *ipst = PHYINT_TO_IPST(phyi);
1902 
1903 	ASSERT(IAM_WRITER_IPSQ(ipsq));
1904 	ASSERT(phyi->phyint_illv4 != NULL || phyi->phyint_illv6 != NULL);
1905 
1906 	/*
1907 	 * Send routing socket messages indicating that the phyint's ills
1908 	 * and ipifs vanished.
1909 	 */
1910 	if (phyi->phyint_illv4 != NULL) {
1911 		ill = phyi->phyint_illv4;
1912 		ipmp_ill_rtsaddrmsg(ill, RTM_DELETE);
1913 	}
1914 
1915 	if (phyi->phyint_illv6 != NULL) {
1916 		ill = phyi->phyint_illv6;
1917 		ipmp_ill_rtsaddrmsg(ill, RTM_DELETE);
1918 	}
1919 
1920 	/*
1921 	 * Snapshot the phyint's initial kstats as a baseline.
1922 	 */
1923 	ipmp_phyint_get_kstats(phyi, phyi->phyint_kstats0);
1924 
1925 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1926 
1927 	phyi->phyint_grp = grp;
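	/*
	 * The first phyint to join establishes the group's mactype below;
	 * later joiners are asserted to have a matching ill_mactype.
	 */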
1928 	if (++grp->gr_nif == 1)
1929 		grp->gr_mactype = ill->ill_mactype;
1930 	else
1931 		ASSERT(grp->gr_mactype == ill->ill_mactype);
1932 
1933 	/*
1934 	 * Now that we're in the group, request a switch to the group's xop
1935 	 * when we ipsq_exit().  All future operations will be exclusive on
1936 	 * the group xop until ipmp_phyint_leave_grp() is called.
1937 	 */
1938 	ASSERT(ipsq->ipsq_swxop == NULL);
1939 	ASSERT(grp_ipsq->ipsq_xop == &grp_ipsq->ipsq_ownxop);
1940 	ipsq->ipsq_swxop = &grp_ipsq->ipsq_ownxop;
1941 
1942 	rw_exit(&ipst->ips_ipmp_lock);
1943 }
1944 
1945 /*
1946  * Remove phyint `phyi' from its current IPMP group.
1947  */
1948 void
1949 ipmp_phyint_leave_grp(phyint_t *phyi)
1950 {
1951 	uint_t i;
1952 	ipsq_t *ipsq = phyi->phyint_ipsq;
1953 	ip_stack_t *ipst = PHYINT_TO_IPST(phyi);
1954 	uint64_t phyi_kstats[IPMP_KSTAT_MAX];
1955 
1956 	ASSERT(IAM_WRITER_IPSQ(ipsq));
1957 
1958 	/*
1959 	 * If any of the phyint's ills are still in an illgrp, kick 'em out.
1960 	 */
1961 	if (phyi->phyint_illv4 != NULL && IS_UNDER_IPMP(phyi->phyint_illv4))
1962 		ipmp_ill_leave_illgrp(phyi->phyint_illv4);
1963 	if (phyi->phyint_illv6 != NULL && IS_UNDER_IPMP(phyi->phyint_illv6))
1964 		ipmp_ill_leave_illgrp(phyi->phyint_illv6);
1965 
1966 	/*
1967 	 * Send routing socket messages indicating that the phyint's ills
1968 	 * and ipifs have reappeared.
1969 	 */
1970 	if (phyi->phyint_illv4 != NULL)
1971 		ipmp_ill_rtsaddrmsg(phyi->phyint_illv4, RTM_ADD);
1972 	if (phyi->phyint_illv6 != NULL)
1973 		ipmp_ill_rtsaddrmsg(phyi->phyint_illv6, RTM_ADD);
1974 
1975 	/*
1976 	 * Calculate the phyint's cumulative kstats while it was in the group,
1977 	 * and add that to the group's baseline.
1978 	 */
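	/*
	 * For example (hypothetical numbers): if "obytes64" was 1000 when
	 * the phyint joined (phyint_kstats0) and is 4000 now, then 3000 is
	 * added to the group's gr_kstats0 baseline for that statistic.
	 */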
1979 	ipmp_phyint_get_kstats(phyi, phyi_kstats);
1980 	for (i = 0; i < IPMP_KSTAT_MAX; i++) {
1981 		phyi_kstats[i] -= phyi->phyint_kstats0[i];
1982 		atomic_add_64(&phyi->phyint_grp->gr_kstats0[i], phyi_kstats[i]);
1983 	}
1984 
1985 	rw_enter(&ipst->ips_ipmp_lock, RW_WRITER);
1986 
1987 	phyi->phyint_grp->gr_nif--;
1988 	phyi->phyint_grp = NULL;
1989 
1990 	/*
1991 	 * As our final act in leaving the group, request a switch back to our
1992 	 * IPSQ's own xop when we ipsq_exit().
1993 	 */
1994 	ASSERT(ipsq->ipsq_swxop == NULL);
1995 	ipsq->ipsq_swxop = &ipsq->ipsq_ownxop;
1996 
1997 	rw_exit(&ipst->ips_ipmp_lock);
1998 }
1999 
2000 /*
2001  * Store the IPMP-related kstats for `phyi' into the array named by `kstats'.
2002  * Assumes that `kstats' has at least IPMP_KSTAT_MAX elements.
2003  */
2004 static void
2005 ipmp_phyint_get_kstats(phyint_t *phyi, uint64_t kstats[])
2006 {
2007 	uint_t		i, j;
2008 	const char	*name;
2009 	kstat_t		*ksp;
2010 	kstat_named_t	*kn;
2011 	ip_stack_t	*ipst = PHYINT_TO_IPST(phyi);
2012 	zoneid_t	zoneid;
2013 
2014 	bzero(kstats, sizeof (kstats[0]) * IPMP_KSTAT_MAX);
2015 	zoneid = netstackid_to_zoneid(ipst->ips_netstack->netstack_stackid);
2016 	ksp = kstat_hold_byname("link", 0, phyi->phyint_name, zoneid);
2017 	if (ksp == NULL)
2018 		return;
2019 
2020 	KSTAT_ENTER(ksp);
2021 
2022 	if (ksp->ks_data != NULL && ksp->ks_type == KSTAT_TYPE_NAMED) {
2023 		/*
2024 		 * Bring kstats up-to-date before recording.
2025 		 */
2026 		(void) KSTAT_UPDATE(ksp, KSTAT_READ);
2027 
2028 		kn = KSTAT_NAMED_PTR(ksp);
2029 		for (i = 0; i < IPMP_KSTAT_MAX; i++) {
2030 			name = ipmp_kstats[i].name;
2031 			kstats[i] = 0;
2032 			for (j = 0; j < ksp->ks_ndata; j++) {
2033 				if (strcmp(kn[j].name, name) != 0)
2034 					continue;
2035 
2036 				switch (kn[j].data_type) {
2037 				case KSTAT_DATA_INT32:
2038 				case KSTAT_DATA_UINT32:
2039 					kstats[i] = kn[j].value.ui32;
2040 					break;
2041 #ifdef	_LP64
2042 				case KSTAT_DATA_LONG:
2043 				case KSTAT_DATA_ULONG:
2044 					kstats[i] = kn[j].value.ul;
2045 					break;
2046 #endif
2047 				case KSTAT_DATA_INT64:
2048 				case KSTAT_DATA_UINT64:
2049 					kstats[i] = kn[j].value.ui64;
2050 					break;
2051 				}
2052 				break;
2053 			}
2054 		}
2055 	}
2056 
2057 	KSTAT_EXIT(ksp);
2058 	kstat_rele(ksp);
2059 }
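
/*
 * Note that each statistic above is located by matching kstat names against
 * the ipmp_kstats template and is widened to a uint64_t regardless of the
 * exporting driver's data type; statistics the driver doesn't export are
 * left at zero from the earlier bzero().
 */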
2060 
2061 /*
2062  * Refresh the active state of all ills on `phyi'.
2063  */
2064 void
2065 ipmp_phyint_refresh_active(phyint_t *phyi)
2066 {
2067 	if (phyi->phyint_illv4 != NULL)
2068 		ipmp_ill_refresh_active(phyi->phyint_illv4);
2069 	if (phyi->phyint_illv6 != NULL)
2070 		ipmp_ill_refresh_active(phyi->phyint_illv6);
2071 }
2072 
2073 /*
2074  * Return a held pointer to the underlying ill bound to `ipif', or NULL if one
2075  * doesn't exist.  Caller need not be inside the IPSQ.
2076  */
2077 ill_t *
2078 ipmp_ipif_hold_bound_ill(const ipif_t *ipif)
2079 {
2080 	ill_t *boundill;
2081 	ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
2082 
2083 	ASSERT(IS_IPMP(ipif->ipif_ill));
2084 
2085 	rw_enter(&ipst->ips_ipmp_lock, RW_READER);
2086 	boundill = ipif->ipif_bound_ill;
2087 	if (boundill != NULL && ill_check_and_refhold(boundill)) {
2088 		rw_exit(&ipst->ips_ipmp_lock);
2089 		return (boundill);
2090 	}
2091 	rw_exit(&ipst->ips_ipmp_lock);
2092 	return (NULL);
2093 }
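
/*
 * As with the other *_hold_* routines above, the ill returned by
 * ipmp_ipif_hold_bound_ill() is held, and the caller is expected to drop
 * the reference with ill_refrele() when done with it.
 */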
2094 
2095 /*
2096  * Return a pointer to the underlying ill bound to `ipif', or NULL if one
2097  * doesn't exist.  Caller must be inside the IPSQ.
2098  */
2099 ill_t *
2100 ipmp_ipif_bound_ill(const ipif_t *ipif)
2101 {
2102 	ASSERT(IAM_WRITER_ILL(ipif->ipif_ill));
2103 	ASSERT(IS_IPMP(ipif->ipif_ill));
2104 
2105 	return (ipif->ipif_bound_ill);
2106 }
2107 
2108 /*
2109  * Check if `ipif' is a "stub" (placeholder address not being used).
2110  */
2111 boolean_t
2112 ipmp_ipif_is_stubaddr(const ipif_t *ipif)
2113 {
2114 	if (ipif->ipif_flags & IPIF_UP)
2115 		return (B_FALSE);
2116 	if (ipif->ipif_ill->ill_isv6)
2117 		return (IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr));
2118 	else
2119 		return (ipif->ipif_lcl_addr == INADDR_ANY);
2120 }
2121 
2122 /*
2123  * Check if `ipif' is an IPMP data address.
2124  */
2125 boolean_t
2126 ipmp_ipif_is_dataaddr(const ipif_t *ipif)
2127 {
2128 	if (ipif->ipif_flags & IPIF_NOFAILOVER)
2129 		return (B_FALSE);
2130 	if (ipif->ipif_ill->ill_isv6)
2131 		return (!IN6_IS_ADDR_UNSPECIFIED(&ipif->ipif_v6lcl_addr));
2132 	else
2133 		return (ipif->ipif_lcl_addr != INADDR_ANY);
2134 }
2135 
2136 /*
2137  * Check if `ipif' is an IPIF_UP IPMP data address.
2138  */
2139 static boolean_t
2140 ipmp_ipif_is_up_dataaddr(const ipif_t *ipif)
2141 {
2142 	return (ipmp_ipif_is_dataaddr(ipif) && (ipif->ipif_flags & IPIF_UP));
2143 }
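
/*
 * To summarize the three predicates above: a "stub" address is down and
 * unspecified (INADDR_ANY or ::), a data address is any specified address
 * without IPIF_NOFAILOVER set, and an "up" data address is a data address
 * that also has IPIF_UP set.
 */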
2144 
2145 /*
2146  * Check if `mp' is a probe packet, i.e. whether its IP source address
2147  * is a test address on underlying interface `ill'.  Caller need not be inside
2148  * the IPSQ.
2149  */
2150 boolean_t
2151 ipmp_packet_is_probe(mblk_t *mp, ill_t *ill)
2152 {
2153 	ip6_t *ip6h = (ip6_t *)mp->b_rptr;
2154 	ipha_t *ipha = (ipha_t *)mp->b_rptr;
2155 
2156 	ASSERT(DB_TYPE(mp) != M_CTL);
2157 
2158 	if (!IS_UNDER_IPMP(ill))
2159 		return (B_FALSE);
2160 
2161 	if (ill->ill_isv6) {
2162 		if (!IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) &&
2163 		    ipif_lookup_testaddr_v6(ill, &ip6h->ip6_src, NULL))
2164 			return (B_TRUE);
2165 	} else {
2166 		if (ipha->ipha_src != INADDR_ANY &&
2167 		    ipif_lookup_testaddr_v4(ill, &ipha->ipha_src, NULL))
2168 			return (B_TRUE);
2169 	}
2170 	return (B_FALSE);
2171 }
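
/*
 * Note that probe classification relies entirely on the IP source address:
 * a packet is treated as a probe only if its source matches a test address
 * configured on `ill' (per ipif_lookup_testaddr_v4/_v6 above), and packets
 * with an unspecified source are never probes.
 */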
2172 
2173 /*
2174  * NCEC walker callback: delete `ncec' if it is associated with `ill_arg' and
2175  * is not one of our local addresses.  Caller must be inside the IPSQ.
2176  */
2177 static void
2178 ipmp_ncec_delete_nonlocal(ncec_t *ncec, uchar_t *ill_arg)
2179 {
2180 	if (!NCE_MYADDR(ncec) && ncec->ncec_ill == (ill_t *)ill_arg)
2181 		ncec_delete(ncec);
2182 }
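
/*
 * This callback is presumably driven by the NCEC walker (e.g. ncec_walk())
 * when an underlying ill is being deactivated, so that learned (non-local)
 * neighbor cache entries on that ill are flushed while entries for our own
 * addresses are preserved.
 */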
2183 
2184 /*
2185  * Delete any NCEs tied to the illgrp associated with `ncec'.  Caller need not
2186  * be inside the IPSQ.
2187  */
2188 void
2189 ipmp_ncec_delete_nce(ncec_t *ncec)
2190 {
2191 	ipmp_illgrp_t	*illg = ncec->ncec_ill->ill_grp;
2192 	ip_stack_t	*ipst = ncec->ncec_ipst;
2193 	ill_t		*ill;
2194 	nce_t		*nce;
2195 	list_t		dead;
2196 
2197 	ASSERT(IS_IPMP(ncec->ncec_ill));
2198 
2199 	/*
2200 	 * For each underlying interface, delete `ncec' from its ill_nce list
2201 	 * via nce_fastpath_list_delete().  Defer the actual nce_refrele()
2202 	 * until we've dropped ill_g_lock.
2203 	 */
2204 	list_create(&dead, sizeof (nce_t), offsetof(nce_t, nce_node));
2205 
2206 	rw_enter(&ipst->ips_ill_g_lock, RW_READER);
2207 	ill = list_head(&illg->ig_if);
2208 	for (; ill != NULL; ill = list_next(&illg->ig_if, ill))
2209 		nce_fastpath_list_delete(ill, ncec, &dead);
2210 	rw_exit(&ipst->ips_ill_g_lock);
2211 
2212 	while ((nce = list_remove_head(&dead)) != NULL)
2213 		nce_refrele(nce);
2214 
2215 	list_destroy(&dead);
2216 }
2217 
2218 /*
2219  * Refresh any NCE entries tied to the illgrp associated with `ncec' to
2220  * use the information in `ncec'.  Caller need not be inside the IPSQ.
2221  */
2222 void
2223 ipmp_ncec_refresh_nce(ncec_t *ncec)
2224 {
2225 	ipmp_illgrp_t	*illg = ncec->ncec_ill->ill_grp;
2226 	ip_stack_t	*ipst = ncec->ncec_ipst;
2227 	ill_t		*ill;
2228 	nce_t		*nce, *nce_next;
2229 	list_t		replace;
2230 
2231 	ASSERT(IS_IPMP(ncec->ncec_ill));
2232 
2233 	/*
2234 	 * If `ncec' is not reachable, there is no use in refreshing NCEs.
2235 	 */
2236 	if (!NCE_ISREACHABLE(ncec))
2237 		return;
2238 
2239 	/*
2240 	 * Find all the NCEs matching ncec->ncec_addr.  We cannot update them
2241 	 * in-situ because we're holding ipmp_lock to prevent changes to IPMP
2242 	 * group membership and updating indirectly calls nce_fastpath_probe()
2243 	 * -> putnext() which cannot hold locks.  Thus, move the NCEs to a
2244 	 * separate list and process that list after dropping ipmp_lock.
2245 	 */
2246 	list_create(&replace, sizeof (nce_t), offsetof(nce_t, nce_node));
2247 	rw_enter(&ipst->ips_ipmp_lock, RW_READER);
2248 	ill = list_head(&illg->ig_actif);
2249 	for (; ill != NULL; ill = list_next(&illg->ig_actif, ill)) {
2250 		mutex_enter(&ill->ill_lock);
2251 		nce = list_head(&ill->ill_nce);
2252 		for (; nce != NULL; nce = nce_next) {
2253 			nce_next = list_next(&ill->ill_nce, nce);
2254 			if (IN6_ARE_ADDR_EQUAL(&nce->nce_addr,
2255 			    &ncec->ncec_addr)) {
2256 				nce_refhold(nce);
2257 				nce_delete(nce);
2258 				list_insert_tail(&replace, nce);
2259 			}
2260 		}
2261 		mutex_exit(&ill->ill_lock);
2262 	}
2263 	rw_exit(&ipst->ips_ipmp_lock);
2264 
2265 	/*
2266 	 * Process the list; nce_lookup_then_add_v* ensures that nce->nce_ill
2267 	 * is still in the group for ncec->ncec_ill.
2268 	 */
2269 	while ((nce = list_remove_head(&replace)) != NULL) {
2270 		if (ncec->ncec_ill->ill_isv6) {
2271 			(void) nce_lookup_then_add_v6(nce->nce_ill,
2272 			    ncec->ncec_lladdr, ncec->ncec_lladdr_length,
2273 			    &nce->nce_addr, ncec->ncec_flags, ND_UNCHANGED,
2274 			    NULL);
2275 		} else {
2276 			ipaddr_t ipaddr;
2277 
2278 			IN6_V4MAPPED_TO_IPADDR(&ncec->ncec_addr, ipaddr);
2279 			(void) nce_lookup_then_add_v4(nce->nce_ill,
2280 			    ncec->ncec_lladdr, ncec->ncec_lladdr_length,
2281 			    &ipaddr, ncec->ncec_flags, ND_UNCHANGED, NULL);
2282 		}
2283 		nce_refrele(nce);
2284 	}
2285 
2286 	list_destroy(&replace);
2287 }
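
/*
 * In other words, each matching per-ill nce_t is deleted and then
 * re-created via nce_lookup_then_add_v4()/_v6() with the ncec's current
 * link-layer address and flags, so the underlying interfaces presumably
 * end up with fastpath state reflecting the refreshed `ncec'.
 */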
2288