xref: /titanic_44/usr/src/uts/common/inet/ip/ip_ftable.c (revision 2a9459bdd821c1cf59590a7a9069ac9c591e8a6b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This file contains consumer routines of the IPv4 forwarding engine
30  */
31 
32 #include <sys/types.h>
33 #include <sys/stream.h>
34 #include <sys/stropts.h>
35 #include <sys/strlog.h>
36 #include <sys/dlpi.h>
37 #include <sys/ddi.h>
38 #include <sys/cmn_err.h>
39 #include <sys/policy.h>
40 
41 #include <sys/systm.h>
42 #include <sys/strsun.h>
43 #include <sys/kmem.h>
44 #include <sys/param.h>
45 #include <sys/socket.h>
46 #include <sys/strsubr.h>
47 #include <sys/pattr.h>
48 #include <net/if.h>
49 #include <net/route.h>
50 #include <netinet/in.h>
51 #include <net/if_dl.h>
52 #include <netinet/ip6.h>
53 #include <netinet/icmp6.h>
54 
55 #include <inet/common.h>
56 #include <inet/mi.h>
57 #include <inet/mib2.h>
58 #include <inet/ip.h>
59 #include <inet/ip_impl.h>
60 #include <inet/ip6.h>
61 #include <inet/ip_ndp.h>
62 #include <inet/arp.h>
63 #include <inet/ip_if.h>
64 #include <inet/ip_ire.h>
65 #include <inet/ip_ftable.h>
66 #include <inet/ip_rts.h>
67 #include <inet/nd.h>
68 
69 #include <net/pfkeyv2.h>
70 #include <inet/ipsec_info.h>
71 #include <inet/sadb.h>
72 #include <sys/kmem.h>
73 #include <inet/tcp.h>
74 #include <inet/ipclassifier.h>
75 #include <sys/zone.h>
76 #include <net/radix.h>
77 #include <sys/tsol/label.h>
78 #include <sys/tsol/tnet.h>
79 
80 #define	IS_DEFAULT_ROUTE(ire)	\
81 	(((ire)->ire_type & IRE_DEFAULT) || \
82 	    (((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0)))
83 
84 /*
85  * structure for passing args between ire_ftable_lookup and ire_find_best_route
86  */
87 typedef struct ire_ftable_args_s {
88 	ipaddr_t	ift_addr;
89 	ipaddr_t	ift_mask;
90 	ipaddr_t	ift_gateway;
91 	int		ift_type;
92 	const ipif_t		*ift_ipif;
93 	zoneid_t	ift_zoneid;
94 	uint32_t	ift_ihandle;
95 	const ts_label_t	*ift_tsl;
96 	int		ift_flags;
97 	ire_t		*ift_best_ire;
98 } ire_ftable_args_t;
99 
100 static ire_t	*route_to_dst(const struct sockaddr *, zoneid_t, ip_stack_t *);
101 static ire_t   	*ire_round_robin(irb_t *, zoneid_t, ire_ftable_args_t *,
102     ip_stack_t *);
103 static void		ire_del_host_redir(ire_t *, char *);
104 static boolean_t	ire_find_best_route(struct radix_node *, void *);
105 static int	ip_send_align_hcksum_flags(mblk_t *, ill_t *);
106 
107 /*
108  * Look up a route in the forwarding table. A specific lookup is indicated by
109  * passing the required parameters and indicating the match required in the
110  * flag field.
111  *
112  * Looking for a default route can be done in three ways:
113  * 1) pass mask as 0 and set MATCH_IRE_MASK in flags field
114  *    along with other matches.
115  * 2) pass type as IRE_DEFAULT and set MATCH_IRE_TYPE in flags
116  *    field along with other matches.
117  * 3) pass the destination and mask as zeros.
118  *
119  * A request to return a default route if no route
120  * is found can be specified by setting MATCH_IRE_DEFAULT
121  * in flags.
122  *
123  * It does not support more than one level of recursion. It
124  * will do a recursive lookup only when the lookup maps to
125  * a prefix or default route and the MATCH_IRE_RECURSIVE flag is passed.
126  *
127  * If the routing table is set up to allow more than one level
128  * of recursion, cleaning up the cache table will not work, resulting
129  * in invalid routing.
130  *
131  * Supports IP_BOUND_IF by following the ipif/ill when recursing.
132  *
133  * NOTE : When this function returns NULL, pire has already been released.
134  *	  pire is valid only when this function successfully returns an
135  *	  ire.
136  */
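/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller doing a recursive lookup for an off-link destination and allowing
 * a fall back to a default route.  "dst", "zoneid" and "ipst" are assumed
 * to be supplied by the caller; error handling is elided.
 *
 *	ire_t *ire, *sire = NULL;
 *
 *	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
 *	    NULL, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT, ipst);
 *	if (ire != NULL) {
 *		... use ire, and the parent prefix route in sire if set ...
 *		if (sire != NULL)
 *			ire_refrele(sire);
 *		ire_refrele(ire);
 *	}
 *
 * When the lookup returns NULL, *sire is guaranteed to be NULL (see the
 * NOTE above), so the caller must not release it in that case.
 */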
137 ire_t *
138 ire_ftable_lookup(ipaddr_t addr, ipaddr_t mask, ipaddr_t gateway,
139     int type, const ipif_t *ipif, ire_t **pire, zoneid_t zoneid,
140     uint32_t ihandle, const ts_label_t *tsl, int flags, ip_stack_t *ipst)
141 {
142 	ire_t *ire = NULL;
143 	ipaddr_t gw_addr;
144 	struct rt_sockaddr rdst, rmask;
145 	struct rt_entry *rt;
146 	ire_ftable_args_t margs;
147 	boolean_t found_incomplete = B_FALSE;
148 
149 	ASSERT(ipif == NULL || !ipif->ipif_isv6);
150 
151 	/*
152 	 * When we return NULL from this function, we should make
153 	 * sure that *pire is NULL so that the callers will not
154 	 * wrongly REFRELE the pire.
155 	 */
156 	if (pire != NULL)
157 		*pire = NULL;
158 	/*
159 	 * ire_match_args() will dereference ipif if MATCH_IRE_SRC or
160 	 * MATCH_IRE_ILL is set.
161 	 */
162 	if ((flags & (MATCH_IRE_SRC | MATCH_IRE_ILL | MATCH_IRE_ILL_GROUP)) &&
163 	    (ipif == NULL))
164 		return (NULL);
165 
166 	(void) memset(&rdst, 0, sizeof (rdst));
167 	rdst.rt_sin_len = sizeof (rdst);
168 	rdst.rt_sin_family = AF_INET;
169 	rdst.rt_sin_addr.s_addr = addr;
170 
171 	(void) memset(&rmask, 0, sizeof (rmask));
172 	rmask.rt_sin_len = sizeof (rmask);
173 	rmask.rt_sin_family = AF_INET;
174 	rmask.rt_sin_addr.s_addr = mask;
175 
176 	(void) memset(&margs, 0, sizeof (margs));
177 	margs.ift_addr = addr;
178 	margs.ift_mask = mask;
179 	margs.ift_gateway = gateway;
180 	margs.ift_type = type;
181 	margs.ift_ipif = ipif;
182 	margs.ift_zoneid = zoneid;
183 	margs.ift_ihandle = ihandle;
184 	margs.ift_tsl = tsl;
185 	margs.ift_flags = flags;
186 
187 	/*
188 	 * The flags argument passed to ire_ftable_lookup may cause the
189 	 * search to return, not the longest matching prefix, but the
190 	 * "best matching prefix", i.e., the longest prefix that also
191 	 * satisfies constraints imposed via the permutation of flags
192 	 * passed in. To achieve this, we invoke ire_match_args() on
193  * each matching leaf in the radix tree. ire_match_args is
194  * invoked by the callback function ire_find_best_route().
195  * We hold the global tree lock in read mode when calling
196  * rn_match_args(). Before dropping the global tree lock, ensure
197 	 * that the radix node can't be deleted by incrementing ire_refcnt.
198 	 */
199 	RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
200 	rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst,
201 	    ipst->ips_ip_ftable, ire_find_best_route, &margs);
202 	ire = margs.ift_best_ire;
203 	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
204 
205 	if (rt == NULL) {
206 		return (NULL);
207 	} else {
208 		ASSERT(ire != NULL);
209 	}
210 
211 	DTRACE_PROBE2(ire__found, ire_ftable_args_t *, &margs, ire_t *, ire);
212 
213 	if (!IS_DEFAULT_ROUTE(ire))
214 		goto found_ire_held;
215 	/*
216 	 * If default route is found, see if default matching criteria
217 	 * are satisfied.
218 	 */
219 	if (flags & MATCH_IRE_MASK) {
220 		/*
221 		 * we were asked to match a 0 mask, and came back with
222 		 * a default route. Ok to return it.
223 		 */
224 		goto found_default_ire;
225 	}
226 	if ((flags & MATCH_IRE_TYPE) &&
227 	    (type & (IRE_DEFAULT | IRE_INTERFACE))) {
228 		/*
229 		 * we were asked to match a default ire type. Ok to return it.
230 		 */
231 		goto found_default_ire;
232 	}
233 	if (flags & MATCH_IRE_DEFAULT) {
234 		goto found_default_ire;
235 	}
236 	/*
237 	 * we found a default route, but default matching criteria
238 	 * are not specified and we are not explicitly looking for
239 	 * default.
240 	 */
241 	IRE_REFRELE(ire);
242 	return (NULL);
243 found_default_ire:
244 	/*
245 	 * round-robin only if we have more than one route in the bucket.
246 	 */
247 	if ((ire->ire_bucket->irb_ire_cnt > 1) &&
248 	    IS_DEFAULT_ROUTE(ire) &&
249 	    ((flags & (MATCH_IRE_DEFAULT | MATCH_IRE_MASK)) ==
250 	    MATCH_IRE_DEFAULT)) {
251 		ire_t *next_ire;
252 
253 		next_ire = ire_round_robin(ire->ire_bucket, zoneid, &margs,
254 		    ipst);
255 		IRE_REFRELE(ire);
256 		if (next_ire != NULL) {
257 			ire = next_ire;
258 		} else {
259 			/* no route */
260 			return (NULL);
261 		}
262 	}
263 found_ire_held:
264 	if ((flags & MATCH_IRE_RJ_BHOLE) &&
265 	    (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) {
266 		return (ire);
267 	}
268 	/*
269 	 * At this point, IRE that was found must be an IRE_FORWARDTABLE
270 	 * type.  If this is a recursive lookup and an IRE_INTERFACE type was
271 	 * found, return that.  If it was some other IRE_FORWARDTABLE type of
272 	 * IRE (one of the prefix types), then it is necessary to fill in the
273 	 * parent IRE pointed to by pire, and then lookup the gateway address of
274  * the parent.  For backwards compatibility, if this lookup returns an
275 	 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level
276 	 * of lookup is done.
277 	 */
278 	if (flags & MATCH_IRE_RECURSIVE) {
279 		ipif_t	*gw_ipif;
280 		int match_flags = MATCH_IRE_DSTONLY;
281 		ire_t *save_ire;
282 
283 		if (ire->ire_type & IRE_INTERFACE)
284 			return (ire);
285 		if (pire != NULL)
286 			*pire = ire;
287 		/*
288 		 * If we can't find an IRE_INTERFACE or the caller has not
289 		 * asked for pire, we need to REFRELE the save_ire.
290 		 */
291 		save_ire = ire;
292 
293 		/*
294 		 * Currently MATCH_IRE_ILL is never used with
295 		 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while
296 		 * sending out packets as MATCH_IRE_ILL is used only
297 		 * for communicating with on-link hosts. We can't assert
298 		 * that here as RTM_GET calls this function with
299 		 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE.
300 		 * We have already used the MATCH_IRE_ILL in determining
301 		 * the right prefix route at this point. To match the
302 		 * behavior of how we locate routes while sending out
303 		 * packets, we don't want to use MATCH_IRE_ILL below
304 		 * while locating the interface route.
305 		 *
306 		 * ire_ftable_lookup may end up with an incomplete IRE_CACHE
307 		 * entry for the gateway (i.e., one for which the
308 		 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller
309 		 * has specified MATCH_IRE_COMPLETE, such entries will not
310 		 * be returned; instead, we return the IF_RESOLVER ire.
311 		 */
312 		if (ire->ire_ipif != NULL)
313 			match_flags |= MATCH_IRE_ILL_GROUP;
314 
315 		ire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 0,
316 		    ire->ire_ipif, NULL, zoneid, tsl, match_flags, ipst);
317 		DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire,
318 		    (ire_t *), save_ire);
319 		if (ire == NULL ||
320 		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
321 		    ire->ire_nce->nce_state != ND_REACHABLE &&
322 		    (flags & MATCH_IRE_COMPLETE))) {
323 			/*
324 			 * Do not release the parent ire if MATCH_IRE_PARENT
325 			 * is set. Also return it via ire.
326 			 */
327 			if (ire != NULL) {
328 				ire_refrele(ire);
329 				ire = NULL;
330 				found_incomplete = B_TRUE;
331 			}
332 			if (flags & MATCH_IRE_PARENT) {
333 				if (pire != NULL) {
334 					/*
335 					 * Need an extra REFHOLD, if the parent
336 					 * ire is returned via both ire and
337 					 * pire.
338 					 */
339 					IRE_REFHOLD(save_ire);
340 				}
341 				ire = save_ire;
342 			} else {
343 				ire_refrele(save_ire);
344 				if (pire != NULL)
345 					*pire = NULL;
346 			}
347 			if (!found_incomplete)
348 				return (ire);
349 		}
350 		if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) {
351 			/*
352 			 * If the caller did not ask for pire, release
353 			 * it now.
354 			 */
355 			if (pire == NULL) {
356 				ire_refrele(save_ire);
357 			}
358 			return (ire);
359 		}
360 		match_flags |= MATCH_IRE_TYPE;
361 		gw_addr = ire->ire_gateway_addr;
362 		gw_ipif = ire->ire_ipif;
363 		ire_refrele(ire);
364 		ire = ire_route_lookup(gw_addr, 0, 0,
365 		    (found_incomplete? IRE_INTERFACE :
366 		    (IRE_CACHETABLE | IRE_INTERFACE)),
367 		    gw_ipif, NULL, zoneid, tsl, match_flags, ipst);
368 		DTRACE_PROBE2(ftable__route__lookup2, (ire_t *), ire,
369 		    (ire_t *), save_ire);
370 		if (ire == NULL ||
371 		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
372 		    ire->ire_nce->nce_state != ND_REACHABLE &&
373 		    (flags & MATCH_IRE_COMPLETE))) {
374 			/*
375 			 * Do not release the parent ire if MATCH_IRE_PARENT
376 			 * is set. Also return it via ire.
377 			 */
378 			if (ire != NULL) {
379 				ire_refrele(ire);
380 				ire = NULL;
381 			}
382 			if (flags & MATCH_IRE_PARENT) {
383 				if (pire != NULL) {
384 					/*
385 					 * Need an extra REFHOLD, if the
386 					 * parent ire is returned via both
387 					 * ire and pire.
388 					 */
389 					IRE_REFHOLD(save_ire);
390 				}
391 				ire = save_ire;
392 			} else {
393 				ire_refrele(save_ire);
394 				if (pire != NULL)
395 					*pire = NULL;
396 			}
397 			return (ire);
398 		} else if (pire == NULL) {
399 			/*
400 			 * If the caller did not ask for pire, release
401 			 * it now.
402 			 */
403 			ire_refrele(save_ire);
404 		}
405 		return (ire);
406 	}
407 	ASSERT(pire == NULL || *pire == NULL);
408 	return (ire);
409 }
410 
411 
412 /*
413  * Find an IRE_OFFSUBNET IRE entry for the multicast address 'group'
414  * that goes through 'ipif'. As a fallback, a route that goes through
415  * ipif->ipif_ill can be returned.
416  */
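/*
 * Illustrative sketch (hypothetical caller, not in the original file):
 * find the offsubnet route used to reach multicast group "group" through
 * "ipif"; both are assumed to be held and valid in the caller.
 *
 *	ire_t *mire;
 *
 *	mire = ipif_lookup_multi_ire(ipif, group);
 *	if (mire != NULL) {
 *		... inspect mire->ire_gateway_addr, mire->ire_ipif ...
 *		ire_refrele(mire);
 *	}
 */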
417 ire_t *
418 ipif_lookup_multi_ire(ipif_t *ipif, ipaddr_t group)
419 {
420 	ire_t	*ire;
421 	ire_t	*save_ire = NULL;
422 	ire_t   *gw_ire;
423 	irb_t   *irb;
424 	ipaddr_t gw_addr;
425 	int	match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL;
426 	ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
427 
428 	ASSERT(CLASSD(group));
429 
430 	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, ALL_ZONES, 0,
431 	    NULL, MATCH_IRE_DEFAULT, ipst);
432 
433 	if (ire == NULL)
434 		return (NULL);
435 
436 	irb = ire->ire_bucket;
437 	ASSERT(irb);
438 
439 	IRB_REFHOLD(irb);
440 	ire_refrele(ire);
441 	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
442 		if (ire->ire_addr != group ||
443 		    (ipif->ipif_zoneid != ire->ire_zoneid &&
444 		    ire->ire_zoneid != ALL_ZONES)) {
445 			continue;
446 		}
447 
448 		switch (ire->ire_type) {
449 		case IRE_DEFAULT:
450 		case IRE_PREFIX:
451 		case IRE_HOST:
452 			gw_addr = ire->ire_gateway_addr;
453 			gw_ire = ire_ftable_lookup(gw_addr, 0, 0, IRE_INTERFACE,
454 			    ipif, NULL, ALL_ZONES, 0, NULL, match_flags, ipst);
455 
456 			if (gw_ire != NULL) {
457 				if (save_ire != NULL) {
458 					ire_refrele(save_ire);
459 				}
460 				IRE_REFHOLD(ire);
461 				if (gw_ire->ire_ipif == ipif) {
462 					ire_refrele(gw_ire);
463 
464 					IRB_REFRELE(irb);
465 					return (ire);
466 				}
467 				ire_refrele(gw_ire);
468 				save_ire = ire;
469 			}
470 			break;
471 		case IRE_IF_NORESOLVER:
472 		case IRE_IF_RESOLVER:
473 			if (ire->ire_ipif == ipif) {
474 				if (save_ire != NULL) {
475 					ire_refrele(save_ire);
476 				}
477 				IRE_REFHOLD(ire);
478 
479 				IRB_REFRELE(irb);
480 				return (ire);
481 			}
482 			break;
483 		}
484 	}
485 	IRB_REFRELE(irb);
486 
487 	return (save_ire);
488 }
489 
490 /*
491  * Find an IRE_INTERFACE for the multicast group.
492  * Allows different routes for multicast addresses
493  * in the unicast routing table (akin to 224.0.0.0 but could be more specific)
494  * which point at different interfaces. This is used when IP_MULTICAST_IF
495  * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't
496  * specify the interface to join on.
497  *
498  * Supports IP_BOUND_IF by following the ipif/ill when recursing.
499  */
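/*
 * Illustrative sketch (hypothetical caller, not in the original file):
 * pick the outgoing interface for a multicast send when IP_MULTICAST_IF
 * was not specified.  "group", "zoneid" and "ipst" are assumed.
 *
 *	ire_t *ire;
 *
 *	ire = ire_lookup_multi(group, zoneid, ipst);
 *	if (ire != NULL) {
 *		... ire->ire_ipif identifies the outgoing interface ...
 *		ire_refrele(ire);
 *	}
 *
 * The returned ire, when non-NULL, is an IRE_INTERFACE type, so its
 * ire_ipif identifies the interface the group traffic should use.
 */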
500 ire_t *
501 ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst)
502 {
503 	ire_t	*ire;
504 	ipif_t	*ipif = NULL;
505 	int	match_flags = MATCH_IRE_TYPE;
506 	ipaddr_t gw_addr;
507 
508 	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid,
509 	    0, NULL, MATCH_IRE_DEFAULT, ipst);
510 
511 	/* We search a resolvable ire in case of multirouting. */
512 	if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) {
513 		ire_t *cire = NULL;
514 		/*
515 		 * If the route is not resolvable, the looked up ire
516 		 * may be changed here. In that case, ire_multirt_lookup()
517 		 * will IRE_REFRELE the original ire and change it.
518 		 */
519 		(void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW,
520 		    NULL, ipst);
521 		if (cire != NULL)
522 			ire_refrele(cire);
523 	}
524 	if (ire == NULL)
525 		return (NULL);
526 	/*
527 	 * Make sure we follow ire_ipif.
528 	 *
529 	 * We need to determine the interface route through
530 	 * which the gateway will be reached. We don't really
531 	 * care which interface is picked if the interface is
532 	 * part of a group.
533 	 */
534 	if (ire->ire_ipif != NULL) {
535 		ipif = ire->ire_ipif;
536 		match_flags |= MATCH_IRE_ILL_GROUP;
537 	}
538 
539 	switch (ire->ire_type) {
540 	case IRE_DEFAULT:
541 	case IRE_PREFIX:
542 	case IRE_HOST:
543 		gw_addr = ire->ire_gateway_addr;
544 		ire_refrele(ire);
545 		ire = ire_ftable_lookup(gw_addr, 0, 0,
546 		    IRE_INTERFACE, ipif, NULL, zoneid, 0,
547 		    NULL, match_flags, ipst);
548 		return (ire);
549 	case IRE_IF_NORESOLVER:
550 	case IRE_IF_RESOLVER:
551 		return (ire);
552 	default:
553 		ire_refrele(ire);
554 		return (NULL);
555 	}
556 }
557 
558 /*
559  * Delete the passed in ire if the gateway addr matches
560  */
561 void
562 ire_del_host_redir(ire_t *ire, char *gateway)
563 {
564 	if ((ire->ire_flags & RTF_DYNAMIC) &&
565 	    (ire->ire_gateway_addr == *(ipaddr_t *)gateway))
566 		ire_delete(ire);
567 }
568 
569 /*
570  * Search for all HOST REDIRECT routes that are
571  * pointing at the specified gateway and
572  * delete them. This routine is called only
573  * when a default gateway is going away.
574  */
575 void
576 ire_delete_host_redirects(ipaddr_t gateway, ip_stack_t *ipst)
577 {
578 	struct rtfuncarg rtfarg;
579 
580 	(void) memset(&rtfarg, 0, sizeof (rtfarg));
581 	rtfarg.rt_func = ire_del_host_redir;
582 	rtfarg.rt_arg = (void *)&gateway;
583 	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
584 	    rtfunc, &rtfarg, irb_refhold_rn, irb_refrele_rn);
585 }
586 
587 struct ihandle_arg {
588 	uint32_t ihandle;
589 	ire_t	 *ire;
590 };
591 
592 static int
593 ire_ihandle_onlink_match(struct radix_node *rn, void *arg)
594 {
595 	struct rt_entry *rt;
596 	irb_t *irb;
597 	ire_t *ire;
598 	struct ihandle_arg *ih = arg;
599 
600 	rt = (struct rt_entry *)rn;
601 	ASSERT(rt != NULL);
602 	irb = &rt->rt_irb;
603 	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
604 		if ((ire->ire_type & IRE_INTERFACE) &&
605 		    (ire->ire_ihandle == ih->ihandle)) {
606 			ih->ire = ire;
607 			IRE_REFHOLD(ire);
608 			return (1);
609 		}
610 	}
611 	return (0);
612 }
613 
614 /*
615  * Locate the interface ire that is tied to the cache ire 'cire' via
616  * cire->ire_ihandle.
617  *
618  * We are trying to create the cache ire for an onlink destn. or
619  * gateway in 'cire'. We are called from ire_add_v4() in the IRE_IF_RESOLVER
620  * case, after the ire has come back from ARP.
621  */
622 ire_t *
623 ire_ihandle_lookup_onlink(ire_t *cire)
624 {
625 	ire_t	*ire;
626 	int	match_flags;
627 	struct ihandle_arg ih;
628 	ip_stack_t *ipst;
629 
630 	ASSERT(cire != NULL);
631 	ipst = cire->ire_ipst;
632 
633 	/*
634 	 * We don't need to specify the zoneid to ire_ftable_lookup() below
635 	 * because the ihandle refers to an ipif which can be in only one zone.
636 	 */
637 	match_flags =  MATCH_IRE_TYPE | MATCH_IRE_IHANDLE | MATCH_IRE_MASK;
638 	/*
639 	 * We know that the mask of the interface ire equals cire->ire_cmask.
640 	 * (When ip_newroute() created 'cire' for an on-link destn. it set its
641 	 * cmask from the interface ire's mask)
642 	 */
643 	ire = ire_ftable_lookup(cire->ire_addr, cire->ire_cmask, 0,
644 	    IRE_INTERFACE, NULL, NULL, ALL_ZONES, cire->ire_ihandle,
645 	    NULL, match_flags, ipst);
646 	if (ire != NULL)
647 		return (ire);
648 	/*
649 	 * If we didn't find an interface ire above, we can't declare failure.
650 	 * For backwards compatibility, we need to support prefix routes
651 	 * pointing to next hop gateways that are not on-link.
652 	 *
653 	 * In the resolver/noresolver case, ip_newroute() thinks it is creating
654 	 * the cache ire for an onlink destination in 'cire'. But 'cire' is
655 	 * not actually onlink, because ire_ftable_lookup() cheated it by
656 	 * doing ire_route_lookup() twice and returning an interface ire.
657 	 *
658 	 * Eg. default	-	gw1			(line 1)
659 	 *	gw1	-	gw2			(line 2)
660 	 *	gw2	-	hme0			(line 3)
661 	 *
662 	 * In the above example, ip_newroute() tried to create the cache ire
663 	 * 'cire' for gw1, based on the interface route in line 3. The
664 	 * ire_ftable_lookup() above fails, because there is no interface route
665 	 * to reach gw1 (its route points at gw2). We fall through below.
666 	 *
667 	 * Do a brute force search based on the ihandle in a subset of the
668 	 * forwarding tables, corresponding to cire->ire_cmask. Otherwise
669 	 * things become very complex, since we don't have 'pire' in this
670 	 * case. (Also note that this method is not possible in the offlink
671 	 * case because we don't know the mask)
672 	 */
673 	(void) memset(&ih, 0, sizeof (ih));
674 	ih.ihandle = cire->ire_ihandle;
675 	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
676 	    ire_ihandle_onlink_match, &ih, irb_refhold_rn, irb_refrele_rn);
677 	return (ih.ire);
678 }
679 
680 /*
681  * IRE iterator used by ire_ftable_lookup[_v6]() to process multiple default
682  * routes. Given a starting point in the hash list (ire_origin), walk the IREs
683  * in the bucket skipping default interface routes and deleted entries.
684  * Returns the next IRE (unheld), or NULL when we're back to the starting point.
685  * Assumes that the caller holds a reference on the IRE bucket.
686  */
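/*
 * Illustrative sketch (assumed usage pattern, not part of the original
 * code): walk all eligible default routes in a bucket starting from a
 * remembered origin, as ire_round_robin() does.  The caller is assumed
 * to hold a reference on the bucket for the duration of the walk, and
 * the IREs returned here are not held.
 *
 *	ire_t *ire = ire_origin;
 *
 *	do {
 *		... try this candidate default route ...
 *		ire = ire_get_next_default_ire(ire, ire_origin);
 *	} while (ire != NULL);
 */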
687 ire_t *
688 ire_get_next_default_ire(ire_t *ire, ire_t *ire_origin)
689 {
690 	ASSERT(ire_origin->ire_bucket != NULL);
691 	ASSERT(ire != NULL);
692 
693 	do {
694 		ire = ire->ire_next;
695 		if (ire == NULL)
696 			ire = ire_origin->ire_bucket->irb_ire;
697 		if (ire == ire_origin)
698 			return (NULL);
699 	} while ((ire->ire_type & IRE_INTERFACE) ||
700 	    (ire->ire_marks & IRE_MARK_CONDEMNED));
701 	ASSERT(ire != NULL);
702 	return (ire);
703 }
704 
705 static ipif_t *
706 ire_forward_src_ipif(ipaddr_t dst, ire_t *sire, ire_t *ire, ill_t *dst_ill,
707     int zoneid, ushort_t *marks)
708 {
709 	ipif_t *src_ipif;
710 	ip_stack_t *ipst = dst_ill->ill_ipst;
711 
712 	/*
713 	 * Pick the best source address from dst_ill.
714 	 *
715 	 * 1) If it is part of a multipathing group, we would
716 	 *    like to spread the inbound packets across different
717 	 *    interfaces. ipif_select_source picks a random source
718 	 *    across the different ills in the group.
719 	 *
720 	 * 2) If it is not part of a multipathing group, we try
721 	 *    to pick the source address from the destination
722 	 *    route. Clustering assumes that when we have multiple
723 	 *    prefixes hosted on an interface, the prefix of the
724 	 *    source address matches the prefix of the destination
725 	 *    route. We do this only if the address is not
726 	 *    DEPRECATED.
727 	 *
728 	 * 3) If the conn is in a different zone than the ire, we
729 	 *    need to pick a source address from the right zone.
730 	 *
731 	 * NOTE : If we hit case (1) above, the prefix of the source
732 	 *	  address picked may not match the prefix of the
733 	 *	  destination routes prefix as ipif_select_source
734 	 *	  does not look at "dst" while picking a source
735 	 *	  address.
736 	 *	  If we want the same behavior as (2), we will need
737 	 *	  to change the behavior of ipif_select_source.
738 	 */
739 
740 	if ((sire != NULL) && (sire->ire_flags & RTF_SETSRC)) {
741 		/*
742 		 * The RTF_SETSRC flag is set in the parent ire (sire).
743 		 * Check that the ipif matching the requested source
744 		 * address still exists.
745 		 */
746 		src_ipif = ipif_lookup_addr(sire->ire_src_addr, NULL,
747 		    zoneid, NULL, NULL, NULL, NULL, ipst);
748 		return (src_ipif);
749 	}
750 	*marks |= IRE_MARK_USESRC_CHECK;
751 	if ((dst_ill->ill_group != NULL) ||
752 	    (ire->ire_ipif->ipif_flags & IPIF_DEPRECATED) ||
753 	    (dst_ill->ill_usesrc_ifindex != 0)) {
754 		src_ipif = ipif_select_source(dst_ill, dst, zoneid);
755 		if (src_ipif == NULL)
756 			return (NULL);
757 
758 	} else {
759 		src_ipif = ire->ire_ipif;
760 		ASSERT(src_ipif != NULL);
761 		/* hold src_ipif for uniformity */
762 		ipif_refhold(src_ipif);
763 	}
764 	return (src_ipif);
765 }
766 
767 /*
768  * This function is called by ip_rput_noire() and ip_fast_forward()
769  * to resolve the route of an incoming packet that needs to be forwarded.
770  * If the ire of the nexthop is not already in the cachetable, this
771  * routine will insert it into the table, but won't trigger ARP resolution yet.
772  * Thus, unlike ip_newroute, this function adds incomplete ires to
773  * the cachetable. ARP resolution for these ires is delayed until
774  * after all of the packet processing is completed and it is ready to
775  * be sent out on the wire. Eventually, the packet transmit routine
776  * ip_xmit_v4() attempts to send the packet to the driver. If it finds
777  * that there is no link layer information, it will do the arp
778  * resolution and queue the packet in ire->ire_nce->nce_qd_mp and
779  * then send it out once the arp resolution is over
780  * (see ip_xmit_v4()->ire_arpresolve()). This scheme is similar to
781  * the model of BSD/SunOS 4.
782  *
783  * In the future, the insertion of incomplete ires in the cachetable should
784  * be implemented in hostpath as well, as doing so will greatly reduce
785  * the existing complexity for code paths that depend on the context of
786  * the sender (such as IPsec).
787  *
788  * Thus this scheme of adding incomplete ires in cachetable in forwarding
789  * path can be used as a template for simplifying the hostpath.
790  */
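/*
 * Illustrative sketch (modeled on the ipfil_sendpkt() usage later in this
 * file; "dst", "mp" and "ipst" are assumed): resolve the nexthop for a
 * packet being forwarded, without supplying a pre-looked-up ire/sire pair.
 *
 *	boolean_t check_multirt = B_FALSE;
 *	ire_t *ire_cache;
 *
 *	ire_cache = ire_forward(dst, &check_multirt, NULL, NULL,
 *	    MBLK_GETLABEL(mp), ipst);
 *	if (ire_cache == NULL) {
 *		if (check_multirt)
 *			... multirouted dst; resolve via ip_newroute() ...
 *		else
 *			... no route; caller sends ICMP unreachable ...
 *	} else {
 *		... hand the packet to ip_xmit_v4(mp, ire_cache, ...) ...
 *		ire_refrele(ire_cache);
 *	}
 */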
791 
792 ire_t *
793 ire_forward(ipaddr_t dst, boolean_t *check_multirt, ire_t *supplied_ire,
794     ire_t *supplied_sire, const struct ts_label_s *tsl, ip_stack_t *ipst)
795 {
796 	ipaddr_t gw = 0;
797 	ire_t	*ire = NULL;
798 	ire_t   *sire = NULL, *save_ire;
799 	ill_t *dst_ill = NULL;
800 	int error;
801 	zoneid_t zoneid;
802 	ipif_t *src_ipif = NULL;
803 	mblk_t *res_mp;
804 	ushort_t ire_marks = 0;
805 	tsol_gcgrp_t *gcgrp = NULL;
806 	tsol_gcgrp_addr_t ga;
807 
808 	zoneid = GLOBAL_ZONEID;
809 
810 	if (supplied_ire != NULL) {
811 		/* We have arrived here from ipfil_sendpkt */
812 		ire = supplied_ire;
813 		sire = supplied_sire;
814 		goto create_irecache;
815 	}
816 
817 	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
818 	    tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
819 	    MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT|MATCH_IRE_SECATTR, ipst);
820 
821 	if (ire == NULL) {
822 		ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst);
823 		goto icmp_err_ret;
824 	}
825 
826 	/*
827 	 * If we encounter CGTP, we should have the caller use
828 	 * ip_newroute to resolve multirt instead of this function.
829 	 * CGTP specs explicitly state that it can't be used with routers.
830 	 * This essentially prevents insertion of incomplete RTF_MULTIRT
831 	 * ires in cachetable.
832 	 */
833 	if (ipst->ips_ip_cgtp_filter &&
834 	    ((ire->ire_flags & RTF_MULTIRT) ||
835 	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
836 		ip3dbg(("ire_forward: packet is to be multirouted- "
837 		    "handing it to ip_newroute\n"));
838 		if (sire != NULL)
839 			ire_refrele(sire);
840 		ire_refrele(ire);
841 		/*
842 		 * Inform caller about encountering of multirt so that
843 		 * ip_newroute() can be called.
844 		 */
845 		*check_multirt = B_TRUE;
846 		return (NULL);
847 	}
848 
849 	*check_multirt = B_FALSE;
850 
851 	/*
852 	 * Verify that the returned IRE does not have either
853 	 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is
854 	 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
855 	 */
856 	if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
857 	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0) {
858 		ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n",
859 		    (void *)ire));
860 		goto icmp_err_ret;
861 	}
862 
863 	/*
864 	 * If we already have a fully resolved IRE CACHE of the
865 	 * nexthop router, just hand over the cache entry
866 	 * and we are done.
867 	 */
868 
869 	if (ire->ire_type & IRE_CACHE) {
870 
871 		/*
872 		 * If we are using this ire cache entry as a
873 		 * gateway to forward packets, chances are we
874 		 * will be using it again. So turn off
875 		 * the temporary flag, thus reducing its
876 		 * chances of getting deleted frequently.
877 		 */
878 		if (ire->ire_marks & IRE_MARK_TEMPORARY) {
879 			irb_t *irb = ire->ire_bucket;
880 			rw_enter(&irb->irb_lock, RW_WRITER);
881 			/*
882 			 * We need to recheck for IRE_MARK_TEMPORARY after
883 			 * acquiring the lock in order to guarantee
884 			 * irb_tmp_ire_cnt
885 			 */
886 			if (ire->ire_marks & IRE_MARK_TEMPORARY) {
887 				ire->ire_marks &= ~IRE_MARK_TEMPORARY;
888 				irb->irb_tmp_ire_cnt--;
889 			}
890 			rw_exit(&irb->irb_lock);
891 		}
892 
893 		if (sire != NULL) {
894 			UPDATE_OB_PKT_COUNT(sire);
895 			sire->ire_last_used_time = lbolt;
896 			ire_refrele(sire);
897 		}
898 		return (ire);
899 	}
900 create_irecache:
901 	/*
902 	 * Increment the ire_ob_pkt_count field for ire if it is an
903 	 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and
904 	 * increment the same for the parent IRE, sire, if it is some
905 	 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST).
906 	 */
907 	if ((ire->ire_type & IRE_INTERFACE) != 0) {
908 		UPDATE_OB_PKT_COUNT(ire);
909 		ire->ire_last_used_time = lbolt;
910 	}
911 
912 	/*
913 	 * sire must not be an IRE_CACHETABLE or IRE_INTERFACE type
914 	 */
915 	if (sire != NULL) {
916 		gw = sire->ire_gateway_addr;
917 		ASSERT((sire->ire_type &
918 		    (IRE_CACHETABLE | IRE_INTERFACE)) == 0);
919 		UPDATE_OB_PKT_COUNT(sire);
920 		sire->ire_last_used_time = lbolt;
921 	}
922 
923 	/* Obtain dst_ill */
924 	dst_ill = ip_newroute_get_dst_ill(ire->ire_ipif->ipif_ill);
925 	if (dst_ill == NULL) {
926 		ip2dbg(("ire_forward no dst ill; ire 0x%p\n",
927 		    (void *)ire));
928 		goto icmp_err_ret;
929 	}
930 
931 	ASSERT(src_ipif == NULL);
932 	/* Now obtain the src_ipif */
933 	src_ipif = ire_forward_src_ipif(dst, sire, ire, dst_ill,
934 	    zoneid, &ire_marks);
935 	if (src_ipif == NULL)
936 		goto icmp_err_ret;
937 
938 	switch (ire->ire_type) {
939 	case IRE_IF_NORESOLVER:
940 		/* create ire_cache for ire_addr endpoint */
941 	case IRE_IF_RESOLVER:
942 		/*
943 		 * We have the IRE_IF_RESOLVER of the nexthop gateway
944 		 * and now need to build an IRE_CACHE for it.
945 		 * In this case, we have the following :
946 		 *
947 		 * 1) src_ipif - used for getting a source address.
948 		 *
949 		 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This
950 		 *    means packets using the IRE_CACHE that we will build
951 		 *    here will go out on dst_ill.
952 		 *
953 		 * 3) sire may or may not be NULL. But, the IRE_CACHE that is
954 		 *    to be created will only be tied to the IRE_INTERFACE
955 		 *    that was derived from the ire_ihandle field.
956 		 *
957 		 *    If sire is non-NULL, it means the destination is
958 		 *    off-link and we will first create the IRE_CACHE for the
959 		 *    gateway.
960 		 */
961 		res_mp = dst_ill->ill_resolver_mp;
962 		if (ire->ire_type == IRE_IF_RESOLVER &&
963 		    (!OK_RESOLVER_MP(res_mp))) {
964 			ire_refrele(ire);
965 			ire = NULL;
966 			goto out;
967 		}
968 		/*
969 		 * To be at this point in the code with a non-zero gw
970 		 * means that dst is reachable through a gateway that
971 		 * we have never resolved.  By changing dst to the gw
972 		 * addr we resolve the gateway first.
973 		 */
974 		if (gw != INADDR_ANY) {
975 			/*
976 			 * The source ipif that was determined above was
977 			 * relative to the destination address, not the
978 			 * gateway's. If src_ipif was not taken out of
979 			 * the IRE_IF_RESOLVER entry, we'll need to call
980 			 * ipif_select_source() again.
981 			 */
982 			if (src_ipif != ire->ire_ipif) {
983 				ipif_refrele(src_ipif);
984 				src_ipif = ipif_select_source(dst_ill,
985 				    gw, zoneid);
986 				if (src_ipif == NULL)
987 					goto icmp_err_ret;
988 			}
989 			dst = gw;
990 			gw = INADDR_ANY;
991 		}
992 		/*
993 		 * dst has been set to the address of the nexthop.
994 		 *
995 		 * TSol note: get security attributes of the nexthop;
996 		 * Note that the nexthop may either be a gateway, or the
997 		 * packet destination itself; Detailed explanation of
998 		 * issues involved is provided in the IRE_IF_NORESOLVER
999 		 * logic in ip_newroute().
1000 		 */
1001 		ga.ga_af = AF_INET;
1002 		IN6_IPADDR_TO_V4MAPPED(dst, &ga.ga_addr);
1003 		gcgrp = gcgrp_lookup(&ga, B_FALSE);
1004 
1005 		if (ire->ire_type == IRE_IF_NORESOLVER)
1006 			dst = ire->ire_addr; /* ire_cache for tunnel endpoint */
1007 
1008 		save_ire = ire;
1009 		/*
1010 		 * create an incomplete IRE_CACHE.
1011 		 * An areq_mp will be generated in ire_arpresolve() for
1012 		 * RESOLVER interfaces.
1013 		 */
1014 		ire = ire_create(
1015 		    (uchar_t *)&dst,		/* dest address */
1016 		    (uchar_t *)&ip_g_all_ones,	/* mask */
1017 		    (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */
1018 		    (uchar_t *)&gw,		/* gateway address */
1019 		    (save_ire->ire_type == IRE_IF_RESOLVER ?  NULL:
1020 		    &save_ire->ire_max_frag),
1021 		    NULL,
1022 		    dst_ill->ill_rq,		/* recv-from queue */
1023 		    dst_ill->ill_wq,		/* send-to queue */
1024 		    IRE_CACHE,			/* IRE type */
1025 		    src_ipif,
1026 		    ire->ire_mask,		/* Parent mask */
1027 		    0,
1028 		    ire->ire_ihandle,	/* Interface handle */
1029 		    0,
1030 		    &(ire->ire_uinfo),
1031 		    NULL,
1032 		    gcgrp,
1033 		    ipst);
1034 		ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire));
1035 		if (ire != NULL) {
1036 			gcgrp = NULL; /* reference now held by IRE */
1037 			ire->ire_marks |= ire_marks;
1038 			/* add the incomplete ire: */
1039 			error = ire_add(&ire, NULL, NULL, NULL, B_TRUE);
1040 			if (error == 0 && ire != NULL) {
1041 				ire->ire_max_frag = save_ire->ire_max_frag;
1042 				ip1dbg(("setting max_frag to %d in ire 0x%p\n",
1043 				    ire->ire_max_frag, (void *)ire));
1044 			} else {
1045 				ire_refrele(save_ire);
1046 				goto icmp_err_ret;
1047 			}
1048 		} else {
1049 			if (gcgrp != NULL) {
1050 				GCGRP_REFRELE(gcgrp);
1051 				gcgrp = NULL;
1052 			}
1053 		}
1054 
1055 		ire_refrele(save_ire);
1056 		break;
1057 	default:
1058 		break;
1059 	}
1060 
1061 out:
1062 	if (sire != NULL)
1063 		ire_refrele(sire);
1064 	if (dst_ill != NULL)
1065 		ill_refrele(dst_ill);
1066 	if (src_ipif != NULL)
1067 		ipif_refrele(src_ipif);
1068 	return (ire);
1069 icmp_err_ret:
1070 	if (src_ipif != NULL)
1071 		ipif_refrele(src_ipif);
1072 	if (dst_ill != NULL)
1073 		ill_refrele(dst_ill);
1074 	if (sire != NULL)
1075 		ire_refrele(sire);
1076 	if (ire != NULL) {
1077 		ire_refrele(ire);
1078 	}
1079 	/* caller needs to send icmp error message */
1080 	return (NULL);
1081 
1082 }
1083 
1084 /*
1085  * Obtain the rt_entry and rt_irb for the route to be added to
1086  * the ips_ip_ftable.
1087  * First attempt to add a node to the radix tree via rn_addroute. If the
1088  * route already exists, return the bucket for the existing route.
1089  *
1090  * Locking notes: Need to hold the global radix tree lock in write mode to
1091  * add a radix node. To prevent the node from being deleted, ire_get_bucket()
1092  * returns with a ref'ed irb_t. The ire itself is added in ire_add_v4()
1093  * while holding the irb_lock, but not the radix tree lock.
1094  */
1095 irb_t *
1096 ire_get_bucket(ire_t *ire)
1097 {
1098 	struct radix_node *rn;
1099 	struct rt_entry *rt;
1100 	struct rt_sockaddr rmask, rdst;
1101 	irb_t *irb = NULL;
1102 	ip_stack_t *ipst = ire->ire_ipst;
1103 
1104 	ASSERT(ipst->ips_ip_ftable != NULL);
1105 
1106 	/* first try to see if route exists (based on rtalloc1) */
1107 	(void) memset(&rdst, 0, sizeof (rdst));
1108 	rdst.rt_sin_len = sizeof (rdst);
1109 	rdst.rt_sin_family = AF_INET;
1110 	rdst.rt_sin_addr.s_addr = ire->ire_addr;
1111 
1112 	(void) memset(&rmask, 0, sizeof (rmask));
1113 	rmask.rt_sin_len = sizeof (rmask);
1114 	rmask.rt_sin_family = AF_INET;
1115 	rmask.rt_sin_addr.s_addr = ire->ire_mask;
1116 
1117 	/*
1118 	 * add the route, based on BSD's rtrequest1(RTM_ADD)
1119 	 */
1120 	R_Malloc(rt, rt_entry_cache,  sizeof (*rt));
1121 	/* kmem_alloc failed */
1122 	if (rt == NULL)
1123 		return (NULL);
1124 
1125 	(void) memset(rt, 0, sizeof (*rt));
1126 	rt->rt_nodes->rn_key = (char *)&rt->rt_dst;
1127 	rt->rt_dst = rdst;
1128 	irb = &rt->rt_irb;
1129 	irb->irb_marks |= IRB_MARK_FTABLE; /* dynamically allocated/freed */
1130 	irb->irb_ipst = ipst;
1131 	rw_init(&irb->irb_lock, NULL, RW_DEFAULT, NULL);
1132 	RADIX_NODE_HEAD_WLOCK(ipst->ips_ip_ftable);
1133 	rn = ipst->ips_ip_ftable->rnh_addaddr(&rt->rt_dst, &rmask,
1134 	    ipst->ips_ip_ftable, (struct radix_node *)rt);
1135 	if (rn == NULL) {
1136 		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
1137 		Free(rt, rt_entry_cache);
1138 		rt = NULL;
1139 		irb = NULL;
1140 		RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
1141 		rn = ipst->ips_ip_ftable->rnh_lookup(&rdst, &rmask,
1142 		    ipst->ips_ip_ftable);
1143 		if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0)) {
1144 			/* found a non-root match */
1145 			rt = (struct rt_entry *)rn;
1146 		}
1147 	}
1148 	if (rt != NULL) {
1149 		irb = &rt->rt_irb;
1150 		IRB_REFHOLD(irb);
1151 	}
1152 	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
1153 	return (irb);
1154 }
1155 
1156 /*
1157  * This function is used when the caller wants to know the outbound
1158  * interface for a packet given only the address.
1159  * If this is an offlink IP address and there are multiple
1160  * routes to this destination, this routine will use the
1161  * first route it finds to the IP address.
1162  * Return values:
1163  * 	0	- FAILURE
1164  *	nonzero	- ifindex
1165  */
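/*
 * Illustrative sketch (hypothetical caller; the address is an example
 * only): find the interface index that would be used to reach an IPv4
 * destination.
 *
 *	struct sockaddr_in sin;
 *	uint_t ifindex;
 *
 *	(void) memset(&sin, 0, sizeof (sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	ifindex = ifindex_lookup((struct sockaddr *)&sin, GLOBAL_ZONEID);
 *	if (ifindex == 0)
 *		... no route to the destination ...
 */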
1166 uint_t
1167 ifindex_lookup(const struct sockaddr *ipaddr, zoneid_t zoneid)
1168 {
1169 	uint_t ifindex = 0;
1170 	ire_t *ire;
1171 	ill_t *ill;
1172 	netstack_t *ns;
1173 	ip_stack_t *ipst;
1174 
1175 	if (zoneid == ALL_ZONES)
1176 		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
1177 	else
1178 		ns = netstack_find_by_zoneid(zoneid);
1179 	ASSERT(ns != NULL);
1180 
1181 	/*
1182 	 * For exclusive stacks we set the zoneid to zero
1183 	 * since IP uses the global zoneid in the exclusive stacks.
1184 	 */
1185 	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
1186 		zoneid = GLOBAL_ZONEID;
1187 	ipst = ns->netstack_ip;
1188 
1189 	ASSERT(ipaddr->sa_family == AF_INET || ipaddr->sa_family == AF_INET6);
1190 
1191 	if ((ire =  route_to_dst(ipaddr, zoneid, ipst)) != NULL) {
1192 		ill = ire_to_ill(ire);
1193 		if (ill != NULL)
1194 			ifindex = ill->ill_phyint->phyint_ifindex;
1195 		ire_refrele(ire);
1196 	}
1197 	netstack_rele(ns);
1198 	return (ifindex);
1199 }
1200 
1201 /*
1202  * Routine to find the route to a destination. If an ifindex is supplied,
1203  * it tries to match the route to the corresponding ipif for that ifindex.
1204  */
1205 static	ire_t *
1206 route_to_dst(const struct sockaddr *dst_addr, zoneid_t zoneid, ip_stack_t *ipst)
1207 {
1208 	ire_t *ire = NULL;
1209 	int match_flags;
1210 
1211 	match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1212 	    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
1213 
1214 	/* XXX pass NULL tsl for now */
1215 
1216 	if (dst_addr->sa_family == AF_INET) {
1217 		ire = ire_route_lookup(
1218 		    ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr,
1219 		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
1220 	} else {
1221 		ire = ire_route_lookup_v6(
1222 		    &((struct sockaddr_in6 *)dst_addr)->sin6_addr,
1223 		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
1224 	}
1225 	return (ire);
1226 }
1227 
1228 /*
1229  * This routine is called by IP Filter to send a packet out on the wire
1230  * to a specified V4 dst (which may be onlink or offlink). The ifindex may or
1231  * may not be 0. A non-null ifindex indicates IP Filter has stipulated
1232  * an outgoing interface and requires the nexthop to be on that interface.
1233  * IP WILL NOT DO the following to the data packet before sending it out:
1234  *	a. manipulate ttl
1235  *	b. ipsec work
1236  *	c. fragmentation
1237  *
1238  * If the packet has been prepared for hardware checksum then it will be
1239  * passed off to ip_send_align_hcksum_flags() to check that the flags set on the
1240  * packet are in alignment with the capabilities of the new outgoing NIC.
1241  *
1242  * Return values:
1243  *	0:		IP was able to send off the data pkt
1244  *	ECOMM:		Could not send packet
1245  *	ENONET		No route to dst. It is up to the caller
1246  *			to send an icmp unreachable error message.
1247  *	EINPROGRESS	The macaddr of the onlink dst or that
1248  *			of the offlink dst's nexthop needs to get
1249  *			resolved before packet can be sent to dst.
1250  *			Thus transmission is not guaranteed.
1251  *
1252  */
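/*
 * Illustrative sketch (hypothetical IP Filter-style caller; "mp" holds a
 * complete IPv4 datagram, "dst" and "zoneid" are assumed): send a packet
 * to a V4 destination without stipulating the outgoing interface.
 *
 *	struct sockaddr_in sin;
 *	int err;
 *
 *	(void) memset(&sin, 0, sizeof (sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = dst;
 *	err = ipfil_sendpkt((struct sockaddr *)&sin, mp, 0, zoneid);
 *	if (err == EINPROGRESS)
 *		... nexthop still resolving; delivery not guaranteed ...
 *	else if (err != 0)
 *		... ENONET or ECOMM; mp has already been freed ...
 */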
1253 
1254 int
1255 ipfil_sendpkt(const struct sockaddr *dst_addr, mblk_t *mp, uint_t ifindex,
1256     zoneid_t zoneid)
1257 {
1258 	ire_t *ire = NULL, *sire = NULL;
1259 	ire_t *ire_cache = NULL;
1260 	boolean_t   check_multirt = B_FALSE;
1261 	int value;
1262 	int match_flags;
1263 	ipaddr_t dst;
1264 	netstack_t *ns;
1265 	ip_stack_t *ipst;
1266 
1267 	ASSERT(mp != NULL);
1268 
1269 	if (zoneid == ALL_ZONES)
1270 		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
1271 	else
1272 		ns = netstack_find_by_zoneid(zoneid);
1273 	ASSERT(ns != NULL);
1274 
1275 	/*
1276 	 * For exclusive stacks we set the zoneid to zero
1277 	 * since IP uses the global zoneid in the exclusive stacks.
1278 	 */
1279 	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
1280 		zoneid = GLOBAL_ZONEID;
1281 	ipst = ns->netstack_ip;
1282 
1283 	ASSERT(dst_addr->sa_family == AF_INET ||
1284 	    dst_addr->sa_family == AF_INET6);
1285 
1286 	if (dst_addr->sa_family == AF_INET) {
1287 		dst = ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr;
1288 	} else {
1289 		/*
1290 		 * We don't have support for V6 yet. It will be provided
1291 		 * once RFE 6399103 has been delivered.
1292 		 * Until then, for V6 dsts, IP Filter will not call
1293 		 * this function. Instead the netinfo framework provides
1294 		 * its own code path, in ip_inject_impl(), to achieve
1295 		 * what it needs to do, for the time being.
1296 		 */
1297 		ip1dbg(("ipfil_sendpkt: no V6 support \n"));
1298 		value = ECOMM;
1299 		freemsg(mp);
1300 		goto discard;
1301 	}
1302 
1303 	/*
1304 	 * Let's get the ire. We might get the ire cache entry,
1305 	 * or the ire,sire pair needed to create the cache entry.
1306 	 * XXX pass NULL tsl for now.
1307 	 */
1308 
1309 	if (ifindex == 0) {
1310 		/* There is no supplied index. So use the FIB info */
1311 
1312 		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1313 		    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
1314 		ire = ire_route_lookup(dst,
1315 		    0, 0, 0, NULL, &sire, zoneid, MBLK_GETLABEL(mp),
1316 		    match_flags, ipst);
1317 	} else {
1318 		ipif_t *supplied_ipif;
1319 		ill_t *ill;
1320 
1321 		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1322 		    MATCH_IRE_RECURSIVE| MATCH_IRE_RJ_BHOLE|
1323 		    MATCH_IRE_SECATTR);
1324 
1325 		/*
1326 		 * If the supplied ifindex is non-zero, the only valid
1327 		 * nexthop is one off of the interface or group corresponding
1328 		 * to the specified ifindex.
1329 		 */
1330 		ill = ill_lookup_on_ifindex(ifindex, B_FALSE,
1331 		    NULL, NULL, NULL, NULL, ipst);
1332 		if (ill != NULL) {
1333 			match_flags |= MATCH_IRE_ILL;
1334 		} else {
1335 			/* Fallback to group names if hook_emulation set */
1336 			if (ipst->ips_ipmp_hook_emulation) {
1337 				ill = ill_group_lookup_on_ifindex(ifindex,
1338 				    B_FALSE, ipst);
1339 			}
1340 			if (ill == NULL) {
1341 				ip1dbg(("ipfil_sendpkt: Could not find"
1342 				    " route to dst\n"));
1343 				value = ECOMM;
1344 				freemsg(mp);
1345 				goto discard;
1346 			}
1347 			match_flags |= MATCH_IRE_ILL_GROUP;
1348 		}
1349 		supplied_ipif = ipif_get_next_ipif(NULL, ill);
1350 
1351 		ire = ire_route_lookup(dst, 0, 0, 0, supplied_ipif,
1352 		    &sire, zoneid, MBLK_GETLABEL(mp), match_flags, ipst);
1353 		ipif_refrele(supplied_ipif);
1354 		ill_refrele(ill);
1355 	}
1356 
1357 	/*
1358 	 * Verify that the returned IRE is non-null and does
1359 	 * not have either the RTF_REJECT or RTF_BLACKHOLE
1360 	 * flags set and that the IRE is either an IRE_CACHE,
1361 	 * IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
1362 	 */
1363 	if (ire == NULL ||
1364 	    ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
1365 	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) {
1366 		/*
1367 		 * Either ire could not be found or we got
1368 		 * an invalid one
1369 		 */
1370 		ip1dbg(("ipfil_sendpkt: Could not find route to dst\n"));
1371 		value = ENONET;
1372 		freemsg(mp);
1373 		goto discard;
1374 	}
1375 
1376 	/* IP Filter and CGTP don't mix. So bail out if CGTP is on */
1377 	if (ipst->ips_ip_cgtp_filter &&
1378 	    ((ire->ire_flags & RTF_MULTIRT) ||
1379 	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
1380 		ip1dbg(("ipfil_sendpkt: IPFilter does not work with CGTP\n"));
1381 		value = ECOMM;
1382 		freemsg(mp);
1383 		goto discard;
1384 	}
1385 
1386 	ASSERT(ire->ire_type != IRE_CACHE || ire->ire_nce != NULL);
1387 
1388 	/*
1389 	 * If needed, we will create the ire cache entry for the
1390 	 * nexthop, resolve its link-layer address and then send
1391 	 * the packet out without ttl or IPSec processing.
1392 	 */
1393 	switch (ire->ire_type) {
1394 	case IRE_CACHE:
1395 		if (sire != NULL) {
1396 			UPDATE_OB_PKT_COUNT(sire);
1397 			sire->ire_last_used_time = lbolt;
1398 			ire_refrele(sire);
1399 		}
1400 		ire_cache = ire;
1401 		break;
1402 	case IRE_IF_NORESOLVER:
1403 	case IRE_IF_RESOLVER:
1404 		/*
1405 		 * Call ire_forward(). This function
1406 		 * will create the ire cache entry for
1407 		 * the nexthop and add this incomplete ire
1408 		 * to the ire cache table.
1409 		 */
1410 		ire_cache = ire_forward(dst, &check_multirt, ire, sire,
1411 		    MBLK_GETLABEL(mp), ipst);
1412 		if (ire_cache == NULL) {
1413 			ip1dbg(("ipfil_sendpkt: failed to create the"
1414 			    " ire cache entry \n"));
1415 			value = ENONET;
1416 			freemsg(mp);
1417 			sire = NULL;
1418 			ire = NULL;
1419 			goto discard;
1420 		}
1421 		break;
1422 	}
1423 
1424 	if (DB_CKSUMFLAGS(mp)) {
1425 		if (ip_send_align_hcksum_flags(mp, ire_to_ill(ire_cache)))
1426 			goto cleanup;
1427 	}
1428 
1429 	/*
1430 	 * Now that we have the ire cache entry of the nexthop, call
1431 	 * ip_xmit_v4() to trigger mac addr resolution
1432 	 * if necessary and send it once ready.
1433 	 */
1434 
1435 	value = ip_xmit_v4(mp, ire_cache, NULL, B_FALSE);
1436 cleanup:
1437 	ire_refrele(ire_cache);
1438 	/*
1439 	 * At this point, the references for these have already been
1440 	 * released within ire_forward() and/or ip_xmit_v4(). So we set
1441 	 * them to NULL to make sure we don't drop the references
1442 	 * again in case ip_xmit_v4() returns with either SEND_FAILED
1443 	 * or LLHDR_RESLV_FAILED.
1444 	 */
1445 	sire = NULL;
1446 	ire = NULL;
1447 
1448 	switch (value) {
1449 	case SEND_FAILED:
1450 		ip1dbg(("ipfil_sendpkt: Send failed\n"));
1451 		value = ECOMM;
1452 		break;
1453 	case LLHDR_RESLV_FAILED:
1454 		ip1dbg(("ipfil_sendpkt: Link-layer resolution"
1455 		    "  failed\n"));
1456 		value = ECOMM;
1457 		break;
1458 	case LOOKUP_IN_PROGRESS:
1459 		netstack_rele(ns);
1460 		return (EINPROGRESS);
1461 	case SEND_PASSED:
1462 		netstack_rele(ns);
1463 		return (0);
1464 	}
1465 discard:
1466 	if (dst_addr->sa_family == AF_INET) {
1467 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
1468 	} else {
1469 		BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
1470 	}
1471 	if (ire != NULL)
1472 		ire_refrele(ire);
1473 	if (sire != NULL)
1474 		ire_refrele(sire);
1475 	netstack_rele(ns);
1476 	return (value);
1477 }
1478 
1479 
1480 /*
1481  * We don't check for dohwcksum in here because it should already have been
1482  * used elsewhere to control what flags are set on the mblk.  That is,
1483  * if DB_CKSUMFLAGS() is non-zero then we assume dohwcksum to be true
1484  * for this packet.
1485  *
1486  * This function assumes that it is *only* being called for TCP or UDP
1487  * packets and nothing else.
1488  */
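/*
 * Illustrative note (added for clarity, not part of the original source):
 * the arithmetic below relies on standard 16-bit one's complement folding.
 * For example, if the 32-bit running sum is 0x1b3e6, then
 *
 *	cksum = (0x1b3e6 & 0xffff) + (0x1b3e6 >> 16) = 0xb3e6 + 0x1 = 0xb3e7
 *
 * folds the carry back into the low 16 bits.  Repeating the fold as needed
 * (IP_CSUM() does the final fold and complement for the software case)
 * yields the value stored in the TCP/UDP checksum field.
 */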
1489 static int
1490 ip_send_align_hcksum_flags(mblk_t *mp, ill_t *ill)
1491 {
1492 	int illhckflags;
1493 	int mbhckflags;
1494 	uint16_t *up;
1495 	uint32_t cksum;
1496 	ipha_t *ipha;
1497 	ip6_t *ip6;
1498 	int proto;
1499 	int ipversion;
1500 	int length;
1501 	int start;
1502 	ip6_pkt_t ipp;
1503 
1504 	mbhckflags = DB_CKSUMFLAGS(mp);
1505 	ASSERT(mbhckflags != 0);
1506 	ASSERT(mp->b_datap->db_type == M_DATA);
1507 	/*
1508 	 * Since this function only knows how to manage the hardware checksum
1509 	 * issue, reject any packets that have flags set aside from
1510 	 * checksum-related attributes, as we cannot necessarily safely map
1511 	 * that packet onto the new NIC.  Packets that can be potentially
1512 	 * dropped here include those marked for LSO.
1513 	 */
1514 	if ((mbhckflags &
1515 	    ~(HCK_FULLCKSUM|HCK_PARTIALCKSUM|HCK_IPV4_HDRCKSUM)) != 0) {
1516 		DTRACE_PROBE2(pbr__incapable, (mblk_t *), mp, (ill_t *), ill);
1517 		freemsg(mp);
1518 		return (-1);
1519 	}
1520 
1521 	ipha = (ipha_t *)mp->b_rptr;
1522 
1523 	/*
1524 	 * Find out what the new NIC is capable of, if anything, and
1525 	 * only allow it to be used with M_DATA mblks being sent out.
1526 	 */
1527 	if (ILL_HCKSUM_CAPABLE(ill)) {
1528 		illhckflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
1529 	} else {
1530 		/*
1531 		 * No capabilities, so turn off everything.
1532 		 */
1533 		illhckflags = 0;
1534 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 0, 0);
1535 		mp->b_datap->db_struioflag &= ~STRUIO_IP;
1536 	}
1537 
1538 	DTRACE_PROBE4(pbr__info__a, (mblk_t *), mp, (ill_t *), ill,
1539 	    uint32_t, illhckflags, uint32_t, mbhckflags);
1540 	/*
1541 	 * This block of code that looks for the position of the TCP/UDP
1542 	 * checksum is early in this function because we need to know
1543 	 * what needs to be blanked out for the hardware checksum case.
1544 	 *
1545 	 * That we're in this function implies that the packet is either
1546 	 * TCP or UDP on Solaris, so checks are made for one protocol and
1547 	 * if that fails, the other is therefore implied.
1548 	 */
1549 	ipversion = IPH_HDR_VERSION(ipha);
1550 
1551 	if (ipversion == IPV4_VERSION) {
1552 		proto = ipha->ipha_protocol;
1553 		if (proto == IPPROTO_TCP) {
1554 			up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
1555 		} else {
1556 			up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
1557 		}
1558 	} else {
1559 		uint8_t lasthdr;
1560 
1561 		/*
1562 		 * Nothing I've seen indicates that IPv6 checksum'ing
1563 		 * precludes the presence of extension headers, so we
1564 		 * can't just look at the next header value in the IPv6
1565 		 * packet header to see if it is TCP/UDP.
1566 		 */
1567 		ip6 = (ip6_t *)ipha;
1568 		(void) memset(&ipp, 0, sizeof (ipp));
1569 		start = ip_find_hdr_v6(mp, ip6, &ipp, &lasthdr);
1570 		proto = lasthdr;
1571 
1572 		if (proto == IPPROTO_TCP) {
1573 			up = IPH_TCPH_CHECKSUMP(ipha, start);
1574 		} else {
1575 			up = IPH_UDPH_CHECKSUMP(ipha, start);
1576 		}
1577 	}
1578 
1579 	/*
1580 	 * The first case here is easiest:
1581 	 * mblk hasn't asked for full checksum, but the card supports it.
1582 	 *
1583 	 * In addition, check for IPv4 header capability.  Note that only
1584 	 * the mblk flag is checked and not ipversion.
1585 	 */
1586 	if ((((illhckflags & HCKSUM_INET_FULL_V4) && (ipversion == 4)) ||
1587 	    (((illhckflags & HCKSUM_INET_FULL_V6) && (ipversion == 6)))) &&
1588 	    ((mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) != 0)) {
1589 		int newflags = HCK_FULLCKSUM;
1590 
1591 		if ((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) {
1592 			if ((illhckflags & HCKSUM_IPHDRCKSUM) != 0) {
1593 				newflags |= HCK_IPV4_HDRCKSUM;
1594 			} else {
1595 				/*
1596 				 * Rather than call a function, just inline
1597 				 * the computation of the basic IPv4 header checksum.
1598 				 */
1599 				cksum = (ipha->ipha_dst >> 16) +
1600 				    (ipha->ipha_dst & 0xFFFF) +
1601 				    (ipha->ipha_src >> 16) +
1602 				    (ipha->ipha_src & 0xFFFF);
1603 				IP_HDR_CKSUM(ipha, cksum,
1604 				    ((uint32_t *)ipha)[0],
1605 				    ((uint16_t *)ipha)[4]);
1606 			}
1607 		}
1608 
1609 		*up = 0;
1610 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
1611 		    newflags, 0);
1612 		return (0);
1613 	}
1614 
1615 	DTRACE_PROBE2(pbr__info__b, int, ipversion, int, proto);
1616 
1617 	/*
1618 	 * Start calculating the pseudo checksum over the IP packet header.
1619 	 * Although the final pseudo checksum used by TCP/UDP consists of
1620 	 * more than just the address fields, we can use the result of
1621 	 * adding those together a little bit further down for IPv4.
1622 	 */
1623 	if (ipversion == IPV4_VERSION) {
1624 		cksum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) +
1625 		    (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF);
1626 		start = IP_SIMPLE_HDR_LENGTH;
1627 		length = ntohs(ipha->ipha_length);
1628 		DTRACE_PROBE3(pbr__info__e, uint32_t, ipha->ipha_src,
1629 		    uint32_t, ipha->ipha_dst, int, cksum);
1630 	} else {
1631 		uint16_t *pseudo;
1632 
1633 		pseudo = (uint16_t *)&ip6->ip6_src;
1634 
1635 		/* calculate pseudo-header checksum */
1636 		cksum = pseudo[0] + pseudo[1] + pseudo[2] + pseudo[3] +
1637 		    pseudo[4] + pseudo[5] + pseudo[6] + pseudo[7] +
1638 		    pseudo[8] + pseudo[9] + pseudo[10] + pseudo[11] +
1639 		    pseudo[12] + pseudo[13] + pseudo[14] + pseudo[15];
1640 
1641 		length = ntohs(ip6->ip6_plen) + sizeof (ip6_t);
1642 	}
1643 
1644 	/* Fold the initial sum */
1645 	cksum = (cksum & 0xffff) + (cksum >> 16);
1646 
1647 	/*
1648 	 * If the packet was asking for an IPv4 header checksum to be
1649 	 * calculated but the interface doesn't support that, fill it in
1650 	 * using our pseudo checksum as a starting point.
1651 	 */
1652 	if (((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) &&
1653 	    ((illhckflags & HCKSUM_IPHDRCKSUM) == 0)) {
1654 		/*
1655 		 * IP_HDR_CKSUM uses the 2nd arg to the macro in a destructive
1656 		 * way so pass in a copy of the checksum calculated thus far.
1657 		 */
1658 		uint32_t ipsum = cksum;
1659 
1660 		DB_CKSUMFLAGS(mp) &= ~HCK_IPV4_HDRCKSUM;
1661 
1662 		IP_HDR_CKSUM(ipha, ipsum, ((uint32_t *)ipha)[0],
1663 		    ((uint16_t *)ipha)[4]);
1664 	}
1665 
1666 	DTRACE_PROBE3(pbr__info__c, int, start, int, length, int, cksum);
1667 
1668 	if (proto == IPPROTO_TCP) {
1669 		cksum += IP_TCP_CSUM_COMP;
1670 	} else {
1671 		cksum += IP_UDP_CSUM_COMP;
1672 	}
1673 	cksum += htons(length - start);
1674 	cksum = (cksum & 0xffff) + (cksum >> 16);
1675 
1676 	/*
1677 	 * For TCP/UDP, we either want to setup the packet for partial
1678 	 * checksum or we want to do it all ourselves because the NIC
1679 	 * offers no support for either partial or full checksum.
1680 	 */
1681 	if ((illhckflags & HCKSUM_INET_PARTIAL) != 0) {
1682 		/*
1683 		 * The only case we care about here is if the mblk was
1684 		 * previously set for full checksum offload.  If it was
1685 		 * marked for partial (and the NIC does partial), then
1686 		 * we have nothing to do.  Similarly if the packet was
1687 		 * not set for partial or full, we do nothing as this
1688 		 * is cheaper than more work to set something up.
1689 		 */
1690 		if ((mbhckflags & HCK_FULLCKSUM) != 0) {
1691 			uint32_t offset;
1692 
1693 			if (proto == IPPROTO_TCP) {
1694 				offset = TCP_CHECKSUM_OFFSET;
1695 			} else {
1696 				offset = UDP_CHECKSUM_OFFSET;
1697 			}
1698 			*up = cksum;
1699 
1700 			DTRACE_PROBE3(pbr__info__f, int, length - start, int,
1701 			    cksum, int, offset);
1702 
1703 			(void) hcksum_assoc(mp, NULL, NULL, start,
1704 			    start + offset, length, 0,
1705 			    DB_CKSUMFLAGS(mp) | HCK_PARTIALCKSUM, 0);
1706 		}
1707 
1708 	} else if (mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) {
1709 		DB_CKSUMFLAGS(mp) &= ~(HCK_PARTIALCKSUM|HCK_FULLCKSUM);
1710 
1711 		*up = 0;
1712 		*up = IP_CSUM(mp, start, cksum);
1713 	}
1714 
1715 	DTRACE_PROBE4(pbr__info__d, (mblk_t *), mp, (ipha_t *), ipha,
1716 	    (uint16_t *), up, int, cksum);
1717 	return (0);
1718 }
1719 
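/*
 * The following is a minimal userland sketch of the IPv4 pseudo-header
 * arithmetic performed above: sum the 16-bit halves of the source and
 * destination addresses, add the protocol and transport length, and fold
 * the carries back into 16 bits.  The function name, sample addresses and
 * main() are illustrative only and are not part of this file.
 */
#include <stdio.h>
#include <stdint.h>

/* Partial (not yet complemented) pseudo-header sum, host byte order. */
static uint32_t
pseudo_hdr_sum(uint32_t src, uint32_t dst, uint8_t proto, uint16_t tlen)
{
	uint32_t sum;

	/* 16-bit halves of each address, plus protocol and transport length */
	sum = (src >> 16) + (src & 0xffff) +
	    (dst >> 16) + (dst & 0xffff) +
	    proto + tlen;

	/* fold carries back into 16 bits (ones' complement addition) */
	while (sum > 0xffff)
		sum = (sum & 0xffff) + (sum >> 16);

	return (sum);
}

int
main(void)
{
	/* 192.0.2.1 -> 198.51.100.7, TCP, 40 bytes of TCP header + payload */
	uint32_t sum = pseudo_hdr_sum(0xc0000201, 0xc6336407, 6, 40);

	(void) printf("pseudo-header sum: 0x%04x\n", sum);
	return (0);
}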
1720 
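/*
 * When the NIC offers neither partial nor full checksum offload, the code
 * above falls back to computing the transport checksum in software via
 * IP_CSUM(), seeded with the pseudo-header sum.  The sketch below shows the
 * same computation over a flat buffer in userland; inet_cksum() and its
 * arguments are illustrative names, not part of this file, and the real
 * macro walks an mblk chain rather than a contiguous buffer.
 */
#include <stddef.h>
#include <stdint.h>

/*
 * Ones' complement checksum of 'len' bytes at 'buf', seeded with 'partial'
 * (e.g. a pseudo-header sum).  The bytes are treated as big-endian 16-bit
 * words, so the result is the value that belongs in the TCP/UDP checksum
 * field (convert to network byte order before storing it in the packet).
 */
uint16_t
inet_cksum(const uint8_t *buf, size_t len, uint32_t partial)
{
	uint32_t sum = partial;

	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len == 1)				/* pad a trailing odd byte */
		sum += (uint32_t)buf[0] << 8;

	while (sum > 0xffff)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);

	return ((uint16_t)~sum);
}
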
1721 /* ire_walk routine invoked for ip_ire_report for each IRE. */
1722 void
1723 ire_report_ftable(ire_t *ire, char *m)
1724 {
1725 	char	buf1[16];
1726 	char	buf2[16];
1727 	char	buf3[16];
1728 	char	buf4[16];
1729 	uint_t	fo_pkt_count;
1730 	uint_t	ib_pkt_count;
1731 	int	ref;
1732 	uint_t	print_len, buf_len;
1733 	mblk_t 	*mp = (mblk_t *)m;
1734 
1735 	if (ire->ire_type & IRE_CACHETABLE)
1736 		return;
1737 	buf_len = mp->b_datap->db_lim - mp->b_wptr;
1738 	if (buf_len <= 0)
1739 		return;
1740 
1741 	/* Number of active references of this ire */
1742 	ref = ire->ire_refcnt;
1743 	/* "inbound" to a non-local address is a forward */
1744 	ib_pkt_count = ire->ire_ib_pkt_count;
1745 	fo_pkt_count = 0;
1746 	if (!(ire->ire_type & (IRE_LOCAL|IRE_BROADCAST))) {
1747 		fo_pkt_count = ib_pkt_count;
1748 		ib_pkt_count = 0;
1749 	}
1750 	print_len = snprintf((char *)mp->b_wptr, buf_len,
1751 	    MI_COL_PTRFMT_STR MI_COL_PTRFMT_STR MI_COL_PTRFMT_STR "%5d "
1752 	    "%s %s %s %s %05d %05ld %06ld %08d %03d %06d %09d %09d %06d %08d "
1753 	    "%04d %08d %08d %d/%d/%d %s\n",
1754 	    (void *)ire, (void *)ire->ire_rfq, (void *)ire->ire_stq,
1755 	    (int)ire->ire_zoneid,
1756 	    ip_dot_addr(ire->ire_addr, buf1), ip_dot_addr(ire->ire_mask, buf2),
1757 	    ip_dot_addr(ire->ire_src_addr, buf3),
1758 	    ip_dot_addr(ire->ire_gateway_addr, buf4),
1759 	    ire->ire_max_frag, ire->ire_uinfo.iulp_rtt,
1760 	    ire->ire_uinfo.iulp_rtt_sd,
1761 	    ire->ire_uinfo.iulp_ssthresh, ref,
1762 	    ire->ire_uinfo.iulp_rtomax,
1763 	    (ire->ire_uinfo.iulp_tstamp_ok ? 1: 0),
1764 	    (ire->ire_uinfo.iulp_wscale_ok ? 1: 0),
1765 	    (ire->ire_uinfo.iulp_ecn_ok ? 1: 0),
1766 	    (ire->ire_uinfo.iulp_pmtud_ok ? 1: 0),
1767 	    ire->ire_uinfo.iulp_sack,
1768 	    ire->ire_uinfo.iulp_spipe, ire->ire_uinfo.iulp_rpipe,
1769 	    ib_pkt_count, ire->ire_ob_pkt_count, fo_pkt_count,
1770 	    ip_nv_lookup(ire_nv_tbl, (int)ire->ire_type));
1771 	if (print_len < buf_len) {
1772 		mp->b_wptr += print_len;
1773 	} else {
1774 		mp->b_wptr += buf_len;
1775 	}
1776 }
1777 
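/*
 * ire_report_ftable() appends one formatted row per IRE to the report mblk,
 * writing into the space remaining between b_wptr and db_lim and advancing
 * b_wptr by at most that amount.  Below is a userland sketch of the same
 * append-with-truncation idiom over a flat buffer; report_append() and its
 * arguments are illustrative and do not correspond to anything in this file.
 */
#include <stdio.h>
#include <stddef.h>

/*
 * Append one formatted line to buf (bufsz bytes total, 'used' already
 * filled) and return the new fill level.  Once the buffer is full, later
 * calls return without writing, just as further IREs stop being reported.
 */
size_t
report_append(char *buf, size_t bufsz, size_t used, const char *name, int ref)
{
	size_t room;
	int len;

	if (used >= bufsz)
		return (used);			/* no space left */
	room = bufsz - used;

	len = snprintf(buf + used, room, "%-16s %5d\n", name, ref);
	if (len < 0)
		return (used);

	/* snprintf reports what it wanted to write; clamp to what fits */
	return (used + ((size_t)len >= room ? room : (size_t)len));
}
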
1778 /*
1779  * Callback supplied by ire_ftable_lookup() to rn_match_args().  Invokes
1780  * ire_match_args() on each IRE in the matching leaf node's bucket and
1781  * records the first match in ift_best_ire; a sketch follows the function.
1782  */
1783 boolean_t
1784 ire_find_best_route(struct radix_node *rn, void *arg)
1785 {
1786 	struct rt_entry *rt = (struct rt_entry *)rn;
1787 	irb_t *irb_ptr;
1788 	ire_t *ire;
1789 	ire_ftable_args_t *margs = arg;
1790 	ipaddr_t match_mask;
1791 
1792 	ASSERT(rt != NULL);
1793 
1794 	irb_ptr = &rt->rt_irb;
1795 
1796 	if (irb_ptr->irb_ire_cnt == 0)
1797 		return (B_FALSE);
1798 
1799 	rw_enter(&irb_ptr->irb_lock, RW_READER);
1800 	for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) {
1801 		if (ire->ire_marks & IRE_MARK_CONDEMNED)
1802 			continue;
1803 		if (margs->ift_flags & MATCH_IRE_MASK)
1804 			match_mask = margs->ift_mask;
1805 		else
1806 			match_mask = ire->ire_mask;
1807 
1808 		if (ire_match_args(ire, margs->ift_addr, match_mask,
1809 		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
1810 		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
1811 		    margs->ift_flags)) {
1812 			IRE_REFHOLD(ire);
1813 			rw_exit(&irb_ptr->irb_lock);
1814 			margs->ift_best_ire = ire;
1815 			return (B_TRUE);
1816 		}
1817 	}
1818 	rw_exit(&irb_ptr->irb_lock);
1819 	return (B_FALSE);
1820 }
1821 
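/*
 * ire_find_best_route() walks the bucket hanging off the matching radix
 * leaf under the bucket's reader lock, skips condemned entries, and takes a
 * reference on the first IRE that satisfies ire_match_args() before dropping
 * the lock.  The sketch below shows the same lock/skip/hold pattern on a
 * simplified bucket; the struct and function names are illustrative only,
 * and a C11 atomic stands in for IRE_REFHOLD().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

#define	ENT_CONDEMNED	0x1		/* entry is being deleted */

struct entry {
	struct entry	*next;
	unsigned int	flags;
	atomic_uint	refcnt;
	unsigned int	key;
};

struct bucket {
	pthread_rwlock_t lock;
	struct entry	*head;
};

/*
 * Return the first live entry matching 'key' with an extra reference held,
 * or NULL.  The reference is taken before the lock is dropped so the entry
 * cannot disappear underneath the caller.
 */
struct entry *
bucket_find(struct bucket *b, unsigned int key)
{
	struct entry *e;

	(void) pthread_rwlock_rdlock(&b->lock);
	for (e = b->head; e != NULL; e = e->next) {
		if (e->flags & ENT_CONDEMNED)
			continue;		/* being deleted; skip */
		if (e->key == key) {
			atomic_fetch_add(&e->refcnt, 1);
			(void) pthread_rwlock_unlock(&b->lock);
			return (e);
		}
	}
	(void) pthread_rwlock_unlock(&b->lock);
	return (NULL);
}
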
1822 /*
1823  * ftable irb_t structures are dynamically allocated, so we need to check
1824  * whether the irb_t (and its attachment to the ftable radix tree) should
1825  * be cleaned up when irb_refcnt goes to 0.  The conditions that need to
1826  * be verified are:
1827  * - no other walkers of the ire bucket, i.e., a quiescent irb_refcnt,
1828  * - no other threads holding references to ires in the bucket,
1829  *   i.e., irb_nire == 0,
1830  * - no active ires in the bucket, i.e., irb_ire_cnt == 0,
1831  * - the global tree lock and irb_lock must be held in write mode.
1832  */
1833 void
1834 irb_refrele_ftable(irb_t *irb)
1835 {
1836 	for (;;) {
1837 		rw_enter(&irb->irb_lock, RW_WRITER);
1838 		ASSERT(irb->irb_refcnt != 0);
1839 		if (irb->irb_refcnt != 1) {
1840 			/*
1841 			 * Someone has a reference to this radix node
1842 			 * or there is some bucket walker.
1843 			 */
1844 			irb->irb_refcnt--;
1845 			rw_exit(&irb->irb_lock);
1846 			return;
1847 		} else {
1848 			/*
1849 			 * There is no other walker, nor is there any
1850 			 * other thread that holds a direct ref to this
1851 			 * radix node.  Do the cleanup if needed.  The call
1852 			 * to ire_unlink() clears the IRB_MARK_CONDEMNED flag.
1853 			 */
1854 			if (irb->irb_marks & IRB_MARK_CONDEMNED)  {
1855 				ire_t *ire_list;
1856 
1857 				ire_list = ire_unlink(irb);
1858 				rw_exit(&irb->irb_lock);
1859 
1860 				if (ire_list != NULL)
1861 					ire_cleanup(ire_list);
1862 				/*
1863 				 * more CONDEMNED entries could have
1864 				 * been added while we dropped the lock,
1865 				 * so we have to re-check.
1866 				 */
1867 				continue;
1868 			}
1869 
1870 			/*
1871 			 * Now check if there are still any ires
1872 			 * associated with this radix node.
1873 			 */
1874 			if (irb->irb_nire != 0) {
1875 				/*
1876 				 * someone is still holding on
1877 				 * to ires in this bucket
1878 				 */
1879 				irb->irb_refcnt--;
1880 				rw_exit(&irb->irb_lock);
1881 				return;
1882 			} else {
1883 				/*
1884 				 * Everything is clear: zero walkers,
1885 				 * zero threads with a ref to this
1886 				 * radix node, and zero ires associated
1887 				 * with it.  Because of the lock order,
1888 				 * re-check these conditions after
1889 				 * grabbing all the locks in the right order.
1890 				 */
1891 				rw_exit(&irb->irb_lock);
1892 				if (irb_inactive(irb))
1893 					return;
1894 				/*
1895 				 * irb_inactive could not free the irb.
1896 				 * See if there are any walkers; if not,
1897 				 * try to clean up again.
1898 				 */
1899 			}
1900 		}
1901 	}
1902 }
1903 
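/*
 * The sketch below mirrors the structure of irb_refrele_ftable() on a
 * simplified, userland bucket: only the thread that finds itself holding
 * the last reference may tear the bucket down, and it must loop and
 * re-check the conditions above after every step that drops the lock.  The
 * types and the three helpers are stand-ins for irb_t, ire_unlink(),
 * ire_cleanup() and irb_inactive(), declared here only so the sketch is
 * self-contained.
 */
#include <pthread.h>

struct dead_list;			/* opaque list of unlinked entries */

struct bucket {
	pthread_rwlock_t lock;
	unsigned int	refcnt;		/* walkers / direct references */
	unsigned int	nent;		/* entries still referenced */
	int		condemned;	/* bucket has entries to unlink */
};

extern struct dead_list	*bucket_unlink_condemned(struct bucket *);
extern void		bucket_free_entries(struct dead_list *);
extern int		bucket_inactive(struct bucket *);	/* 1 if freed */

void
bucket_refrele(struct bucket *b)
{
	for (;;) {
		(void) pthread_rwlock_wrlock(&b->lock);
		if (b->refcnt > 1) {
			b->refcnt--;	/* someone else is still walking */
			(void) pthread_rwlock_unlock(&b->lock);
			return;
		}
		if (b->condemned) {
			/* unlink clears b->condemned, as ire_unlink() does */
			struct dead_list *dead = bucket_unlink_condemned(b);

			(void) pthread_rwlock_unlock(&b->lock);
			bucket_free_entries(dead);
			continue;	/* the lock was dropped; re-check */
		}
		if (b->nent != 0) {
			b->refcnt--;	/* entries are still held elsewhere */
			(void) pthread_rwlock_unlock(&b->lock);
			return;
		}
		(void) pthread_rwlock_unlock(&b->lock);
		if (bucket_inactive(b))
			return;		/* bucket and tree node freed */
		/* a new walker raced in; go around again */
	}
}
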
1904 /*
1905  * IRE iterator used by ire_ftable_lookup() to process multiple default
1906  * routes.  Given a starting point in the hash list (ire_origin), walk
1907  * the IREs in the bucket, skipping deleted entries.  Returns the next
1908  * matching IRE with a reference held, or NULL if nothing matches; the
1909  * bucket itself is held for the duration of the walk.
1910  *
1911  * In the absence of good IRE_DEFAULT routes, this function returns the
1912  * first matching IRE_INTERFACE route found (if any); a sketch follows below.
1913  */
1914 ire_t *
1915 ire_round_robin(irb_t *irb_ptr, zoneid_t zoneid, ire_ftable_args_t *margs,
1916 	ip_stack_t *ipst)
1917 {
1918 	ire_t	*ire_origin;
1919 	ire_t	*ire, *maybe_ire = NULL;
1920 
1921 	rw_enter(&irb_ptr->irb_lock, RW_WRITER);
1922 	ire_origin = irb_ptr->irb_rr_origin;
1923 	if (ire_origin != NULL) {
1924 		ire_origin = ire_origin->ire_next;
1925 		IRE_FIND_NEXT_ORIGIN(ire_origin);
1926 	}
1927 
1928 	if (ire_origin == NULL) {
1929 		/*
1930 		 * First time through the routine, or we dropped off the end
1931 		 * of the list.
1932 		 */
1933 		ire_origin = irb_ptr->irb_ire;
1934 		IRE_FIND_NEXT_ORIGIN(ire_origin);
1935 	}
1936 	irb_ptr->irb_rr_origin = ire_origin;
1937 	IRB_REFHOLD_LOCKED(irb_ptr);
1938 	rw_exit(&irb_ptr->irb_lock);
1939 
1940 	DTRACE_PROBE2(ire__rr__origin, (irb_t *), irb_ptr,
1941 	    (ire_t *), ire_origin);
1942 
1943 	/*
1944 	 * Round-robin the routers list looking for a route that
1945 	 * matches the passed in parameters.
1946 	 * We start with the ire we found above and we walk the hash
1947 	 * list until we're back where we started. It doesn't matter if
1948 	 * routes are added or deleted by other threads - we know this
1949 	 * ire will stay in the list because we hold a reference on the
1950 	 * ire bucket.
1951 	 */
1952 	ire = ire_origin;
1953 	while (ire != NULL) {
1954 		int match_flags = MATCH_IRE_TYPE | MATCH_IRE_SECATTR;
1955 		ire_t *rire;
1956 
1957 		if (ire->ire_marks & IRE_MARK_CONDEMNED)
1958 			goto next_ire;
1959 
1960 		if (!ire_match_args(ire, margs->ift_addr, (ipaddr_t)0,
1961 		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
1962 		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
1963 		    margs->ift_flags))
1964 			goto next_ire;
1965 
1966 		if (ire->ire_type & IRE_INTERFACE) {
1967 			/*
1968 			 * keep looking to see if there is a non-interface
1969 			 * default ire, but save this one as a last resort.
1970 			 */
1971 			if (maybe_ire == NULL)
1972 				maybe_ire = ire;
1973 			goto next_ire;
1974 		}
1975 
1976 		if (zoneid == ALL_ZONES) {
1977 			IRE_REFHOLD(ire);
1978 			IRB_REFRELE(irb_ptr);
1979 			return (ire);
1980 		}
1981 		/*
1982 		 * When we're in a non-global zone, we're only
1983 		 * interested in routers that are
1984 		 * reachable through ipifs within our zone.
1985 		 */
1986 		if (ire->ire_ipif != NULL) {
1987 			match_flags |= MATCH_IRE_ILL_GROUP;
1988 		}
1989 		rire = ire_route_lookup(ire->ire_gateway_addr, 0, 0,
1990 		    IRE_INTERFACE, ire->ire_ipif, NULL, zoneid, margs->ift_tsl,
1991 		    match_flags, ipst);
1992 		if (rire != NULL) {
1993 			ire_refrele(rire);
1994 			IRE_REFHOLD(ire);
1995 			IRB_REFRELE(irb_ptr);
1996 			return (ire);
1997 		}
1998 next_ire:
1999 		ire = (ire->ire_next ?  ire->ire_next : irb_ptr->irb_ire);
2000 		if (ire == ire_origin)
2001 			break;
2002 	}
2003 	if (maybe_ire != NULL)
2004 		IRE_REFHOLD(maybe_ire);
2005 	IRB_REFRELE(irb_ptr);
2006 	return (maybe_ire);
2007 }
2008 
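/*
 * A simplified sketch of the round-robin selection performed by
 * ire_round_robin() above, minus the locking, reference counting and
 * zone/gateway reachability checks: resume from the remembered origin, walk
 * the bucket as a circular list, prefer a usable gateway route, and fall
 * back to the first usable interface route.  The struct and field names are
 * illustrative and do not exist elsewhere in this file.
 */
#include <stddef.h>

struct route {
	struct route	*next;
	int		usable;		/* passes the ire_match_args checks */
	int		interface;	/* interface route: last resort only */
};

struct rr_bucket {
	struct route	*head;
	struct route	*rr_origin;	/* where the previous lookup stopped */
};

struct route *
round_robin_pick(struct rr_bucket *b)
{
	struct route *start, *r, *fallback = NULL;

	/* advance the origin; wrap to the head if we fall off the end */
	start = (b->rr_origin != NULL) ? b->rr_origin->next : NULL;
	if (start == NULL)
		start = b->head;
	if (start == NULL)
		return (NULL);			/* empty bucket */
	b->rr_origin = start;

	r = start;
	do {
		if (r->usable) {
			if (!r->interface)
				return (r);	/* preferred: gateway route */
			if (fallback == NULL)
				fallback = r;	/* remember as a last resort */
		}
		r = (r->next != NULL) ? r->next : b->head;
	} while (r != start);

	return (fallback);
}
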
2009 void
2010 irb_refhold_rn(struct radix_node *rn)
2011 {
2012 	if ((rn->rn_flags & RNF_ROOT) == 0)
2013 		IRB_REFHOLD(&((rt_t *)(rn))->rt_irb);
2014 }
2015 
2016 void
2017 irb_refrele_rn(struct radix_node *rn)
2018 {
2019 	if ((rn->rn_flags & RNF_ROOT) == 0)
2020 		irb_refrele_ftable(&((rt_t *)(rn))->rt_irb);
2021 }
2022