xref: /titanic_51/usr/src/uts/common/inet/ip/ip_ftable.c (revision d74f5eca2277b6a0ce0e8757a5a4b41f4dcbf8ba)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file contains consumer routines of the IPv4 forwarding engine
28  */
29 
30 #include <sys/types.h>
31 #include <sys/stream.h>
32 #include <sys/stropts.h>
33 #include <sys/strlog.h>
34 #include <sys/dlpi.h>
35 #include <sys/ddi.h>
36 #include <sys/cmn_err.h>
37 #include <sys/policy.h>
38 
39 #include <sys/systm.h>
40 #include <sys/strsun.h>
41 #include <sys/kmem.h>
42 #include <sys/param.h>
43 #include <sys/socket.h>
44 #include <sys/strsubr.h>
45 #include <sys/pattr.h>
46 #include <net/if.h>
47 #include <net/route.h>
48 #include <netinet/in.h>
49 #include <net/if_dl.h>
50 #include <netinet/ip6.h>
51 #include <netinet/icmp6.h>
52 
53 #include <inet/common.h>
54 #include <inet/mi.h>
55 #include <inet/mib2.h>
56 #include <inet/ip.h>
57 #include <inet/ip_impl.h>
58 #include <inet/ip6.h>
59 #include <inet/ip_ndp.h>
60 #include <inet/arp.h>
61 #include <inet/ip_if.h>
62 #include <inet/ip_ire.h>
63 #include <inet/ip_ftable.h>
64 #include <inet/ip_rts.h>
65 #include <inet/nd.h>
66 
67 #include <net/pfkeyv2.h>
68 #include <inet/ipsec_info.h>
69 #include <inet/sadb.h>
70 #include <sys/kmem.h>
71 #include <inet/tcp.h>
72 #include <inet/ipclassifier.h>
73 #include <sys/zone.h>
74 #include <net/radix.h>
75 #include <sys/tsol/label.h>
76 #include <sys/tsol/tnet.h>
77 
78 #define	IS_DEFAULT_ROUTE(ire)	\
79 	(((ire)->ire_type & IRE_DEFAULT) || \
80 	    (((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0)))
81 
82 /*
83  * structure for passing args between ire_ftable_lookup and ire_find_best_route
84  */
85 typedef struct ire_ftable_args_s {
86 	ipaddr_t	ift_addr;
87 	ipaddr_t	ift_mask;
88 	ipaddr_t	ift_gateway;
89 	int		ift_type;
90 	const ipif_t		*ift_ipif;
91 	zoneid_t	ift_zoneid;
92 	uint32_t	ift_ihandle;
93 	const ts_label_t	*ift_tsl;
94 	int		ift_flags;
95 	ire_t		*ift_best_ire;
96 } ire_ftable_args_t;
97 
98 static ire_t	*route_to_dst(const struct sockaddr *, zoneid_t, ip_stack_t *);
99 static ire_t   	*ire_round_robin(irb_t *, zoneid_t, ire_ftable_args_t *,
100     ip_stack_t *);
101 static void		ire_del_host_redir(ire_t *, char *);
102 static boolean_t	ire_find_best_route(struct radix_node *, void *);
103 static int	ip_send_align_hcksum_flags(mblk_t *, ill_t *);
104 
105 /*
106  * Lookup a route in forwarding table. A specific lookup is indicated by
107  * passing the required parameters and indicating the match required in the
108  * flag field.
109  *
 110  * Looking for a default route can be done in three ways:
 111  * 1) pass mask as 0 and set MATCH_IRE_MASK in the flags field
 112  *    along with other matches.
 113  * 2) pass type as IRE_DEFAULT and set MATCH_IRE_TYPE in the flags
 114  *    field along with other matches.
 115  * 3) pass both the destination and mask as zero.
116  *
117  * A request to return a default route if no route
 118  * is found can be specified by setting MATCH_IRE_DEFAULT
119  * in flags.
120  *
 121  * It does not support more than one level of recursion. It
 122  * will do a recursive lookup only when the lookup maps to
 123  * a prefix or default route and the MATCH_IRE_RECURSIVE flag is passed.
 124  *
 125  * If the routing table is set up to allow more than one level
 126  * of recursion, cleanup of the cache table will not work, resulting
 127  * in invalid routing.
128  *
129  * Supports IP_BOUND_IF by following the ipif/ill when recursing.
130  *
131  * NOTE : When this function returns NULL, pire has already been released.
132  *	  pire is valid only when this function successfully returns an
133  *	  ire.
134  */
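/*
 * Illustrative sketch only (an assumption-laden example, not part of this
 * file's build): a caller wanting the best route to a destination, falling
 * back to a default route and recursing down to the interface route, might
 * use this roughly as follows; dst, zoneid and ipst are assumed to be in
 * scope.
 *
 *	ire_t *ire, *pire = NULL;
 *
 *	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &pire, zoneid, 0, NULL,
 *	    MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT, ipst);
 *	if (ire != NULL) {
 *		... use ire (and pire, when non-NULL) ...
 *		if (pire != NULL)
 *			ire_refrele(pire);
 *		ire_refrele(ire);
 *	}
 */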
135 ire_t *
136 ire_ftable_lookup(ipaddr_t addr, ipaddr_t mask, ipaddr_t gateway,
137     int type, const ipif_t *ipif, ire_t **pire, zoneid_t zoneid,
138     uint32_t ihandle, const ts_label_t *tsl, int flags, ip_stack_t *ipst)
139 {
140 	ire_t *ire = NULL;
141 	ipaddr_t gw_addr;
142 	struct rt_sockaddr rdst, rmask;
143 	struct rt_entry *rt;
144 	ire_ftable_args_t margs;
145 	boolean_t found_incomplete = B_FALSE;
146 
147 	ASSERT(ipif == NULL || !ipif->ipif_isv6);
148 
149 	/*
150 	 * When we return NULL from this function, we should make
151 	 * sure that *pire is NULL so that the callers will not
152 	 * wrongly REFRELE the pire.
153 	 */
154 	if (pire != NULL)
155 		*pire = NULL;
156 	/*
 157 	 * ire_match_args() will dereference ipif if MATCH_IRE_SRC or
158 	 * MATCH_IRE_ILL is set.
159 	 */
160 	if ((flags & (MATCH_IRE_SRC | MATCH_IRE_ILL | MATCH_IRE_ILL_GROUP)) &&
161 	    (ipif == NULL))
162 		return (NULL);
163 
164 	(void) memset(&rdst, 0, sizeof (rdst));
165 	rdst.rt_sin_len = sizeof (rdst);
166 	rdst.rt_sin_family = AF_INET;
167 	rdst.rt_sin_addr.s_addr = addr;
168 
169 	(void) memset(&rmask, 0, sizeof (rmask));
170 	rmask.rt_sin_len = sizeof (rmask);
171 	rmask.rt_sin_family = AF_INET;
172 	rmask.rt_sin_addr.s_addr = mask;
173 
174 	(void) memset(&margs, 0, sizeof (margs));
175 	margs.ift_addr = addr;
176 	margs.ift_mask = mask;
177 	margs.ift_gateway = gateway;
178 	margs.ift_type = type;
179 	margs.ift_ipif = ipif;
180 	margs.ift_zoneid = zoneid;
181 	margs.ift_ihandle = ihandle;
182 	margs.ift_tsl = tsl;
183 	margs.ift_flags = flags;
184 
185 	/*
186 	 * The flags argument passed to ire_ftable_lookup may cause the
187 	 * search to return, not the longest matching prefix, but the
188 	 * "best matching prefix", i.e., the longest prefix that also
189 	 * satisfies constraints imposed via the permutation of flags
190 	 * passed in. To achieve this, we invoke ire_match_args() on
 191 	 * each matching leaf in the radix tree. ire_match_args() is
 192 	 * invoked by the callback function ire_find_best_route().
 193 	 * We hold the global tree lock in read mode when calling
 194 	 * rn_match_args(). Before dropping the global tree lock, ensure
195 	 * that the radix node can't be deleted by incrementing ire_refcnt.
196 	 */
197 	RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
198 	rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst,
199 	    ipst->ips_ip_ftable, ire_find_best_route, &margs);
200 	ire = margs.ift_best_ire;
201 	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
202 
203 	if (rt == NULL) {
204 		return (NULL);
205 	} else {
206 		ASSERT(ire != NULL);
207 	}
208 
209 	DTRACE_PROBE2(ire__found, ire_ftable_args_t *, &margs, ire_t *, ire);
210 
211 	if (!IS_DEFAULT_ROUTE(ire))
212 		goto found_ire_held;
213 	/*
 214 	 * If a default route is found, see if the default matching criteria
215 	 * are satisfied.
216 	 */
217 	if (flags & MATCH_IRE_MASK) {
218 		/*
219 		 * we were asked to match a 0 mask, and came back with
220 		 * a default route. Ok to return it.
221 		 */
222 		goto found_default_ire;
223 	}
224 	if ((flags & MATCH_IRE_TYPE) &&
225 	    (type & (IRE_DEFAULT | IRE_INTERFACE))) {
226 		/*
227 		 * we were asked to match a default ire type. Ok to return it.
228 		 */
229 		goto found_default_ire;
230 	}
231 	if (flags & MATCH_IRE_DEFAULT) {
232 		goto found_default_ire;
233 	}
234 	/*
235 	 * we found a default route, but default matching criteria
236 	 * are not specified and we are not explicitly looking for
237 	 * default.
238 	 */
239 	IRE_REFRELE(ire);
240 	return (NULL);
241 found_default_ire:
242 	/*
243 	 * round-robin only if we have more than one route in the bucket.
244 	 */
245 	if ((ire->ire_bucket->irb_ire_cnt > 1) &&
246 	    IS_DEFAULT_ROUTE(ire) &&
247 	    ((flags & (MATCH_IRE_DEFAULT | MATCH_IRE_MASK)) ==
248 	    MATCH_IRE_DEFAULT)) {
249 		ire_t *next_ire;
250 
251 		next_ire = ire_round_robin(ire->ire_bucket, zoneid, &margs,
252 		    ipst);
253 		IRE_REFRELE(ire);
254 		if (next_ire != NULL) {
255 			ire = next_ire;
256 		} else {
257 			/* no route */
258 			return (NULL);
259 		}
260 	}
261 found_ire_held:
262 	if ((flags & MATCH_IRE_RJ_BHOLE) &&
263 	    (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) {
264 		return (ire);
265 	}
266 	/*
 267 	 * At this point, the IRE that was found must be an IRE_FORWARDTABLE
 268 	 * type.  If this is a recursive lookup and an IRE_INTERFACE type was
 269 	 * found, return that.  If it was some other IRE_FORWARDTABLE type of
 270 	 * IRE (one of the prefix types), then it is necessary to fill in the
 271 	 * parent IRE pointed to by pire, and then look up the gateway address of
 272 	 * the parent.  For backwards compatibility, if this lookup returns an
 273 	 * IRE other than an IRE_CACHETABLE or IRE_INTERFACE, then one more level
274 	 * of lookup is done.
275 	 */
276 	if (flags & MATCH_IRE_RECURSIVE) {
277 		ipif_t	*gw_ipif;
278 		int match_flags = MATCH_IRE_DSTONLY;
279 		ire_t *save_ire;
280 
281 		if (ire->ire_type & IRE_INTERFACE)
282 			return (ire);
283 		if (pire != NULL)
284 			*pire = ire;
285 		/*
286 		 * If we can't find an IRE_INTERFACE or the caller has not
287 		 * asked for pire, we need to REFRELE the save_ire.
288 		 */
289 		save_ire = ire;
290 
291 		/*
292 		 * Currently MATCH_IRE_ILL is never used with
293 		 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while
294 		 * sending out packets as MATCH_IRE_ILL is used only
295 		 * for communicating with on-link hosts. We can't assert
296 		 * that here as RTM_GET calls this function with
297 		 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE.
 298 	 * We have already used MATCH_IRE_ILL in determining
299 		 * the right prefix route at this point. To match the
300 		 * behavior of how we locate routes while sending out
301 		 * packets, we don't want to use MATCH_IRE_ILL below
302 		 * while locating the interface route.
303 		 *
304 		 * ire_ftable_lookup may end up with an incomplete IRE_CACHE
305 		 * entry for the gateway (i.e., one for which the
306 		 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller
307 		 * has specified MATCH_IRE_COMPLETE, such entries will not
308 		 * be returned; instead, we return the IF_RESOLVER ire.
309 		 */
310 		if (ire->ire_ipif != NULL)
311 			match_flags |= MATCH_IRE_ILL_GROUP;
312 
313 		ire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 0,
314 		    ire->ire_ipif, NULL, zoneid, tsl, match_flags, ipst);
315 		DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire,
316 		    (ire_t *), save_ire);
317 		if (ire == NULL ||
318 		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
319 		    ire->ire_nce->nce_state != ND_REACHABLE &&
320 		    (flags & MATCH_IRE_COMPLETE))) {
321 			/*
322 			 * Do not release the parent ire if MATCH_IRE_PARENT
323 			 * is set. Also return it via ire.
324 			 */
325 			if (ire != NULL) {
326 				ire_refrele(ire);
327 				ire = NULL;
328 				found_incomplete = B_TRUE;
329 			}
330 			if (flags & MATCH_IRE_PARENT) {
331 				if (pire != NULL) {
332 					/*
333 					 * Need an extra REFHOLD, if the parent
334 					 * ire is returned via both ire and
335 					 * pire.
336 					 */
337 					IRE_REFHOLD(save_ire);
338 				}
339 				ire = save_ire;
340 			} else {
341 				ire_refrele(save_ire);
342 				if (pire != NULL)
343 					*pire = NULL;
344 			}
345 			if (!found_incomplete)
346 				return (ire);
347 		}
348 		if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) {
349 			/*
350 			 * If the caller did not ask for pire, release
351 			 * it now.
352 			 */
353 			if (pire == NULL) {
354 				ire_refrele(save_ire);
355 			}
356 			return (ire);
357 		}
358 		match_flags |= MATCH_IRE_TYPE;
359 		gw_addr = ire->ire_gateway_addr;
360 		gw_ipif = ire->ire_ipif;
361 		ire_refrele(ire);
362 		ire = ire_route_lookup(gw_addr, 0, 0,
363 		    (found_incomplete? IRE_INTERFACE :
364 		    (IRE_CACHETABLE | IRE_INTERFACE)),
365 		    gw_ipif, NULL, zoneid, tsl, match_flags, ipst);
366 		DTRACE_PROBE2(ftable__route__lookup2, (ire_t *), ire,
367 		    (ire_t *), save_ire);
368 		if (ire == NULL ||
369 		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
370 		    ire->ire_nce->nce_state != ND_REACHABLE &&
371 		    (flags & MATCH_IRE_COMPLETE))) {
372 			/*
373 			 * Do not release the parent ire if MATCH_IRE_PARENT
374 			 * is set. Also return it via ire.
375 			 */
376 			if (ire != NULL) {
377 				ire_refrele(ire);
378 				ire = NULL;
379 			}
380 			if (flags & MATCH_IRE_PARENT) {
381 				if (pire != NULL) {
382 					/*
383 					 * Need an extra REFHOLD, if the
384 					 * parent ire is returned via both
385 					 * ire and pire.
386 					 */
387 					IRE_REFHOLD(save_ire);
388 				}
389 				ire = save_ire;
390 			} else {
391 				ire_refrele(save_ire);
392 				if (pire != NULL)
393 					*pire = NULL;
394 			}
395 			return (ire);
396 		} else if (pire == NULL) {
397 			/*
398 			 * If the caller did not ask for pire, release
399 			 * it now.
400 			 */
401 			ire_refrele(save_ire);
402 		}
403 		return (ire);
404 	}
405 	ASSERT(pire == NULL || *pire == NULL);
406 	return (ire);
407 }
408 
409 
410 /*
411  * Find an IRE_OFFSUBNET IRE entry for the multicast address 'group'
412  * that goes through 'ipif'. As a fallback, a route that goes through
413  * ipif->ipif_ill can be returned.
414  */
415 ire_t *
416 ipif_lookup_multi_ire(ipif_t *ipif, ipaddr_t group)
417 {
418 	ire_t	*ire;
419 	ire_t	*save_ire = NULL;
420 	ire_t   *gw_ire;
421 	irb_t   *irb;
422 	ipaddr_t gw_addr;
423 	int	match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL;
424 	ip_stack_t *ipst = ipif->ipif_ill->ill_ipst;
425 
426 	ASSERT(CLASSD(group));
427 
428 	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, ALL_ZONES, 0,
429 	    NULL, MATCH_IRE_DEFAULT, ipst);
430 
431 	if (ire == NULL)
432 		return (NULL);
433 
434 	irb = ire->ire_bucket;
435 	ASSERT(irb);
436 
437 	IRB_REFHOLD(irb);
438 	ire_refrele(ire);
439 	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
440 		if (ire->ire_addr != group ||
441 		    ipif->ipif_zoneid != ire->ire_zoneid &&
442 		    ire->ire_zoneid != ALL_ZONES) {
443 			continue;
444 		}
445 
446 		switch (ire->ire_type) {
447 		case IRE_DEFAULT:
448 		case IRE_PREFIX:
449 		case IRE_HOST:
450 			gw_addr = ire->ire_gateway_addr;
451 			gw_ire = ire_ftable_lookup(gw_addr, 0, 0, IRE_INTERFACE,
452 			    ipif, NULL, ALL_ZONES, 0, NULL, match_flags, ipst);
453 
454 			if (gw_ire != NULL) {
455 				if (save_ire != NULL) {
456 					ire_refrele(save_ire);
457 				}
458 				IRE_REFHOLD(ire);
459 				if (gw_ire->ire_ipif == ipif) {
460 					ire_refrele(gw_ire);
461 
462 					IRB_REFRELE(irb);
463 					return (ire);
464 				}
465 				ire_refrele(gw_ire);
466 				save_ire = ire;
467 			}
468 			break;
469 		case IRE_IF_NORESOLVER:
470 		case IRE_IF_RESOLVER:
471 			if (ire->ire_ipif == ipif) {
472 				if (save_ire != NULL) {
473 					ire_refrele(save_ire);
474 				}
475 				IRE_REFHOLD(ire);
476 
477 				IRB_REFRELE(irb);
478 				return (ire);
479 			}
480 			break;
481 		}
482 	}
483 	IRB_REFRELE(irb);
484 
485 	return (save_ire);
486 }
487 
488 /*
489  * Find an IRE_INTERFACE for the multicast group.
490  * Allows different routes for multicast addresses
491  * in the unicast routing table (akin to 224.0.0.0 but could be more specific)
492  * which point at different interfaces. This is used when IP_MULTICAST_IF
493  * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't
494  * specify the interface to join on.
495  *
496  * Supports IP_BOUND_IF by following the ipif/ill when recursing.
497  */
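/*
 * Illustrative sketch only (hypothetical caller, not compiled here): picking
 * the outgoing interface for a multicast group when IP_MULTICAST_IF was not
 * given; group, zoneid and ipst are assumed to be in scope.
 *
 *	ire_t *mire;
 *	ill_t *ill;
 *
 *	mire = ire_lookup_multi(group, zoneid, ipst);
 *	if (mire != NULL) {
 *		ill = ire_to_ill(mire);
 *		... join or transmit via ill ...
 *		ire_refrele(mire);
 *	}
 */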
498 ire_t *
499 ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst)
500 {
501 	ire_t	*ire;
502 	ipif_t	*ipif = NULL;
503 	int	match_flags = MATCH_IRE_TYPE;
504 	ipaddr_t gw_addr;
505 
506 	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid,
507 	    0, NULL, MATCH_IRE_DEFAULT, ipst);
508 
 509 	/* We search for a resolvable ire in case of multirouting. */
510 	if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) {
511 		ire_t *cire = NULL;
512 		/*
 513 		 * If the route is not resolvable, the looked-up ire
 514 		 * may be changed here. In that case, ire_multirt_lookup()
 515 		 * IRE_REFRELEs the original ire and changes it.
516 		 */
517 		(void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW,
518 		    NULL, ipst);
519 		if (cire != NULL)
520 			ire_refrele(cire);
521 	}
522 	if (ire == NULL)
523 		return (NULL);
524 	/*
525 	 * Make sure we follow ire_ipif.
526 	 *
527 	 * We need to determine the interface route through
528 	 * which the gateway will be reached. We don't really
529 	 * care which interface is picked if the interface is
530 	 * part of a group.
531 	 */
532 	if (ire->ire_ipif != NULL) {
533 		ipif = ire->ire_ipif;
534 		match_flags |= MATCH_IRE_ILL_GROUP;
535 	}
536 
537 	switch (ire->ire_type) {
538 	case IRE_DEFAULT:
539 	case IRE_PREFIX:
540 	case IRE_HOST:
541 		gw_addr = ire->ire_gateway_addr;
542 		ire_refrele(ire);
543 		ire = ire_ftable_lookup(gw_addr, 0, 0,
544 		    IRE_INTERFACE, ipif, NULL, zoneid, 0,
545 		    NULL, match_flags, ipst);
546 		return (ire);
547 	case IRE_IF_NORESOLVER:
548 	case IRE_IF_RESOLVER:
549 		return (ire);
550 	default:
551 		ire_refrele(ire);
552 		return (NULL);
553 	}
554 }
555 
556 /*
557  * Delete the passed in ire if the gateway addr matches
558  */
559 void
560 ire_del_host_redir(ire_t *ire, char *gateway)
561 {
562 	if ((ire->ire_flags & RTF_DYNAMIC) &&
563 	    (ire->ire_gateway_addr == *(ipaddr_t *)gateway))
564 		ire_delete(ire);
565 }
566 
567 /*
568  * Search for all HOST REDIRECT routes that are
569  * pointing at the specified gateway and
570  * delete them. This routine is called only
571  * when a default gateway is going away.
572  */
573 void
574 ire_delete_host_redirects(ipaddr_t gateway, ip_stack_t *ipst)
575 {
576 	struct rtfuncarg rtfarg;
577 
578 	(void) memset(&rtfarg, 0, sizeof (rtfarg));
579 	rtfarg.rt_func = ire_del_host_redir;
580 	rtfarg.rt_arg = (void *)&gateway;
581 	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
582 	    rtfunc, &rtfarg, irb_refhold_rn, irb_refrele_rn);
583 }
584 
585 struct ihandle_arg {
586 	uint32_t ihandle;
587 	ire_t	 *ire;
588 };
589 
590 static int
591 ire_ihandle_onlink_match(struct radix_node *rn, void *arg)
592 {
593 	struct rt_entry *rt;
594 	irb_t *irb;
595 	ire_t *ire;
596 	struct ihandle_arg *ih = arg;
597 
598 	rt = (struct rt_entry *)rn;
599 	ASSERT(rt != NULL);
600 	irb = &rt->rt_irb;
601 	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
602 		if ((ire->ire_type & IRE_INTERFACE) &&
603 		    (ire->ire_ihandle == ih->ihandle)) {
604 			ih->ire = ire;
605 			IRE_REFHOLD(ire);
606 			return (1);
607 		}
608 	}
609 	return (0);
610 }
611 
612 /*
613  * Locate the interface ire that is tied to the cache ire 'cire' via
614  * cire->ire_ihandle.
615  *
616  * We are trying to create the cache ire for an onlink destn. or
617  * gateway in 'cire'. We are called from ire_add_v4() in the IRE_IF_RESOLVER
618  * case, after the ire has come back from ARP.
619  */
620 ire_t *
621 ire_ihandle_lookup_onlink(ire_t *cire)
622 {
623 	ire_t	*ire;
624 	int	match_flags;
625 	struct ihandle_arg ih;
626 	ip_stack_t *ipst;
627 
628 	ASSERT(cire != NULL);
629 	ipst = cire->ire_ipst;
630 
631 	/*
632 	 * We don't need to specify the zoneid to ire_ftable_lookup() below
633 	 * because the ihandle refers to an ipif which can be in only one zone.
634 	 */
635 	match_flags =  MATCH_IRE_TYPE | MATCH_IRE_IHANDLE | MATCH_IRE_MASK;
636 	/*
637 	 * We know that the mask of the interface ire equals cire->ire_cmask.
638 	 * (When ip_newroute() created 'cire' for an on-link destn. it set its
639 	 * cmask from the interface ire's mask)
640 	 */
641 	ire = ire_ftable_lookup(cire->ire_addr, cire->ire_cmask, 0,
642 	    IRE_INTERFACE, NULL, NULL, ALL_ZONES, cire->ire_ihandle,
643 	    NULL, match_flags, ipst);
644 	if (ire != NULL)
645 		return (ire);
646 	/*
647 	 * If we didn't find an interface ire above, we can't declare failure.
648 	 * For backwards compatibility, we need to support prefix routes
649 	 * pointing to next hop gateways that are not on-link.
650 	 *
651 	 * In the resolver/noresolver case, ip_newroute() thinks it is creating
652 	 * the cache ire for an onlink destination in 'cire'. But 'cire' is
 653 	 * not actually onlink, because ire_ftable_lookup() cheated it by
654 	 * doing ire_route_lookup() twice and returning an interface ire.
655 	 *
656 	 * Eg. default	-	gw1			(line 1)
657 	 *	gw1	-	gw2			(line 2)
658 	 *	gw2	-	hme0			(line 3)
659 	 *
660 	 * In the above example, ip_newroute() tried to create the cache ire
661 	 * 'cire' for gw1, based on the interface route in line 3. The
662 	 * ire_ftable_lookup() above fails, because there is no interface route
663 	 * to reach gw1. (it is gw2). We fall thru below.
664 	 *
665 	 * Do a brute force search based on the ihandle in a subset of the
666 	 * forwarding tables, corresponding to cire->ire_cmask. Otherwise
667 	 * things become very complex, since we don't have 'pire' in this
668 	 * case. (Also note that this method is not possible in the offlink
669 	 * case because we don't know the mask)
670 	 */
671 	(void) memset(&ih, 0, sizeof (ih));
672 	ih.ihandle = cire->ire_ihandle;
673 	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
674 	    ire_ihandle_onlink_match, &ih, irb_refhold_rn, irb_refrele_rn);
675 	return (ih.ire);
676 }
677 
678 /*
679  * IRE iterator used by ire_ftable_lookup[_v6]() to process multiple default
680  * routes. Given a starting point in the hash list (ire_origin), walk the IREs
681  * in the bucket skipping default interface routes and deleted entries.
682  * Returns the next IRE (unheld), or NULL when we're back to the starting point.
683  * Assumes that the caller holds a reference on the IRE bucket.
684  */
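/*
 * Illustrative sketch only (not compiled): a caller such as ire_round_robin()
 * could visit every eligible default route in a bucket, starting from a held
 * ire_origin, roughly like this.
 *
 *	ire_t *ire = ire_origin;
 *
 *	do {
 *		... consider ire ...
 *	} while ((ire = ire_get_next_default_ire(ire, ire_origin)) != NULL);
 */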
685 ire_t *
686 ire_get_next_default_ire(ire_t *ire, ire_t *ire_origin)
687 {
688 	ASSERT(ire_origin->ire_bucket != NULL);
689 	ASSERT(ire != NULL);
690 
691 	do {
692 		ire = ire->ire_next;
693 		if (ire == NULL)
694 			ire = ire_origin->ire_bucket->irb_ire;
695 		if (ire == ire_origin)
696 			return (NULL);
697 	} while ((ire->ire_type & IRE_INTERFACE) ||
698 	    (ire->ire_marks & IRE_MARK_CONDEMNED));
699 	ASSERT(ire != NULL);
700 	return (ire);
701 }
702 
703 static ipif_t *
704 ire_forward_src_ipif(ipaddr_t dst, ire_t *sire, ire_t *ire, ill_t *dst_ill,
705     int zoneid, ushort_t *marks)
706 {
707 	ipif_t *src_ipif;
708 	ip_stack_t *ipst = dst_ill->ill_ipst;
709 
710 	/*
711 	 * Pick the best source address from dst_ill.
712 	 *
713 	 * 1) If it is part of a multipathing group, we would
714 	 *    like to spread the inbound packets across different
715 	 *    interfaces. ipif_select_source picks a random source
716 	 *    across the different ills in the group.
717 	 *
718 	 * 2) If it is not part of a multipathing group, we try
719 	 *    to pick the source address from the destination
720 	 *    route. Clustering assumes that when we have multiple
721 	 *    prefixes hosted on an interface, the prefix of the
722 	 *    source address matches the prefix of the destination
723 	 *    route. We do this only if the address is not
724 	 *    DEPRECATED.
725 	 *
726 	 * 3) If the conn is in a different zone than the ire, we
727 	 *    need to pick a source address from the right zone.
728 	 *
729 	 * NOTE : If we hit case (1) above, the prefix of the source
730 	 *	  address picked may not match the prefix of the
 731 	 *	  destination route's prefix as ipif_select_source
732 	 *	  does not look at "dst" while picking a source
733 	 *	  address.
734 	 *	  If we want the same behavior as (2), we will need
735 	 *	  to change the behavior of ipif_select_source.
736 	 */
737 
738 	if ((sire != NULL) && (sire->ire_flags & RTF_SETSRC)) {
739 		/*
740 		 * The RTF_SETSRC flag is set in the parent ire (sire).
741 		 * Check that the ipif matching the requested source
742 		 * address still exists.
743 		 */
744 		src_ipif = ipif_lookup_addr(sire->ire_src_addr, NULL,
745 		    zoneid, NULL, NULL, NULL, NULL, ipst);
746 		return (src_ipif);
747 	}
748 	*marks |= IRE_MARK_USESRC_CHECK;
749 	if ((dst_ill->ill_group != NULL) ||
750 	    (ire->ire_ipif->ipif_flags & IPIF_DEPRECATED) ||
751 	    (dst_ill->ill_usesrc_ifindex != 0)) {
752 		src_ipif = ipif_select_source(dst_ill, dst, zoneid);
753 		if (src_ipif == NULL)
754 			return (NULL);
755 
756 	} else {
757 		src_ipif = ire->ire_ipif;
758 		ASSERT(src_ipif != NULL);
759 		/* hold src_ipif for uniformity */
760 		ipif_refhold(src_ipif);
761 	}
762 	return (src_ipif);
763 }
764 
765 /*
766  * This function is called by ip_rput_noire() and ip_fast_forward()
 767  * to resolve the route of an incoming packet that needs to be forwarded.
 768  * If the ire of the nexthop is not already in the cachetable, this
 769  * routine will insert it into the table, but won't trigger ARP resolution yet.
 770  * Thus, unlike ip_newroute, this function adds incomplete ires to
 771  * the cachetable. ARP resolution for these ires is delayed until
 772  * after all of the packet processing is completed and it is ready to
 773  * be sent out on the wire. Eventually, the packet transmit routine
 774  * ip_xmit_v4() attempts to send a packet to the driver. If it finds
 775  * that there is no link layer information, it will do the arp
 776  * resolution and queue the packet in ire->ire_nce->nce_qd_mp and
 777  * then send it out once the arp resolution is over
 778  * (see ip_xmit_v4()->ire_arpresolve()). This scheme is similar to
 779  * the model of BSD/SunOS 4.
 780  *
 781  * In the future, the insertion of incomplete ires in the cachetable should
 782  * be implemented in the hostpath as well, as doing so will greatly reduce
 783  * the existing complexity for code paths that depend on the context of
 784  * the sender (such as IPsec).
 785  *
 786  * Thus this scheme of adding incomplete ires to the cachetable in the
 787  * forwarding path can be used as a template for simplifying the hostpath.
788  */
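/*
 * Illustrative sketch only (hedged, not compiled): a forwarding-path caller
 * such as ip_rput_noire() would typically use ire_forward() along these
 * lines; dst and ipst are assumed to be in scope.
 *
 *	enum ire_forward_action act;
 *	ire_t *nexthop;
 *
 *	nexthop = ire_forward(dst, &act, NULL, NULL, NULL, ipst);
 *	if (nexthop == NULL) {
 *		switch (act) {
 *		case Forward_ret_icmp_err:
 *			... send an ICMP unreachable ...
 *		case Forward_check_multirt:
 *			... fall back to ip_newroute() ...
 *		case Forward_blackhole:
 *			... silently drop the packet ...
 *		}
 *	} else {
 *		... transmit via nexthop, then ire_refrele(nexthop) ...
 *	}
 */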
789 
790 ire_t *
791 ire_forward(ipaddr_t dst, enum ire_forward_action *ret_action,
792     ire_t *supplied_ire, ire_t *supplied_sire, const struct ts_label_s *tsl,
793     ip_stack_t *ipst)
794 {
795 	ipaddr_t gw = 0;
796 	ire_t	*ire = NULL;
797 	ire_t   *sire = NULL, *save_ire;
798 	ill_t *dst_ill = NULL;
799 	int error;
800 	zoneid_t zoneid;
801 	ipif_t *src_ipif = NULL;
802 	mblk_t *res_mp;
803 	ushort_t ire_marks = 0;
804 	tsol_gcgrp_t *gcgrp = NULL;
805 	tsol_gcgrp_addr_t ga;
806 
807 	zoneid = GLOBAL_ZONEID;
808 
809 	if (supplied_ire != NULL) {
810 		/* We have arrived here from ipfil_sendpkt */
811 		ire = supplied_ire;
812 		sire = supplied_sire;
813 		goto create_irecache;
814 	}
815 
816 	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
817 	    tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
818 	    MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT|MATCH_IRE_SECATTR, ipst);
819 
820 	if (ire == NULL) {
821 		ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst);
822 		goto icmp_err_ret;
823 	}
824 
825 	/*
 826 	 * If we encounter CGTP, we should have the caller use
 827 	 * ip_newroute to resolve multirt instead of this function.
 828 	 * CGTP specs explicitly state that it can't be used with routers.
 829 	 * This essentially prevents insertion of incomplete RTF_MULTIRT
 830 	 * ires in the cachetable.
831 	 */
832 	if (ipst->ips_ip_cgtp_filter &&
833 	    ((ire->ire_flags & RTF_MULTIRT) ||
834 	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
835 		ip3dbg(("ire_forward: packet is to be multirouted- "
836 		    "handing it to ip_newroute\n"));
837 		if (sire != NULL)
838 			ire_refrele(sire);
839 		ire_refrele(ire);
840 		/*
 841 		 * Inform the caller that multirt was encountered so that
842 		 * ip_newroute() can be called.
843 		 */
844 		*ret_action = Forward_check_multirt;
845 		return (NULL);
846 	}
847 
848 	/*
849 	 * Verify that the returned IRE does not have either
850 	 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is
851 	 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
852 	 */
853 	if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
854 	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0) {
855 		ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n",
856 		    (void *)ire));
857 		goto icmp_err_ret;
858 	}
859 
860 	/*
861 	 * If we already have a fully resolved IRE CACHE of the
862 	 * nexthop router, just hand over the cache entry
863 	 * and we are done.
864 	 */
865 
866 	if (ire->ire_type & IRE_CACHE) {
867 
868 		/*
869 		 * If we are using this ire cache entry as a
870 		 * gateway to forward packets, chances are we
871 		 * will be using it again. So turn off
872 		 * the temporary flag, thus reducing its
873 		 * chances of getting deleted frequently.
874 		 */
875 		if (ire->ire_marks & IRE_MARK_TEMPORARY) {
876 			irb_t *irb = ire->ire_bucket;
877 			rw_enter(&irb->irb_lock, RW_WRITER);
878 			/*
879 			 * We need to recheck for IRE_MARK_TEMPORARY after
 880 			 * acquiring the lock in order to keep
 881 			 * irb_tmp_ire_cnt consistent.
882 			 */
883 			if (ire->ire_marks & IRE_MARK_TEMPORARY) {
884 				ire->ire_marks &= ~IRE_MARK_TEMPORARY;
885 				irb->irb_tmp_ire_cnt--;
886 			}
887 			rw_exit(&irb->irb_lock);
888 		}
889 
890 		if (sire != NULL) {
891 			UPDATE_OB_PKT_COUNT(sire);
892 			sire->ire_last_used_time = lbolt;
893 			ire_refrele(sire);
894 		}
895 		*ret_action = Forward_ok;
896 		return (ire);
897 	}
898 create_irecache:
899 	/*
900 	 * Increment the ire_ob_pkt_count field for ire if it is an
901 	 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and
902 	 * increment the same for the parent IRE, sire, if it is some
903 	 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST).
904 	 */
905 	if ((ire->ire_type & IRE_INTERFACE) != 0) {
906 		UPDATE_OB_PKT_COUNT(ire);
907 		ire->ire_last_used_time = lbolt;
908 	}
909 
910 	/*
 911 	 * sire, if non-NULL, is a prefix IRE (not IRE_CACHETABLE or IRE_INTERFACE)
912 	 */
913 	if (sire != NULL) {
914 		gw = sire->ire_gateway_addr;
915 		ASSERT((sire->ire_type &
916 		    (IRE_CACHETABLE | IRE_INTERFACE)) == 0);
917 		UPDATE_OB_PKT_COUNT(sire);
918 		sire->ire_last_used_time = lbolt;
919 	}
920 
921 	/* Obtain dst_ill */
922 	dst_ill = ip_newroute_get_dst_ill(ire->ire_ipif->ipif_ill);
923 	if (dst_ill == NULL) {
924 		ip2dbg(("ire_forward no dst ill; ire 0x%p\n",
925 		    (void *)ire));
926 		goto icmp_err_ret;
927 	}
928 
929 	ASSERT(src_ipif == NULL);
930 	/* Now obtain the src_ipif */
931 	src_ipif = ire_forward_src_ipif(dst, sire, ire, dst_ill,
932 	    zoneid, &ire_marks);
933 	if (src_ipif == NULL)
934 		goto icmp_err_ret;
935 
936 	switch (ire->ire_type) {
937 	case IRE_IF_NORESOLVER:
938 		/* create ire_cache for ire_addr endpoint */
939 		if (dst_ill->ill_phys_addr_length != IP_ADDR_LEN &&
940 		    dst_ill->ill_resolver_mp == NULL) {
941 			ip1dbg(("ire_forward: dst_ill %p "
942 			    "for IRE_IF_NORESOLVER ire %p has "
943 			    "no ill_resolver_mp\n",
944 			    (void *)dst_ill, (void *)ire));
945 			goto icmp_err_ret;
946 		}
947 		/* FALLTHRU */
948 	case IRE_IF_RESOLVER:
949 		/*
950 		 * We have the IRE_IF_RESOLVER of the nexthop gateway
 951 		 * and now need to build an IRE_CACHE for it.
952 		 * In this case, we have the following :
953 		 *
954 		 * 1) src_ipif - used for getting a source address.
955 		 *
956 		 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This
957 		 *    means packets using the IRE_CACHE that we will build
958 		 *    here will go out on dst_ill.
959 		 *
960 		 * 3) sire may or may not be NULL. But, the IRE_CACHE that is
961 		 *    to be created will only be tied to the IRE_INTERFACE
962 		 *    that was derived from the ire_ihandle field.
963 		 *
964 		 *    If sire is non-NULL, it means the destination is
965 		 *    off-link and we will first create the IRE_CACHE for the
966 		 *    gateway.
967 		 */
968 		res_mp = dst_ill->ill_resolver_mp;
969 		if (ire->ire_type == IRE_IF_RESOLVER &&
970 		    (!OK_RESOLVER_MP(res_mp))) {
971 			goto icmp_err_ret;
972 		}
973 		/*
974 		 * To be at this point in the code with a non-zero gw
975 		 * means that dst is reachable through a gateway that
976 		 * we have never resolved.  By changing dst to the gw
977 		 * addr we resolve the gateway first.
978 		 */
979 		if (gw != INADDR_ANY) {
980 			/*
981 			 * The source ipif that was determined above was
982 			 * relative to the destination address, not the
983 			 * gateway's. If src_ipif was not taken out of
984 			 * the IRE_IF_RESOLVER entry, we'll need to call
985 			 * ipif_select_source() again.
986 			 */
987 			if (src_ipif != ire->ire_ipif) {
988 				ipif_refrele(src_ipif);
989 				src_ipif = ipif_select_source(dst_ill,
990 				    gw, zoneid);
991 				if (src_ipif == NULL)
992 					goto icmp_err_ret;
993 			}
994 			dst = gw;
995 			gw = INADDR_ANY;
996 		}
997 		/*
998 		 * dst has been set to the address of the nexthop.
999 		 *
1000 		 * TSol note: get security attributes of the nexthop;
1001 		 * Note that the nexthop may either be a gateway, or the
1002 		 * packet destination itself; Detailed explanation of
 1003 		 * issues involved is provided in the IRE_IF_NORESOLVER
1004 		 * logic in ip_newroute().
1005 		 */
1006 		ga.ga_af = AF_INET;
1007 		IN6_IPADDR_TO_V4MAPPED(dst, &ga.ga_addr);
1008 		gcgrp = gcgrp_lookup(&ga, B_FALSE);
1009 
1010 		if (ire->ire_type == IRE_IF_NORESOLVER)
1011 			dst = ire->ire_addr; /* ire_cache for tunnel endpoint */
1012 
1013 		save_ire = ire;
1014 		/*
1015 		 * create an incomplete IRE_CACHE.
1016 		 * An areq_mp will be generated in ire_arpresolve() for
1017 		 * RESOLVER interfaces.
1018 		 */
1019 		ire = ire_create(
1020 		    (uchar_t *)&dst,		/* dest address */
1021 		    (uchar_t *)&ip_g_all_ones,	/* mask */
1022 		    (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */
1023 		    (uchar_t *)&gw,		/* gateway address */
1024 		    (save_ire->ire_type == IRE_IF_RESOLVER ?  NULL:
1025 		    &save_ire->ire_max_frag),
1026 		    NULL,
1027 		    dst_ill->ill_rq,		/* recv-from queue */
1028 		    dst_ill->ill_wq,		/* send-to queue */
1029 		    IRE_CACHE,			/* IRE type */
1030 		    src_ipif,
1031 		    ire->ire_mask,		/* Parent mask */
1032 		    0,
1033 		    ire->ire_ihandle,	/* Interface handle */
1034 		    0,
1035 		    &(ire->ire_uinfo),
1036 		    NULL,
1037 		    gcgrp,
1038 		    ipst);
1039 		ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire));
1040 		if (ire != NULL) {
1041 			gcgrp = NULL; /* reference now held by IRE */
1042 			ire->ire_marks |= ire_marks;
1043 			/* add the incomplete ire: */
1044 			error = ire_add(&ire, NULL, NULL, NULL, B_TRUE);
1045 			if (error == 0 && ire != NULL) {
1046 				ire->ire_max_frag = save_ire->ire_max_frag;
1047 				ip1dbg(("setting max_frag to %d in ire 0x%p\n",
1048 				    ire->ire_max_frag, (void *)ire));
1049 			} else {
1050 				ire_refrele(save_ire);
1051 				goto icmp_err_ret;
1052 			}
1053 		} else {
1054 			if (gcgrp != NULL) {
1055 				GCGRP_REFRELE(gcgrp);
1056 				gcgrp = NULL;
1057 			}
1058 		}
1059 
1060 		ire_refrele(save_ire);
1061 		break;
1062 	default:
1063 		break;
1064 	}
1065 
1066 	*ret_action = Forward_ok;
1067 	if (sire != NULL)
1068 		ire_refrele(sire);
1069 	if (dst_ill != NULL)
1070 		ill_refrele(dst_ill);
1071 	if (src_ipif != NULL)
1072 		ipif_refrele(src_ipif);
1073 	return (ire);
1074 icmp_err_ret:
1075 	*ret_action = Forward_ret_icmp_err;
1076 	if (sire != NULL)
1077 		ire_refrele(sire);
1078 	if (dst_ill != NULL)
1079 		ill_refrele(dst_ill);
1080 	if (src_ipif != NULL)
1081 		ipif_refrele(src_ipif);
1082 	if (ire != NULL) {
1083 		if (ire->ire_flags & RTF_BLACKHOLE)
1084 			*ret_action = Forward_blackhole;
1085 		ire_refrele(ire);
1086 	}
1087 	return (NULL);
1088 
1089 }
1090 
1091 /*
1092  * Obtain the rt_entry and rt_irb for the route to be added to
1093  * the ips_ip_ftable.
1094  * First attempt to add a node to the radix tree via rn_addroute. If the
1095  * route already exists, return the bucket for the existing route.
1096  *
1097  * Locking notes: Need to hold the global radix tree lock in write mode to
1098  * add a radix node. To prevent the node from being deleted, ire_get_bucket()
1099  * returns with a ref'ed irb_t. The ire itself is added in ire_add_v4()
1100  * while holding the irb_lock, but not the radix tree lock.
1101  */
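/*
 * Illustrative sketch only (not compiled): the intended calling pattern, per
 * the locking notes above, is to obtain the ref-held bucket, link the ire in
 * under irb_lock, and drop the hold when done.
 *
 *	irb_t *irb;
 *
 *	irb = ire_get_bucket(ire);
 *	if (irb != NULL) {
 *		... insert ire into irb while holding irb->irb_lock ...
 *		IRB_REFRELE(irb);
 *	}
 */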
1102 irb_t *
1103 ire_get_bucket(ire_t *ire)
1104 {
1105 	struct radix_node *rn;
1106 	struct rt_entry *rt;
1107 	struct rt_sockaddr rmask, rdst;
1108 	irb_t *irb = NULL;
1109 	ip_stack_t *ipst = ire->ire_ipst;
1110 
1111 	ASSERT(ipst->ips_ip_ftable != NULL);
1112 
1113 	/* first try to see if route exists (based on rtalloc1) */
1114 	(void) memset(&rdst, 0, sizeof (rdst));
1115 	rdst.rt_sin_len = sizeof (rdst);
1116 	rdst.rt_sin_family = AF_INET;
1117 	rdst.rt_sin_addr.s_addr = ire->ire_addr;
1118 
1119 	(void) memset(&rmask, 0, sizeof (rmask));
1120 	rmask.rt_sin_len = sizeof (rmask);
1121 	rmask.rt_sin_family = AF_INET;
1122 	rmask.rt_sin_addr.s_addr = ire->ire_mask;
1123 
1124 	/*
 1125 	 * Add the route, based on BSD's rtrequest1(RTM_ADD).
1126 	 */
1127 	R_Malloc(rt, rt_entry_cache,  sizeof (*rt));
1128 	/* kmem_alloc failed */
1129 	if (rt == NULL)
1130 		return (NULL);
1131 
1132 	(void) memset(rt, 0, sizeof (*rt));
1133 	rt->rt_nodes->rn_key = (char *)&rt->rt_dst;
1134 	rt->rt_dst = rdst;
1135 	irb = &rt->rt_irb;
1136 	irb->irb_marks |= IRB_MARK_FTABLE; /* dynamically allocated/freed */
1137 	irb->irb_ipst = ipst;
1138 	rw_init(&irb->irb_lock, NULL, RW_DEFAULT, NULL);
1139 	RADIX_NODE_HEAD_WLOCK(ipst->ips_ip_ftable);
1140 	rn = ipst->ips_ip_ftable->rnh_addaddr(&rt->rt_dst, &rmask,
1141 	    ipst->ips_ip_ftable, (struct radix_node *)rt);
1142 	if (rn == NULL) {
1143 		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
1144 		Free(rt, rt_entry_cache);
1145 		rt = NULL;
1146 		irb = NULL;
1147 		RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
1148 		rn = ipst->ips_ip_ftable->rnh_lookup(&rdst, &rmask,
1149 		    ipst->ips_ip_ftable);
1150 		if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0)) {
1151 			/* found a non-root match */
1152 			rt = (struct rt_entry *)rn;
1153 		}
1154 	}
1155 	if (rt != NULL) {
1156 		irb = &rt->rt_irb;
1157 		IRB_REFHOLD(irb);
1158 	}
1159 	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
1160 	return (irb);
1161 }
1162 
1163 /*
1164  * This function is used when the caller wants to know the outbound
1165  * interface for a packet given only the address.
 1166  * If this is an offlink IP address and there are multiple
 1167  * routes to this destination, this routine will utilise the
 1168  * first route it finds to the IP address.
1169  * Return values:
1170  * 	0	- FAILURE
1171  *	nonzero	- ifindex
1172  */
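/*
 * Illustrative sketch only (hypothetical consumer, not compiled): mapping an
 * IPv4 destination to an interface index; daddr is an assumed in_addr_t.
 *
 *	struct sockaddr_in sin;
 *	uint_t ifindex;
 *
 *	bzero(&sin, sizeof (sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = daddr;
 *	ifindex = ifindex_lookup((struct sockaddr *)&sin, zoneid);
 *	if (ifindex == 0)
 *		... no usable route to daddr ...
 */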
1173 uint_t
1174 ifindex_lookup(const struct sockaddr *ipaddr, zoneid_t zoneid)
1175 {
1176 	uint_t ifindex = 0;
1177 	ire_t *ire;
1178 	ill_t *ill;
1179 	netstack_t *ns;
1180 	ip_stack_t *ipst;
1181 
1182 	if (zoneid == ALL_ZONES)
1183 		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
1184 	else
1185 		ns = netstack_find_by_zoneid(zoneid);
1186 	ASSERT(ns != NULL);
1187 
1188 	/*
1189 	 * For exclusive stacks we set the zoneid to zero
1190 	 * since IP uses the global zoneid in the exclusive stacks.
1191 	 */
1192 	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
1193 		zoneid = GLOBAL_ZONEID;
1194 	ipst = ns->netstack_ip;
1195 
1196 	ASSERT(ipaddr->sa_family == AF_INET || ipaddr->sa_family == AF_INET6);
1197 
1198 	if ((ire =  route_to_dst(ipaddr, zoneid, ipst)) != NULL) {
1199 		ill = ire_to_ill(ire);
1200 		if (ill != NULL)
1201 			ifindex = ill->ill_phyint->phyint_ifindex;
1202 		ire_refrele(ire);
1203 	}
1204 	netstack_rele(ns);
1205 	return (ifindex);
1206 }
1207 
1208 /*
 1209  * Routine to find the route to a destination. If an ifindex is supplied,
 1210  * it tries to match the route to the corresponding ipif for the ifindex.
1211  */
1212 static	ire_t *
1213 route_to_dst(const struct sockaddr *dst_addr, zoneid_t zoneid, ip_stack_t *ipst)
1214 {
1215 	ire_t *ire = NULL;
1216 	int match_flags;
1217 
1218 	match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1219 	    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
1220 
1221 	/* XXX pass NULL tsl for now */
1222 
1223 	if (dst_addr->sa_family == AF_INET) {
1224 		ire = ire_route_lookup(
1225 		    ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr,
1226 		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
1227 	} else {
1228 		ire = ire_route_lookup_v6(
1229 		    &((struct sockaddr_in6 *)dst_addr)->sin6_addr,
1230 		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
1231 	}
1232 	return (ire);
1233 }
1234 
1235 /*
1236  * This routine is called by IP Filter to send a packet out on the wire
1237  * to a specified V4 dst (which may be onlink or offlink). The ifindex may or
 1238  * may not be 0. A non-zero ifindex indicates IP Filter has stipulated
1239  * an outgoing interface and requires the nexthop to be on that interface.
1240  * IP WILL NOT DO the following to the data packet before sending it out:
1241  *	a. manipulate ttl
1242  *	b. ipsec work
1243  *	c. fragmentation
1244  *
1245  * If the packet has been prepared for hardware checksum then it will be
1246  * passed off to ip_send_align_cksum() to check that the flags set on the
1247  * packet are in alignment with the capabilities of the new outgoing NIC.
1248  *
1249  * Return values:
 1250  *	0		IP was able to send off the data pkt
 1251  *	ECOMM		Could not send the packet
 1252  *	ENONET		No route to dst. It is up to the caller
 1253  *			to send an ICMP unreachable error message.
 1254  *	EINPROGRESS	The macaddr of the onlink dst or that
 1255  *			of the offlink dst's nexthop needs to get
 1256  *			resolved before the packet can be sent to dst.
 1257  *			Thus transmission is not guaranteed.
1258  *
1259  */
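/*
 * Illustrative sketch only (hedged, not compiled): an IP Filter style
 * consumer pushing a fully formed IPv4 packet (mp) toward dst_addr and
 * letting IP pick the route (ifindex of 0).
 *
 *	int err;
 *
 *	err = ipfil_sendpkt(dst_addr, mp, 0, zoneid);
 *	if (err == EINPROGRESS)
 *		... nexthop is still being resolved; delivery not guaranteed ...
 *	else if (err != 0)
 *		... mp has been consumed; on ENONET the caller may send ICMP ...
 */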
1260 
1261 int
1262 ipfil_sendpkt(const struct sockaddr *dst_addr, mblk_t *mp, uint_t ifindex,
1263     zoneid_t zoneid)
1264 {
1265 	ire_t *ire = NULL, *sire = NULL;
1266 	ire_t *ire_cache = NULL;
1267 	int value;
1268 	int match_flags;
1269 	ipaddr_t dst;
1270 	netstack_t *ns;
1271 	ip_stack_t *ipst;
1272 	enum ire_forward_action ret_action;
1273 
1274 	ASSERT(mp != NULL);
1275 
1276 	if (zoneid == ALL_ZONES)
1277 		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
1278 	else
1279 		ns = netstack_find_by_zoneid(zoneid);
1280 	ASSERT(ns != NULL);
1281 
1282 	/*
1283 	 * For exclusive stacks we set the zoneid to zero
1284 	 * since IP uses the global zoneid in the exclusive stacks.
1285 	 */
1286 	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
1287 		zoneid = GLOBAL_ZONEID;
1288 	ipst = ns->netstack_ip;
1289 
1290 	ASSERT(dst_addr->sa_family == AF_INET ||
1291 	    dst_addr->sa_family == AF_INET6);
1292 
1293 	if (dst_addr->sa_family == AF_INET) {
1294 		dst = ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr;
1295 	} else {
1296 		/*
 1297 		 * We don't have support for V6 yet. It will be provided
 1298 		 * once RFE 6399103 has been delivered.
1299 		 * Until then, for V6 dsts, IP Filter will not call
1300 		 * this function. Instead the netinfo framework provides
1301 		 * its own code path, in ip_inject_impl(), to achieve
1302 		 * what it needs to do, for the time being.
1303 		 */
1304 		ip1dbg(("ipfil_sendpkt: no V6 support \n"));
1305 		value = ECOMM;
1306 		freemsg(mp);
1307 		goto discard;
1308 	}
1309 
1310 	/*
 1311 	 * Let's get the ire. We might get the ire cache entry,
 1312 	 * or the ire/sire pair needed to create the cache entry.
1313 	 * XXX pass NULL tsl for now.
1314 	 */
1315 
1316 	if (ifindex == 0) {
1317 		/* There is no supplied index. So use the FIB info */
1318 
1319 		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1320 		    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
1321 		ire = ire_route_lookup(dst,
1322 		    0, 0, 0, NULL, &sire, zoneid, MBLK_GETLABEL(mp),
1323 		    match_flags, ipst);
1324 	} else {
1325 		ipif_t *supplied_ipif;
1326 		ill_t *ill;
1327 
1328 		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
1329 		    MATCH_IRE_RECURSIVE| MATCH_IRE_RJ_BHOLE|
1330 		    MATCH_IRE_SECATTR);
1331 
1332 		/*
 1333 		 * If the supplied ifindex is non-zero, the only valid
1334 		 * nexthop is one off of the interface or group corresponding
1335 		 * to the specified ifindex.
1336 		 */
1337 		ill = ill_lookup_on_ifindex(ifindex, B_FALSE,
1338 		    NULL, NULL, NULL, NULL, ipst);
1339 		if (ill != NULL) {
1340 			match_flags |= MATCH_IRE_ILL;
1341 		} else {
1342 			/* Fallback to group names if hook_emulation set */
1343 			if (ipst->ips_ipmp_hook_emulation) {
1344 				ill = ill_group_lookup_on_ifindex(ifindex,
1345 				    B_FALSE, ipst);
1346 			}
1347 			if (ill == NULL) {
1348 				ip1dbg(("ipfil_sendpkt: Could not find"
1349 				    " route to dst\n"));
1350 				value = ECOMM;
1351 				freemsg(mp);
1352 				goto discard;
1353 			}
1354 			match_flags |= MATCH_IRE_ILL_GROUP;
1355 		}
1356 		supplied_ipif = ipif_get_next_ipif(NULL, ill);
1357 
1358 		ire = ire_route_lookup(dst, 0, 0, 0, supplied_ipif,
1359 		    &sire, zoneid, MBLK_GETLABEL(mp), match_flags, ipst);
1360 		ipif_refrele(supplied_ipif);
1361 		ill_refrele(ill);
1362 	}
1363 
1364 	/*
1365 	 * Verify that the returned IRE is non-null and does
1366 	 * not have either the RTF_REJECT or RTF_BLACKHOLE
 1367 	 * flags set and that the IRE is either an IRE_CACHE,
1368 	 * IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
1369 	 */
1370 	if (ire == NULL ||
1371 	    ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
1372 	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) {
1373 		/*
1374 		 * Either ire could not be found or we got
1375 		 * an invalid one
1376 		 */
1377 		ip1dbg(("ipfil_sendpkt: Could not find route to dst\n"));
1378 		value = ENONET;
1379 		freemsg(mp);
1380 		goto discard;
1381 	}
1382 
 1383 	/* IP Filter and CGTP don't mix. So bail out if CGTP is on */
1384 	if (ipst->ips_ip_cgtp_filter &&
1385 	    ((ire->ire_flags & RTF_MULTIRT) ||
1386 	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
1387 		ip1dbg(("ipfil_sendpkt: IPFilter does not work with CGTP\n"));
1388 		value = ECOMM;
1389 		freemsg(mp);
1390 		goto discard;
1391 	}
1392 
1393 	ASSERT(ire->ire_type != IRE_CACHE || ire->ire_nce != NULL);
1394 
1395 	/*
1396 	 * If needed, we will create the ire cache entry for the
1397 	 * nexthop, resolve its link-layer address and then send
1398 	 * the packet out without ttl or IPSec processing.
1399 	 */
1400 	switch (ire->ire_type) {
1401 	case IRE_CACHE:
1402 		if (sire != NULL) {
1403 			UPDATE_OB_PKT_COUNT(sire);
1404 			sire->ire_last_used_time = lbolt;
1405 			ire_refrele(sire);
1406 		}
1407 		ire_cache = ire;
1408 		break;
1409 	case IRE_IF_NORESOLVER:
1410 	case IRE_IF_RESOLVER:
1411 		/*
 1412 		 * Call ire_forward(). This function
 1413 		 * will create the ire cache entry of the
 1414 		 * nexthop and add this incomplete ire
 1415 		 * to the ire cache table.
1416 		 */
1417 		ire_cache = ire_forward(dst, &ret_action, ire, sire,
1418 		    MBLK_GETLABEL(mp), ipst);
1419 		if (ire_cache == NULL) {
1420 			ip1dbg(("ipfil_sendpkt: failed to create the"
1421 			    " ire cache entry \n"));
1422 			value = ENONET;
1423 			freemsg(mp);
1424 			sire = NULL;
1425 			ire = NULL;
1426 			goto discard;
1427 		}
1428 		break;
1429 	}
1430 
1431 	if (DB_CKSUMFLAGS(mp)) {
1432 		if (ip_send_align_hcksum_flags(mp, ire_to_ill(ire_cache)))
1433 			goto cleanup;
1434 	}
1435 
1436 	/*
1437 	 * Now that we have the ire cache entry of the nexthop, call
1438 	 * ip_xmit_v4() to trigger mac addr resolution
1439 	 * if necessary and send it once ready.
1440 	 */
1441 
1442 	value = ip_xmit_v4(mp, ire_cache, NULL, B_FALSE);
1443 cleanup:
1444 	ire_refrele(ire_cache);
1445 	/*
 1446 	 * At this point, the references for these have already been
 1447 	 * released within ire_forward() and/or ip_xmit_v4(). So we set
 1448 	 * them to NULL to make sure we don't drop the references
 1449 	 * again in case ip_xmit_v4() returns with either SEND_FAILED
 1450 	 * or LLHDR_RESLV_FAILED.
1451 	 */
1452 	sire = NULL;
1453 	ire = NULL;
1454 
1455 	switch (value) {
1456 	case SEND_FAILED:
1457 		ip1dbg(("ipfil_sendpkt: Send failed\n"));
1458 		value = ECOMM;
1459 		break;
1460 	case LLHDR_RESLV_FAILED:
1461 		ip1dbg(("ipfil_sendpkt: Link-layer resolution"
1462 		    "  failed\n"));
1463 		value = ECOMM;
1464 		break;
1465 	case LOOKUP_IN_PROGRESS:
1466 		netstack_rele(ns);
1467 		return (EINPROGRESS);
1468 	case SEND_PASSED:
1469 		netstack_rele(ns);
1470 		return (0);
1471 	}
1472 discard:
1473 	if (dst_addr->sa_family == AF_INET) {
1474 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
1475 	} else {
1476 		BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
1477 	}
1478 	if (ire != NULL)
1479 		ire_refrele(ire);
1480 	if (sire != NULL)
1481 		ire_refrele(sire);
1482 	netstack_rele(ns);
1483 	return (value);
1484 }
1485 
1486 
1487 /*
 1488  * We don't check for dohwcksum in here because it should be used
1489  * elsewhere to control what flags are being set on the mblk.  That is,
1490  * if DB_CKSUMFLAGS() is non-zero then we assume dohwcksum to be true
1491  * for this packet.
1492  *
1493  * This function assumes that it is *only* being called for TCP or UDP
1494  * packets and nothing else.
1495  */
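/*
 * For reference (a hedged arithmetic sketch, not code from this file): the
 * 16-bit one's complement folds below work like this.  A 32-bit partial sum
 * of 0x0002bc3d folds as (0x0002bc3d & 0xffff) + (0x0002bc3d >> 16) =
 * 0xbc3d + 0x2 = 0xbc3f; a second fold would only be needed if the first
 * addition itself carried past 16 bits.
 */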
1496 static int
1497 ip_send_align_hcksum_flags(mblk_t *mp, ill_t *ill)
1498 {
1499 	int illhckflags;
1500 	int mbhckflags;
1501 	uint16_t *up;
1502 	uint32_t cksum;
1503 	ipha_t *ipha;
1504 	ip6_t *ip6;
1505 	int proto;
1506 	int ipversion;
1507 	int length;
1508 	int start;
1509 	ip6_pkt_t ipp;
1510 
1511 	mbhckflags = DB_CKSUMFLAGS(mp);
1512 	ASSERT(mbhckflags != 0);
1513 	ASSERT(mp->b_datap->db_type == M_DATA);
1514 	/*
1515 	 * Since this function only knows how to manage the hardware checksum
 1516 	 * issue, reject any packets that have flags set aside from
 1517 	 * checksum-related attributes, as we cannot necessarily safely map
1518 	 * that packet onto the new NIC.  Packets that can be potentially
1519 	 * dropped here include those marked for LSO.
1520 	 */
1521 	if ((mbhckflags &
1522 	    ~(HCK_FULLCKSUM|HCK_PARTIALCKSUM|HCK_IPV4_HDRCKSUM)) != 0) {
1523 		DTRACE_PROBE2(pbr__incapable, (mblk_t *), mp, (ill_t *), ill);
1524 		freemsg(mp);
1525 		return (-1);
1526 	}
1527 
1528 	ipha = (ipha_t *)mp->b_rptr;
1529 
1530 	/*
1531 	 * Find out what the new NIC is capable of, if anything, and
1532 	 * only allow it to be used with M_DATA mblks being sent out.
1533 	 */
1534 	if (ILL_HCKSUM_CAPABLE(ill)) {
1535 		illhckflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
1536 	} else {
1537 		/*
1538 		 * No capabilities, so turn off everything.
1539 		 */
1540 		illhckflags = 0;
1541 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 0, 0);
1542 		mp->b_datap->db_struioflag &= ~STRUIO_IP;
1543 	}
1544 
1545 	DTRACE_PROBE4(pbr__info__a, (mblk_t *), mp, (ill_t *), ill,
1546 	    uint32_t, illhckflags, uint32_t, mbhckflags);
1547 	/*
1548 	 * This block of code that looks for the position of the TCP/UDP
1549 	 * checksum is early in this function because we need to know
1550 	 * what needs to be blanked out for the hardware checksum case.
1551 	 *
1552 	 * That we're in this function implies that the packet is either
1553 	 * TCP or UDP on Solaris, so checks are made for one protocol and
1554 	 * if that fails, the other is therefore implied.
1555 	 */
1556 	ipversion = IPH_HDR_VERSION(ipha);
1557 
1558 	if (ipversion == IPV4_VERSION) {
1559 		proto = ipha->ipha_protocol;
1560 		if (proto == IPPROTO_TCP) {
1561 			up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
1562 		} else {
1563 			up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
1564 		}
1565 	} else {
1566 		uint8_t lasthdr;
1567 
1568 		/*
1569 		 * Nothing I've seen indicates that IPv6 checksum'ing
1570 		 * precludes the presence of extension headers, so we
1571 		 * can't just look at the next header value in the IPv6
1572 		 * packet header to see if it is TCP/UDP.
1573 		 */
1574 		ip6 = (ip6_t *)ipha;
1575 		(void) memset(&ipp, 0, sizeof (ipp));
1576 		start = ip_find_hdr_v6(mp, ip6, &ipp, &lasthdr);
1577 		proto = lasthdr;
1578 
1579 		if (proto == IPPROTO_TCP) {
1580 			up = IPH_TCPH_CHECKSUMP(ipha, start);
1581 		} else {
1582 			up = IPH_UDPH_CHECKSUMP(ipha, start);
1583 		}
1584 	}
1585 
1586 	/*
1587 	 * The first case here is easiest:
1588 	 * mblk hasn't asked for full checksum, but the card supports it.
1589 	 *
1590 	 * In addition, check for IPv4 header capability.  Note that only
1591 	 * the mblk flag is checked and not ipversion.
1592 	 */
1593 	if ((((illhckflags & HCKSUM_INET_FULL_V4) && (ipversion == 4)) ||
1594 	    (((illhckflags & HCKSUM_INET_FULL_V6) && (ipversion == 6)))) &&
1595 	    ((mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) != 0)) {
1596 		int newflags = HCK_FULLCKSUM;
1597 
1598 		if ((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) {
1599 			if ((illhckflags & HCKSUM_IPHDRCKSUM) != 0) {
1600 				newflags |= HCK_IPV4_HDRCKSUM;
1601 			} else {
1602 				/*
1603 				 * Rather than call a function, just inline
 1604 				 * the computation of the basic IPv4 header checksum.
1605 				 */
1606 				cksum = (ipha->ipha_dst >> 16) +
1607 				    (ipha->ipha_dst & 0xFFFF) +
1608 				    (ipha->ipha_src >> 16) +
1609 				    (ipha->ipha_src & 0xFFFF);
1610 				IP_HDR_CKSUM(ipha, cksum,
1611 				    ((uint32_t *)ipha)[0],
1612 				    ((uint16_t *)ipha)[4]);
1613 			}
1614 		}
1615 
1616 		*up = 0;
1617 		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
1618 		    newflags, 0);
1619 		return (0);
1620 	}
1621 
1622 	DTRACE_PROBE2(pbr__info__b, int, ipversion, int, proto);
1623 
1624 	/*
1625 	 * Start calculating the pseudo checksum over the IP packet header.
1626 	 * Although the final pseudo checksum used by TCP/UDP consists of
1627 	 * more than just the address fields, we can use the result of
1628 	 * adding those together a little bit further down for IPv4.
1629 	 */
1630 	if (ipversion == IPV4_VERSION) {
1631 		cksum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) +
1632 		    (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF);
1633 		start = IP_SIMPLE_HDR_LENGTH;
1634 		length = ntohs(ipha->ipha_length);
1635 		DTRACE_PROBE3(pbr__info__e, uint32_t, ipha->ipha_src,
1636 		    uint32_t, ipha->ipha_dst, int, cksum);
1637 	} else {
1638 		uint16_t *pseudo;
1639 
1640 		pseudo = (uint16_t *)&ip6->ip6_src;
1641 
1642 		/* calculate pseudo-header checksum */
1643 		cksum = pseudo[0] + pseudo[1] + pseudo[2] + pseudo[3] +
1644 		    pseudo[4] + pseudo[5] + pseudo[6] + pseudo[7] +
1645 		    pseudo[8] + pseudo[9] + pseudo[10] + pseudo[11] +
1646 		    pseudo[12] + pseudo[13] + pseudo[14] + pseudo[15];
1647 
1648 		length = ntohs(ip6->ip6_plen) + sizeof (ip6_t);
1649 	}
1650 
1651 	/* Fold the initial sum */
1652 	cksum = (cksum & 0xffff) + (cksum >> 16);
1653 
1654 	/*
1655 	 * If the packet was asking for an IPv4 header checksum to be
1656 	 * calculated but the interface doesn't support that, fill it in
1657 	 * using our pseudo checksum as a starting point.
1658 	 */
1659 	if (((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) &&
1660 	    ((illhckflags & HCKSUM_IPHDRCKSUM) == 0)) {
1661 		/*
 1662 		 * IP_HDR_CKSUM uses the 2nd arg to the macro in a destructive
1663 		 * way so pass in a copy of the checksum calculated thus far.
1664 		 */
1665 		uint32_t ipsum = cksum;
1666 
1667 		DB_CKSUMFLAGS(mp) &= ~HCK_IPV4_HDRCKSUM;
1668 
1669 		IP_HDR_CKSUM(ipha, ipsum, ((uint32_t *)ipha)[0],
1670 		    ((uint16_t *)ipha)[4]);
1671 	}
1672 
1673 	DTRACE_PROBE3(pbr__info__c, int, start, int, length, int, cksum);
1674 
1675 	if (proto == IPPROTO_TCP) {
1676 		cksum += IP_TCP_CSUM_COMP;
1677 	} else {
1678 		cksum += IP_UDP_CSUM_COMP;
1679 	}
1680 	cksum += htons(length - start);
1681 	cksum = (cksum & 0xffff) + (cksum >> 16);
1682 
1683 	/*
 1684 	 * For TCP/UDP, we either want to set up the packet for partial
1685 	 * checksum or we want to do it all ourselves because the NIC
1686 	 * offers no support for either partial or full checksum.
1687 	 */
1688 	if ((illhckflags & HCKSUM_INET_PARTIAL) != 0) {
1689 		/*
1690 		 * The only case we care about here is if the mblk was
1691 		 * previously set for full checksum offload.  If it was
1692 		 * marked for partial (and the NIC does partial), then
1693 		 * we have nothing to do.  Similarly, if the packet was
1694 		 * not marked for partial or full checksum, we do nothing,
1695 		 * since that is cheaper than setting something up.
1696 		 */
1697 		if ((mbhckflags & HCK_FULLCKSUM) != 0) {
1698 			uint32_t offset;
1699 
1700 			if (proto == IPPROTO_TCP) {
1701 				offset = TCP_CHECKSUM_OFFSET;
1702 			} else {
1703 				offset = UDP_CHECKSUM_OFFSET;
1704 			}
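			/*
			 * Seed the transport checksum field with the
			 * pseudo-header sum; hcksum_assoc() records the
			 * start and stuff offsets so the NIC's partial
			 * checksum covers the rest of the packet.
			 */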
1705 			*up = cksum;
1706 
1707 			DTRACE_PROBE3(pbr__info__f, int, length - start, int,
1708 			    cksum, int, offset);
1709 
1710 			(void) hcksum_assoc(mp, NULL, NULL, start,
1711 			    start + offset, length, 0,
1712 			    DB_CKSUMFLAGS(mp) | HCK_PARTIALCKSUM, 0);
1713 		}
1714 
1715 	} else if (mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) {
1716 		DB_CKSUMFLAGS(mp) &= ~(HCK_PARTIALCKSUM|HCK_FULLCKSUM);
1717 
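		/*
		 * Fall back to software: zero the checksum field so it
		 * does not contribute to the sum, then compute the full
		 * transport checksum here, seeded with the pseudo-header
		 * sum from above.
		 */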
1718 		*up = 0;
1719 		*up = IP_CSUM(mp, start, cksum);
1720 	}
1721 
1722 	DTRACE_PROBE4(pbr__info__d, (mblk_t *), mp, (ipha_t *), ipha,
1723 	    (uint16_t *), up, int, cksum);
1724 	return (0);
1725 }
1726 
1727 /*
1728  * callback function provided by ire_ftable_lookup when calling
1729  * rn_match_args(). Invoke ire_match_args on each matching leaf node in
1730  * the radix tree.
1731  */
1732 boolean_t
1733 ire_find_best_route(struct radix_node *rn, void *arg)
1734 {
1735 	struct rt_entry *rt = (struct rt_entry *)rn;
1736 	irb_t *irb_ptr;
1737 	ire_t *ire;
1738 	ire_ftable_args_t *margs = arg;
1739 	ipaddr_t match_mask;
1740 
1741 	ASSERT(rt != NULL);
1742 
1743 	irb_ptr = &rt->rt_irb;
1744 
1745 	if (irb_ptr->irb_ire_cnt == 0)
1746 		return (B_FALSE);
1747 
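	/*
	 * Walk the bucket under irb_lock.  The first ire that passes
	 * ire_match_args() is refheld and handed back through
	 * margs->ift_best_ire; returning B_TRUE tells rn_match_args()
	 * that this leaf satisfied the lookup.
	 */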
1748 	rw_enter(&irb_ptr->irb_lock, RW_READER);
1749 	for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) {
1750 		if (ire->ire_marks & IRE_MARK_CONDEMNED)
1751 			continue;
1752 		if (margs->ift_flags & MATCH_IRE_MASK)
1753 			match_mask = margs->ift_mask;
1754 		else
1755 			match_mask = ire->ire_mask;
1756 
1757 		if (ire_match_args(ire, margs->ift_addr, match_mask,
1758 		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
1759 		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
1760 		    margs->ift_flags, NULL)) {
1761 			IRE_REFHOLD(ire);
1762 			rw_exit(&irb_ptr->irb_lock);
1763 			margs->ift_best_ire = ire;
1764 			return (B_TRUE);
1765 		}
1766 	}
1767 	rw_exit(&irb_ptr->irb_lock);
1768 	return (B_FALSE);
1769 }
1770 
1771 /*
1772  * ftable irb_t structures are dynamically allocated, and we need to
1773  * check if the irb_t (and associated ftable tree attachment) needs to
1774  * be cleaned up when the irb_refcnt goes to 0. The conditions that need
1775  * to be verified are:
1776  * - no other walkers of the ire bucket, i.e., quiescent irb_refcnt,
1777  * - no other threads holding references to ire's in the bucket,
1778  *   i.e., irb_nire == 0
1779  * - no active ire's in the bucket, i.e., irb_ire_cnt == 0
1780  * - need to hold the global tree lock and irb_lock in write mode.
1781  */
1782 void
1783 irb_refrele_ftable(irb_t *irb)
1784 {
1785 	for (;;) {
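		/*
		 * Retry loop: any path below that drops irb_lock without
		 * freeing the irb either returns or comes back here to
		 * re-evaluate the conditions above.
		 */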
1786 		rw_enter(&irb->irb_lock, RW_WRITER);
1787 		ASSERT(irb->irb_refcnt != 0);
1788 		if (irb->irb_refcnt != 1) {
1789 			/*
1790 			 * Someone has a reference to this radix node
1791 			 * or there is some bucket walker.
1792 			 */
1793 			irb->irb_refcnt--;
1794 			rw_exit(&irb->irb_lock);
1795 			return;
1796 		} else {
1797 			/*
1798 			 * There is no other walker, nor is there any
1799 			 * other thread that holds a direct ref to this
1800 			 * radix node. Do the cleanup if needed. The call
1801 			 * to ire_unlink() clears the IRB_MARK_CONDEMNED flag.
1802 			 */
1803 			if (irb->irb_marks & IRB_MARK_CONDEMNED)  {
1804 				ire_t *ire_list;
1805 
1806 				ire_list = ire_unlink(irb);
1807 				rw_exit(&irb->irb_lock);
1808 
1809 				if (ire_list != NULL)
1810 					ire_cleanup(ire_list);
1811 				/*
1812 				 * more CONDEMNED entries could have
1813 				 * been added while we dropped the lock,
1814 				 * so we have to re-check.
1815 				 */
1816 				continue;
1817 			}
1818 
1819 			/*
1820 			 * Now check if there are still any ires
1821 			 * associated with this radix node.
1822 			 */
1823 			if (irb->irb_nire != 0) {
1824 				/*
1825 				 * someone is still holding on
1826 				 * to ires in this bucket
1827 				 */
1828 				irb->irb_refcnt--;
1829 				rw_exit(&irb->irb_lock);
1830 				return;
1831 			} else {
1832 				/*
1833 				 * Everything is clear: zero walkers,
1834 				 * zero threads with a ref to this
1835 				 * radix node, and zero ires associated
1836 				 * with it. Due to the lock order, the
1837 				 * above conditions are checked again
1838 				 * after grabbing all locks in the right order.
1839 				 */
1840 				rw_exit(&irb->irb_lock);
1841 				if (irb_inactive(irb))
1842 					return;
1843 				/*
1844 				 * irb_inactive could not free the irb.
1845 				 * See if there are any walkers, if not
1846 				 * try to clean up again.
1847 				 */
1848 			}
1849 		}
1850 	}
1851 }
1852 
1853 /*
1854  * IRE iterator used by ire_ftable_lookup() to process multiple default
1855  * routes. Starting at the bucket's round-robin origin (irb_rr_origin),
1856  * walk the IREs in the bucket, skipping condemned entries and entries
1857  * that do not match the passed-in arguments. Returns the selected IRE
1858  * held (the caller must ire_refrele() it), or NULL if nothing matched.
1859  *
1860  * In the absence of good IRE_DEFAULT routes, this function will return
1861  * the first IRE_INTERFACE route found (if any).
1862  */
1863 ire_t *
1864 ire_round_robin(irb_t *irb_ptr, zoneid_t zoneid, ire_ftable_args_t *margs,
1865 	ip_stack_t *ipst)
1866 {
1867 	ire_t	*ire_origin;
1868 	ire_t	*ire, *maybe_ire = NULL;
1869 
1870 	rw_enter(&irb_ptr->irb_lock, RW_WRITER);
1871 	ire_origin = irb_ptr->irb_rr_origin;
1872 	if (ire_origin != NULL) {
1873 		ire_origin = ire_origin->ire_next;
1874 		IRE_FIND_NEXT_ORIGIN(ire_origin);
1875 	}
1876 
1877 	if (ire_origin == NULL) {
1878 		/*
1879 		 * First time through the routine, or we dropped off
1880 		 * the end of the list.
1881 		 */
1882 		ire_origin = irb_ptr->irb_ire;
1883 		IRE_FIND_NEXT_ORIGIN(ire_origin);
1884 	}
1885 	irb_ptr->irb_rr_origin = ire_origin;
1886 	IRB_REFHOLD_LOCKED(irb_ptr);
1887 	rw_exit(&irb_ptr->irb_lock);
1888 
1889 	DTRACE_PROBE2(ire__rr__origin, (irb_t *), irb_ptr,
1890 	    (ire_t *), ire_origin);
1891 
1892 	/*
1893 	 * Round-robin the routers list looking for a route that
1894 	 * matches the passed in parameters.
1895 	 * We start with the ire we found above and we walk the hash
1896 	 * list until we're back where we started. It doesn't matter if
1897 	 * routes are added or deleted by other threads - we know this
1898 	 * ire will stay in the list because we hold a reference on the
1899 	 * ire bucket.
1900 	 */
1901 	ire = ire_origin;
1902 	while (ire != NULL) {
1903 		int match_flags = MATCH_IRE_TYPE | MATCH_IRE_SECATTR;
1904 		ire_t *rire;
1905 
1906 		if (ire->ire_marks & IRE_MARK_CONDEMNED)
1907 			goto next_ire;
1908 
1909 		if (!ire_match_args(ire, margs->ift_addr, (ipaddr_t)0,
1910 		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
1911 		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
1912 		    margs->ift_flags, NULL))
1913 			goto next_ire;
1914 
1915 		if (ire->ire_type & IRE_INTERFACE) {
1916 			/*
1917 			 * keep looking to see if there is a non-interface
1918 			 * default ire, but save this one as a last resort.
1919 			 */
1920 			if (maybe_ire == NULL)
1921 				maybe_ire = ire;
1922 			goto next_ire;
1923 		}
1924 
1925 		if (zoneid == ALL_ZONES) {
1926 			IRE_REFHOLD(ire);
1927 			IRB_REFRELE(irb_ptr);
1928 			return (ire);
1929 		}
1930 		/*
1931 		 * When we're in a non-global zone, we're only
1932 		 * interested in routers that are reachable
1933 		 * through ipifs within our zone.
1934 		 */
1935 		if (ire->ire_ipif != NULL) {
1936 			match_flags |= MATCH_IRE_ILL_GROUP;
1937 		}
1938 		rire = ire_route_lookup(ire->ire_gateway_addr, 0, 0,
1939 		    IRE_INTERFACE, ire->ire_ipif, NULL, zoneid, margs->ift_tsl,
1940 		    match_flags, ipst);
1941 		if (rire != NULL) {
1942 			ire_refrele(rire);
1943 			IRE_REFHOLD(ire);
1944 			IRB_REFRELE(irb_ptr);
1945 			return (ire);
1946 		}
1947 next_ire:
1948 		ire = (ire->ire_next ?  ire->ire_next : irb_ptr->irb_ire);
1949 		if (ire == ire_origin)
1950 			break;
1951 	}
1952 	if (maybe_ire != NULL)
1953 		IRE_REFHOLD(maybe_ire);
1954 	IRB_REFRELE(irb_ptr);
1955 	return (maybe_ire);
1956 }
1957 
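/*
 * Take a reference on the irb embedded in the rt_entry of a radix leaf.
 * The tree's internal root nodes (RNF_ROOT) are not route entries and
 * are skipped.
 */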
1958 void
1959 irb_refhold_rn(struct radix_node *rn)
1960 {
1961 	if ((rn->rn_flags & RNF_ROOT) == 0)
1962 		IRB_REFHOLD(&((rt_t *)(rn))->rt_irb);
1963 }
1964 
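/*
 * Release a reference taken by irb_refhold_rn().  irb_refrele_ftable()
 * cleans up the irb and its ftable tree attachment when the last
 * reference goes away.
 */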
1965 void
1966 irb_refrele_rn(struct radix_node *rn)
1967 {
1968 	if ((rn->rn_flags & RNF_ROOT) == 0)
1969 		irb_refrele_ftable(&((rt_t *)(rn))->rt_irb);
1970 }
1971