1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * This file contains consumer routines of the IPv4 forwarding engine 30 */ 31 32 #include <sys/types.h> 33 #include <sys/stream.h> 34 #include <sys/stropts.h> 35 #include <sys/strlog.h> 36 #include <sys/dlpi.h> 37 #include <sys/ddi.h> 38 #include <sys/cmn_err.h> 39 #include <sys/policy.h> 40 41 #include <sys/systm.h> 42 #include <sys/strsun.h> 43 #include <sys/kmem.h> 44 #include <sys/param.h> 45 #include <sys/socket.h> 46 #include <sys/strsubr.h> 47 #include <sys/pattr.h> 48 #include <net/if.h> 49 #include <net/route.h> 50 #include <netinet/in.h> 51 #include <net/if_dl.h> 52 #include <netinet/ip6.h> 53 #include <netinet/icmp6.h> 54 55 #include <inet/common.h> 56 #include <inet/mi.h> 57 #include <inet/mib2.h> 58 #include <inet/ip.h> 59 #include <inet/ip_impl.h> 60 #include <inet/ip6.h> 61 #include <inet/ip_ndp.h> 62 #include <inet/arp.h> 63 #include <inet/ip_if.h> 64 #include <inet/ip_ire.h> 65 #include <inet/ip_ftable.h> 66 #include <inet/ip_rts.h> 67 #include 
<inet/nd.h>

#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/sadb.h>
#include <sys/kmem.h>
#include <inet/tcp.h>
#include <inet/ipclassifier.h>
#include <sys/zone.h>
#include <net/radix.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>

/*
 * An ire is a "default route" if it is an IRE_DEFAULT, or an interface
 * route whose address is 0 (i.e. a 0/0 interface route).
 */
#define	IS_DEFAULT_ROUTE(ire)	\
	(((ire)->ire_type & IRE_DEFAULT) || \
	    (((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0)))

/*
 * structure for passing args between ire_ftable_lookup and ire_find_best_route
 */
typedef struct ire_ftable_args_s {
	ipaddr_t	ift_addr;
	ipaddr_t	ift_mask;
	ipaddr_t	ift_gateway;
	int		ift_type;
	const ipif_t	*ift_ipif;
	zoneid_t	ift_zoneid;
	uint32_t	ift_ihandle;
	const ts_label_t *ift_tsl;
	int		ift_flags;
	ire_t		*ift_best_ire;	/* best match found so far (held) */
} ire_ftable_args_t;

static ire_t	*route_to_dst(const struct sockaddr *, zoneid_t, ip_stack_t *);
static ire_t	*ire_round_robin(irb_t *, zoneid_t, ire_ftable_args_t *,
    ip_stack_t *);
static void	ire_del_host_redir(ire_t *, char *);
static boolean_t ire_find_best_route(struct radix_node *, void *);
static int	ip_send_align_hcksum_flags(mblk_t *, ill_t *);

/*
 * Lookup a route in forwarding table. A specific lookup is indicated by
 * passing the required parameters and indicating the match required in the
 * flag field.
 *
 * Looking for default route can be done in three ways
 * 1) pass mask as 0 and set MATCH_IRE_MASK in flags field
 *    along with other matches.
 * 2) pass type as IRE_DEFAULT and set MATCH_IRE_TYPE in flags
 *    field along with other matches.
 * 3) if the destination and mask are passed as zeros.
 *
 * A request to return a default route if no route
 * is found, can be specified by setting MATCH_IRE_DEFAULT
 * in flags.
 *
 * It does not support recursion more than one level. It
 * will do recursive lookup only when the lookup maps to
 * a prefix or default route and MATCH_IRE_RECURSIVE flag is passed.
 *
 * If the routing table is setup to allow more than one level
 * of recursion, the cleaning up cache table will not work resulting
 * in invalid routing.
 *
 * Supports IP_BOUND_IF by following the ipif/ill when recursing.
 *
 * NOTE : When this function returns NULL, pire has already been released.
 *	  pire is valid only when this function successfully returns an
 *	  ire.  The returned ire (and *pire, when set) are held; the caller
 *	  must ire_refrele() them.
 */
ire_t *
ire_ftable_lookup(ipaddr_t addr, ipaddr_t mask, ipaddr_t gateway,
    int type, const ipif_t *ipif, ire_t **pire, zoneid_t zoneid,
    uint32_t ihandle, const ts_label_t *tsl, int flags, ip_stack_t *ipst)
{
	ire_t *ire = NULL;
	ipaddr_t gw_addr;
	struct rt_sockaddr rdst, rmask;
	struct rt_entry *rt;
	ire_ftable_args_t margs;
	boolean_t found_incomplete = B_FALSE;

	ASSERT(ipif == NULL || !ipif->ipif_isv6);

	/*
	 * When we return NULL from this function, we should make
	 * sure that *pire is NULL so that the callers will not
	 * wrongly REFRELE the pire.
	 */
	if (pire != NULL)
		*pire = NULL;
	/*
	 * ire_match_args() will dereference ipif if MATCH_IRE_SRC or
	 * MATCH_IRE_ILL is set, so refuse those matches without an ipif.
	 */
	if ((flags & (MATCH_IRE_SRC | MATCH_IRE_ILL | MATCH_IRE_ILL_GROUP)) &&
	    (ipif == NULL))
		return (NULL);

	/* Build the radix-tree keys: destination and mask sockaddrs. */
	(void) memset(&rdst, 0, sizeof (rdst));
	rdst.rt_sin_len = sizeof (rdst);
	rdst.rt_sin_family = AF_INET;
	rdst.rt_sin_addr.s_addr = addr;

	(void) memset(&rmask, 0, sizeof (rmask));
	rmask.rt_sin_len = sizeof (rmask);
	rmask.rt_sin_family = AF_INET;
	rmask.rt_sin_addr.s_addr = mask;

	(void) memset(&margs, 0, sizeof (margs));
	margs.ift_addr = addr;
	margs.ift_mask = mask;
	margs.ift_gateway = gateway;
	margs.ift_type = type;
	margs.ift_ipif = ipif;
	margs.ift_zoneid = zoneid;
	margs.ift_ihandle = ihandle;
	margs.ift_tsl = tsl;
	margs.ift_flags = flags;

	/*
	 * The flags argument passed to ire_ftable_lookup may cause the
	 * search to return, not the longest matching prefix, but the
	 * "best matching prefix", i.e., the longest prefix that also
	 * satisfies constraints imposed via the permutation of flags
	 * passed in. To achieve this, we invoke ire_match_args() on
	 * each matching leaf in the radix tree. ire_match_args is
	 * invoked by the callback function ire_find_best_route()
	 * We hold the global tree lock in read mode when calling
	 * rn_match_args.Before dropping the global tree lock, ensure
	 * that the radix node can't be deleted by incrementing ire_refcnt.
	 */
	RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
	rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst,
	    ipst->ips_ip_ftable, ire_find_best_route, &margs);
	ire = margs.ift_best_ire;	/* held by ire_find_best_route */
	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);

	if (rt == NULL) {
		return (NULL);
	} else {
		ASSERT(ire != NULL);
	}

	DTRACE_PROBE2(ire__found, ire_ftable_args_t *, &margs, ire_t *, ire);

	if (!IS_DEFAULT_ROUTE(ire))
		goto found_ire_held;
	/*
	 * If default route is found, see if default matching criteria
	 * are satisfied.
	 */
	if (flags & MATCH_IRE_MASK) {
		/*
		 * we were asked to match a 0 mask, and came back with
		 * a default route. Ok to return it.
		 */
		goto found_default_ire;
	}
	if ((flags & MATCH_IRE_TYPE) &&
	    (type & (IRE_DEFAULT | IRE_INTERFACE))) {
		/*
		 * we were asked to match a default ire type. Ok to return it.
		 */
		goto found_default_ire;
	}
	if (flags & MATCH_IRE_DEFAULT) {
		goto found_default_ire;
	}
	/*
	 * we found a default route, but default matching criteria
	 * are not specified and we are not explicitly looking for
	 * default.
	 */
	IRE_REFRELE(ire);
	return (NULL);
found_default_ire:
	/*
	 * round-robin only if we have more than one route in the bucket.
	 */
	if ((ire->ire_bucket->irb_ire_cnt > 1) &&
	    IS_DEFAULT_ROUTE(ire) &&
	    ((flags & (MATCH_IRE_DEFAULT | MATCH_IRE_MASK)) ==
	    MATCH_IRE_DEFAULT)) {
		ire_t *next_ire;

		next_ire = ire_round_robin(ire->ire_bucket, zoneid, &margs,
		    ipst);
		IRE_REFRELE(ire);
		if (next_ire != NULL) {
			ire = next_ire;
		} else {
			/* no route */
			return (NULL);
		}
	}
found_ire_held:
	/* Reject/blackhole routes are returned as-is when asked for. */
	if ((flags & MATCH_IRE_RJ_BHOLE) &&
	    (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) {
		return (ire);
	}
	/*
	 * At this point, IRE that was found must be an IRE_FORWARDTABLE
	 * type. If this is a recursive lookup and an IRE_INTERFACE type was
	 * found, return that. If it was some other IRE_FORWARDTABLE type of
	 * IRE (one of the prefix types), then it is necessary to fill in the
	 * parent IRE pointed to by pire, and then lookup the gateway address of
	 * the parent. For backwards compatiblity, if this lookup returns an
	 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level
	 * of lookup is done.
	 */
	if (flags & MATCH_IRE_RECURSIVE) {
		ipif_t	*gw_ipif;
		int match_flags = MATCH_IRE_DSTONLY;
		ire_t *save_ire;

		if (ire->ire_type & IRE_INTERFACE)
			return (ire);
		if (pire != NULL)
			*pire = ire;
		/*
		 * If we can't find an IRE_INTERFACE or the caller has not
		 * asked for pire, we need to REFRELE the save_ire.
		 */
		save_ire = ire;

		/*
		 * Currently MATCH_IRE_ILL is never used with
		 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while
		 * sending out packets as MATCH_IRE_ILL is used only
		 * for communicating with on-link hosts. We can't assert
		 * that here as RTM_GET calls this function with
		 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE.
		 * We have already used the MATCH_IRE_ILL in determining
		 * the right prefix route at this point. To match the
		 * behavior of how we locate routes while sending out
		 * packets, we don't want to use MATCH_IRE_ILL below
		 * while locating the interface route.
		 *
		 * ire_ftable_lookup may end up with an incomplete IRE_CACHE
		 * entry for the gateway (i.e., one for which the
		 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller
		 * has specified MATCH_IRE_COMPLETE, such entries will not
		 * be returned; instead, we return the IF_RESOLVER ire.
		 */
		if (ire->ire_ipif != NULL)
			match_flags |= MATCH_IRE_ILL_GROUP;

		/* First-level recursion: resolve the prefix's gateway. */
		ire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 0,
		    ire->ire_ipif, NULL, zoneid, tsl, match_flags, ipst);
		DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire,
		    (ire_t *), save_ire);
		if (ire == NULL ||
		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
		    ire->ire_nce->nce_state != ND_REACHABLE &&
		    (flags & MATCH_IRE_COMPLETE))) {
			/*
			 * Do not release the parent ire if MATCH_IRE_PARENT
			 * is set. Also return it via ire.
			 */
			if (ire != NULL) {
				/*
				 * Incomplete cache entry: drop it and
				 * remember to retry with IRE_INTERFACE only
				 * (found_incomplete falls through below).
				 */
				ire_refrele(ire);
				ire = NULL;
				found_incomplete = B_TRUE;
			}
			if (flags & MATCH_IRE_PARENT) {
				if (pire != NULL) {
					/*
					 * Need an extra REFHOLD, if the parent
					 * ire is returned via both ire and
					 * pire.
					 */
					IRE_REFHOLD(save_ire);
				}
				ire = save_ire;
			} else {
				ire_refrele(save_ire);
				if (pire != NULL)
					*pire = NULL;
			}
			if (!found_incomplete)
				return (ire);
		}
		if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) {
			/*
			 * If the caller did not ask for pire, release
			 * it now.
			 */
			if (pire == NULL) {
				ire_refrele(save_ire);
			}
			return (ire);
		}
		/* Second (final) level of recursion, for compatibility. */
		match_flags |= MATCH_IRE_TYPE;
		gw_addr = ire->ire_gateway_addr;
		gw_ipif = ire->ire_ipif;
		ire_refrele(ire);
		ire = ire_route_lookup(gw_addr, 0, 0,
		    (found_incomplete? IRE_INTERFACE :
		    (IRE_CACHETABLE | IRE_INTERFACE)),
		    gw_ipif, NULL, zoneid, tsl, match_flags, ipst);
		DTRACE_PROBE2(ftable__route__lookup2, (ire_t *), ire,
		    (ire_t *), save_ire);
		if (ire == NULL ||
		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
		    ire->ire_nce->nce_state != ND_REACHABLE &&
		    (flags & MATCH_IRE_COMPLETE))) {
			/*
			 * Do not release the parent ire if MATCH_IRE_PARENT
			 * is set. Also return it via ire.
			 */
			if (ire != NULL) {
				ire_refrele(ire);
				ire = NULL;
			}
			if (flags & MATCH_IRE_PARENT) {
				if (pire != NULL) {
					/*
					 * Need an extra REFHOLD, if the
					 * parent ire is returned via both
					 * ire and pire.
					 */
					IRE_REFHOLD(save_ire);
				}
				ire = save_ire;
			} else {
				ire_refrele(save_ire);
				if (pire != NULL)
					*pire = NULL;
			}
			return (ire);
		} else if (pire == NULL) {
			/*
			 * If the caller did not ask for pire, release
			 * it now.
			 */
			ire_refrele(save_ire);
		}
		return (ire);
	}
	ASSERT(pire == NULL || *pire == NULL);
	return (ire);
}


/*
 * Find an IRE_OFFSUBNET IRE entry for the multicast address 'group'
 * that goes through 'ipif'. As a fallback, a route that goes through
 * ipif->ipif_ill can be returned.
 *
 * The returned ire (if any) is held; the caller must ire_refrele() it.
 */
ire_t *
ipif_lookup_multi_ire(ipif_t *ipif, ipaddr_t group)
{
	ire_t	*ire;
	ire_t	*save_ire = NULL;
	ire_t	*gw_ire;
	irb_t	*irb;
	ipaddr_t gw_addr;
	int	match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL;
	ip_stack_t	*ipst = ipif->ipif_ill->ill_ipst;

	ASSERT(CLASSD(group));

	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, ALL_ZONES, 0,
	    NULL, MATCH_IRE_DEFAULT, ipst);

	if (ire == NULL)
		return (NULL);

	irb = ire->ire_bucket;
	ASSERT(irb);

	/* Hold the bucket so we can walk it after dropping the ire ref. */
	IRB_REFHOLD(irb);
	ire_refrele(ire);
	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
		/*
		 * Skip ires for other groups, and ires in a different
		 * (specific) zone than ipif's.  Note && binds tighter
		 * than ||, so ALL_ZONES ires are never skipped on zoneid.
		 */
		if (ire->ire_addr != group ||
		    ipif->ipif_zoneid != ire->ire_zoneid &&
		    ire->ire_zoneid != ALL_ZONES) {
			continue;
		}

		switch (ire->ire_type) {
		case IRE_DEFAULT:
		case IRE_PREFIX:
		case IRE_HOST:
			/* Offsubnet route: check its gateway's interface. */
			gw_addr = ire->ire_gateway_addr;
			gw_ire = ire_ftable_lookup(gw_addr, 0, 0, IRE_INTERFACE,
			    ipif, NULL, ALL_ZONES, 0, NULL, match_flags, ipst);

			if (gw_ire != NULL) {
				if (save_ire != NULL) {
					ire_refrele(save_ire);
				}
				IRE_REFHOLD(ire);
				if (gw_ire->ire_ipif == ipif) {
					/* Exact ipif match: done. */
					ire_refrele(gw_ire);

					IRB_REFRELE(irb);
					return (ire);
				}
				ire_refrele(gw_ire);
				/* same ill: remember as fallback */
				save_ire = ire;
			}
			break;
		case IRE_IF_NORESOLVER:
		case IRE_IF_RESOLVER:
			if (ire->ire_ipif == ipif) {
				if (save_ire != NULL) {
					ire_refrele(save_ire);
				}
				IRE_REFHOLD(ire);

				IRB_REFRELE(irb);
				return (ire);
			}
			break;
		}
	}
	IRB_REFRELE(irb);

	return (save_ire);
}

/*
 * Find an IRE_INTERFACE for the multicast group.
 * Allows different routes for multicast addresses
 * in the unicast routing table (akin to 224.0.0.0 but could be more specific)
 * which point at different interfaces. This is used when IP_MULTICAST_IF
 * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't
 * specify the interface to join on.
 *
 * Supports IP_BOUND_IF by following the ipif/ill when recursing.
 *
 * Returns a held IRE_IF_NORESOLVER/IRE_IF_RESOLVER ire, or NULL; the
 * caller must ire_refrele() it.
 */
ire_t *
ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst)
{
	ire_t	*ire;
	ipif_t	*ipif = NULL;
	int	match_flags = MATCH_IRE_TYPE;
	ipaddr_t gw_addr;

	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid,
	    0, NULL, MATCH_IRE_DEFAULT, ipst);

	/* We search a resolvable ire in case of multirouting. */
	if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) {
		ire_t *cire = NULL;
		/*
		 * If the route is not resolvable, the looked up ire
		 * may be changed here. In that case, ire_multirt_lookup()
		 * IRE_REFRELE the original ire and change it.
		 */
		(void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW,
		    NULL, ipst);
		if (cire != NULL)
			ire_refrele(cire);
	}
	if (ire == NULL)
		return (NULL);
	/*
	 * Make sure we follow ire_ipif.
	 *
	 * We need to determine the interface route through
	 * which the gateway will be reached. We don't really
	 * care which interface is picked if the interface is
	 * part of a group.
	 */
	if (ire->ire_ipif != NULL) {
		ipif = ire->ire_ipif;
		match_flags |= MATCH_IRE_ILL_GROUP;
	}

	switch (ire->ire_type) {
	case IRE_DEFAULT:
	case IRE_PREFIX:
	case IRE_HOST:
		/* Offsubnet: recurse once to the gateway's interface ire. */
		gw_addr = ire->ire_gateway_addr;
		ire_refrele(ire);
		ire = ire_ftable_lookup(gw_addr, 0, 0,
		    IRE_INTERFACE, ipif, NULL, zoneid, 0,
		    NULL, match_flags, ipst);
		return (ire);
	case IRE_IF_NORESOLVER:
	case IRE_IF_RESOLVER:
		return (ire);
	default:
		ire_refrele(ire);
		return (NULL);
	}
}

/*
 * Delete the passed in ire if the gateway addr matches
 */
void
ire_del_host_redir(ire_t *ire, char *gateway)
{
	/*
	 * 'gateway' is the walker's opaque argument; it actually points
	 * at an ipaddr_t (see ire_delete_host_redirects()).  Only
	 * dynamically-learned (ICMP redirect, RTF_DYNAMIC) routes are
	 * deleted.
	 */
	if ((ire->ire_flags & RTF_DYNAMIC) &&
	    (ire->ire_gateway_addr == *(ipaddr_t *)gateway))
		ire_delete(ire);
}

/*
 * Search for all HOST REDIRECT routes that are
 * pointing at the specified gateway and
 * delete them. This routine is called only
 * when a default gateway is going away.
 */
void
ire_delete_host_redirects(ipaddr_t gateway, ip_stack_t *ipst)
{
	struct rtfuncarg rtfarg;

	(void) memset(&rtfarg, 0, sizeof (rtfarg));
	rtfarg.rt_func = ire_del_host_redir;
	rtfarg.rt_arg = (void *)&gateway;
	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
	    rtfunc, &rtfarg, irb_refhold_rn, irb_refrele_rn);
}

/* Walker argument for ire_ihandle_onlink_match(). */
struct ihandle_arg {
	uint32_t ihandle;	/* ihandle to search for */
	ire_t *ire;		/* out: matching interface ire (held) */
};

/*
 * Radix-tree walker callback: record the first IRE_INTERFACE ire whose
 * ihandle matches, take a hold on it, and return 1 to stop the walk.
 */
static int
ire_ihandle_onlink_match(struct radix_node *rn, void *arg)
{
	struct rt_entry *rt;
	irb_t *irb;
	ire_t *ire;
	struct ihandle_arg *ih = arg;

	rt = (struct rt_entry *)rn;
	ASSERT(rt != NULL);
	irb = &rt->rt_irb;
	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
		if ((ire->ire_type & IRE_INTERFACE) &&
		    (ire->ire_ihandle == ih->ihandle)) {
			ih->ire = ire;
			IRE_REFHOLD(ire);
			return (1);
		}
	}
	return (0);
}

/*
 * Locate the interface ire that
is tied to the cache ire 'cire' via
 * cire->ire_ihandle.
 *
 * We are trying to create the cache ire for an onlink destn. or
 * gateway in 'cire'. We are called from ire_add_v4() in the IRE_IF_RESOLVER
 * case, after the ire has come back from ARP.
 *
 * Returns a held interface ire, or NULL; the caller must ire_refrele() it.
 */
ire_t *
ire_ihandle_lookup_onlink(ire_t *cire)
{
	ire_t	*ire;
	int	match_flags;
	struct ihandle_arg ih;
	ip_stack_t *ipst;

	ASSERT(cire != NULL);
	ipst = cire->ire_ipst;

	/*
	 * We don't need to specify the zoneid to ire_ftable_lookup() below
	 * because the ihandle refers to an ipif which can be in only one zone.
	 */
	match_flags = MATCH_IRE_TYPE | MATCH_IRE_IHANDLE | MATCH_IRE_MASK;
	/*
	 * We know that the mask of the interface ire equals cire->ire_cmask.
	 * (When ip_newroute() created 'cire' for an on-link destn. it set its
	 * cmask from the interface ire's mask)
	 */
	ire = ire_ftable_lookup(cire->ire_addr, cire->ire_cmask, 0,
	    IRE_INTERFACE, NULL, NULL, ALL_ZONES, cire->ire_ihandle,
	    NULL, match_flags, ipst);
	if (ire != NULL)
		return (ire);
	/*
	 * If we didn't find an interface ire above, we can't declare failure.
	 * For backwards compatibility, we need to support prefix routes
	 * pointing to next hop gateways that are not on-link.
	 *
	 * In the resolver/noresolver case, ip_newroute() thinks it is creating
	 * the cache ire for an onlink destination in 'cire'. But 'cire' is
	 * not actually onlink, because ire_ftable_lookup() cheated it, by
	 * doing ire_route_lookup() twice and returning an interface ire.
	 *
	 * Eg. default	- gw1		(line 1)
	 *	gw1	- gw2		(line 2)
	 *	gw2	- hme0		(line 3)
	 *
	 * In the above example, ip_newroute() tried to create the cache ire
	 * 'cire' for gw1, based on the interface route in line 3. The
	 * ire_ftable_lookup() above fails, because there is no interface route
	 * to reach gw1. (it is gw2). We fall thru below.
	 *
	 * Do a brute force search based on the ihandle in a subset of the
	 * forwarding tables, corresponding to cire->ire_cmask. Otherwise
	 * things become very complex, since we don't have 'pire' in this
	 * case. (Also note that this method is not possible in the offlink
	 * case because we don't know the mask)
	 */
	(void) memset(&ih, 0, sizeof (ih));
	ih.ihandle = cire->ire_ihandle;
	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
	    ire_ihandle_onlink_match, &ih, irb_refhold_rn, irb_refrele_rn);
	return (ih.ire);	/* held by ire_ihandle_onlink_match(), or NULL */
}

/*
 * IRE iterator used by ire_ftable_lookup[_v6]() to process multiple default
 * routes. Given a starting point in the hash list (ire_origin), walk the IREs
 * in the bucket skipping default interface routes and deleted entries.
 * Returns the next IRE (unheld), or NULL when we're back to the starting point.
 * Assumes that the caller holds a reference on the IRE bucket.
 */
ire_t *
ire_get_next_default_ire(ire_t *ire, ire_t *ire_origin)
{
	ASSERT(ire_origin->ire_bucket != NULL);
	ASSERT(ire != NULL);

	do {
		ire = ire->ire_next;
		if (ire == NULL)
			ire = ire_origin->ire_bucket->irb_ire; /* wrap */
		if (ire == ire_origin)
			return (NULL);	/* full circle: no candidate */
	} while ((ire->ire_type & IRE_INTERFACE) ||
	    (ire->ire_marks & IRE_MARK_CONDEMNED));
	ASSERT(ire != NULL);
	return (ire);
}

/*
 * Select the source ipif for the IRE_CACHE that ire_forward() is about to
 * create.  Returns a held ipif (caller must ipif_refrele()), or NULL.
 * May set IRE_MARK_USESRC_CHECK in *marks.
 */
static ipif_t *
ire_forward_src_ipif(ipaddr_t dst, ire_t *sire, ire_t *ire, ill_t *dst_ill,
    int zoneid, ushort_t *marks)
{
	ipif_t *src_ipif;
	ip_stack_t *ipst = dst_ill->ill_ipst;

	/*
	 * Pick the best source address from dst_ill.
	 *
	 * 1) If it is part of a multipathing group, we would
	 *    like to spread the inbound packets across different
	 *    interfaces. ipif_select_source picks a random source
	 *    across the different ills in the group.
	 *
	 * 2) If it is not part of a multipathing group, we try
	 *    to pick the source address from the destination
	 *    route. Clustering assumes that when we have multiple
	 *    prefixes hosted on an interface, the prefix of the
	 *    source address matches the prefix of the destination
	 *    route. We do this only if the address is not
	 *    DEPRECATED.
	 *
	 * 3) If the conn is in a different zone than the ire, we
	 *    need to pick a source address from the right zone.
	 *
	 * NOTE : If we hit case (1) above, the prefix of the source
	 *	  address picked may not match the prefix of the
	 *	  destination routes prefix as ipif_select_source
	 *	  does not look at "dst" while picking a source
	 *	  address.
	 *	  If we want the same behavior as (2), we will need
	 *	  to change the behavior of ipif_select_source.
	 */

	if ((sire != NULL) && (sire->ire_flags & RTF_SETSRC)) {
		/*
		 * The RTF_SETSRC flag is set in the parent ire (sire).
		 * Check that the ipif matching the requested source
		 * address still exists.
		 * (ipif_lookup_addr presumably returns a held ipif, in
		 * line with the other return paths here — verify.)
		 */
		src_ipif = ipif_lookup_addr(sire->ire_src_addr, NULL,
		    zoneid, NULL, NULL, NULL, NULL, ipst);
		return (src_ipif);
	}
	*marks |= IRE_MARK_USESRC_CHECK;
	if ((dst_ill->ill_group != NULL) ||
	    (ire->ire_ipif->ipif_flags & IPIF_DEPRECATED) ||
	    (dst_ill->ill_usesrc_ifindex != 0)) {
		src_ipif = ipif_select_source(dst_ill, dst, zoneid);
		if (src_ipif == NULL)
			return (NULL);

	} else {
		src_ipif = ire->ire_ipif;
		ASSERT(src_ipif != NULL);
		/* hold src_ipif for uniformity */
		ipif_refhold(src_ipif);
	}
	return (src_ipif);
}

/*
 * This function is called by ip_rput_noire() and ip_fast_forward()
 * to resolve the route of incoming packet that needs to be forwarded.
 * If the ire of the nexthop is not already in the cachetable, this
 * routine will insert it to the table, but won't trigger ARP resolution yet.
 * Thus unlike ip_newroute, this function adds incomplete ires to
 * the cachetable. ARP resolution for these ires are delayed until
 * after all of the packet processing is completed and its ready to
 * be sent out on the wire, Eventually, the packet transmit routine
 * ip_xmit_v4() attempts to send a packet to the driver. If it finds
 * that there is no link layer information, it will do the arp
 * resolution and queue the packet in ire->ire_nce->nce_qd_mp and
 * then send it out once the arp resolution is over
 * (see ip_xmit_v4()->ire_arpresolve()). This scheme is similar to
 * the model of BSD/SunOS 4
 *
 * In future, the insertion of incomplete ires in the cachetable should
 * be implemented in hostpath as well, as doing so will greatly reduce
 * the existing complexity for code paths that depend on the context of
 * the sender (such as IPsec).
 *
 * Thus this scheme of adding incomplete ires in cachetable in forwarding
 * path can be used as a template for simplifying the hostpath.
 *
 * Returns a held ire (caller must ire_refrele()), or NULL with *ret_action
 * set to one of Forward_check_multirt, Forward_ret_icmp_err or
 * Forward_blackhole; on success *ret_action is Forward_ok.
 */

ire_t *
ire_forward(ipaddr_t dst, enum ire_forward_action *ret_action,
    ire_t *supplied_ire, ire_t *supplied_sire, const struct ts_label_s *tsl,
    ip_stack_t *ipst)
{
	ipaddr_t gw = 0;
	ire_t	*ire = NULL;
	ire_t	*sire = NULL, *save_ire;
	ill_t *dst_ill = NULL;
	int error;
	zoneid_t zoneid;
	ipif_t *src_ipif = NULL;
	mblk_t *res_mp;
	ushort_t ire_marks = 0;
	tsol_gcgrp_t *gcgrp = NULL;
	tsol_gcgrp_addr_t ga;

	zoneid = GLOBAL_ZONEID;

	if (supplied_ire != NULL) {
		/* We have arrived here from ipfil_sendpkt */
		ire = supplied_ire;
		sire = supplied_sire;
		goto create_irecache;
	}

	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
	    tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
	    MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT|MATCH_IRE_SECATTR, ipst);

	if (ire == NULL) {
		/* No route: notify routing sockets and bounce an ICMP error */
		ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst);
		goto icmp_err_ret;
	}

	/*
	 * If we encounter CGTP, we should have the caller use
	 * ip_newroute to resolve multirt instead of this function.
	 * CGTP specs explicitly state that it can't be used with routers.
	 * This essentially prevents insertion of incomplete RTF_MULTIRT
	 * ires in cachetable.
	 */
	if (ipst->ips_ip_cgtp_filter &&
	    ((ire->ire_flags & RTF_MULTIRT) ||
	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
		ip3dbg(("ire_forward: packet is to be multirouted- "
		    "handing it to ip_newroute\n"));
		if (sire != NULL)
			ire_refrele(sire);
		ire_refrele(ire);
		/*
		 * Inform caller about encountering of multirt so that
		 * ip_newroute() can be called.
		 */
		*ret_action = Forward_check_multirt;
		return (NULL);
	}

	/*
	 * Verify that the returned IRE does not have either
	 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is
	 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
	 */
	if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0) {
		ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n",
		    (void *)ire));
		goto icmp_err_ret;
	}

	/*
	 * If we already have a fully resolved IRE CACHE of the
	 * nexthop router, just hand over the cache entry
	 * and we are done.
	 */

	if (ire->ire_type & IRE_CACHE) {

		/*
		 * If we are using this ire cache entry as a
		 * gateway to forward packets, chances are we
		 * will be using it again. So turn off
		 * the temporary flag, thus reducing its
		 * chances of getting deleted frequently.
		 */
		if (ire->ire_marks & IRE_MARK_TEMPORARY) {
			irb_t *irb = ire->ire_bucket;
			rw_enter(&irb->irb_lock, RW_WRITER);
			/*
			 * We need to recheck for IRE_MARK_TEMPORARY after
			 * acquiring the lock in order to guarantee
			 * irb_tmp_ire_cnt
			 */
			if (ire->ire_marks & IRE_MARK_TEMPORARY) {
				ire->ire_marks &= ~IRE_MARK_TEMPORARY;
				irb->irb_tmp_ire_cnt--;
			}
			rw_exit(&irb->irb_lock);
		}

		if (sire != NULL) {
			UPDATE_OB_PKT_COUNT(sire);
			sire->ire_last_used_time = lbolt;
			ire_refrele(sire);
		}
		return (ire);
	}
create_irecache:
	/*
	 * Increment the ire_ob_pkt_count field for ire if it is an
	 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and
	 * increment the same for the parent IRE, sire, if it is some
	 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST).
	 */
	if ((ire->ire_type & IRE_INTERFACE) != 0) {
		UPDATE_OB_PKT_COUNT(ire);
		ire->ire_last_used_time = lbolt;
	}

	/*
	 * sire, when non-NULL, is the parent prefix route (DEFAULT/
	 * PREFIX/HOST): the ASSERT below verifies that it is neither
	 * an IRE_CACHETABLE nor an IRE_INTERFACE type.  (An earlier
	 * comment here claimed the opposite of what the ASSERT checks.)
	 */
	if (sire != NULL) {
		gw = sire->ire_gateway_addr;
		ASSERT((sire->ire_type &
		    (IRE_CACHETABLE | IRE_INTERFACE)) == 0);
		UPDATE_OB_PKT_COUNT(sire);
		sire->ire_last_used_time = lbolt;
	}

	/* Obtain dst_ill */
	dst_ill = ip_newroute_get_dst_ill(ire->ire_ipif->ipif_ill);
	if (dst_ill == NULL) {
		ip2dbg(("ire_forward no dst ill; ire 0x%p\n",
		    (void *)ire));
		goto icmp_err_ret;
	}

	ASSERT(src_ipif == NULL);
	/* Now obtain the src_ipif */
	src_ipif = ire_forward_src_ipif(dst, sire, ire, dst_ill,
	    zoneid, &ire_marks);
	if (src_ipif == NULL)
		goto icmp_err_ret;

	switch (ire->ire_type) {
	case IRE_IF_NORESOLVER:
		/* create ire_cache for ire_addr endpoint */
		if (dst_ill->ill_phys_addr_length != IP_ADDR_LEN &&
		    dst_ill->ill_resolver_mp == NULL) {
			ip1dbg(("ire_forward: dst_ill %p "
			    "for IRE_IF_NORESOLVER ire %p has "
			    "no ill_resolver_mp\n",
			    (void *)dst_ill, (void *)ire));
			goto icmp_err_ret;
		}
		/* FALLTHRU */
	case IRE_IF_RESOLVER:
		/*
		 * We have the IRE_IF_RESOLVER of the nexthop gateway
		 * and now need to build a IRE_CACHE for it.
		 * In this case, we have the following :
		 *
		 * 1) src_ipif - used for getting a source address.
		 *
		 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This
		 *    means packets using the IRE_CACHE that we will build
		 *    here will go out on dst_ill.
		 *
		 * 3) sire may or may not be NULL. But, the IRE_CACHE that is
		 *    to be created will only be tied to the IRE_INTERFACE
		 *    that was derived from the ire_ihandle field.
		 *
		 *    If sire is non-NULL, it means the destination is
		 *    off-link and we will first create the IRE_CACHE for the
		 *    gateway.
		 */
		res_mp = dst_ill->ill_resolver_mp;
		if (ire->ire_type == IRE_IF_RESOLVER &&
		    (!OK_RESOLVER_MP(res_mp))) {
			goto icmp_err_ret;
		}
		/*
		 * To be at this point in the code with a non-zero gw
		 * means that dst is reachable through a gateway that
		 * we have never resolved. By changing dst to the gw
		 * addr we resolve the gateway first.
		 */
		if (gw != INADDR_ANY) {
			/*
			 * The source ipif that was determined above was
			 * relative to the destination address, not the
			 * gateway's. If src_ipif was not taken out of
			 * the IRE_IF_RESOLVER entry, we'll need to call
			 * ipif_select_source() again.
			 */
			if (src_ipif != ire->ire_ipif) {
				ipif_refrele(src_ipif);
				src_ipif = ipif_select_source(dst_ill,
				    gw, zoneid);
				if (src_ipif == NULL)
					goto icmp_err_ret;
			}
			dst = gw;
			gw = INADDR_ANY;
		}
		/*
		 * dst has been set to the address of the nexthop.
		 *
		 * TSol note: get security attributes of the nexthop;
		 * Note that the nexthop may either be a gateway, or the
		 * packet destination itself; Detailed explanation of
		 * issues involved is provided in the IRE_IF_NORESOLVER
		 * logic in ip_newroute().
		 */
		ga.ga_af = AF_INET;
		IN6_IPADDR_TO_V4MAPPED(dst, &ga.ga_addr);
		gcgrp = gcgrp_lookup(&ga, B_FALSE);

		if (ire->ire_type == IRE_IF_NORESOLVER)
			dst = ire->ire_addr;	/* ire_cache for tunnel endpoint */

		save_ire = ire;
		/*
		 * create an incomplete IRE_CACHE.
		 * An areq_mp will be generated in ire_arpresolve() for
		 * RESOLVER interfaces.
		 */
		ire = ire_create(
		    (uchar_t *)&dst,		/* dest address */
		    (uchar_t *)&ip_g_all_ones,	/* mask */
		    (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */
		    (uchar_t *)&gw,		/* gateway address */
		    (save_ire->ire_type == IRE_IF_RESOLVER ? NULL:
		    &save_ire->ire_max_frag),
		    NULL,
		    dst_ill->ill_rq,		/* recv-from queue */
		    dst_ill->ill_wq,		/* send-to queue */
		    IRE_CACHE,			/* IRE type */
		    src_ipif,
		    ire->ire_mask,		/* Parent mask */
		    0,
		    ire->ire_ihandle,		/* Interface handle */
		    0,
		    &(ire->ire_uinfo),
		    NULL,
		    gcgrp,
		    ipst);
		ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire));
		if (ire != NULL) {
			gcgrp = NULL; /* reference now held by IRE */
			ire->ire_marks |= ire_marks;
			/* add the incomplete ire: */
			error = ire_add(&ire, NULL, NULL, NULL, B_TRUE);
			if (error == 0 && ire != NULL) {
				ire->ire_max_frag = save_ire->ire_max_frag;
				ip1dbg(("setting max_frag to %d in ire 0x%p\n",
				    ire->ire_max_frag, (void *)ire));
			} else {
				ire_refrele(save_ire);
				goto icmp_err_ret;
			}
		} else {
			/* ire_create failed: drop the gcgrp ref ourselves */
			if (gcgrp != NULL) {
				GCGRP_REFRELE(gcgrp);
				gcgrp = NULL;
			}
		}

		ire_refrele(save_ire);
		break;
	default:
		break;
	}

	/* Success: release everything except the returned (held) ire. */
	*ret_action = Forward_ok;
	if (sire != NULL)
		ire_refrele(sire);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	return (ire);
icmp_err_ret:
	/* Failure: release all held references and report the reason. */
	*ret_action = Forward_ret_icmp_err;
	if (sire != NULL)
		ire_refrele(sire);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	if (ire != NULL) {
		if (ire->ire_flags & RTF_BLACKHOLE)
			*ret_action = Forward_blackhole;
		ire_refrele(ire);
	}
	return (NULL);

}

/*
 * Obtain the rt_entry and rt_irb for the route to be added to
 * the ips_ip_ftable.
 * First attempt to add a node to the radix tree via rn_addroute. If the
 * route already exists, return the bucket for the existing route.
 *
 * Locking notes: Need to hold the global radix tree lock in write mode to
 * add a radix node.
 * To prevent the node from being deleted, ire_get_bucket()
 * returns with a ref'ed irb_t. The ire itself is added in ire_add_v4()
 * while holding the irb_lock, but not the radix tree lock.
 */
irb_t *
ire_get_bucket(ire_t *ire)
{
	struct radix_node *rn;
	struct rt_entry *rt;
	struct rt_sockaddr rmask, rdst;
	irb_t *irb = NULL;
	ip_stack_t *ipst = ire->ire_ipst;

	ASSERT(ipst->ips_ip_ftable != NULL);

	/* first try to see if route exists (based on rtalloc1) */
	(void) memset(&rdst, 0, sizeof (rdst));
	rdst.rt_sin_len = sizeof (rdst);
	rdst.rt_sin_family = AF_INET;
	rdst.rt_sin_addr.s_addr = ire->ire_addr;

	(void) memset(&rmask, 0, sizeof (rmask));
	rmask.rt_sin_len = sizeof (rmask);
	rmask.rt_sin_family = AF_INET;
	rmask.rt_sin_addr.s_addr = ire->ire_mask;

	/*
	 * add the route. based on BSD's rtrequest1(RTM_ADD)
	 */
	R_Malloc(rt, rt_entry_cache, sizeof (*rt));
	/* kmem_alloc failed */
	if (rt == NULL)
		return (NULL);

	(void) memset(rt, 0, sizeof (*rt));
	rt->rt_nodes->rn_key = (char *)&rt->rt_dst;
	rt->rt_dst = rdst;
	irb = &rt->rt_irb;
	irb->irb_marks |= IRB_MARK_FTABLE; /* dynamically allocated/freed */
	irb->irb_ipst = ipst;
	rw_init(&irb->irb_lock, NULL, RW_DEFAULT, NULL);
	RADIX_NODE_HEAD_WLOCK(ipst->ips_ip_ftable);
	rn = ipst->ips_ip_ftable->rnh_addaddr(&rt->rt_dst, &rmask,
	    ipst->ips_ip_ftable, (struct radix_node *)rt);
	if (rn == NULL) {
		/*
		 * rnh_addaddr failed, so the route already exists (another
		 * thread may have added it first): discard our rt_entry and
		 * look up the existing radix node instead.
		 */
		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
		Free(rt, rt_entry_cache);
		rt = NULL;
		irb = NULL;
		RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
		rn = ipst->ips_ip_ftable->rnh_lookup(&rdst, &rmask,
		    ipst->ips_ip_ftable);
		if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0)) {
			/* found a non-root match */
			rt = (struct rt_entry *)rn;
		}
	}
	if (rt != NULL) {
		/* hold the bucket (under the tree lock) before returning it */
		irb = &rt->rt_irb;
		IRB_REFHOLD(irb);
	}
	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
	return (irb);
}

/*
 * This function is used when the caller wants to know the outbound
 * interface for a packet given only the address. If this is an offlink
 * IP address and there are multiple routes to this destination, this
 * routine will utilise the first route it finds to the IP address.
 * Return values:
 *	0 - FAILURE
 *	nonzero - ifindex
 */
uint_t
ifindex_lookup(const struct sockaddr *ipaddr, zoneid_t zoneid)
{
	uint_t ifindex = 0;
	ire_t *ire;
	ill_t *ill;
	netstack_t *ns;
	ip_stack_t *ipst;

	if (zoneid == ALL_ZONES)
		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
	else
		ns = netstack_find_by_zoneid(zoneid);
	ASSERT(ns != NULL);

	/*
	 * For exclusive stacks we set the zoneid to zero
	 * since IP uses the global zoneid in the exclusive stacks.
	 */
	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
		zoneid = GLOBAL_ZONEID;
	ipst = ns->netstack_ip;

	ASSERT(ipaddr->sa_family == AF_INET || ipaddr->sa_family == AF_INET6);

	if ((ire = route_to_dst(ipaddr, zoneid, ipst)) != NULL) {
		ill = ire_to_ill(ire);
		if (ill != NULL)
			ifindex = ill->ill_phyint->phyint_ifindex;
		ire_refrele(ire);
	}
	netstack_rele(ns);
	return (ifindex);
}

/*
 * Routine to find the route to a destination. If an ifindex is supplied
 * it tries to match the route to the corresponding ipif for the ifindex.
 */
static ire_t *
route_to_dst(const struct sockaddr *dst_addr, zoneid_t zoneid, ip_stack_t *ipst)
{
	ire_t *ire = NULL;
	int match_flags;

	match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
	    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);

	/* XXX pass NULL tsl for now */

	if (dst_addr->sa_family == AF_INET) {
		ire = ire_route_lookup(
		    ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr,
		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
	} else {
		ire = ire_route_lookup_v6(
		    &((struct sockaddr_in6 *)dst_addr)->sin6_addr,
		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
	}
	return (ire);
}

/*
 * This routine is called by IP Filter to send a packet out on the wire
 * to a specified V4 dst (which may be onlink or offlink). The ifindex may or
 * may not be 0. A non-null ifindex indicates IP Filter has stipulated
 * an outgoing interface and requires the nexthop to be on that interface.
 * IP WILL NOT DO the following to the data packet before sending it out:
 *	a. manipulate ttl
 *	b. ipsec work
 *	c. fragmentation
 *
 * If the packet has been prepared for hardware checksum then it will be
 * passed off to ip_send_align_cksum() to check that the flags set on the
 * packet are in alignment with the capabilities of the new outgoing NIC.
 *
 * Return values:
 *	0:		IP was able to send of the data pkt
 *	ECOMM:		Could not send packet
 *	ENONET		No route to dst. It is up to the caller
 *			to send icmp unreachable error message,
 *	EINPROGRESS	The macaddr of the onlink dst or that
 *			of the offlink dst's nexthop needs to get
 *			resolved before packet can be sent to dst.
 *			Thus transmission is not guaranteed.
 *
 */
int
ipfil_sendpkt(const struct sockaddr *dst_addr, mblk_t *mp, uint_t ifindex,
    zoneid_t zoneid)
{
	ire_t *ire = NULL, *sire = NULL;
	ire_t *ire_cache = NULL;
	int value;
	int match_flags;
	ipaddr_t dst;
	netstack_t *ns;
	ip_stack_t *ipst;
	enum ire_forward_action ret_action;

	ASSERT(mp != NULL);

	if (zoneid == ALL_ZONES)
		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
	else
		ns = netstack_find_by_zoneid(zoneid);
	ASSERT(ns != NULL);

	/*
	 * For exclusive stacks we set the zoneid to zero
	 * since IP uses the global zoneid in the exclusive stacks.
	 */
	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
		zoneid = GLOBAL_ZONEID;
	ipst = ns->netstack_ip;

	ASSERT(dst_addr->sa_family == AF_INET ||
	    dst_addr->sa_family == AF_INET6);

	if (dst_addr->sa_family == AF_INET) {
		dst = ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr;
	} else {
		/*
		 * We dont have support for V6 yet. It will be provided
		 * once RFE 6399103 has been delivered.
		 * Until then, for V6 dsts, IP Filter will not call
		 * this function. Instead the netinfo framework provides
		 * its own code path, in ip_inject_impl(), to achieve
		 * what it needs to do, for the time being.
		 */
		ip1dbg(("ipfil_sendpkt: no V6 support \n"));
		value = ECOMM;
		freemsg(mp);
		goto discard;
	}

	/*
	 * Lets get the ire. We might get the ire cache entry,
	 * or the ire,sire pair needed to create the cache entry.
	 * XXX pass NULL tsl for now.
	 */

	if (ifindex == 0) {
		/* There is no supplied index. So use the FIB info */

		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
		    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
		ire = ire_route_lookup(dst,
		    0, 0, 0, NULL, &sire, zoneid, MBLK_GETLABEL(mp),
		    match_flags, ipst);
	} else {
		ipif_t *supplied_ipif;
		ill_t *ill;

		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
		    MATCH_IRE_RECURSIVE| MATCH_IRE_RJ_BHOLE|
		    MATCH_IRE_SECATTR);

		/*
		 * If supplied ifindex is non-null, the only valid
		 * nexthop is one off of the interface or group corresponding
		 * to the specified ifindex.
		 */
		ill = ill_lookup_on_ifindex(ifindex, B_FALSE,
		    NULL, NULL, NULL, NULL, ipst);
		if (ill != NULL) {
			match_flags |= MATCH_IRE_ILL;
		} else {
			/* Fallback to group names if hook_emulation set */
			if (ipst->ips_ipmp_hook_emulation) {
				ill = ill_group_lookup_on_ifindex(ifindex,
				    B_FALSE, ipst);
			}
			if (ill == NULL) {
				ip1dbg(("ipfil_sendpkt: Could not find"
				    " route to dst\n"));
				value = ECOMM;
				freemsg(mp);
				goto discard;
			}
			match_flags |= MATCH_IRE_ILL_GROUP;
		}
		supplied_ipif = ipif_get_next_ipif(NULL, ill);

		ire = ire_route_lookup(dst, 0, 0, 0, supplied_ipif,
		    &sire, zoneid, MBLK_GETLABEL(mp), match_flags, ipst);
		ipif_refrele(supplied_ipif);
		ill_refrele(ill);
	}

	/*
	 * Verify that the returned IRE is non-null and does
	 * not have either the RTF_REJECT or RTF_BLACKHOLE
	 * flags set and that the IRE is either an IRE_CACHE,
	 * IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
	 */
	if (ire == NULL ||
	    ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) {
		/*
		 * Either ire could not be found or we got
		 * an invalid one
		 */
		ip1dbg(("ipfil_sendpkt: Could not find route to dst\n"));
		value = ENONET;
		freemsg(mp);
		goto discard;
	}

	/* IP Filter and CGTP dont mix. So bail out if CGTP is on */
	if (ipst->ips_ip_cgtp_filter &&
	    ((ire->ire_flags & RTF_MULTIRT) ||
	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
		ip1dbg(("ipfil_sendpkt: IPFilter does not work with CGTP\n"));
		value = ECOMM;
		freemsg(mp);
		goto discard;
	}

	ASSERT(ire->ire_type != IRE_CACHE || ire->ire_nce != NULL);

	/*
	 * If needed, we will create the ire cache entry for the
	 * nexthop, resolve its link-layer address and then send
	 * the packet out without ttl or IPSec processing.
	 */
	switch (ire->ire_type) {
	case IRE_CACHE:
		if (sire != NULL) {
			UPDATE_OB_PKT_COUNT(sire);
			sire->ire_last_used_time = lbolt;
			ire_refrele(sire);
		}
		ire_cache = ire;
		break;
	case IRE_IF_NORESOLVER:
	case IRE_IF_RESOLVER:
		/*
		 * Call ire_forward(). This function will create the
		 * ire cache entry of the nexthop and add this
		 * incomplete ire to the ire cache table.
		 */
		ire_cache = ire_forward(dst, &ret_action, ire, sire,
		    MBLK_GETLABEL(mp), ipst);
		if (ire_cache == NULL) {
			ip1dbg(("ipfil_sendpkt: failed to create the"
			    " ire cache entry \n"));
			value = ENONET;
			freemsg(mp);
			/* ire_forward() consumed the ire/sire refs */
			sire = NULL;
			ire = NULL;
			goto discard;
		}
		break;
	}

	if (DB_CKSUMFLAGS(mp)) {
		/* ip_send_align_hcksum_flags frees mp on failure */
		if (ip_send_align_hcksum_flags(mp, ire_to_ill(ire_cache)))
			goto cleanup;
	}

	/*
	 * Now that we have the ire cache entry of the nexthop, call
	 * ip_xmit_v4() to trigger mac addr resolution
	 * if necessary and send it once ready.
	 */

	value = ip_xmit_v4(mp, ire_cache, NULL, B_FALSE);
cleanup:
	ire_refrele(ire_cache);
	/*
	 * At this point, the reference for these have already been
	 * released within ire_forward() and/or ip_xmit_v4(). So we set
	 * them to NULL to make sure we dont drop the references
	 * again in case ip_xmit_v4() returns with either SEND_FAILED
	 * or LLHDR_RESLV_FAILED
	 */
	sire = NULL;
	ire = NULL;

	switch (value) {
	case SEND_FAILED:
		ip1dbg(("ipfil_sendpkt: Send failed\n"));
		value = ECOMM;
		break;
	case LLHDR_RESLV_FAILED:
		ip1dbg(("ipfil_sendpkt: Link-layer resolution"
		    " failed\n"));
		value = ECOMM;
		break;
	case LOOKUP_IN_PROGRESS:
		netstack_rele(ns);
		return (EINPROGRESS);
	case SEND_PASSED:
		netstack_rele(ns);
		return (0);
	}
discard:
	if (dst_addr->sa_family == AF_INET) {
		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
	} else {
		BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
	}
	if (ire != NULL)
		ire_refrele(ire);
	if (sire != NULL)
		ire_refrele(sire);
	netstack_rele(ns);
	return (value);
}


/*
 * We don't check for dohwcksum in here because it should be being used
 * elsewhere to control what flags are being set on the mblk. That is,
 * if DB_CKSUMFLAGS() is non-zero then we assume dohwcksum to be true
 * for this packet.
 *
 * This function assumes that it is *only* being called for TCP or UDP
 * packets and nothing else.
 */
static int
ip_send_align_hcksum_flags(mblk_t *mp, ill_t *ill)
{
	int illhckflags;
	int mbhckflags;
	uint16_t *up;
	uint32_t cksum;
	ipha_t *ipha;
	ip6_t *ip6;
	int proto;
	int ipversion;
	int length;
	int start;
	ip6_pkt_t ipp;

	mbhckflags = DB_CKSUMFLAGS(mp);
	ASSERT(mbhckflags != 0);
	ASSERT(mp->b_datap->db_type == M_DATA);
	/*
	 * Since this function only knows how to manage the hardware checksum
	 * issue, reject any packets that have flags set aside from the
	 * checksum related attributes as we cannot necessarily safely map
	 * that packet onto the new NIC. Packets that can be potentially
	 * dropped here include those marked for LSO.
	 */
	if ((mbhckflags &
	    ~(HCK_FULLCKSUM|HCK_PARTIALCKSUM|HCK_IPV4_HDRCKSUM)) != 0) {
		DTRACE_PROBE2(pbr__incapable, (mblk_t *), mp, (ill_t *), ill);
		freemsg(mp);
		return (-1);
	}

	ipha = (ipha_t *)mp->b_rptr;

	/*
	 * Find out what the new NIC is capable of, if anything, and
	 * only allow it to be used with M_DATA mblks being sent out.
	 */
	if (ILL_HCKSUM_CAPABLE(ill)) {
		illhckflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
	} else {
		/*
		 * No capabilities, so turn off everything.
		 */
		illhckflags = 0;
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 0, 0);
		mp->b_datap->db_struioflag &= ~STRUIO_IP;
	}

	DTRACE_PROBE4(pbr__info__a, (mblk_t *), mp, (ill_t *), ill,
	    uint32_t, illhckflags, uint32_t, mbhckflags);
	/*
	 * This block of code that looks for the position of the TCP/UDP
	 * checksum is early in this function because we need to know
	 * what needs to be blanked out for the hardware checksum case.
	 *
	 * That we're in this function implies that the packet is either
	 * TCP or UDP on Solaris, so checks are made for one protocol and
	 * if that fails, the other is therefore implied.
	 */
	ipversion = IPH_HDR_VERSION(ipha);

	if (ipversion == IPV4_VERSION) {
		proto = ipha->ipha_protocol;
		if (proto == IPPROTO_TCP) {
			up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
		} else {
			up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
		}
	} else {
		uint8_t lasthdr;

		/*
		 * Nothing I've seen indicates that IPv6 checksum'ing
		 * precludes the presence of extension headers, so we
		 * can't just look at the next header value in the IPv6
		 * packet header to see if it is TCP/UDP.
		 */
		ip6 = (ip6_t *)ipha;
		(void) memset(&ipp, 0, sizeof (ipp));
		start = ip_find_hdr_v6(mp, ip6, &ipp, &lasthdr);
		proto = lasthdr;

		if (proto == IPPROTO_TCP) {
			up = IPH_TCPH_CHECKSUMP(ipha, start);
		} else {
			up = IPH_UDPH_CHECKSUMP(ipha, start);
		}
	}

	/*
	 * The first case here is easiest:
	 * mblk hasn't asked for full checksum, but the card supports it.
	 *
	 * In addition, check for IPv4 header capability. Note that only
	 * the mblk flag is checked and not ipversion.
	 */
	if ((((illhckflags & HCKSUM_INET_FULL_V4) && (ipversion == 4)) ||
	    (((illhckflags & HCKSUM_INET_FULL_V6) && (ipversion == 6)))) &&
	    ((mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) != 0)) {
		int newflags = HCK_FULLCKSUM;

		if ((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) {
			if ((illhckflags & HCKSUM_IPHDRCKSUM) != 0) {
				newflags |= HCK_IPV4_HDRCKSUM;
			} else {
				/*
				 * Rather than call a function, just inline
				 * the computation of the basic IPv4 header.
				 */
				cksum = (ipha->ipha_dst >> 16) +
				    (ipha->ipha_dst & 0xFFFF) +
				    (ipha->ipha_src >> 16) +
				    (ipha->ipha_src & 0xFFFF);
				IP_HDR_CKSUM(ipha, cksum,
				    ((uint32_t *)ipha)[0],
				    ((uint16_t *)ipha)[4]);
			}
		}

		*up = 0;
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    newflags, 0);
		return (0);
	}

	DTRACE_PROBE2(pbr__info__b, int, ipversion, int, proto);

	/*
	 * Start calculating the pseudo checksum over the IP packet header.
	 * Although the final pseudo checksum used by TCP/UDP consists of
	 * more than just the address fields, we can use the result of
	 * adding those together a little bit further down for IPv4.
	 */
	if (ipversion == IPV4_VERSION) {
		cksum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) +
		    (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF);
		start = IP_SIMPLE_HDR_LENGTH;
		length = ntohs(ipha->ipha_length);
		DTRACE_PROBE3(pbr__info__e, uint32_t, ipha->ipha_src,
		    uint32_t, ipha->ipha_dst, int, cksum);
	} else {
		uint16_t *pseudo;

		pseudo = (uint16_t *)&ip6->ip6_src;

		/* calculate pseudo-header checksum */
		cksum = pseudo[0] + pseudo[1] + pseudo[2] + pseudo[3] +
		    pseudo[4] + pseudo[5] + pseudo[6] + pseudo[7] +
		    pseudo[8] + pseudo[9] + pseudo[10] + pseudo[11] +
		    pseudo[12] + pseudo[13] + pseudo[14] + pseudo[15];

		length = ntohs(ip6->ip6_plen) + sizeof (ip6_t);
	}

	/* Fold the initial sum */
	cksum = (cksum & 0xffff) + (cksum >> 16);

	/*
	 * If the packet was asking for an IPv4 header checksum to be
	 * calculated but the interface doesn't support that, fill it in
	 * using our pseudo checksum as a starting point.
	 */
	if (((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) &&
	    ((illhckflags & HCKSUM_IPHDRCKSUM) == 0)) {
		/*
		 * IP_HDR_CKSUM uses the 2nd arg to the macro in a destructive
		 * way so pass in a copy of the checksum calculated thus far.
		 */
		uint32_t ipsum = cksum;

		DB_CKSUMFLAGS(mp) &= ~HCK_IPV4_HDRCKSUM;

		IP_HDR_CKSUM(ipha, ipsum, ((uint32_t *)ipha)[0],
		    ((uint16_t *)ipha)[4]);
	}

	DTRACE_PROBE3(pbr__info__c, int, start, int, length, int, cksum);

	if (proto == IPPROTO_TCP) {
		cksum += IP_TCP_CSUM_COMP;
	} else {
		cksum += IP_UDP_CSUM_COMP;
	}
	cksum += htons(length - start);
	cksum = (cksum & 0xffff) + (cksum >> 16);

	/*
	 * For TCP/UDP, we either want to setup the packet for partial
	 * checksum or we want to do it all ourselves because the NIC
	 * offers no support for either partial or full checksum.
	 */
	if ((illhckflags & HCKSUM_INET_PARTIAL) != 0) {
		/*
		 * The only case we care about here is if the mblk was
		 * previously set for full checksum offload. If it was
		 * marked for partial (and the NIC does partial), then
		 * we have nothing to do. Similarly if the packet was
		 * not set for partial or full, we do nothing as this
		 * is cheaper than more work to set something up.
		 */
		if ((mbhckflags & HCK_FULLCKSUM) != 0) {
			uint32_t offset;

			if (proto == IPPROTO_TCP) {
				offset = TCP_CHECKSUM_OFFSET;
			} else {
				offset = UDP_CHECKSUM_OFFSET;
			}
			*up = cksum;

			DTRACE_PROBE3(pbr__info__f, int, length - start, int,
			    cksum, int, offset);

			(void) hcksum_assoc(mp, NULL, NULL, start,
			    start + offset, length, 0,
			    DB_CKSUMFLAGS(mp) | HCK_PARTIALCKSUM, 0);
		}

	} else if (mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) {
		/* NIC can't help at all: compute the checksum in software */
		DB_CKSUMFLAGS(mp) &= ~(HCK_PARTIALCKSUM|HCK_FULLCKSUM);

		*up = 0;
		*up = IP_CSUM(mp, start, cksum);
	}

	DTRACE_PROBE4(pbr__info__d, (mblk_t *), mp, (ipha_t *), ipha,
	    (uint16_t *), up, int, cksum);
	return (0);
}


/* ire_walk routine invoked for ip_ire_report for each IRE. */
void
ire_report_ftable(ire_t *ire, char *m)
{
	char buf1[16];
	char buf2[16];
	char buf3[16];
	char buf4[16];
	uint_t fo_pkt_count;
	uint_t ib_pkt_count;
	int ref;
	uint_t print_len, buf_len;
	mblk_t *mp = (mblk_t *)m;

	if (ire->ire_type & IRE_CACHETABLE)
		return;
	buf_len = mp->b_datap->db_lim - mp->b_wptr;
	if (buf_len <= 0)
		return;

	/* Number of active references of this ire */
	ref = ire->ire_refcnt;
	/* "inbound" to a non local address is a forward */
	ib_pkt_count = ire->ire_ib_pkt_count;
	fo_pkt_count = 0;
	if (!(ire->ire_type & (IRE_LOCAL|IRE_BROADCAST))) {
		fo_pkt_count = ib_pkt_count;
		ib_pkt_count = 0;
	}
	print_len = snprintf((char *)mp->b_wptr, buf_len,
	    MI_COL_PTRFMT_STR MI_COL_PTRFMT_STR MI_COL_PTRFMT_STR "%5d "
	    "%s %s %s %s %05d %05ld %06ld %08d %03d %06d %09d %09d %06d %08d "
	    "%04d %08d %08d %d/%d/%d %s\n",
	    (void *)ire, (void *)ire->ire_rfq, (void *)ire->ire_stq,
	    (int)ire->ire_zoneid,
	    ip_dot_addr(ire->ire_addr, buf1), ip_dot_addr(ire->ire_mask, buf2),
	    ip_dot_addr(ire->ire_src_addr, buf3),
	    ip_dot_addr(ire->ire_gateway_addr, buf4),
	    ire->ire_max_frag, ire->ire_uinfo.iulp_rtt,
	    ire->ire_uinfo.iulp_rtt_sd,
	    ire->ire_uinfo.iulp_ssthresh, ref,
	    ire->ire_uinfo.iulp_rtomax,
	    (ire->ire_uinfo.iulp_tstamp_ok ? 1: 0),
	    (ire->ire_uinfo.iulp_wscale_ok ? 1: 0),
	    (ire->ire_uinfo.iulp_ecn_ok ? 1: 0),
	    (ire->ire_uinfo.iulp_pmtud_ok ? 1: 0),
	    ire->ire_uinfo.iulp_sack,
	    ire->ire_uinfo.iulp_spipe, ire->ire_uinfo.iulp_rpipe,
	    ib_pkt_count, ire->ire_ob_pkt_count, fo_pkt_count,
	    ip_nv_lookup(ire_nv_tbl, (int)ire->ire_type));
	if (print_len < buf_len) {
		mp->b_wptr += print_len;
	} else {
		/* truncated: advance to end of buffer */
		mp->b_wptr += buf_len;
	}
}

/*
 * callback function provided by ire_ftable_lookup when calling
 * rn_match_args(). Invoke ire_match_args on each matching leaf node in
 * the radix tree.
 */
boolean_t
ire_find_best_route(struct radix_node *rn, void *arg)
{
	struct rt_entry *rt = (struct rt_entry *)rn;
	irb_t *irb_ptr;
	ire_t *ire;
	ire_ftable_args_t *margs = arg;
	ipaddr_t match_mask;

	ASSERT(rt != NULL);

	irb_ptr = &rt->rt_irb;

	if (irb_ptr->irb_ire_cnt == 0)
		return (B_FALSE);

	rw_enter(&irb_ptr->irb_lock, RW_READER);
	for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) {
		if (ire->ire_marks & IRE_MARK_CONDEMNED)
			continue;
		if (margs->ift_flags & MATCH_IRE_MASK)
			match_mask = margs->ift_mask;
		else
			match_mask = ire->ire_mask;

		if (ire_match_args(ire, margs->ift_addr, match_mask,
		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
		    margs->ift_flags)) {
			/* found a match: hold the ire and stop the walk */
			IRE_REFHOLD(ire);
			rw_exit(&irb_ptr->irb_lock);
			margs->ift_best_ire = ire;
			return (B_TRUE);
		}
	}
	rw_exit(&irb_ptr->irb_lock);
	return (B_FALSE);
}

/*
 * ftable irb_t structures are dynamically allocated, and we need to
 * check if the irb_t (and associated ftable tree attachment) needs to
 * be cleaned up when the irb_refcnt goes to 0. The conditions that need
 * to be verified are:
 * - no other walkers of the irebucket, i.e., quiescent irb_refcnt,
 * - no other threads holding references to ire's in the bucket,
 *   i.e., irb_nire == 0
 * - no active ire's in the bucket, i.e., irb_ire_cnt == 0
 * - need to hold the global tree lock and irb_lock in write mode.
 */
void
irb_refrele_ftable(irb_t *irb)
{
	for (;;) {
		rw_enter(&irb->irb_lock, RW_WRITER);
		ASSERT(irb->irb_refcnt != 0);
		if (irb->irb_refcnt != 1) {
			/*
			 * Someone has a reference to this radix node
			 * or there is some bucket walker.
			 */
			irb->irb_refcnt--;
			rw_exit(&irb->irb_lock);
			return;
		} else {
			/*
			 * There is no other walker, nor is there any
			 * other thread that holds a direct ref to this
			 * radix node. Do the clean up if needed. Call
			 * to ire_unlink will clear the IRB_MARK_CONDEMNED flag
			 */
			if (irb->irb_marks & IRB_MARK_CONDEMNED) {
				ire_t *ire_list;

				ire_list = ire_unlink(irb);
				rw_exit(&irb->irb_lock);

				if (ire_list != NULL)
					ire_cleanup(ire_list);
				/*
				 * more CONDEMNED entries could have
				 * been added while we dropped the lock,
				 * so we have to re-check.
				 */
				continue;
			}

			/*
			 * Now check if there are still any ires
			 * associated with this radix node.
			 */
			if (irb->irb_nire != 0) {
				/*
				 * someone is still holding on
				 * to ires in this bucket
				 */
				irb->irb_refcnt--;
				rw_exit(&irb->irb_lock);
				return;
			} else {
				/*
				 * Everything is clear. Zero walkers,
				 * Zero threads with a ref to this
				 * radix node, Zero ires associated with
				 * this radix node. Due to lock order,
				 * check the above conditions again
				 * after grabbing all locks in the right order
				 */
				rw_exit(&irb->irb_lock);
				if (irb_inactive(irb))
					return;
				/*
				 * irb_inactive could not free the irb.
				 * See if there are any walkers, if not
				 * try to clean up again.
				 */
			}
		}
	}
}

/*
 * IRE iterator used by ire_ftable_lookup() to process multiple default
 * routes. Given a starting point in the hash list (ire_origin), walk the IREs
 * in the bucket skipping default interface routes and deleted entries.
 * Returns the next IRE (unheld), or NULL when we're back to the starting point.
 * Assumes that the caller holds a reference on the IRE bucket.
 *
 * In the absence of good IRE_DEFAULT routes, this function will return
 * the first IRE_INTERFACE route found (if any).
 */
ire_t *
ire_round_robin(irb_t *irb_ptr, zoneid_t zoneid, ire_ftable_args_t *margs,
    ip_stack_t *ipst)
{
	ire_t *ire_origin;
	ire_t *ire, *maybe_ire = NULL;

	rw_enter(&irb_ptr->irb_lock, RW_WRITER);
	ire_origin = irb_ptr->irb_rr_origin;
	if (ire_origin != NULL) {
		/* advance past the previous starting point */
		ire_origin = ire_origin->ire_next;
		IRE_FIND_NEXT_ORIGIN(ire_origin);
	}

	if (ire_origin == NULL) {
		/*
		 * first time through routine, or we dropped off the end
		 * of list.
		 */
		ire_origin = irb_ptr->irb_ire;
		IRE_FIND_NEXT_ORIGIN(ire_origin);
	}
	irb_ptr->irb_rr_origin = ire_origin;
	IRB_REFHOLD_LOCKED(irb_ptr);
	rw_exit(&irb_ptr->irb_lock);

	DTRACE_PROBE2(ire__rr__origin, (irb_t *), irb_ptr,
	    (ire_t *), ire_origin);

	/*
	 * Round-robin the routers list looking for a route that
	 * matches the passed in parameters.
	 * We start with the ire we found above and we walk the hash
	 * list until we're back where we started. It doesn't matter if
	 * routes are added or deleted by other threads - we know this
	 * ire will stay in the list because we hold a reference on the
	 * ire bucket.
	 */
	ire = ire_origin;
	while (ire != NULL) {
		int match_flags = MATCH_IRE_TYPE | MATCH_IRE_SECATTR;
		ire_t *rire;

		if (ire->ire_marks & IRE_MARK_CONDEMNED)
			goto next_ire;

		if (!ire_match_args(ire, margs->ift_addr, (ipaddr_t)0,
		    margs->ift_gateway, margs->ift_type, margs->ift_ipif,
		    margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
		    margs->ift_flags))
			goto next_ire;

		if (ire->ire_type & IRE_INTERFACE) {
			/*
			 * keep looking to see if there is a non-interface
			 * default ire, but save this one as a last resort.
			 */
			if (maybe_ire == NULL)
				maybe_ire = ire;
			goto next_ire;
		}

		if (zoneid == ALL_ZONES) {
			IRE_REFHOLD(ire);
			IRB_REFRELE(irb_ptr);
			return (ire);
		}
		/*
		 * When we're in a non-global zone, we're only
		 * interested in routers that are
		 * reachable through ipifs within our zone.
		 */
		if (ire->ire_ipif != NULL) {
			match_flags |= MATCH_IRE_ILL_GROUP;
		}
		rire = ire_route_lookup(ire->ire_gateway_addr, 0, 0,
		    IRE_INTERFACE, ire->ire_ipif, NULL, zoneid, margs->ift_tsl,
		    match_flags, ipst);
		if (rire != NULL) {
			/* gateway is reachable from this zone; use this ire */
			ire_refrele(rire);
			IRE_REFHOLD(ire);
			IRB_REFRELE(irb_ptr);
			return (ire);
		}
next_ire:
		/* wrap around to the bucket head at the end of the list */
		ire = (ire->ire_next ? ire->ire_next : irb_ptr->irb_ire);
		if (ire == ire_origin)
			break;
	}
	if (maybe_ire != NULL)
		IRE_REFHOLD(maybe_ire);
	IRB_REFRELE(irb_ptr);
	return (maybe_ire);
}

/* Hold the bucket of a non-root radix node (used by the radix tree walker). */
void
irb_refhold_rn(struct radix_node *rn)
{
	if ((rn->rn_flags & RNF_ROOT) == 0)
		IRB_REFHOLD(&((rt_t *)(rn))->rt_irb);
}

/* Release the bucket of a non-root radix node, cleaning it up if unused. */
void
irb_refrele_rn(struct radix_node *rn)
{
	if ((rn->rn_flags & RNF_ROOT) == 0)
		irb_refrele_ftable(&((rt_t *)(rn))->rt_irb);
}