/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains consumer routines of the IPv4 forwarding engine
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#include <sys/dlpi.h>
#include <sys/ddi.h>
#include <sys/cmn_err.h>
#include <sys/policy.h>

#include <sys/systm.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/strsubr.h>
#include <sys/pattr.h>
#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <net/if_dl.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>

#include <inet/common.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/arp.h>
#include <inet/ip_if.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_rts.h>
#include <inet/nd.h>

#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/sadb.h>
#include <inet/tcp.h>
#include <inet/ipclassifier.h>
#include <sys/zone.h>
#include <net/radix.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>

#define	IS_DEFAULT_ROUTE(ire)	\
	(((ire)->ire_type & IRE_DEFAULT) || \
	    (((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0)))

/*
 * structure for passing args between ire_ftable_lookup and ire_find_best_route
 */
typedef struct ire_ftable_args_s {
	ipaddr_t	ift_addr;
	ipaddr_t	ift_mask;
	ipaddr_t	ift_gateway;
	int		ift_type;
	const ipif_t	*ift_ipif;
	zoneid_t	ift_zoneid;
	uint32_t	ift_ihandle;
	const ts_label_t *ift_tsl;
	int		ift_flags;
	ire_t		*ift_best_ire;
} ire_ftable_args_t;

static ire_t	*route_to_dst(const struct sockaddr *, zoneid_t, ip_stack_t *);
static ire_t	*ire_round_robin(irb_t *, zoneid_t, ire_ftable_args_t *,
    ip_stack_t *);
static void	ire_del_host_redir(ire_t *, char *);
static boolean_t ire_find_best_route(struct radix_node *, void *);
static int	ip_send_align_hcksum_flags(mblk_t *, ill_t *);
static ire_t	*ire_ftable_lookup_simple(ipaddr_t,
    ire_t **, zoneid_t, int, ip_stack_t *);

/*
 * Lookup a route in the forwarding table. A specific lookup is indicated by
 * passing the required parameters and indicating the match required in the
 * flag field.
 *
 * Looking for a default route can be done in three ways:
 * 1) pass mask as 0 and set MATCH_IRE_MASK in the flags field
 *    along with other matches.
 * 2) pass type as IRE_DEFAULT and set MATCH_IRE_TYPE in the flags
 *    field along with other matches.
 * 3) pass the destination and mask as zeros.
 *
 * A request to return a default route if no route is found can be
 * specified by setting MATCH_IRE_DEFAULT in flags.
 *
 * It does not support recursion more than one level. It
 * will do a recursive lookup only when the lookup maps to
 * a prefix or default route and the MATCH_IRE_RECURSIVE flag is passed.
 *
 * If the routing table is set up to allow more than one level
 * of recursion, the cleaning up of the cache table will not work,
 * resulting in invalid routing.
 *
 * Supports IP_BOUND_IF by following the ipif/ill when recursing.
 *
 * NOTE: When this function returns NULL, pire has already been released.
 * pire is valid only when this function successfully returns an ire.
 */
ire_t *
ire_ftable_lookup(ipaddr_t addr, ipaddr_t mask, ipaddr_t gateway,
    int type, const ipif_t *ipif, ire_t **pire, zoneid_t zoneid,
    uint32_t ihandle, const ts_label_t *tsl, int flags, ip_stack_t *ipst)
{
	ire_t *ire = NULL;
	ipaddr_t gw_addr;
	struct rt_sockaddr rdst, rmask;
	struct rt_entry *rt;
	ire_ftable_args_t margs;
	boolean_t found_incomplete = B_FALSE;

	ASSERT(ipif == NULL || !ipif->ipif_isv6);

	/*
	 * When we return NULL from this function, we should make
	 * sure that *pire is NULL so that the callers will not
	 * wrongly REFRELE the pire.
	 */
	if (pire != NULL)
		*pire = NULL;
	/*
	 * ire_match_args() will dereference ipif if MATCH_IRE_SRC or
	 * MATCH_IRE_ILL is set.
	 */
	if ((flags & (MATCH_IRE_SRC | MATCH_IRE_ILL)) && (ipif == NULL))
		return (NULL);

	(void) memset(&rdst, 0, sizeof (rdst));
	rdst.rt_sin_len = sizeof (rdst);
	rdst.rt_sin_family = AF_INET;
	rdst.rt_sin_addr.s_addr = addr;

	(void) memset(&rmask, 0, sizeof (rmask));
	rmask.rt_sin_len = sizeof (rmask);
	rmask.rt_sin_family = AF_INET;
	rmask.rt_sin_addr.s_addr = mask;

	(void) memset(&margs, 0, sizeof (margs));
	margs.ift_addr = addr;
	margs.ift_mask = mask;
	margs.ift_gateway = gateway;
	margs.ift_type = type;
	margs.ift_ipif = ipif;
	margs.ift_zoneid = zoneid;
	margs.ift_ihandle = ihandle;
	margs.ift_tsl = tsl;
	margs.ift_flags = flags;

	/*
	 * The flags argument passed to ire_ftable_lookup may cause the
	 * search to return, not the longest matching prefix, but the
	 * "best matching prefix", i.e., the longest prefix that also
	 * satisfies constraints imposed via the permutation of flags
	 * passed in. To achieve this, we invoke ire_match_args() on
	 * each matching leaf in the radix tree. ire_match_args is
	 * invoked by the callback function ire_find_best_route().
	 * We hold the global tree lock in read mode when calling
	 * rn_match_args(). Before dropping the global tree lock, ensure
	 * that the radix node can't be deleted by incrementing ire_refcnt.
	 */
	RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
	rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst,
	    ipst->ips_ip_ftable, ire_find_best_route, &margs);
	ire = margs.ift_best_ire;
	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);

	if (rt == NULL) {
		return (NULL);
	} else {
		ASSERT(ire != NULL);
	}

	DTRACE_PROBE2(ire__found, ire_ftable_args_t *, &margs, ire_t *, ire);

	if (!IS_DEFAULT_ROUTE(ire))
		goto found_ire_held;
	/*
	 * If a default route is found, see if the default matching criteria
	 * are satisfied.
	 */
	if (flags & MATCH_IRE_MASK) {
		/*
		 * we were asked to match a 0 mask, and came back with
		 * a default route. Ok to return it.
		 */
		goto found_default_ire;
	}
	if ((flags & MATCH_IRE_TYPE) &&
	    (type & (IRE_DEFAULT | IRE_INTERFACE))) {
		/*
		 * we were asked to match a default ire type. Ok to return it.
		 */
		goto found_default_ire;
	}
	if (flags & MATCH_IRE_DEFAULT) {
		goto found_default_ire;
	}
	/*
	 * we found a default route, but default matching criteria
	 * are not specified and we are not explicitly looking for
	 * default.
	 */
	IRE_REFRELE(ire);
	return (NULL);
found_default_ire:
	/*
	 * round-robin only if we have more than one route in the bucket.
	 */
	if ((ire->ire_bucket->irb_ire_cnt > 1) &&
	    IS_DEFAULT_ROUTE(ire) &&
	    ((flags & (MATCH_IRE_DEFAULT | MATCH_IRE_MASK)) ==
	    MATCH_IRE_DEFAULT)) {
		ire_t *next_ire;

		next_ire = ire_round_robin(ire->ire_bucket, zoneid, &margs,
		    ipst);
		IRE_REFRELE(ire);
		if (next_ire != NULL) {
			ire = next_ire;
		} else {
			/* no route */
			return (NULL);
		}
	}
found_ire_held:
	if ((flags & MATCH_IRE_RJ_BHOLE) &&
	    (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) {
		return (ire);
	}
	/*
	 * At this point, the IRE that was found must be an IRE_FORWARDTABLE
	 * type. If this is a recursive lookup and an IRE_INTERFACE type was
	 * found, return that. If it was some other IRE_FORWARDTABLE type of
	 * IRE (one of the prefix types), then it is necessary to fill in the
	 * parent IRE pointed to by pire, and then lookup the gateway address of
	 * the parent. For backwards compatibility, if this lookup returns an
	 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level
	 * of lookup is done.
	 */
	if (flags & MATCH_IRE_RECURSIVE) {
		ipif_t	*gw_ipif;
		int match_flags = MATCH_IRE_DSTONLY;
		ire_t *save_ire;

		if (ire->ire_type & IRE_INTERFACE)
			return (ire);
		if (pire != NULL)
			*pire = ire;
		/*
		 * If we can't find an IRE_INTERFACE or the caller has not
		 * asked for pire, we need to REFRELE the save_ire.
		 */
		save_ire = ire;

		if (ire->ire_ipif != NULL)
			match_flags |= MATCH_IRE_ILL;

		/*
		 * ire_ftable_lookup may end up with an incomplete IRE_CACHE
		 * entry for the gateway (i.e., one for which the
		 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller
		 * has specified MATCH_IRE_COMPLETE, such entries will not
		 * be returned; instead, we return the IF_RESOLVER ire.
		 */
		ire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 0,
		    ire->ire_ipif, NULL, zoneid, tsl, match_flags, ipst);
		DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire,
		    (ire_t *), save_ire);
		if (ire == NULL ||
		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
		    ire->ire_nce->nce_state != ND_REACHABLE &&
		    (flags & MATCH_IRE_COMPLETE))) {
			/*
			 * Do not release the parent ire if MATCH_IRE_PARENT
			 * is set. Also return it via ire.
			 */
			if (ire != NULL) {
				ire_refrele(ire);
				ire = NULL;
				found_incomplete = B_TRUE;
			}
			if (flags & MATCH_IRE_PARENT) {
				if (pire != NULL) {
					/*
					 * Need an extra REFHOLD, if the parent
					 * ire is returned via both ire and
					 * pire.
					 */
					IRE_REFHOLD(save_ire);
				}
				ire = save_ire;
			} else {
				ire_refrele(save_ire);
				if (pire != NULL)
					*pire = NULL;
			}
			if (!found_incomplete)
				return (ire);
		}
		if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) {
			/*
			 * If the caller did not ask for pire, release
			 * it now.
			 */
			if (pire == NULL) {
				ire_refrele(save_ire);
			}
			return (ire);
		}
		match_flags |= MATCH_IRE_TYPE;
		gw_addr = ire->ire_gateway_addr;
		gw_ipif = ire->ire_ipif;
		ire_refrele(ire);
		ire = ire_route_lookup(gw_addr, 0, 0,
		    (found_incomplete? IRE_INTERFACE :
		    (IRE_CACHETABLE | IRE_INTERFACE)),
		    gw_ipif, NULL, zoneid, tsl, match_flags, ipst);
		DTRACE_PROBE2(ftable__route__lookup2, (ire_t *), ire,
		    (ire_t *), save_ire);
		if (ire == NULL ||
		    ((ire->ire_type & IRE_CACHE) && ire->ire_nce &&
		    ire->ire_nce->nce_state != ND_REACHABLE &&
		    (flags & MATCH_IRE_COMPLETE))) {
			/*
			 * Do not release the parent ire if MATCH_IRE_PARENT
			 * is set. Also return it via ire.
			 */
			if (ire != NULL) {
				ire_refrele(ire);
				ire = NULL;
			}
			if (flags & MATCH_IRE_PARENT) {
				if (pire != NULL) {
					/*
					 * Need an extra REFHOLD, if the
					 * parent ire is returned via both
					 * ire and pire.
					 */
					IRE_REFHOLD(save_ire);
				}
				ire = save_ire;
			} else {
				ire_refrele(save_ire);
				if (pire != NULL)
					*pire = NULL;
			}
			return (ire);
		} else if (pire == NULL) {
			/*
			 * If the caller did not ask for pire, release
			 * it now.
			 */
			ire_refrele(save_ire);
		}
		return (ire);
	}
	ASSERT(pire == NULL || *pire == NULL);
	return (ire);
}
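
/*
 * Editor's note, illustrative only (not part of the original file): a
 * typical recursive call to ire_ftable_lookup() that is willing to fall
 * back to a default route and also returns the parent prefix route via
 * &sire; this mirrors the call made by ire_forward() later in this file:
 *
 *	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
 *	    tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
 *	    MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT | MATCH_IRE_SECATTR, ipst);
 *
 * On success the caller must ire_refrele() ire, and sire when it was set.
 */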

/*
 * This function is called by
 * ip_fast_forward->ire_forward_simple
 * The optimizations of this function over ire_ftable_lookup are:
 *	o removing unnecessary flag matching
 *	o doing longest prefix match instead of overloading it further
 *	  with the unnecessary "best_prefix_match"
 *	o Does not do round robin of the default route for every packet
 *	o inlines code of ire_ctable_lookup to look for the nexthop cache
 *	  entry before calling ire_route_lookup
 */
static ire_t *
ire_ftable_lookup_simple(ipaddr_t addr,
    ire_t **pire, zoneid_t zoneid, int flags,
    ip_stack_t *ipst)
{
	ire_t *ire = NULL;
	ire_t *tmp_ire = NULL;
	struct rt_sockaddr rdst;
	struct rt_entry *rt;
	irb_t *irb_ptr;
	ire_t *save_ire;
	int match_flags;

	rdst.rt_sin_len = sizeof (rdst);
	rdst.rt_sin_family = AF_INET;
	rdst.rt_sin_addr.s_addr = addr;

	/*
	 * This is basically inlining a simpler version of ire_match_args
	 */
	RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);

	rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst,
	    ipst->ips_ip_ftable, NULL, NULL);

	if (rt == NULL) {
		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
		return (NULL);
	}
	irb_ptr = &rt->rt_irb;
	if (irb_ptr == NULL || irb_ptr->irb_ire_cnt == 0) {
		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
		return (NULL);
	}

	rw_enter(&irb_ptr->irb_lock, RW_READER);
	for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) {
		if (ire->ire_zoneid == zoneid)
			break;
	}

	if (ire == NULL || (ire->ire_marks & IRE_MARK_CONDEMNED)) {
		rw_exit(&irb_ptr->irb_lock);
		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
		return (NULL);
	}
	/* we have an ire that matches */
	if (ire != NULL)
		IRE_REFHOLD(ire);
	rw_exit(&irb_ptr->irb_lock);
	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);

	if ((flags & MATCH_IRE_RJ_BHOLE) &&
	    (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) {
		return (ire);
	}
	/*
	 * At this point, the IRE that was found must be an IRE_FORWARDTABLE
	 * type. If this is a recursive lookup and an IRE_INTERFACE type was
	 * found, return that. If it was some other IRE_FORWARDTABLE type of
	 * IRE (one of the prefix types), then it is necessary to fill in the
	 * parent IRE pointed to by pire, and then lookup the gateway address of
	 * the parent. For backwards compatibility, if this lookup returns an
	 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level
	 * of lookup is done.
	 */
	match_flags = MATCH_IRE_DSTONLY;

	if (ire->ire_type & IRE_INTERFACE)
		return (ire);
	*pire = ire;
	/*
	 * If we can't find an IRE_INTERFACE or the caller has not
	 * asked for pire, we need to REFRELE the save_ire.
	 */
	save_ire = ire;

	/*
	 * Currently MATCH_IRE_ILL is never used with
	 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while
	 * sending out packets as MATCH_IRE_ILL is used only
	 * for communicating with on-link hosts. We can't assert
	 * that here as RTM_GET calls this function with
	 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE.
	 * We have already used the MATCH_IRE_ILL in determining
	 * the right prefix route at this point. To match the
	 * behavior of how we locate routes while sending out
	 * packets, we don't want to use MATCH_IRE_ILL below
	 * while locating the interface route.
	 *
	 * ire_ftable_lookup may end up with an incomplete IRE_CACHE
	 * entry for the gateway (i.e., one for which the
	 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller
	 * has specified MATCH_IRE_COMPLETE, such entries will not
	 * be returned; instead, we return the IF_RESOLVER ire.
	 */

	if (ire->ire_ipif == NULL) {
		tmp_ire = ire;
		/*
		 * Look to see if the nexthop entry is in the cachetable
		 */
		ire = ire_cache_lookup(ire->ire_gateway_addr, zoneid, NULL,
		    ipst);
		if (ire == NULL) {
			/* Try ire_route_lookup */
			ire = tmp_ire;
		} else {
			goto solved;
		}
	}
	if (ire->ire_ipif != NULL)
		match_flags |= MATCH_IRE_ILL;

	ire = ire_route_lookup(ire->ire_gateway_addr, 0,
	    0, 0, ire->ire_ipif, NULL, zoneid, NULL, match_flags, ipst);
solved:
	DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire,
	    (ire_t *), save_ire);
	if (ire == NULL) {
		/*
		 * Do not release the parent ire if MATCH_IRE_PARENT
		 * is set. Also return it via ire.
		 */
		ire_refrele(save_ire);
		*pire = NULL;
		return (ire);
	}
	if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) {
		/*
		 * If the caller did not ask for pire, release
		 * it now.
		 */
		if (pire == NULL) {
			ire_refrele(save_ire);
		}
	}
	return (ire);
}

/*
 * Find an IRE_OFFSUBNET IRE entry for the multicast address 'group'
 * that goes through 'ipif'. As a fallback, a route that goes through
 * ipif->ipif_ill can be returned.
 */
ire_t *
ipif_lookup_multi_ire(ipif_t *ipif, ipaddr_t group)
{
	ire_t	*ire;
	ire_t	*save_ire = NULL;
	ire_t	*gw_ire;
	irb_t	*irb;
	ipaddr_t gw_addr;
	int	match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL;
	ip_stack_t	*ipst = ipif->ipif_ill->ill_ipst;

	ASSERT(CLASSD(group));

	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, ALL_ZONES, 0,
	    NULL, MATCH_IRE_DEFAULT, ipst);

	if (ire == NULL)
		return (NULL);

	irb = ire->ire_bucket;
	ASSERT(irb);

	IRB_REFHOLD(irb);
	ire_refrele(ire);
	for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) {
		if (ire->ire_addr != group ||
		    ipif->ipif_zoneid != ire->ire_zoneid &&
		    ire->ire_zoneid != ALL_ZONES) {
			continue;
		}

		switch (ire->ire_type) {
		case IRE_DEFAULT:
		case IRE_PREFIX:
		case IRE_HOST:
			gw_addr = ire->ire_gateway_addr;
			gw_ire = ire_ftable_lookup(gw_addr, 0, 0, IRE_INTERFACE,
			    ipif, NULL, ALL_ZONES, 0, NULL, match_flags, ipst);

			if (gw_ire != NULL) {
				if (save_ire != NULL) {
					ire_refrele(save_ire);
				}
				IRE_REFHOLD(ire);
				if (gw_ire->ire_ipif == ipif) {
					ire_refrele(gw_ire);

					IRB_REFRELE(irb);
					return (ire);
				}
				ire_refrele(gw_ire);
				save_ire = ire;
			}
			break;
		case IRE_IF_NORESOLVER:
		case IRE_IF_RESOLVER:
			if (ire->ire_ipif == ipif) {
				if (save_ire != NULL) {
					ire_refrele(save_ire);
				}
				IRE_REFHOLD(ire);

				IRB_REFRELE(irb);
				return (ire);
			}
			break;
		}
	}
	IRB_REFRELE(irb);

	return (save_ire);
}

/*
 * Find an IRE_INTERFACE for the multicast group.
 * Allows different routes for multicast addresses
 * in the unicast routing table (akin to 224.0.0.0 but could be more specific)
 * which point at different interfaces. This is used when IP_MULTICAST_IF
 * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't
 * specify the interface to join on.
 *
 * Supports IP_BOUND_IF by following the ipif/ill when recursing.
 */
ire_t *
ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst)
{
	ire_t	*ire;
	ipif_t	*ipif = NULL;
	int	match_flags = MATCH_IRE_TYPE;
	ipaddr_t gw_addr;

	ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid,
	    0, NULL, MATCH_IRE_DEFAULT, ipst);

	/* We search a resolvable ire in case of multirouting. */
	if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) {
		ire_t *cire = NULL;
		/*
		 * If the route is not resolvable, the looked up ire
		 * may be changed here. In that case, ire_multirt_lookup()
		 * IRE_REFRELEs the original ire and changes it.
		 */
		(void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW,
		    NULL, ipst);
		if (cire != NULL)
			ire_refrele(cire);
	}
	if (ire == NULL)
		return (NULL);
	/*
	 * Make sure we follow ire_ipif.
	 *
	 * We need to determine the interface route through
	 * which the gateway will be reached.
	 */
	if (ire->ire_ipif != NULL) {
		ipif = ire->ire_ipif;
		match_flags |= MATCH_IRE_ILL;
	}

	switch (ire->ire_type) {
	case IRE_DEFAULT:
	case IRE_PREFIX:
	case IRE_HOST:
		gw_addr = ire->ire_gateway_addr;
		ire_refrele(ire);
		ire = ire_ftable_lookup(gw_addr, 0, 0,
		    IRE_INTERFACE, ipif, NULL, zoneid, 0,
		    NULL, match_flags, ipst);
		return (ire);
	case IRE_IF_NORESOLVER:
	case IRE_IF_RESOLVER:
		return (ire);
	default:
		ire_refrele(ire);
		return (NULL);
	}
}
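
/*
 * Editor's note, illustrative only (not part of the original file): a
 * sketch of how a sender with neither IP_MULTICAST_IF nor an explicit
 * join interface might map a multicast group to an outgoing ill using
 * ire_lookup_multi(); 'group', 'zoneid' and 'ipst' are caller-supplied:
 *
 *	ire = ire_lookup_multi(group, zoneid, ipst);
 *	if (ire != NULL) {
 *		ill = ire_to_ill(ire);
 *		... use ill as the outgoing interface ...
 *		ire_refrele(ire);
 *	}
 */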
This is used when IP_MULTICAST_IF 631 * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't 632 * specify the interface to join on. 633 * 634 * Supports IP_BOUND_IF by following the ipif/ill when recursing. 635 */ 636 ire_t * 637 ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst) 638 { 639 ire_t *ire; 640 ipif_t *ipif = NULL; 641 int match_flags = MATCH_IRE_TYPE; 642 ipaddr_t gw_addr; 643 644 ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid, 645 0, NULL, MATCH_IRE_DEFAULT, ipst); 646 647 /* We search a resolvable ire in case of multirouting. */ 648 if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) { 649 ire_t *cire = NULL; 650 /* 651 * If the route is not resolvable, the looked up ire 652 * may be changed here. In that case, ire_multirt_lookup() 653 * IRE_REFRELE the original ire and change it. 654 */ 655 (void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW, 656 NULL, ipst); 657 if (cire != NULL) 658 ire_refrele(cire); 659 } 660 if (ire == NULL) 661 return (NULL); 662 /* 663 * Make sure we follow ire_ipif. 664 * 665 * We need to determine the interface route through 666 * which the gateway will be reached. 667 */ 668 if (ire->ire_ipif != NULL) { 669 ipif = ire->ire_ipif; 670 match_flags |= MATCH_IRE_ILL; 671 } 672 673 switch (ire->ire_type) { 674 case IRE_DEFAULT: 675 case IRE_PREFIX: 676 case IRE_HOST: 677 gw_addr = ire->ire_gateway_addr; 678 ire_refrele(ire); 679 ire = ire_ftable_lookup(gw_addr, 0, 0, 680 IRE_INTERFACE, ipif, NULL, zoneid, 0, 681 NULL, match_flags, ipst); 682 return (ire); 683 case IRE_IF_NORESOLVER: 684 case IRE_IF_RESOLVER: 685 return (ire); 686 default: 687 ire_refrele(ire); 688 return (NULL); 689 } 690 } 691 692 /* 693 * Delete the passed in ire if the gateway addr matches 694 */ 695 void 696 ire_del_host_redir(ire_t *ire, char *gateway) 697 { 698 if ((ire->ire_flags & RTF_DYNAMIC) && 699 (ire->ire_gateway_addr == *(ipaddr_t *)gateway)) 700 ire_delete(ire); 701 } 702 703 /* 704 * Search for all HOST REDIRECT routes that are 705 * pointing at the specified gateway and 706 * delete them. This routine is called only 707 * when a default gateway is going away. 708 */ 709 void 710 ire_delete_host_redirects(ipaddr_t gateway, ip_stack_t *ipst) 711 { 712 struct rtfuncarg rtfarg; 713 714 (void) memset(&rtfarg, 0, sizeof (rtfarg)); 715 rtfarg.rt_func = ire_del_host_redir; 716 rtfarg.rt_arg = (void *)&gateway; 717 (void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable, 718 rtfunc, &rtfarg, irb_refhold_rn, irb_refrele_rn); 719 } 720 721 struct ihandle_arg { 722 uint32_t ihandle; 723 ire_t *ire; 724 }; 725 726 static int 727 ire_ihandle_onlink_match(struct radix_node *rn, void *arg) 728 { 729 struct rt_entry *rt; 730 irb_t *irb; 731 ire_t *ire; 732 struct ihandle_arg *ih = arg; 733 734 rt = (struct rt_entry *)rn; 735 ASSERT(rt != NULL); 736 irb = &rt->rt_irb; 737 for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) { 738 if ((ire->ire_type & IRE_INTERFACE) && 739 (ire->ire_ihandle == ih->ihandle)) { 740 ih->ire = ire; 741 IRE_REFHOLD(ire); 742 return (1); 743 } 744 } 745 return (0); 746 } 747 748 /* 749 * Locate the interface ire that is tied to the cache ire 'cire' via 750 * cire->ire_ihandle. 751 * 752 * We are trying to create the cache ire for an onlink destn. or 753 * gateway in 'cire'. We are called from ire_add_v4() in the IRE_IF_RESOLVER 754 * case, after the ire has come back from ARP. 
 */
ire_t *
ire_ihandle_lookup_onlink(ire_t *cire)
{
	ire_t	*ire;
	int	match_flags;
	struct ihandle_arg ih;
	ip_stack_t *ipst;

	ASSERT(cire != NULL);
	ipst = cire->ire_ipst;

	/*
	 * We don't need to specify the zoneid to ire_ftable_lookup() below
	 * because the ihandle refers to an ipif which can be in only one zone.
	 */
	match_flags = MATCH_IRE_TYPE | MATCH_IRE_IHANDLE | MATCH_IRE_MASK;
	/*
	 * We know that the mask of the interface ire equals cire->ire_cmask.
	 * (When ip_newroute() created 'cire' for an on-link destn. it set its
	 * cmask from the interface ire's mask)
	 */
	ire = ire_ftable_lookup(cire->ire_addr, cire->ire_cmask, 0,
	    IRE_INTERFACE, NULL, NULL, ALL_ZONES, cire->ire_ihandle,
	    NULL, match_flags, ipst);
	if (ire != NULL)
		return (ire);
	/*
	 * If we didn't find an interface ire above, we can't declare failure.
	 * For backwards compatibility, we need to support prefix routes
	 * pointing to next hop gateways that are not on-link.
	 *
	 * In the resolver/noresolver case, ip_newroute() thinks it is creating
	 * the cache ire for an onlink destination in 'cire'. But 'cire' is
	 * not actually onlink, because ire_ftable_lookup() cheated it, by
	 * doing ire_route_lookup() twice and returning an interface ire.
	 *
	 * Eg. default	- gw1		(line 1)
	 *	gw1	- gw2		(line 2)
	 *	gw2	- hme0		(line 3)
	 *
	 * In the above example, ip_newroute() tried to create the cache ire
	 * 'cire' for gw1, based on the interface route in line 3. The
	 * ire_ftable_lookup() above fails, because there is no interface route
	 * to reach gw1. (it is gw2). We fall thru below.
	 *
	 * Do a brute force search based on the ihandle in a subset of the
	 * forwarding tables, corresponding to cire->ire_cmask. Otherwise
	 * things become very complex, since we don't have 'pire' in this
	 * case. (Also note that this method is not possible in the offlink
	 * case because we don't know the mask)
	 */
	(void) memset(&ih, 0, sizeof (ih));
	ih.ihandle = cire->ire_ihandle;
	(void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable,
	    ire_ihandle_onlink_match, &ih, irb_refhold_rn, irb_refrele_rn);
	return (ih.ire);
}

/*
 * IRE iterator used by ire_ftable_lookup[_v6]() to process multiple default
 * routes. Given a starting point in the hash list (ire_origin), walk the IREs
 * in the bucket skipping default interface routes and deleted entries.
 * Returns the next IRE (unheld), or NULL when we're back to the starting point.
 * Assumes that the caller holds a reference on the IRE bucket.
 */
ire_t *
ire_get_next_default_ire(ire_t *ire, ire_t *ire_origin)
{
	ASSERT(ire_origin->ire_bucket != NULL);
	ASSERT(ire != NULL);

	do {
		ire = ire->ire_next;
		if (ire == NULL)
			ire = ire_origin->ire_bucket->irb_ire;
		if (ire == ire_origin)
			return (NULL);
	} while ((ire->ire_type & IRE_INTERFACE) ||
	    (ire->ire_marks & IRE_MARK_CONDEMNED));
	ASSERT(ire != NULL);
	return (ire);
}

static ipif_t *
ire_forward_src_ipif(ipaddr_t dst, ire_t *sire, ire_t *ire,
    int zoneid, ushort_t *marks)
{
	ipif_t *src_ipif;
	ill_t *ill = ire->ire_ipif->ipif_ill;
	ip_stack_t *ipst = ill->ill_ipst;

	/*
	 * Pick the best source address from ill.
	 *
	 * 1) Try to pick the source address from the destination
	 *    route. Clustering assumes that when we have multiple
	 *    prefixes hosted on an interface, the prefix of the
	 *    source address matches the prefix of the destination
	 *    route. We do this only if the address is not
	 *    DEPRECATED.
	 *
	 * 2) If the conn is in a different zone than the ire, we
	 *    need to pick a source address from the right zone.
	 */
	if ((sire != NULL) && (sire->ire_flags & RTF_SETSRC)) {
		/*
		 * The RTF_SETSRC flag is set in the parent ire (sire).
		 * Check that the ipif matching the requested source
		 * address still exists.
		 */
		src_ipif = ipif_lookup_addr(sire->ire_src_addr, NULL,
		    zoneid, NULL, NULL, NULL, NULL, ipst);
		return (src_ipif);
	}
	*marks |= IRE_MARK_USESRC_CHECK;
	if (IS_IPMP(ill) ||
	    (ire->ire_ipif->ipif_flags & IPIF_DEPRECATED) ||
	    (ill->ill_usesrc_ifindex != 0)) {
		src_ipif = ipif_select_source(ill, dst, zoneid);
	} else {
		src_ipif = ire->ire_ipif;
		ASSERT(src_ipif != NULL);
		/* hold src_ipif for uniformity */
		ipif_refhold(src_ipif);
	}
	return (src_ipif);
}

/*
 * This function is called by ip_rput_noire() and ip_fast_forward()
 * to resolve the route of an incoming packet that needs to be forwarded.
 * If the ire of the nexthop is not already in the cachetable, this
 * routine will insert it into the table, but won't trigger ARP resolution yet.
 * Thus, unlike ip_newroute, this function adds incomplete ires to
 * the cachetable. ARP resolution for these ires is delayed until
 * after all of the packet processing is completed and it is ready to
 * be sent out on the wire. Eventually, the packet transmit routine
 * ip_xmit_v4() attempts to send a packet to the driver. If it finds
 * that there is no link layer information, it will do the arp
 * resolution and queue the packet in ire->ire_nce->nce_qd_mp and
 * then send it out once the arp resolution is over
 * (see ip_xmit_v4()->ire_arpresolve()). This scheme is similar to
 * the model of BSD/SunOS 4.
 *
 * In the future, the insertion of incomplete ires in the cachetable should
 * be implemented in the host path as well, as doing so will greatly reduce
 * the existing complexity for code paths that depend on the context of
 * the sender (such as IPsec).
 *
 * Thus this scheme of adding incomplete ires in the cachetable in the
 * forwarding path can be used as a template for simplifying the hostpath.
 */

ire_t *
ire_forward(ipaddr_t dst, enum ire_forward_action *ret_action,
    ire_t *supplied_ire, ire_t *supplied_sire, const struct ts_label_s *tsl,
    ip_stack_t *ipst)
{
	ipaddr_t gw = 0;
	ire_t	*ire = NULL;
	ire_t	*sire = NULL, *save_ire;
	ill_t	*dst_ill = NULL;
	int	error;
	zoneid_t zoneid;
	ipif_t	*src_ipif = NULL;
	mblk_t	*res_mp;
	ushort_t ire_marks = 0;
	tsol_gcgrp_t *gcgrp = NULL;
	tsol_gcgrp_addr_t ga;

	zoneid = GLOBAL_ZONEID;

	if (supplied_ire != NULL) {
		/* We have arrived here from ipfil_sendpkt */
		ire = supplied_ire;
		sire = supplied_sire;
		goto create_irecache;
	}

	ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0,
	    tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
	    MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT | MATCH_IRE_SECATTR, ipst);

	if (ire == NULL) {
		ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst);
		goto icmp_err_ret;
	}

	/*
	 * If we encounter CGTP, we should have the caller use
	 * ip_newroute to resolve multirt instead of this function.
	 * CGTP specs explicitly state that it can't be used with routers.
	 * This essentially prevents insertion of incomplete RTF_MULTIRT
	 * ires in the cachetable.
	 */
	if (ipst->ips_ip_cgtp_filter &&
	    ((ire->ire_flags & RTF_MULTIRT) ||
	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
		ip3dbg(("ire_forward: packet is to be multirouted- "
		    "handing it to ip_newroute\n"));
		if (sire != NULL)
			ire_refrele(sire);
		ire_refrele(ire);
		/*
		 * Inform the caller that multirt was encountered so that
		 * ip_newroute() can be called.
		 */
		*ret_action = Forward_check_multirt;
		return (NULL);
	}

	/*
	 * Verify that the returned IRE does not have either
	 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is
	 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
	 */
	if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0) {
		ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n",
		    (void *)ire));
		goto icmp_err_ret;
	}

	/*
	 * If we already have a fully resolved IRE CACHE of the
	 * nexthop router, just hand over the cache entry
	 * and we are done.
	 */

	if (ire->ire_type & IRE_CACHE) {

		/*
		 * If we are using this ire cache entry as a
		 * gateway to forward packets, chances are we
		 * will be using it again. So turn off
		 * the temporary flag, thus reducing its
		 * chances of getting deleted frequently.
		 */
		if (ire->ire_marks & IRE_MARK_TEMPORARY) {
			irb_t *irb = ire->ire_bucket;
			rw_enter(&irb->irb_lock, RW_WRITER);
			/*
			 * We need to recheck for IRE_MARK_TEMPORARY after
			 * acquiring the lock in order to guarantee that
			 * irb_tmp_ire_cnt stays consistent.
			 */
			if (ire->ire_marks & IRE_MARK_TEMPORARY) {
				ire->ire_marks &= ~IRE_MARK_TEMPORARY;
				irb->irb_tmp_ire_cnt--;
			}
			rw_exit(&irb->irb_lock);
		}

		if (sire != NULL) {
			UPDATE_OB_PKT_COUNT(sire);
			sire->ire_last_used_time = lbolt;
			ire_refrele(sire);
		}
		*ret_action = Forward_ok;
		return (ire);
	}
create_irecache:
	/*
	 * Increment the ire_ob_pkt_count field for ire if it is an
	 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and
	 * increment the same for the parent IRE, sire, if it is some
	 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST).
	 */
	if ((ire->ire_type & IRE_INTERFACE) != 0) {
		UPDATE_OB_PKT_COUNT(ire);
		ire->ire_last_used_time = lbolt;
	}

	/*
	 * sire, if non-NULL, must be a prefix IRE; it cannot be of
	 * IRE_CACHETABLE or IRE_INTERFACE type (see the ASSERT below).
	 */
	if (sire != NULL) {
		gw = sire->ire_gateway_addr;
		ASSERT((sire->ire_type &
		    (IRE_CACHETABLE | IRE_INTERFACE)) == 0);
		UPDATE_OB_PKT_COUNT(sire);
		sire->ire_last_used_time = lbolt;
	}

	dst_ill = ire->ire_ipif->ipif_ill;
	if (IS_IPMP(dst_ill))
		dst_ill = ipmp_illgrp_hold_next_ill(dst_ill->ill_grp);
	else
		ill_refhold(dst_ill);

	if (dst_ill == NULL) {
		ip2dbg(("ire_forward no dst ill; ire 0x%p\n", (void *)ire));
		goto icmp_err_ret;
	}

	ASSERT(src_ipif == NULL);
	/* Now obtain the src_ipif */
	src_ipif = ire_forward_src_ipif(dst, sire, ire, zoneid, &ire_marks);
	if (src_ipif == NULL)
		goto icmp_err_ret;

	switch (ire->ire_type) {
	case IRE_IF_NORESOLVER:
		/* create ire_cache for ire_addr endpoint */
		if (dst_ill->ill_phys_addr_length != IP_ADDR_LEN &&
		    dst_ill->ill_resolver_mp == NULL) {
			ip1dbg(("ire_forward: dst_ill %p "
			    "for IRE_IF_NORESOLVER ire %p has "
			    "no ill_resolver_mp\n",
			    (void *)dst_ill, (void *)ire));
			goto icmp_err_ret;
		}
		/* FALLTHRU */
	case IRE_IF_RESOLVER:
		/*
		 * We have the IRE_IF_RESOLVER of the nexthop gateway
		 * and now need to build a IRE_CACHE for it.
		 * In this case, we have the following :
		 *
		 * 1) src_ipif - used for getting a source address.
		 *
		 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This
		 *    means packets using the IRE_CACHE that we will build
		 *    here will go out on dst_ill.
		 *
		 * 3) sire may or may not be NULL. But, the IRE_CACHE that is
		 *    to be created will only be tied to the IRE_INTERFACE
		 *    that was derived from the ire_ihandle field.
		 *
		 *    If sire is non-NULL, it means the destination is
		 *    off-link and we will first create the IRE_CACHE for the
		 *    gateway.
		 */
		res_mp = dst_ill->ill_resolver_mp;
		if (ire->ire_type == IRE_IF_RESOLVER &&
		    (!OK_RESOLVER_MP(res_mp))) {
			goto icmp_err_ret;
		}
		/*
		 * To be at this point in the code with a non-zero gw
		 * means that dst is reachable through a gateway that
		 * we have never resolved. By changing dst to the gw
		 * addr we resolve the gateway first.
		 */
		if (gw != INADDR_ANY) {
			/*
			 * The source ipif that was determined above was
			 * relative to the destination address, not the
			 * gateway's. If src_ipif was not taken out of
			 * the IRE_IF_RESOLVER entry, we'll need to call
			 * ipif_select_source() again.
			 */
			if (src_ipif != ire->ire_ipif) {
				ipif_refrele(src_ipif);
				src_ipif = ipif_select_source(dst_ill,
				    gw, zoneid);
				if (src_ipif == NULL)
					goto icmp_err_ret;
			}
			dst = gw;
			gw = INADDR_ANY;
		}
		/*
		 * dst has been set to the address of the nexthop.
		 *
		 * TSol note: get security attributes of the nexthop;
		 * Note that the nexthop may either be a gateway, or the
		 * packet destination itself; Detailed explanation of
		 * issues involved is provided in the IRE_IF_NORESOLVER
		 * logic in ip_newroute().
		 */
		ga.ga_af = AF_INET;
		IN6_IPADDR_TO_V4MAPPED(dst, &ga.ga_addr);
		gcgrp = gcgrp_lookup(&ga, B_FALSE);

		if (ire->ire_type == IRE_IF_NORESOLVER)
			dst = ire->ire_addr;	/* ire_cache for tunnel endpoint */

		save_ire = ire;
		/*
		 * create an incomplete IRE_CACHE.
		 * An areq_mp will be generated in ire_arpresolve() for
		 * RESOLVER interfaces.
		 */
		ire = ire_create(
		    (uchar_t *)&dst,			/* dest address */
		    (uchar_t *)&ip_g_all_ones,		/* mask */
		    (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */
		    (uchar_t *)&gw,			/* gateway address */
		    (save_ire->ire_type == IRE_IF_RESOLVER ? NULL:
		    &save_ire->ire_max_frag),
		    NULL,
		    dst_ill->ill_rq,			/* recv-from queue */
		    dst_ill->ill_wq,			/* send-to queue */
		    IRE_CACHE,				/* IRE type */
		    src_ipif,
		    ire->ire_mask,			/* Parent mask */
		    0,
		    ire->ire_ihandle,			/* Interface handle */
		    0,
		    &(ire->ire_uinfo),
		    NULL,
		    gcgrp,
		    ipst);
		ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire));
		if (ire != NULL) {
			gcgrp = NULL;	/* reference now held by IRE */
			ire->ire_marks |= ire_marks;
			/* add the incomplete ire: */
			error = ire_add(&ire, NULL, NULL, NULL, B_TRUE);
			if (error == 0 && ire != NULL) {
				ire->ire_max_frag = save_ire->ire_max_frag;
				ip1dbg(("setting max_frag to %d in ire 0x%p\n",
				    ire->ire_max_frag, (void *)ire));
			} else {
				ire_refrele(save_ire);
				goto icmp_err_ret;
			}
		} else {
			if (gcgrp != NULL) {
				GCGRP_REFRELE(gcgrp);
				gcgrp = NULL;
			}
		}

		ire_refrele(save_ire);
		break;
	default:
		break;
	}

	*ret_action = Forward_ok;
	if (sire != NULL)
		ire_refrele(sire);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	return (ire);
icmp_err_ret:
	*ret_action = Forward_ret_icmp_err;
	if (sire != NULL)
		ire_refrele(sire);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	if (ire != NULL) {
		if (ire->ire_flags & RTF_BLACKHOLE)
			*ret_action = Forward_blackhole;
		ire_refrele(ire);
	}
	return (NULL);
}
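
/*
 * Editor's note, illustrative only (not part of the original file): how a
 * caller is expected to act on ret_action when ire_forward() returns NULL;
 * compare ipfil_sendpkt() later in this file, which uses the returned
 * ire_cache directly when the call succeeds:
 *
 *	ire = ire_forward(dst, &ret_action, NULL, NULL,
 *	    MBLK_GETLABEL(mp), ipst);
 *	if (ire == NULL) {
 *		switch (ret_action) {
 *		case Forward_check_multirt:
 *			... resolve via ip_newroute() instead ...
 *		case Forward_ret_icmp_err:
 *			... send an ICMP unreachable to the source ...
 *		case Forward_blackhole:
 *			... drop the packet without an error ...
 *		}
 *	}
 */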

/*
 * Since the caller is ip_fast_forward, there is no CGTP or TSol test.
 * Also we don't call the ftable lookup with MATCH_IRE_PARENT.
 */

ire_t *
ire_forward_simple(ipaddr_t dst, enum ire_forward_action *ret_action,
    ip_stack_t *ipst)
{
	ipaddr_t gw = 0;
	ire_t	*ire = NULL;
	ire_t	*sire = NULL, *save_ire;
	ill_t	*dst_ill = NULL;
	int	error;
	zoneid_t zoneid = GLOBAL_ZONEID;
	ipif_t	*src_ipif = NULL;
	mblk_t	*res_mp;
	ushort_t ire_marks = 0;

	ire = ire_ftable_lookup_simple(dst, &sire, zoneid,
	    MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | MATCH_IRE_RJ_BHOLE, ipst);
	if (ire == NULL) {
		ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst);
		goto icmp_err_ret;
	}

	/*
	 * Verify that the returned IRE does not have either
	 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is
	 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
	 */
	if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
	    ((ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) {
		ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n",
		    (void *)ire));
		goto icmp_err_ret;
	}

	/*
	 * If we already have a fully resolved IRE CACHE of the
	 * nexthop router, just hand over the cache entry
	 * and we are done.
	 */
	if (ire->ire_type & IRE_CACHE) {
		/*
		 * If we are using this ire cache entry as a
		 * gateway to forward packets, chances are we
		 * will be using it again. So turn off
		 * the temporary flag, thus reducing its
		 * chances of getting deleted frequently.
		 */
		if (ire->ire_marks & IRE_MARK_TEMPORARY) {
			irb_t *irb = ire->ire_bucket;
			rw_enter(&irb->irb_lock, RW_WRITER);
			ire->ire_marks &= ~IRE_MARK_TEMPORARY;
			irb->irb_tmp_ire_cnt--;
			rw_exit(&irb->irb_lock);
		}

		if (sire != NULL) {
			UPDATE_OB_PKT_COUNT(sire);
			ire_refrele(sire);
		}
		*ret_action = Forward_ok;
		return (ire);
	}
	/*
	 * Increment the ire_ob_pkt_count field for ire if it is an
	 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and
	 * increment the same for the parent IRE, sire, if it is some
	 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST).
	 */
	if ((ire->ire_type & IRE_INTERFACE) != 0) {
		UPDATE_OB_PKT_COUNT(ire);
		ire->ire_last_used_time = lbolt;
	}

	/*
	 * sire, if non-NULL, must be a prefix IRE; it cannot be of
	 * IRE_CACHETABLE or IRE_INTERFACE type (see the ASSERT below).
	 */
	if (sire != NULL) {
		gw = sire->ire_gateway_addr;
		ASSERT((sire->ire_type &
		    (IRE_CACHETABLE | IRE_INTERFACE)) == 0);
		UPDATE_OB_PKT_COUNT(sire);
	}

	dst_ill = ire->ire_ipif->ipif_ill;
	if (IS_IPMP(dst_ill))
		dst_ill = ipmp_illgrp_hold_next_ill(dst_ill->ill_grp);
	else
		ill_refhold(dst_ill);	/* for symmetry */

	if (dst_ill == NULL) {
		ip2dbg(("ire_forward_simple: no dst ill; ire 0x%p\n",
		    (void *)ire));
		goto icmp_err_ret;
	}

	ASSERT(src_ipif == NULL);
	/* Now obtain the src_ipif */
	src_ipif = ire_forward_src_ipif(dst, sire, ire, zoneid, &ire_marks);
	if (src_ipif == NULL)
		goto icmp_err_ret;

	switch (ire->ire_type) {
	case IRE_IF_NORESOLVER:
		/* create ire_cache for ire_addr endpoint */
	case IRE_IF_RESOLVER:
		/*
		 * We have the IRE_IF_RESOLVER of the nexthop gateway
		 * and now need to build a IRE_CACHE for it.
		 * In this case, we have the following :
		 *
		 * 1) src_ipif - used for getting a source address.
		 *
		 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This
		 *    means packets using the IRE_CACHE that we will build
		 *    here will go out on dst_ill.
		 *
		 * 3) sire may or may not be NULL. But, the IRE_CACHE that is
		 *    to be created will only be tied to the IRE_INTERFACE
		 *    that was derived from the ire_ihandle field.
		 *
		 *    If sire is non-NULL, it means the destination is
		 *    off-link and we will first create the IRE_CACHE for the
		 *    gateway.
		 */
		res_mp = dst_ill->ill_resolver_mp;
		if (ire->ire_type == IRE_IF_RESOLVER &&
		    (!OK_RESOLVER_MP(res_mp))) {
			ire_refrele(ire);
			ire = NULL;
			goto out;
		}
		/*
		 * To be at this point in the code with a non-zero gw
		 * means that dst is reachable through a gateway that
		 * we have never resolved. By changing dst to the gw
		 * addr we resolve the gateway first.
		 */
		if (gw != INADDR_ANY) {
			/*
			 * The source ipif that was determined above was
			 * relative to the destination address, not the
			 * gateway's. If src_ipif was not taken out of
			 * the IRE_IF_RESOLVER entry, we'll need to call
			 * ipif_select_source() again.
			 */
			if (src_ipif != ire->ire_ipif) {
				ipif_refrele(src_ipif);
				src_ipif = ipif_select_source(dst_ill,
				    gw, zoneid);
				if (src_ipif == NULL)
					goto icmp_err_ret;
			}
			dst = gw;
			gw = INADDR_ANY;
		}

		if (ire->ire_type == IRE_IF_NORESOLVER)
			dst = ire->ire_addr;	/* ire_cache for tunnel endpoint */

		save_ire = ire;
		/*
		 * create an incomplete IRE_CACHE.
		 * An areq_mp will be generated in ire_arpresolve() for
		 * RESOLVER interfaces.
		 */
		ire = ire_create(
		    (uchar_t *)&dst,			/* dest address */
		    (uchar_t *)&ip_g_all_ones,		/* mask */
		    (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */
		    (uchar_t *)&gw,			/* gateway address */
		    (save_ire->ire_type == IRE_IF_RESOLVER ? NULL:
		    &save_ire->ire_max_frag),
		    NULL,
		    dst_ill->ill_rq,			/* recv-from queue */
		    dst_ill->ill_wq,			/* send-to queue */
		    IRE_CACHE,				/* IRE type */
		    src_ipif,
		    ire->ire_mask,			/* Parent mask */
		    0,
		    ire->ire_ihandle,			/* Interface handle */
		    0,
		    &(ire->ire_uinfo),
		    NULL,
		    NULL,
		    ipst);
		ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire));
		if (ire != NULL) {
			ire->ire_marks |= ire_marks;
			/* add the incomplete ire: */
			error = ire_add(&ire, NULL, NULL, NULL, B_TRUE);
			if (error == 0 && ire != NULL) {
				ire->ire_max_frag = save_ire->ire_max_frag;
				ip1dbg(("setting max_frag to %d in ire 0x%p\n",
				    ire->ire_max_frag, (void *)ire));
			} else {
				ire_refrele(save_ire);
				goto icmp_err_ret;
			}
		}

		ire_refrele(save_ire);
		break;
	default:
		break;
	}

out:
	*ret_action = Forward_ok;
	if (sire != NULL)
		ire_refrele(sire);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	return (ire);
icmp_err_ret:
	*ret_action = Forward_ret_icmp_err;
	if (src_ipif != NULL)
		ipif_refrele(src_ipif);
	if (dst_ill != NULL)
		ill_refrele(dst_ill);
	if (sire != NULL)
		ire_refrele(sire);
	if (ire != NULL) {
		if (ire->ire_flags & RTF_BLACKHOLE)
			*ret_action = Forward_blackhole;
		ire_refrele(ire);
	}
	/* caller needs to send icmp error message */
	return (NULL);

}

/*
 * Obtain the rt_entry and rt_irb for the route to be added to
 * the ips_ip_ftable.
 * First attempt to add a node to the radix tree via rn_addroute. If the
 * route already exists, return the bucket for the existing route.
 *
 * Locking notes: Need to hold the global radix tree lock in write mode to
 * add a radix node. To prevent the node from being deleted, ire_get_bucket()
 * returns with a ref'ed irb_t. The ire itself is added in ire_add_v4()
 * while holding the irb_lock, but not the radix tree lock.
 */
irb_t *
ire_get_bucket(ire_t *ire)
{
	struct radix_node *rn;
	struct rt_entry *rt;
	struct rt_sockaddr rmask, rdst;
	irb_t *irb = NULL;
	ip_stack_t *ipst = ire->ire_ipst;

	ASSERT(ipst->ips_ip_ftable != NULL);

	/* first try to see if route exists (based on rtalloc1) */
	(void) memset(&rdst, 0, sizeof (rdst));
	rdst.rt_sin_len = sizeof (rdst);
	rdst.rt_sin_family = AF_INET;
	rdst.rt_sin_addr.s_addr = ire->ire_addr;

	(void) memset(&rmask, 0, sizeof (rmask));
	rmask.rt_sin_len = sizeof (rmask);
	rmask.rt_sin_family = AF_INET;
	rmask.rt_sin_addr.s_addr = ire->ire_mask;

	/*
	 * add the route. based on BSD's rtrequest1(RTM_ADD)
	 */
	R_Malloc(rt, rt_entry_cache, sizeof (*rt));
	/* kmem_alloc failed */
	if (rt == NULL)
		return (NULL);

	(void) memset(rt, 0, sizeof (*rt));
	rt->rt_nodes->rn_key = (char *)&rt->rt_dst;
	rt->rt_dst = rdst;
	irb = &rt->rt_irb;
	irb->irb_marks |= IRB_MARK_FTABLE;	/* dynamically allocated/freed */
	irb->irb_ipst = ipst;
	rw_init(&irb->irb_lock, NULL, RW_DEFAULT, NULL);
	RADIX_NODE_HEAD_WLOCK(ipst->ips_ip_ftable);
	rn = ipst->ips_ip_ftable->rnh_addaddr(&rt->rt_dst, &rmask,
	    ipst->ips_ip_ftable, (struct radix_node *)rt);
	if (rn == NULL) {
		RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
		Free(rt, rt_entry_cache);
		rt = NULL;
		irb = NULL;
		RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable);
		rn = ipst->ips_ip_ftable->rnh_lookup(&rdst, &rmask,
		    ipst->ips_ip_ftable);
		if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0)) {
			/* found a non-root match */
			rt = (struct rt_entry *)rn;
		}
	}
	if (rt != NULL) {
		irb = &rt->rt_irb;
		IRB_REFHOLD(irb);
	}
	RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable);
	return (irb);
}

/*
 * This function is used when the caller wants to know the outbound
 * interface for a packet given only the address.
 * If this is an offlink IP address and there are multiple
 * routes to this destination, this routine uses the
 * first route it finds to the IP address.
 * Return values:
 *	0 - FAILURE
 *	nonzero - ifindex
 */
uint_t
ifindex_lookup(const struct sockaddr *ipaddr, zoneid_t zoneid)
{
	uint_t ifindex = 0;
	ire_t *ire;
	ill_t *ill;
	netstack_t *ns;
	ip_stack_t *ipst;

	if (zoneid == ALL_ZONES)
		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
	else
		ns = netstack_find_by_zoneid(zoneid);
	ASSERT(ns != NULL);

	/*
	 * For exclusive stacks we set the zoneid to zero
	 * since IP uses the global zoneid in the exclusive stacks.
	 */
	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
		zoneid = GLOBAL_ZONEID;
	ipst = ns->netstack_ip;

	ASSERT(ipaddr->sa_family == AF_INET || ipaddr->sa_family == AF_INET6);

	if ((ire = route_to_dst(ipaddr, zoneid, ipst)) != NULL) {
		ill = ire_to_ill(ire);
		if (ill != NULL)
			ifindex = ill->ill_phyint->phyint_ifindex;
		ire_refrele(ire);
	}
	netstack_rele(ns);
	return (ifindex);
}
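
/*
 * Editor's note, illustrative only (not part of the original file): a
 * minimal ifindex_lookup() call for an IPv4 destination; 'dst' and
 * 'zoneid' are caller-supplied placeholders:
 *
 *	struct sockaddr_in sin;
 *	uint_t ifindex;
 *
 *	bzero(&sin, sizeof (sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = dst;
 *	ifindex = ifindex_lookup((struct sockaddr *)&sin, zoneid);
 *	if (ifindex == 0)
 *		... no usable route to dst ...
 */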

/*
 * Routine to find the route to a destination. If an ifindex is supplied,
 * it tries to match the route to the corresponding ipif for that ifindex.
 */
static ire_t *
route_to_dst(const struct sockaddr *dst_addr, zoneid_t zoneid, ip_stack_t *ipst)
{
	ire_t *ire = NULL;
	int match_flags;

	match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
	    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);

	/* XXX pass NULL tsl for now */

	if (dst_addr->sa_family == AF_INET) {
		ire = ire_route_lookup(
		    ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr,
		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
	} else {
		ire = ire_route_lookup_v6(
		    &((struct sockaddr_in6 *)dst_addr)->sin6_addr,
		    0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst);
	}
	return (ire);
}

/*
 * This routine is called by IP Filter to send a packet out on the wire
 * to a specified V4 dst (which may be onlink or offlink). The ifindex may or
 * may not be 0. A non-zero ifindex indicates IP Filter has stipulated
 * an outgoing interface and requires the nexthop to be on that interface.
 * IP WILL NOT DO the following to the data packet before sending it out:
 *	a. manipulate ttl
 *	b. ipsec work
 *	c. fragmentation
 *
 * If the packet has been prepared for hardware checksum then it will be
 * passed off to ip_send_align_cksum() to check that the flags set on the
 * packet are in alignment with the capabilities of the new outgoing NIC.
 *
 * Return values:
 *	0:		IP was able to send off the data pkt
 *	ECOMM:		Could not send packet
 *	ENONET:		No route to dst. It is up to the caller
 *			to send an icmp unreachable error message.
 *	EINPROGRESS:	The macaddr of the onlink dst or that
 *			of the offlink dst's nexthop needs to get
 *			resolved before the packet can be sent to dst.
 *			Thus transmission is not guaranteed.
 */

int
ipfil_sendpkt(const struct sockaddr *dst_addr, mblk_t *mp, uint_t ifindex,
    zoneid_t zoneid)
{
	ire_t *ire = NULL, *sire = NULL;
	ire_t *ire_cache = NULL;
	int value;
	int match_flags;
	ipaddr_t dst;
	netstack_t *ns;
	ip_stack_t *ipst;
	enum ire_forward_action ret_action;

	ASSERT(mp != NULL);

	if (zoneid == ALL_ZONES)
		ns = netstack_find_by_zoneid(GLOBAL_ZONEID);
	else
		ns = netstack_find_by_zoneid(zoneid);
	ASSERT(ns != NULL);

	/*
	 * For exclusive stacks we set the zoneid to zero
	 * since IP uses the global zoneid in the exclusive stacks.
	 */
	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
		zoneid = GLOBAL_ZONEID;
	ipst = ns->netstack_ip;

	ASSERT(dst_addr->sa_family == AF_INET ||
	    dst_addr->sa_family == AF_INET6);

	if (dst_addr->sa_family == AF_INET) {
		dst = ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr;
	} else {
		/*
		 * We don't have support for V6 yet. It will be provided
		 * once RFE 6399103 has been delivered.
		 * Until then, for V6 dsts, IP Filter will not call
		 * this function. Instead the netinfo framework provides
		 * its own code path, in ip_inject_impl(), to achieve
		 * what it needs to do, for the time being.
		 */
		ip1dbg(("ipfil_sendpkt: no V6 support \n"));
		value = ECOMM;
		freemsg(mp);
		goto discard;
	}

	/*
	 * Let's get the ire. We might get the ire cache entry,
	 * or the ire,sire pair needed to create the cache entry.
	 * XXX pass NULL tsl for now.
	 */

	if (ifindex == 0) {
		/* There is no supplied index. So use the FIB info */

		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
		    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE);
		ire = ire_route_lookup(dst,
		    0, 0, 0, NULL, &sire, zoneid, MBLK_GETLABEL(mp),
		    match_flags, ipst);
	} else {
		ipif_t *supplied_ipif;
		ill_t *ill;

		match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT |
		    MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE |
		    MATCH_IRE_SECATTR | MATCH_IRE_ILL);

		/*
		 * If the supplied ifindex is non-zero, the only valid
		 * nexthop is one off of the interface corresponding
		 * to the specified ifindex.
		 */
		ill = ill_lookup_on_ifindex(ifindex, B_FALSE,
		    NULL, NULL, NULL, NULL, ipst);
		if (ill != NULL) {
			supplied_ipif = ipif_get_next_ipif(NULL, ill);
		} else {
			ip1dbg(("ipfil_sendpkt: Could not find"
			    " route to dst\n"));
			value = ECOMM;
			freemsg(mp);
			goto discard;
		}

		ire = ire_route_lookup(dst, 0, 0, 0, supplied_ipif,
		    &sire, zoneid, MBLK_GETLABEL(mp), match_flags, ipst);
		ipif_refrele(supplied_ipif);
		ill_refrele(ill);
	}

	/*
	 * Verify that the returned IRE is non-null and does
	 * not have either the RTF_REJECT or RTF_BLACKHOLE
	 * flags set and that the IRE is either an IRE_CACHE,
	 * IRE_IF_NORESOLVER or IRE_IF_RESOLVER.
	 */
	if (ire == NULL ||
	    ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) ||
	    (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) {
		/*
		 * Either the ire could not be found or we got
		 * an invalid one.
		 */
		ip1dbg(("ipfil_sendpkt: Could not find route to dst\n"));
		value = ENONET;
		freemsg(mp);
		goto discard;
	}

	/* IP Filter and CGTP don't mix. So bail out if CGTP is on */
	if (ipst->ips_ip_cgtp_filter &&
	    ((ire->ire_flags & RTF_MULTIRT) ||
	    ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) {
		ip1dbg(("ipfil_sendpkt: IPFilter does not work with CGTP\n"));
		value = ECOMM;
		freemsg(mp);
		goto discard;
	}

	ASSERT(ire->ire_type != IRE_CACHE || ire->ire_nce != NULL);

	/*
	 * If needed, we will create the ire cache entry for the
	 * nexthop, resolve its link-layer address and then send
	 * the packet out without ttl or IPSec processing.
	 */
	switch (ire->ire_type) {
	case IRE_CACHE:
		if (sire != NULL) {
			UPDATE_OB_PKT_COUNT(sire);
			sire->ire_last_used_time = lbolt;
			ire_refrele(sire);
		}
		ire_cache = ire;
		break;
	case IRE_IF_NORESOLVER:
	case IRE_IF_RESOLVER:
		/*
		 * Call ire_forward(). This function will create
		 * the ire cache entry of the nexthop and add
		 * this incomplete ire to the ire cache table.
		 */
		ire_cache = ire_forward(dst, &ret_action, ire, sire,
		    MBLK_GETLABEL(mp), ipst);
		if (ire_cache == NULL) {
			ip1dbg(("ipfil_sendpkt: failed to create the"
			    " ire cache entry \n"));
			value = ENONET;
			freemsg(mp);
			sire = NULL;
			ire = NULL;
			goto discard;
		}
		break;
	}

	if (DB_CKSUMFLAGS(mp)) {
		if (ip_send_align_hcksum_flags(mp, ire_to_ill(ire_cache)))
			goto cleanup;
	}

	/*
	 * Now that we have the ire cache entry of the nexthop, call
	 * ip_xmit_v4() to trigger mac addr resolution
	 * if necessary and send it once ready.
	 */

	value = ip_xmit_v4(mp, ire_cache, NULL, B_FALSE, NULL);
cleanup:
	ire_refrele(ire_cache);
	/*
	 * At this point, the references for these have already been
	 * released within ire_forward() and/or ip_xmit_v4(). So we set
	 * them to NULL to make sure we don't drop the references
	 * again in case ip_xmit_v4() returns with either SEND_FAILED
	 * or LLHDR_RESLV_FAILED.
	 */
	sire = NULL;
	ire = NULL;

	switch (value) {
	case SEND_FAILED:
		ip1dbg(("ipfil_sendpkt: Send failed\n"));
		value = ECOMM;
		break;
	case LLHDR_RESLV_FAILED:
		ip1dbg(("ipfil_sendpkt: Link-layer resolution"
		    " failed\n"));
		value = ECOMM;
		break;
	case LOOKUP_IN_PROGRESS:
		netstack_rele(ns);
		return (EINPROGRESS);
	case SEND_PASSED:
		netstack_rele(ns);
		return (0);
	}
discard:
	if (dst_addr->sa_family == AF_INET) {
		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
	} else {
		BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
	}
	if (ire != NULL)
		ire_refrele(ire);
	if (sire != NULL)
		ire_refrele(sire);
	netstack_rele(ns);
	return (value);
}
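
/*
 * Editor's note, illustrative only (not part of the original file): how a
 * caller might consume the ipfil_sendpkt() return values documented above;
 * 'sin', 'mp', 'ifindex' and 'zoneid' are caller-supplied:
 *
 *	error = ipfil_sendpkt((struct sockaddr *)&sin, mp, ifindex, zoneid);
 *	switch (error) {
 *	case 0:			packet was handed off for transmit
 *	case EINPROGRESS:	awaiting link-layer address resolution
 *	case ENONET:		no route; caller sends an ICMP unreachable
 *	case ECOMM:		transmission failed
 *	}
 */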
/*
 * We don't check for dohwcksum in here because it is used elsewhere to
 * control which checksum flags get set on the mblk in the first place.
 * That is, if DB_CKSUMFLAGS() is non-zero then we assume dohwcksum is
 * true for this packet.
 *
 * This function assumes that it is *only* being called for TCP or UDP
 * packets and nothing else.
 */
static int
ip_send_align_hcksum_flags(mblk_t *mp, ill_t *ill)
{
    int illhckflags;
    int mbhckflags;
    uint16_t *up;
    uint32_t cksum;
    ipha_t *ipha;
    ip6_t *ip6;
    int proto;
    int ipversion;
    int length;
    int start;
    ip6_pkt_t ipp;

    mbhckflags = DB_CKSUMFLAGS(mp);
    ASSERT(mbhckflags != 0);
    ASSERT(mp->b_datap->db_type == M_DATA);
    /*
     * Since this function only knows how to manage the hardware checksum
     * issue, reject any packets that have flags set other than the
     * checksum-related attributes, as we cannot necessarily safely map
     * such a packet onto the new NIC.  Packets that can potentially be
     * dropped here include those marked for LSO.
     */
    if ((mbhckflags &
        ~(HCK_FULLCKSUM|HCK_PARTIALCKSUM|HCK_IPV4_HDRCKSUM)) != 0) {
        DTRACE_PROBE2(pbr__incapable, (mblk_t *), mp, (ill_t *), ill);
        freemsg(mp);
        return (-1);
    }

    ipha = (ipha_t *)mp->b_rptr;

    /*
     * Find out what the new NIC is capable of, if anything, and
     * only allow it to be used with M_DATA mblks being sent out.
     */
    if (ILL_HCKSUM_CAPABLE(ill)) {
        illhckflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
    } else {
        /*
         * No capabilities, so turn off everything.
         */
        illhckflags = 0;
        (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 0, 0);
        mp->b_datap->db_struioflag &= ~STRUIO_IP;
    }

    DTRACE_PROBE4(pbr__info__a, (mblk_t *), mp, (ill_t *), ill,
        uint32_t, illhckflags, uint32_t, mbhckflags);
    /*
     * This block of code that looks for the position of the TCP/UDP
     * checksum comes early in this function because we need to know
     * what needs to be blanked out for the hardware checksum case.
     *
     * That we're in this function implies that the packet is either
     * TCP or UDP on Solaris, so a check is made for one protocol and,
     * if that fails, the other is therefore implied.
     */
    ipversion = IPH_HDR_VERSION(ipha);

    if (ipversion == IPV4_VERSION) {
        proto = ipha->ipha_protocol;
        if (proto == IPPROTO_TCP) {
            up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
        } else {
            up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
        }
    } else {
        uint8_t lasthdr;

        /*
         * Nothing I've seen indicates that IPv6 checksumming
         * precludes the presence of extension headers, so we
         * can't just look at the next-header value in the IPv6
         * packet header to see if it is TCP/UDP.
         */
        ip6 = (ip6_t *)ipha;
        (void) memset(&ipp, 0, sizeof (ipp));
        start = ip_find_hdr_v6(mp, ip6, &ipp, &lasthdr);
        proto = lasthdr;

        if (proto == IPPROTO_TCP) {
            up = IPH_TCPH_CHECKSUMP(ipha, start);
        } else {
            up = IPH_UDPH_CHECKSUMP(ipha, start);
        }
    }

    /*
     * The first case here is the easiest: the card supports full checksum
     * offload for this IP version, so use that even if the mblk only
     * asked for a partial checksum.
     *
     * In addition, check for IPv4 header checksum capability.  Note that
     * only the mblk flag is checked and not ipversion.
     */
    if ((((illhckflags & HCKSUM_INET_FULL_V4) && (ipversion == 4)) ||
        (((illhckflags & HCKSUM_INET_FULL_V6) && (ipversion == 6)))) &&
        ((mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) != 0)) {
        int newflags = HCK_FULLCKSUM;

        if ((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) {
            if ((illhckflags & HCKSUM_IPHDRCKSUM) != 0) {
                newflags |= HCK_IPV4_HDRCKSUM;
            } else {
                /*
                 * Rather than call a function, just inline
                 * the computation of the basic IPv4 header
                 * checksum.
                 */
                cksum = (ipha->ipha_dst >> 16) +
                    (ipha->ipha_dst & 0xFFFF) +
                    (ipha->ipha_src >> 16) +
                    (ipha->ipha_src & 0xFFFF);
                IP_HDR_CKSUM(ipha, cksum,
                    ((uint32_t *)ipha)[0],
                    ((uint16_t *)ipha)[4]);
            }
        }

        *up = 0;
        (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
            newflags, 0);
        return (0);
    }

    DTRACE_PROBE2(pbr__info__b, int, ipversion, int, proto);

    /*
     * Start calculating the pseudo checksum over the IP packet header.
     * Although the final pseudo checksum used by TCP/UDP consists of
     * more than just the address fields, we can reuse the result of
     * adding those together a little bit further down for IPv4.
     */
    if (ipversion == IPV4_VERSION) {
        cksum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) +
            (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF);
        start = IP_SIMPLE_HDR_LENGTH;
        length = ntohs(ipha->ipha_length);
        DTRACE_PROBE3(pbr__info__e, uint32_t, ipha->ipha_src,
            uint32_t, ipha->ipha_dst, int, cksum);
    } else {
        uint16_t *pseudo;

        pseudo = (uint16_t *)&ip6->ip6_src;

        /* calculate pseudo-header checksum */
        cksum = pseudo[0] + pseudo[1] + pseudo[2] + pseudo[3] +
            pseudo[4] + pseudo[5] + pseudo[6] + pseudo[7] +
            pseudo[8] + pseudo[9] + pseudo[10] + pseudo[11] +
            pseudo[12] + pseudo[13] + pseudo[14] + pseudo[15];

        length = ntohs(ip6->ip6_plen) + sizeof (ip6_t);
    }

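    /*
     * The additions above can leave carries in the upper 16 bits of
     * cksum.  Folding those carries back into the low 16 bits preserves
     * the one's complement sum; for example, folding 0x2345f gives
     * (0x2345f & 0xffff) + (0x2345f >> 16) = 0x345f + 0x2 = 0x3461.
     */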
    /* Fold the initial sum */
    cksum = (cksum & 0xffff) + (cksum >> 16);

    /*
     * If the packet was asking for an IPv4 header checksum to be
     * calculated but the interface doesn't support that, fill it in
     * using our pseudo checksum as a starting point.
     */
    if (((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) &&
        ((illhckflags & HCKSUM_IPHDRCKSUM) == 0)) {
        /*
         * IP_HDR_CKSUM uses the 2nd arg to the macro in a destructive
         * way, so pass in a copy of the checksum calculated thus far.
         */
        uint32_t ipsum = cksum;

        DB_CKSUMFLAGS(mp) &= ~HCK_IPV4_HDRCKSUM;

        IP_HDR_CKSUM(ipha, ipsum, ((uint32_t *)ipha)[0],
            ((uint16_t *)ipha)[4]);
    }

    DTRACE_PROBE3(pbr__info__c, int, start, int, length, int, cksum);

    if (proto == IPPROTO_TCP) {
        cksum += IP_TCP_CSUM_COMP;
    } else {
        cksum += IP_UDP_CSUM_COMP;
    }
    cksum += htons(length - start);
    cksum = (cksum & 0xffff) + (cksum >> 16);

    /*
     * For TCP/UDP, we either want to set the packet up for partial
     * checksum offload or we want to do it all ourselves because the
     * NIC offers no support for either partial or full checksum.
     */
    if ((illhckflags & HCKSUM_INET_PARTIAL) != 0) {
        /*
         * The only case we care about here is if the mblk was
         * previously set for full checksum offload.  If it was
         * marked for partial (and the NIC does partial), then
         * we have nothing to do.  Similarly, if the packet was
         * not set for partial or full, we do nothing, as this
         * is cheaper than more work to set something up.
         */
        if ((mbhckflags & HCK_FULLCKSUM) != 0) {
            uint32_t offset;

            if (proto == IPPROTO_TCP) {
                offset = TCP_CHECKSUM_OFFSET;
            } else {
                offset = UDP_CHECKSUM_OFFSET;
            }
            *up = cksum;

            DTRACE_PROBE3(pbr__info__f, int, length - start, int,
                cksum, int, offset);

            (void) hcksum_assoc(mp, NULL, NULL, start,
                start + offset, length, 0,
                DB_CKSUMFLAGS(mp) | HCK_PARTIALCKSUM, 0);
        }

    } else if (mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) {
        DB_CKSUMFLAGS(mp) &= ~(HCK_PARTIALCKSUM|HCK_FULLCKSUM);

        *up = 0;
        *up = IP_CSUM(mp, start, cksum);
    }

    DTRACE_PROBE4(pbr__info__d, (mblk_t *), mp, (ipha_t *), ipha,
        (uint16_t *), up, int, cksum);
    return (0);
}

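/*
 * For reference, the software fallback above is the classic one's
 * complement checksum recipe.  A condensed sketch (illustrative only;
 * IPv6 and the offload paths are omitted, and mp, ipha, up, proto, start
 * and length are assumed to be set up as in ip_send_align_hcksum_flags()):
 *
 *     uint32_t sum;
 *
 *     sum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) +
 *         (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF);
 *     sum += (proto == IPPROTO_TCP) ? IP_TCP_CSUM_COMP : IP_UDP_CSUM_COMP;
 *     sum += htons(length - start);
 *     sum = (sum & 0xffff) + (sum >> 16);
 *     *up = 0;
 *     *up = IP_CSUM(mp, start, sum);
 *
 * IP_CSUM() sums the transport header and payload starting at 'start',
 * folds in the seed and, as used above, yields the value that is stored
 * directly into the checksum field.
 */
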
/*
 * Callback function provided by ire_ftable_lookup() when calling
 * rn_match_args().  Invoke ire_match_args() on each matching leaf node in
 * the radix tree.
 */
boolean_t
ire_find_best_route(struct radix_node *rn, void *arg)
{
    struct rt_entry *rt = (struct rt_entry *)rn;
    irb_t *irb_ptr;
    ire_t *ire;
    ire_ftable_args_t *margs = arg;
    ipaddr_t match_mask;

    ASSERT(rt != NULL);

    irb_ptr = &rt->rt_irb;

    if (irb_ptr->irb_ire_cnt == 0)
        return (B_FALSE);

    rw_enter(&irb_ptr->irb_lock, RW_READER);
    for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) {
        if (ire->ire_marks & IRE_MARK_CONDEMNED)
            continue;
        if (margs->ift_flags & MATCH_IRE_MASK)
            match_mask = margs->ift_mask;
        else
            match_mask = ire->ire_mask;

        if (ire_match_args(ire, margs->ift_addr, match_mask,
            margs->ift_gateway, margs->ift_type, margs->ift_ipif,
            margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
            margs->ift_flags, NULL)) {
            IRE_REFHOLD(ire);
            rw_exit(&irb_ptr->irb_lock);
            margs->ift_best_ire = ire;
            return (B_TRUE);
        }
    }
    rw_exit(&irb_ptr->irb_lock);
    return (B_FALSE);
}

/*
 * ftable irb_t structures are dynamically allocated, and we need to
 * check whether the irb_t (and the associated ftable tree attachment)
 * needs to be cleaned up when the irb_refcnt goes to 0.  The conditions
 * that need to be verified are:
 * - no other walkers of the ire bucket, i.e., a quiescent irb_refcnt,
 * - no other threads holding references to ires in the bucket,
 *   i.e., irb_nire == 0,
 * - no active ires in the bucket, i.e., irb_ire_cnt == 0,
 * - the global tree lock and irb_lock must be held in write mode.
 */
void
irb_refrele_ftable(irb_t *irb)
{
    for (;;) {
        rw_enter(&irb->irb_lock, RW_WRITER);
        ASSERT(irb->irb_refcnt != 0);
        if (irb->irb_refcnt != 1) {
            /*
             * Someone has a reference to this radix node
             * or there is some bucket walker.
             */
            irb->irb_refcnt--;
            rw_exit(&irb->irb_lock);
            return;
        } else {
            /*
             * There is no other walker, nor is there any
             * other thread that holds a direct ref to this
             * radix node.  Do the clean up if needed.  The call
             * to ire_unlink() will clear the IRB_MARK_CONDEMNED
             * flag.
             */
            if (irb->irb_marks & IRB_MARK_CONDEMNED) {
                ire_t *ire_list;

                ire_list = ire_unlink(irb);
                rw_exit(&irb->irb_lock);

                if (ire_list != NULL)
                    ire_cleanup(ire_list);
                /*
                 * More CONDEMNED entries could have
                 * been added while we dropped the lock,
                 * so we have to re-check.
                 */
                continue;
            }

            /*
             * Now check if there are still any ires
             * associated with this radix node.
             */
            if (irb->irb_nire != 0) {
                /*
                 * Someone is still holding on
                 * to ires in this bucket.
                 */
                irb->irb_refcnt--;
                rw_exit(&irb->irb_lock);
                return;
            } else {
                /*
                 * Everything is clear: zero walkers,
                 * zero threads with a ref to this
                 * radix node, zero ires associated with
                 * this radix node.  Due to lock ordering,
                 * drop irb_lock and check the above
                 * conditions again after grabbing all the
                 * locks in the right order.
                 */
                rw_exit(&irb->irb_lock);
                if (irb_inactive(irb))
                    return;
                /*
                 * irb_inactive() could not free the irb.
                 * See if there are any walkers; if not,
                 * try to clean up again.
                 */
            }
        }
    }
}

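/*
 * To summarize the release logic above:
 *
 *   irb_refcnt > 1                        drop our ref and return
 *   irb_refcnt == 1, IRB_MARK_CONDEMNED   unlink the condemned ires
 *                                         (keeping our ref) and re-check
 *   irb_refcnt == 1, irb_nire != 0        drop our ref and return
 *   irb_refcnt == 1, irb_nire == 0        let irb_inactive() retake the
 *                                         locks in the right order and
 *                                         free the bucket if still clear
 */
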
/*
 * IRE iterator used by ire_ftable_lookup() to process multiple default
 * routes.  Given a starting point in the hash list (ire_origin, taken from
 * the bucket's irb_rr_origin), walk the IREs in the bucket, skipping
 * deleted entries and default interface routes (the latter are remembered
 * only as a last resort).  Returns the next matching IRE (with a reference
 * held), or NULL if the walk gets back to the starting point without
 * finding a match.  Assumes that the caller holds a reference on the IRE
 * bucket.
 *
 * In the absence of good IRE_DEFAULT routes, this function will return
 * the first matching IRE_INTERFACE route found (if any).
 */
ire_t *
ire_round_robin(irb_t *irb_ptr, zoneid_t zoneid, ire_ftable_args_t *margs,
    ip_stack_t *ipst)
{
    ire_t *ire_origin;
    ire_t *ire, *maybe_ire = NULL;

    rw_enter(&irb_ptr->irb_lock, RW_WRITER);
    ire_origin = irb_ptr->irb_rr_origin;
    if (ire_origin != NULL) {
        ire_origin = ire_origin->ire_next;
        IRE_FIND_NEXT_ORIGIN(ire_origin);
    }

    if (ire_origin == NULL) {
        /*
         * First time through the routine, or we dropped off the
         * end of the list.
         */
        ire_origin = irb_ptr->irb_ire;
        IRE_FIND_NEXT_ORIGIN(ire_origin);
    }
    irb_ptr->irb_rr_origin = ire_origin;
    IRB_REFHOLD_LOCKED(irb_ptr);
    rw_exit(&irb_ptr->irb_lock);

    DTRACE_PROBE2(ire__rr__origin, (irb_t *), irb_ptr,
        (ire_t *), ire_origin);

    /*
     * Round-robin the routers list looking for a route that
     * matches the passed-in parameters.
     * We start with the ire we found above and we walk the hash
     * list until we're back where we started.  It doesn't matter if
     * routes are added or deleted by other threads - we know this
     * ire will stay in the list because we hold a reference on the
     * ire bucket.
     */
    ire = ire_origin;
    while (ire != NULL) {
        int match_flags = MATCH_IRE_TYPE | MATCH_IRE_SECATTR;
        ire_t *rire;

        if (ire->ire_marks & IRE_MARK_CONDEMNED)
            goto next_ire;

        if (!ire_match_args(ire, margs->ift_addr, (ipaddr_t)0,
            margs->ift_gateway, margs->ift_type, margs->ift_ipif,
            margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl,
            margs->ift_flags, NULL))
            goto next_ire;

        if (ire->ire_type & IRE_INTERFACE) {
            /*
             * Keep looking to see if there is a non-interface
             * default ire, but save this one as a last resort.
             */
            if (maybe_ire == NULL)
                maybe_ire = ire;
            goto next_ire;
        }

        if (zoneid == ALL_ZONES) {
            IRE_REFHOLD(ire);
            IRB_REFRELE(irb_ptr);
            return (ire);
        }
        /*
         * When we're in a non-global zone, we're only
         * interested in routers that are
         * reachable through ipifs within our zone.
         */
        if (ire->ire_ipif != NULL)
            match_flags |= MATCH_IRE_ILL;

        rire = ire_route_lookup(ire->ire_gateway_addr, 0, 0,
            IRE_INTERFACE, ire->ire_ipif, NULL, zoneid, margs->ift_tsl,
            match_flags, ipst);
        if (rire != NULL) {
            ire_refrele(rire);
            IRE_REFHOLD(ire);
            IRB_REFRELE(irb_ptr);
            return (ire);
        }
next_ire:
        ire = (ire->ire_next ? ire->ire_next : irb_ptr->irb_ire);
        if (ire == ire_origin)
            break;
    }
    if (maybe_ire != NULL)
        IRE_REFHOLD(maybe_ire);
    IRB_REFRELE(irb_ptr);
    return (maybe_ire);
}

void
irb_refhold_rn(struct radix_node *rn)
{
    if ((rn->rn_flags & RNF_ROOT) == 0)
        IRB_REFHOLD(&((rt_t *)(rn))->rt_irb);
}

void
irb_refrele_rn(struct radix_node *rn)
{
    if ((rn->rn_flags & RNF_ROOT) == 0)
        irb_refrele_ftable(&((rt_t *)(rn))->rt_irb);
}
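
/*
 * Usage note (a general sketch, not tied to a particular caller in this
 * file): these two helpers are naturally used as a hold/release pair
 * around any work that relies on a radix node's IRE bucket staying
 * around.  The release side funnels into irb_refrele_ftable() above, so
 * the deferred bucket cleanup still happens once the last holder lets go:
 *
 *     irb_refhold_rn(rn);
 *     ... use ((rt_t *)rn)->rt_irb ...
 *     irb_refrele_rn(rn);
 */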