1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * This file contains consumer routines of the IPv4 forwarding engine 28 */ 29 30 #include <sys/types.h> 31 #include <sys/stream.h> 32 #include <sys/stropts.h> 33 #include <sys/strlog.h> 34 #include <sys/dlpi.h> 35 #include <sys/ddi.h> 36 #include <sys/cmn_err.h> 37 #include <sys/policy.h> 38 39 #include <sys/systm.h> 40 #include <sys/strsun.h> 41 #include <sys/kmem.h> 42 #include <sys/param.h> 43 #include <sys/socket.h> 44 #include <sys/strsubr.h> 45 #include <sys/pattr.h> 46 #include <net/if.h> 47 #include <net/route.h> 48 #include <netinet/in.h> 49 #include <net/if_dl.h> 50 #include <netinet/ip6.h> 51 #include <netinet/icmp6.h> 52 53 #include <inet/common.h> 54 #include <inet/mi.h> 55 #include <inet/mib2.h> 56 #include <inet/ip.h> 57 #include <inet/ip_impl.h> 58 #include <inet/ip6.h> 59 #include <inet/ip_ndp.h> 60 #include <inet/arp.h> 61 #include <inet/ip_if.h> 62 #include <inet/ip_ire.h> 63 #include <inet/ip_ftable.h> 64 #include <inet/ip_rts.h> 65 #include <inet/nd.h> 66 67 #include <net/pfkeyv2.h> 68 #include <inet/ipsec_info.h> 69 #include <inet/sadb.h> 70 #include <sys/kmem.h> 71 #include <inet/tcp.h> 72 #include <inet/ipclassifier.h> 73 #include <sys/zone.h> 74 #include <net/radix.h> 75 #include <sys/tsol/label.h> 76 #include <sys/tsol/tnet.h> 77 78 #define IS_DEFAULT_ROUTE(ire) \ 79 (((ire)->ire_type & IRE_DEFAULT) || \ 80 (((ire)->ire_type & IRE_INTERFACE) && ((ire)->ire_addr == 0))) 81 82 /* 83 * structure for passing args between ire_ftable_lookup and ire_find_best_route 84 */ 85 typedef struct ire_ftable_args_s { 86 ipaddr_t ift_addr; 87 ipaddr_t ift_mask; 88 ipaddr_t ift_gateway; 89 int ift_type; 90 const ipif_t *ift_ipif; 91 zoneid_t ift_zoneid; 92 uint32_t ift_ihandle; 93 const ts_label_t *ift_tsl; 94 int ift_flags; 95 ire_t *ift_best_ire; 96 } ire_ftable_args_t; 97 98 static ire_t *route_to_dst(const struct sockaddr *, zoneid_t, ip_stack_t *); 99 static ire_t *ire_round_robin(irb_t *, zoneid_t, ire_ftable_args_t *, 100 ip_stack_t *); 101 static void ire_del_host_redir(ire_t *, char *); 102 static boolean_t ire_find_best_route(struct radix_node *, void *); 103 static int ip_send_align_hcksum_flags(mblk_t *, ill_t *); 104 static ire_t *ire_ftable_lookup_simple(ipaddr_t, 105 ire_t **, zoneid_t, int, ip_stack_t *); 106 107 /* 108 * Lookup a route in forwarding table. A specific lookup is indicated by 109 * passing the required parameters and indicating the match required in the 110 * flag field. 111 * 112 * Looking for default route can be done in three ways 113 * 1) pass mask as 0 and set MATCH_IRE_MASK in flags field 114 * along with other matches. 115 * 2) pass type as IRE_DEFAULT and set MATCH_IRE_TYPE in flags 116 * field along with other matches. 117 * 3) if the destination and mask are passed as zeros. 118 * 119 * A request to return a default route if no route 120 * is found, can be specified by setting MATCH_IRE_DEFAULT 121 * in flags. 122 * 123 * It does not support recursion more than one level. It 124 * will do recursive lookup only when the lookup maps to 125 * a prefix or default route and MATCH_IRE_RECURSIVE flag is passed. 126 * 127 * If the routing table is setup to allow more than one level 128 * of recursion, the cleaning up cache table will not work resulting 129 * in invalid routing. 130 * 131 * Supports IP_BOUND_IF by following the ipif/ill when recursing. 132 * 133 * NOTE : When this function returns NULL, pire has already been released. 134 * pire is valid only when this function successfully returns an 135 * ire. 136 */ 137 ire_t * 138 ire_ftable_lookup(ipaddr_t addr, ipaddr_t mask, ipaddr_t gateway, 139 int type, const ipif_t *ipif, ire_t **pire, zoneid_t zoneid, 140 uint32_t ihandle, const ts_label_t *tsl, int flags, ip_stack_t *ipst) 141 { 142 ire_t *ire = NULL; 143 ipaddr_t gw_addr; 144 struct rt_sockaddr rdst, rmask; 145 struct rt_entry *rt; 146 ire_ftable_args_t margs; 147 boolean_t found_incomplete = B_FALSE; 148 149 ASSERT(ipif == NULL || !ipif->ipif_isv6); 150 151 /* 152 * When we return NULL from this function, we should make 153 * sure that *pire is NULL so that the callers will not 154 * wrongly REFRELE the pire. 155 */ 156 if (pire != NULL) 157 *pire = NULL; 158 /* 159 * ire_match_args() will dereference ipif MATCH_IRE_SRC or 160 * MATCH_IRE_ILL is set. 161 */ 162 if ((flags & (MATCH_IRE_SRC | MATCH_IRE_ILL | MATCH_IRE_ILL_GROUP)) && 163 (ipif == NULL)) 164 return (NULL); 165 166 (void) memset(&rdst, 0, sizeof (rdst)); 167 rdst.rt_sin_len = sizeof (rdst); 168 rdst.rt_sin_family = AF_INET; 169 rdst.rt_sin_addr.s_addr = addr; 170 171 (void) memset(&rmask, 0, sizeof (rmask)); 172 rmask.rt_sin_len = sizeof (rmask); 173 rmask.rt_sin_family = AF_INET; 174 rmask.rt_sin_addr.s_addr = mask; 175 176 (void) memset(&margs, 0, sizeof (margs)); 177 margs.ift_addr = addr; 178 margs.ift_mask = mask; 179 margs.ift_gateway = gateway; 180 margs.ift_type = type; 181 margs.ift_ipif = ipif; 182 margs.ift_zoneid = zoneid; 183 margs.ift_ihandle = ihandle; 184 margs.ift_tsl = tsl; 185 margs.ift_flags = flags; 186 187 /* 188 * The flags argument passed to ire_ftable_lookup may cause the 189 * search to return, not the longest matching prefix, but the 190 * "best matching prefix", i.e., the longest prefix that also 191 * satisfies constraints imposed via the permutation of flags 192 * passed in. To achieve this, we invoke ire_match_args() on 193 * each matching leaf in the radix tree. ire_match_args is 194 * invoked by the callback function ire_find_best_route() 195 * We hold the global tree lock in read mode when calling 196 * rn_match_args.Before dropping the global tree lock, ensure 197 * that the radix node can't be deleted by incrementing ire_refcnt. 198 */ 199 RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable); 200 rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst, 201 ipst->ips_ip_ftable, ire_find_best_route, &margs); 202 ire = margs.ift_best_ire; 203 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 204 205 if (rt == NULL) { 206 return (NULL); 207 } else { 208 ASSERT(ire != NULL); 209 } 210 211 DTRACE_PROBE2(ire__found, ire_ftable_args_t *, &margs, ire_t *, ire); 212 213 if (!IS_DEFAULT_ROUTE(ire)) 214 goto found_ire_held; 215 /* 216 * If default route is found, see if default matching criteria 217 * are satisfied. 218 */ 219 if (flags & MATCH_IRE_MASK) { 220 /* 221 * we were asked to match a 0 mask, and came back with 222 * a default route. Ok to return it. 223 */ 224 goto found_default_ire; 225 } 226 if ((flags & MATCH_IRE_TYPE) && 227 (type & (IRE_DEFAULT | IRE_INTERFACE))) { 228 /* 229 * we were asked to match a default ire type. Ok to return it. 230 */ 231 goto found_default_ire; 232 } 233 if (flags & MATCH_IRE_DEFAULT) { 234 goto found_default_ire; 235 } 236 /* 237 * we found a default route, but default matching criteria 238 * are not specified and we are not explicitly looking for 239 * default. 240 */ 241 IRE_REFRELE(ire); 242 return (NULL); 243 found_default_ire: 244 /* 245 * round-robin only if we have more than one route in the bucket. 246 */ 247 if ((ire->ire_bucket->irb_ire_cnt > 1) && 248 IS_DEFAULT_ROUTE(ire) && 249 ((flags & (MATCH_IRE_DEFAULT | MATCH_IRE_MASK)) == 250 MATCH_IRE_DEFAULT)) { 251 ire_t *next_ire; 252 253 next_ire = ire_round_robin(ire->ire_bucket, zoneid, &margs, 254 ipst); 255 IRE_REFRELE(ire); 256 if (next_ire != NULL) { 257 ire = next_ire; 258 } else { 259 /* no route */ 260 return (NULL); 261 } 262 } 263 found_ire_held: 264 if ((flags & MATCH_IRE_RJ_BHOLE) && 265 (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) { 266 return (ire); 267 } 268 /* 269 * At this point, IRE that was found must be an IRE_FORWARDTABLE 270 * type. If this is a recursive lookup and an IRE_INTERFACE type was 271 * found, return that. If it was some other IRE_FORWARDTABLE type of 272 * IRE (one of the prefix types), then it is necessary to fill in the 273 * parent IRE pointed to by pire, and then lookup the gateway address of 274 * the parent. For backwards compatiblity, if this lookup returns an 275 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level 276 * of lookup is done. 277 */ 278 if (flags & MATCH_IRE_RECURSIVE) { 279 ipif_t *gw_ipif; 280 int match_flags = MATCH_IRE_DSTONLY; 281 ire_t *save_ire; 282 283 if (ire->ire_type & IRE_INTERFACE) 284 return (ire); 285 if (pire != NULL) 286 *pire = ire; 287 /* 288 * If we can't find an IRE_INTERFACE or the caller has not 289 * asked for pire, we need to REFRELE the save_ire. 290 */ 291 save_ire = ire; 292 293 /* 294 * Currently MATCH_IRE_ILL is never used with 295 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while 296 * sending out packets as MATCH_IRE_ILL is used only 297 * for communicating with on-link hosts. We can't assert 298 * that here as RTM_GET calls this function with 299 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE. 300 * We have already used the MATCH_IRE_ILL in determining 301 * the right prefix route at this point. To match the 302 * behavior of how we locate routes while sending out 303 * packets, we don't want to use MATCH_IRE_ILL below 304 * while locating the interface route. 305 * 306 * ire_ftable_lookup may end up with an incomplete IRE_CACHE 307 * entry for the gateway (i.e., one for which the 308 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller 309 * has specified MATCH_IRE_COMPLETE, such entries will not 310 * be returned; instead, we return the IF_RESOLVER ire. 311 */ 312 if (ire->ire_ipif != NULL) 313 match_flags |= MATCH_IRE_ILL_GROUP; 314 315 ire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 0, 316 ire->ire_ipif, NULL, zoneid, tsl, match_flags, ipst); 317 DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire, 318 (ire_t *), save_ire); 319 if (ire == NULL || 320 ((ire->ire_type & IRE_CACHE) && ire->ire_nce && 321 ire->ire_nce->nce_state != ND_REACHABLE && 322 (flags & MATCH_IRE_COMPLETE))) { 323 /* 324 * Do not release the parent ire if MATCH_IRE_PARENT 325 * is set. Also return it via ire. 326 */ 327 if (ire != NULL) { 328 ire_refrele(ire); 329 ire = NULL; 330 found_incomplete = B_TRUE; 331 } 332 if (flags & MATCH_IRE_PARENT) { 333 if (pire != NULL) { 334 /* 335 * Need an extra REFHOLD, if the parent 336 * ire is returned via both ire and 337 * pire. 338 */ 339 IRE_REFHOLD(save_ire); 340 } 341 ire = save_ire; 342 } else { 343 ire_refrele(save_ire); 344 if (pire != NULL) 345 *pire = NULL; 346 } 347 if (!found_incomplete) 348 return (ire); 349 } 350 if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) { 351 /* 352 * If the caller did not ask for pire, release 353 * it now. 354 */ 355 if (pire == NULL) { 356 ire_refrele(save_ire); 357 } 358 return (ire); 359 } 360 match_flags |= MATCH_IRE_TYPE; 361 gw_addr = ire->ire_gateway_addr; 362 gw_ipif = ire->ire_ipif; 363 ire_refrele(ire); 364 ire = ire_route_lookup(gw_addr, 0, 0, 365 (found_incomplete? IRE_INTERFACE : 366 (IRE_CACHETABLE | IRE_INTERFACE)), 367 gw_ipif, NULL, zoneid, tsl, match_flags, ipst); 368 DTRACE_PROBE2(ftable__route__lookup2, (ire_t *), ire, 369 (ire_t *), save_ire); 370 if (ire == NULL || 371 ((ire->ire_type & IRE_CACHE) && ire->ire_nce && 372 ire->ire_nce->nce_state != ND_REACHABLE && 373 (flags & MATCH_IRE_COMPLETE))) { 374 /* 375 * Do not release the parent ire if MATCH_IRE_PARENT 376 * is set. Also return it via ire. 377 */ 378 if (ire != NULL) { 379 ire_refrele(ire); 380 ire = NULL; 381 } 382 if (flags & MATCH_IRE_PARENT) { 383 if (pire != NULL) { 384 /* 385 * Need an extra REFHOLD, if the 386 * parent ire is returned via both 387 * ire and pire. 388 */ 389 IRE_REFHOLD(save_ire); 390 } 391 ire = save_ire; 392 } else { 393 ire_refrele(save_ire); 394 if (pire != NULL) 395 *pire = NULL; 396 } 397 return (ire); 398 } else if (pire == NULL) { 399 /* 400 * If the caller did not ask for pire, release 401 * it now. 402 */ 403 ire_refrele(save_ire); 404 } 405 return (ire); 406 } 407 ASSERT(pire == NULL || *pire == NULL); 408 return (ire); 409 } 410 411 /* 412 * This function is called by 413 * ip_fast_forward->ire_forward_simple 414 * The optimizations of this function over ire_ftable_lookup are: 415 * o removing unnecessary flag matching 416 * o doing longest prefix match instead of overloading it further 417 * with the unnecessary "best_prefix_match" 418 * o Does not do round robin of default route for every packet 419 * o inlines code of ire_ctable_lookup to look for nexthop cache 420 * entry before calling ire_route_lookup 421 */ 422 static ire_t * 423 ire_ftable_lookup_simple(ipaddr_t addr, 424 ire_t **pire, zoneid_t zoneid, int flags, 425 ip_stack_t *ipst) 426 { 427 ire_t *ire = NULL; 428 ire_t *tmp_ire = NULL; 429 struct rt_sockaddr rdst; 430 struct rt_entry *rt; 431 irb_t *irb_ptr; 432 ire_t *save_ire; 433 int match_flags; 434 435 rdst.rt_sin_len = sizeof (rdst); 436 rdst.rt_sin_family = AF_INET; 437 rdst.rt_sin_addr.s_addr = addr; 438 439 /* 440 * This is basically inlining a simpler version of ire_match_args 441 */ 442 RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable); 443 444 rt = (struct rt_entry *)ipst->ips_ip_ftable->rnh_matchaddr_args(&rdst, 445 ipst->ips_ip_ftable, NULL, NULL); 446 447 if (rt == NULL) { 448 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 449 return (NULL); 450 } 451 irb_ptr = &rt->rt_irb; 452 if (irb_ptr == NULL || irb_ptr->irb_ire_cnt == 0) { 453 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 454 return (NULL); 455 } 456 457 rw_enter(&irb_ptr->irb_lock, RW_READER); 458 for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) { 459 if (ire->ire_zoneid == zoneid) 460 break; 461 } 462 463 if (ire == NULL || (ire->ire_marks & IRE_MARK_CONDEMNED)) { 464 rw_exit(&irb_ptr->irb_lock); 465 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 466 return (NULL); 467 } 468 /* we have a ire that matches */ 469 if (ire != NULL) 470 IRE_REFHOLD(ire); 471 rw_exit(&irb_ptr->irb_lock); 472 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 473 474 if ((flags & MATCH_IRE_RJ_BHOLE) && 475 (ire->ire_flags & (RTF_BLACKHOLE | RTF_REJECT))) { 476 return (ire); 477 } 478 /* 479 * At this point, IRE that was found must be an IRE_FORWARDTABLE 480 * type. If this is a recursive lookup and an IRE_INTERFACE type was 481 * found, return that. If it was some other IRE_FORWARDTABLE type of 482 * IRE (one of the prefix types), then it is necessary to fill in the 483 * parent IRE pointed to by pire, and then lookup the gateway address of 484 * the parent. For backwards compatiblity, if this lookup returns an 485 * IRE other than a IRE_CACHETABLE or IRE_INTERFACE, then one more level 486 * of lookup is done. 487 */ 488 match_flags = MATCH_IRE_DSTONLY; 489 490 if (ire->ire_type & IRE_INTERFACE) 491 return (ire); 492 *pire = ire; 493 /* 494 * If we can't find an IRE_INTERFACE or the caller has not 495 * asked for pire, we need to REFRELE the save_ire. 496 */ 497 save_ire = ire; 498 499 /* 500 * Currently MATCH_IRE_ILL is never used with 501 * (MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT) while 502 * sending out packets as MATCH_IRE_ILL is used only 503 * for communicating with on-link hosts. We can't assert 504 * that here as RTM_GET calls this function with 505 * MATCH_IRE_ILL | MATCH_IRE_DEFAULT | MATCH_IRE_RECURSIVE. 506 * We have already used the MATCH_IRE_ILL in determining 507 * the right prefix route at this point. To match the 508 * behavior of how we locate routes while sending out 509 * packets, we don't want to use MATCH_IRE_ILL below 510 * while locating the interface route. 511 * 512 * ire_ftable_lookup may end up with an incomplete IRE_CACHE 513 * entry for the gateway (i.e., one for which the 514 * ire_nce->nce_state is not yet ND_REACHABLE). If the caller 515 * has specified MATCH_IRE_COMPLETE, such entries will not 516 * be returned; instead, we return the IF_RESOLVER ire. 517 */ 518 519 if (ire->ire_ipif == NULL) { 520 tmp_ire = ire; 521 /* 522 * Look to see if the nexthop entry is in the 523 * cachetable (I am inlining a simpler ire_cache_lookup 524 * here). 525 */ 526 ire = ire_cache_lookup_simple(ire->ire_gateway_addr, ipst); 527 if (ire == NULL) { 528 /* Try ire_route_lookup */ 529 ire = tmp_ire; 530 } else { 531 goto solved; 532 } 533 } 534 if (ire->ire_ipif != NULL) 535 match_flags |= MATCH_IRE_ILL_GROUP; 536 537 ire = ire_route_lookup(ire->ire_gateway_addr, 0, 538 0, 0, ire->ire_ipif, NULL, zoneid, NULL, match_flags, ipst); 539 solved: 540 DTRACE_PROBE2(ftable__route__lookup1, (ire_t *), ire, 541 (ire_t *), save_ire); 542 if (ire == NULL) { 543 /* 544 * Do not release the parent ire if MATCH_IRE_PARENT 545 * is set. Also return it via ire. 546 */ 547 ire_refrele(save_ire); 548 *pire = NULL; 549 return (ire); 550 } 551 if (ire->ire_type & (IRE_CACHETABLE | IRE_INTERFACE)) { 552 /* 553 * If the caller did not ask for pire, release 554 * it now. 555 */ 556 if (pire == NULL) { 557 ire_refrele(save_ire); 558 } 559 } 560 return (ire); 561 } 562 563 /* 564 * Find an IRE_OFFSUBNET IRE entry for the multicast address 'group' 565 * that goes through 'ipif'. As a fallback, a route that goes through 566 * ipif->ipif_ill can be returned. 567 */ 568 ire_t * 569 ipif_lookup_multi_ire(ipif_t *ipif, ipaddr_t group) 570 { 571 ire_t *ire; 572 ire_t *save_ire = NULL; 573 ire_t *gw_ire; 574 irb_t *irb; 575 ipaddr_t gw_addr; 576 int match_flags = MATCH_IRE_TYPE | MATCH_IRE_ILL; 577 ip_stack_t *ipst = ipif->ipif_ill->ill_ipst; 578 579 ASSERT(CLASSD(group)); 580 581 ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, ALL_ZONES, 0, 582 NULL, MATCH_IRE_DEFAULT, ipst); 583 584 if (ire == NULL) 585 return (NULL); 586 587 irb = ire->ire_bucket; 588 ASSERT(irb); 589 590 IRB_REFHOLD(irb); 591 ire_refrele(ire); 592 for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) { 593 if (ire->ire_addr != group || 594 ipif->ipif_zoneid != ire->ire_zoneid && 595 ire->ire_zoneid != ALL_ZONES) { 596 continue; 597 } 598 599 switch (ire->ire_type) { 600 case IRE_DEFAULT: 601 case IRE_PREFIX: 602 case IRE_HOST: 603 gw_addr = ire->ire_gateway_addr; 604 gw_ire = ire_ftable_lookup(gw_addr, 0, 0, IRE_INTERFACE, 605 ipif, NULL, ALL_ZONES, 0, NULL, match_flags, ipst); 606 607 if (gw_ire != NULL) { 608 if (save_ire != NULL) { 609 ire_refrele(save_ire); 610 } 611 IRE_REFHOLD(ire); 612 if (gw_ire->ire_ipif == ipif) { 613 ire_refrele(gw_ire); 614 615 IRB_REFRELE(irb); 616 return (ire); 617 } 618 ire_refrele(gw_ire); 619 save_ire = ire; 620 } 621 break; 622 case IRE_IF_NORESOLVER: 623 case IRE_IF_RESOLVER: 624 if (ire->ire_ipif == ipif) { 625 if (save_ire != NULL) { 626 ire_refrele(save_ire); 627 } 628 IRE_REFHOLD(ire); 629 630 IRB_REFRELE(irb); 631 return (ire); 632 } 633 break; 634 } 635 } 636 IRB_REFRELE(irb); 637 638 return (save_ire); 639 } 640 641 /* 642 * Find an IRE_INTERFACE for the multicast group. 643 * Allows different routes for multicast addresses 644 * in the unicast routing table (akin to 224.0.0.0 but could be more specific) 645 * which point at different interfaces. This is used when IP_MULTICAST_IF 646 * isn't specified (when sending) and when IP_ADD_MEMBERSHIP doesn't 647 * specify the interface to join on. 648 * 649 * Supports IP_BOUND_IF by following the ipif/ill when recursing. 650 */ 651 ire_t * 652 ire_lookup_multi(ipaddr_t group, zoneid_t zoneid, ip_stack_t *ipst) 653 { 654 ire_t *ire; 655 ipif_t *ipif = NULL; 656 int match_flags = MATCH_IRE_TYPE; 657 ipaddr_t gw_addr; 658 659 ire = ire_ftable_lookup(group, 0, 0, 0, NULL, NULL, zoneid, 660 0, NULL, MATCH_IRE_DEFAULT, ipst); 661 662 /* We search a resolvable ire in case of multirouting. */ 663 if ((ire != NULL) && (ire->ire_flags & RTF_MULTIRT)) { 664 ire_t *cire = NULL; 665 /* 666 * If the route is not resolvable, the looked up ire 667 * may be changed here. In that case, ire_multirt_lookup() 668 * IRE_REFRELE the original ire and change it. 669 */ 670 (void) ire_multirt_lookup(&cire, &ire, MULTIRT_CACHEGW, 671 NULL, ipst); 672 if (cire != NULL) 673 ire_refrele(cire); 674 } 675 if (ire == NULL) 676 return (NULL); 677 /* 678 * Make sure we follow ire_ipif. 679 * 680 * We need to determine the interface route through 681 * which the gateway will be reached. We don't really 682 * care which interface is picked if the interface is 683 * part of a group. 684 */ 685 if (ire->ire_ipif != NULL) { 686 ipif = ire->ire_ipif; 687 match_flags |= MATCH_IRE_ILL_GROUP; 688 } 689 690 switch (ire->ire_type) { 691 case IRE_DEFAULT: 692 case IRE_PREFIX: 693 case IRE_HOST: 694 gw_addr = ire->ire_gateway_addr; 695 ire_refrele(ire); 696 ire = ire_ftable_lookup(gw_addr, 0, 0, 697 IRE_INTERFACE, ipif, NULL, zoneid, 0, 698 NULL, match_flags, ipst); 699 return (ire); 700 case IRE_IF_NORESOLVER: 701 case IRE_IF_RESOLVER: 702 return (ire); 703 default: 704 ire_refrele(ire); 705 return (NULL); 706 } 707 } 708 709 /* 710 * Delete the passed in ire if the gateway addr matches 711 */ 712 void 713 ire_del_host_redir(ire_t *ire, char *gateway) 714 { 715 if ((ire->ire_flags & RTF_DYNAMIC) && 716 (ire->ire_gateway_addr == *(ipaddr_t *)gateway)) 717 ire_delete(ire); 718 } 719 720 /* 721 * Search for all HOST REDIRECT routes that are 722 * pointing at the specified gateway and 723 * delete them. This routine is called only 724 * when a default gateway is going away. 725 */ 726 void 727 ire_delete_host_redirects(ipaddr_t gateway, ip_stack_t *ipst) 728 { 729 struct rtfuncarg rtfarg; 730 731 (void) memset(&rtfarg, 0, sizeof (rtfarg)); 732 rtfarg.rt_func = ire_del_host_redir; 733 rtfarg.rt_arg = (void *)&gateway; 734 (void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable, 735 rtfunc, &rtfarg, irb_refhold_rn, irb_refrele_rn); 736 } 737 738 struct ihandle_arg { 739 uint32_t ihandle; 740 ire_t *ire; 741 }; 742 743 static int 744 ire_ihandle_onlink_match(struct radix_node *rn, void *arg) 745 { 746 struct rt_entry *rt; 747 irb_t *irb; 748 ire_t *ire; 749 struct ihandle_arg *ih = arg; 750 751 rt = (struct rt_entry *)rn; 752 ASSERT(rt != NULL); 753 irb = &rt->rt_irb; 754 for (ire = irb->irb_ire; ire != NULL; ire = ire->ire_next) { 755 if ((ire->ire_type & IRE_INTERFACE) && 756 (ire->ire_ihandle == ih->ihandle)) { 757 ih->ire = ire; 758 IRE_REFHOLD(ire); 759 return (1); 760 } 761 } 762 return (0); 763 } 764 765 /* 766 * Locate the interface ire that is tied to the cache ire 'cire' via 767 * cire->ire_ihandle. 768 * 769 * We are trying to create the cache ire for an onlink destn. or 770 * gateway in 'cire'. We are called from ire_add_v4() in the IRE_IF_RESOLVER 771 * case, after the ire has come back from ARP. 772 */ 773 ire_t * 774 ire_ihandle_lookup_onlink(ire_t *cire) 775 { 776 ire_t *ire; 777 int match_flags; 778 struct ihandle_arg ih; 779 ip_stack_t *ipst; 780 781 ASSERT(cire != NULL); 782 ipst = cire->ire_ipst; 783 784 /* 785 * We don't need to specify the zoneid to ire_ftable_lookup() below 786 * because the ihandle refers to an ipif which can be in only one zone. 787 */ 788 match_flags = MATCH_IRE_TYPE | MATCH_IRE_IHANDLE | MATCH_IRE_MASK; 789 /* 790 * We know that the mask of the interface ire equals cire->ire_cmask. 791 * (When ip_newroute() created 'cire' for an on-link destn. it set its 792 * cmask from the interface ire's mask) 793 */ 794 ire = ire_ftable_lookup(cire->ire_addr, cire->ire_cmask, 0, 795 IRE_INTERFACE, NULL, NULL, ALL_ZONES, cire->ire_ihandle, 796 NULL, match_flags, ipst); 797 if (ire != NULL) 798 return (ire); 799 /* 800 * If we didn't find an interface ire above, we can't declare failure. 801 * For backwards compatibility, we need to support prefix routes 802 * pointing to next hop gateways that are not on-link. 803 * 804 * In the resolver/noresolver case, ip_newroute() thinks it is creating 805 * the cache ire for an onlink destination in 'cire'. But 'cire' is 806 * not actually onlink, because ire_ftable_lookup() cheated it, by 807 * doing ire_route_lookup() twice and returning an interface ire. 808 * 809 * Eg. default - gw1 (line 1) 810 * gw1 - gw2 (line 2) 811 * gw2 - hme0 (line 3) 812 * 813 * In the above example, ip_newroute() tried to create the cache ire 814 * 'cire' for gw1, based on the interface route in line 3. The 815 * ire_ftable_lookup() above fails, because there is no interface route 816 * to reach gw1. (it is gw2). We fall thru below. 817 * 818 * Do a brute force search based on the ihandle in a subset of the 819 * forwarding tables, corresponding to cire->ire_cmask. Otherwise 820 * things become very complex, since we don't have 'pire' in this 821 * case. (Also note that this method is not possible in the offlink 822 * case because we don't know the mask) 823 */ 824 (void) memset(&ih, 0, sizeof (ih)); 825 ih.ihandle = cire->ire_ihandle; 826 (void) ipst->ips_ip_ftable->rnh_walktree_mt(ipst->ips_ip_ftable, 827 ire_ihandle_onlink_match, &ih, irb_refhold_rn, irb_refrele_rn); 828 return (ih.ire); 829 } 830 831 /* 832 * IRE iterator used by ire_ftable_lookup[_v6]() to process multiple default 833 * routes. Given a starting point in the hash list (ire_origin), walk the IREs 834 * in the bucket skipping default interface routes and deleted entries. 835 * Returns the next IRE (unheld), or NULL when we're back to the starting point. 836 * Assumes that the caller holds a reference on the IRE bucket. 837 */ 838 ire_t * 839 ire_get_next_default_ire(ire_t *ire, ire_t *ire_origin) 840 { 841 ASSERT(ire_origin->ire_bucket != NULL); 842 ASSERT(ire != NULL); 843 844 do { 845 ire = ire->ire_next; 846 if (ire == NULL) 847 ire = ire_origin->ire_bucket->irb_ire; 848 if (ire == ire_origin) 849 return (NULL); 850 } while ((ire->ire_type & IRE_INTERFACE) || 851 (ire->ire_marks & IRE_MARK_CONDEMNED)); 852 ASSERT(ire != NULL); 853 return (ire); 854 } 855 856 static ipif_t * 857 ire_forward_src_ipif(ipaddr_t dst, ire_t *sire, ire_t *ire, ill_t *dst_ill, 858 int zoneid, ushort_t *marks) 859 { 860 ipif_t *src_ipif; 861 ip_stack_t *ipst = dst_ill->ill_ipst; 862 863 /* 864 * Pick the best source address from dst_ill. 865 * 866 * 1) If it is part of a multipathing group, we would 867 * like to spread the inbound packets across different 868 * interfaces. ipif_select_source picks a random source 869 * across the different ills in the group. 870 * 871 * 2) If it is not part of a multipathing group, we try 872 * to pick the source address from the destination 873 * route. Clustering assumes that when we have multiple 874 * prefixes hosted on an interface, the prefix of the 875 * source address matches the prefix of the destination 876 * route. We do this only if the address is not 877 * DEPRECATED. 878 * 879 * 3) If the conn is in a different zone than the ire, we 880 * need to pick a source address from the right zone. 881 * 882 * NOTE : If we hit case (1) above, the prefix of the source 883 * address picked may not match the prefix of the 884 * destination routes prefix as ipif_select_source 885 * does not look at "dst" while picking a source 886 * address. 887 * If we want the same behavior as (2), we will need 888 * to change the behavior of ipif_select_source. 889 */ 890 891 if ((sire != NULL) && (sire->ire_flags & RTF_SETSRC)) { 892 /* 893 * The RTF_SETSRC flag is set in the parent ire (sire). 894 * Check that the ipif matching the requested source 895 * address still exists. 896 */ 897 src_ipif = ipif_lookup_addr(sire->ire_src_addr, NULL, 898 zoneid, NULL, NULL, NULL, NULL, ipst); 899 return (src_ipif); 900 } 901 *marks |= IRE_MARK_USESRC_CHECK; 902 if ((dst_ill->ill_group != NULL) || 903 (ire->ire_ipif->ipif_flags & IPIF_DEPRECATED) || 904 (dst_ill->ill_usesrc_ifindex != 0)) { 905 src_ipif = ipif_select_source(dst_ill, dst, zoneid); 906 if (src_ipif == NULL) 907 return (NULL); 908 909 } else { 910 src_ipif = ire->ire_ipif; 911 ASSERT(src_ipif != NULL); 912 /* hold src_ipif for uniformity */ 913 ipif_refhold(src_ipif); 914 } 915 return (src_ipif); 916 } 917 918 /* 919 * This function is called by ip_rput_noire() and ip_fast_forward() 920 * to resolve the route of incoming packet that needs to be forwarded. 921 * If the ire of the nexthop is not already in the cachetable, this 922 * routine will insert it to the table, but won't trigger ARP resolution yet. 923 * Thus unlike ip_newroute, this function adds incomplete ires to 924 * the cachetable. ARP resolution for these ires are delayed until 925 * after all of the packet processing is completed and its ready to 926 * be sent out on the wire, Eventually, the packet transmit routine 927 * ip_xmit_v4() attempts to send a packet to the driver. If it finds 928 * that there is no link layer information, it will do the arp 929 * resolution and queue the packet in ire->ire_nce->nce_qd_mp and 930 * then send it out once the arp resolution is over 931 * (see ip_xmit_v4()->ire_arpresolve()). This scheme is similar to 932 * the model of BSD/SunOS 4 933 * 934 * In future, the insertion of incomplete ires in the cachetable should 935 * be implemented in hostpath as well, as doing so will greatly reduce 936 * the existing complexity for code paths that depend on the context of 937 * the sender (such as IPsec). 938 * 939 * Thus this scheme of adding incomplete ires in cachetable in forwarding 940 * path can be used as a template for simplifying the hostpath. 941 */ 942 943 ire_t * 944 ire_forward(ipaddr_t dst, enum ire_forward_action *ret_action, 945 ire_t *supplied_ire, ire_t *supplied_sire, const struct ts_label_s *tsl, 946 ip_stack_t *ipst) 947 { 948 ipaddr_t gw = 0; 949 ire_t *ire = NULL; 950 ire_t *sire = NULL, *save_ire; 951 ill_t *dst_ill = NULL; 952 int error; 953 zoneid_t zoneid; 954 ipif_t *src_ipif = NULL; 955 mblk_t *res_mp; 956 ushort_t ire_marks = 0; 957 tsol_gcgrp_t *gcgrp = NULL; 958 tsol_gcgrp_addr_t ga; 959 960 zoneid = GLOBAL_ZONEID; 961 962 if (supplied_ire != NULL) { 963 /* We have arrived here from ipfil_sendpkt */ 964 ire = supplied_ire; 965 sire = supplied_sire; 966 goto create_irecache; 967 } 968 969 ire = ire_ftable_lookup(dst, 0, 0, 0, NULL, &sire, zoneid, 0, 970 tsl, MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | 971 MATCH_IRE_RJ_BHOLE | MATCH_IRE_PARENT|MATCH_IRE_SECATTR, ipst); 972 973 if (ire == NULL) { 974 ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst); 975 goto icmp_err_ret; 976 } 977 978 /* 979 * If we encounter CGTP, we should have the caller use 980 * ip_newroute to resolve multirt instead of this function. 981 * CGTP specs explicitly state that it can't be used with routers. 982 * This essentially prevents insertion of incomplete RTF_MULTIRT 983 * ires in cachetable. 984 */ 985 if (ipst->ips_ip_cgtp_filter && 986 ((ire->ire_flags & RTF_MULTIRT) || 987 ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) { 988 ip3dbg(("ire_forward: packet is to be multirouted- " 989 "handing it to ip_newroute\n")); 990 if (sire != NULL) 991 ire_refrele(sire); 992 ire_refrele(ire); 993 /* 994 * Inform caller about encountering of multirt so that 995 * ip_newroute() can be called. 996 */ 997 *ret_action = Forward_check_multirt; 998 return (NULL); 999 } 1000 1001 /* 1002 * Verify that the returned IRE does not have either 1003 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is 1004 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER. 1005 */ 1006 if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) || 1007 (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0) { 1008 ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n", 1009 (void *)ire)); 1010 goto icmp_err_ret; 1011 } 1012 1013 /* 1014 * If we already have a fully resolved IRE CACHE of the 1015 * nexthop router, just hand over the cache entry 1016 * and we are done. 1017 */ 1018 1019 if (ire->ire_type & IRE_CACHE) { 1020 1021 /* 1022 * If we are using this ire cache entry as a 1023 * gateway to forward packets, chances are we 1024 * will be using it again. So turn off 1025 * the temporary flag, thus reducing its 1026 * chances of getting deleted frequently. 1027 */ 1028 if (ire->ire_marks & IRE_MARK_TEMPORARY) { 1029 irb_t *irb = ire->ire_bucket; 1030 rw_enter(&irb->irb_lock, RW_WRITER); 1031 /* 1032 * We need to recheck for IRE_MARK_TEMPORARY after 1033 * acquiring the lock in order to guarantee 1034 * irb_tmp_ire_cnt 1035 */ 1036 if (ire->ire_marks & IRE_MARK_TEMPORARY) { 1037 ire->ire_marks &= ~IRE_MARK_TEMPORARY; 1038 irb->irb_tmp_ire_cnt--; 1039 } 1040 rw_exit(&irb->irb_lock); 1041 } 1042 1043 if (sire != NULL) { 1044 UPDATE_OB_PKT_COUNT(sire); 1045 sire->ire_last_used_time = lbolt; 1046 ire_refrele(sire); 1047 } 1048 *ret_action = Forward_ok; 1049 return (ire); 1050 } 1051 create_irecache: 1052 /* 1053 * Increment the ire_ob_pkt_count field for ire if it is an 1054 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and 1055 * increment the same for the parent IRE, sire, if it is some 1056 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST). 1057 */ 1058 if ((ire->ire_type & IRE_INTERFACE) != 0) { 1059 UPDATE_OB_PKT_COUNT(ire); 1060 ire->ire_last_used_time = lbolt; 1061 } 1062 1063 /* 1064 * sire must be either IRE_CACHETABLE OR IRE_INTERFACE type 1065 */ 1066 if (sire != NULL) { 1067 gw = sire->ire_gateway_addr; 1068 ASSERT((sire->ire_type & 1069 (IRE_CACHETABLE | IRE_INTERFACE)) == 0); 1070 UPDATE_OB_PKT_COUNT(sire); 1071 sire->ire_last_used_time = lbolt; 1072 } 1073 1074 /* Obtain dst_ill */ 1075 dst_ill = ip_newroute_get_dst_ill(ire->ire_ipif->ipif_ill); 1076 if (dst_ill == NULL) { 1077 ip2dbg(("ire_forward no dst ill; ire 0x%p\n", 1078 (void *)ire)); 1079 goto icmp_err_ret; 1080 } 1081 1082 ASSERT(src_ipif == NULL); 1083 /* Now obtain the src_ipif */ 1084 src_ipif = ire_forward_src_ipif(dst, sire, ire, dst_ill, 1085 zoneid, &ire_marks); 1086 if (src_ipif == NULL) 1087 goto icmp_err_ret; 1088 1089 switch (ire->ire_type) { 1090 case IRE_IF_NORESOLVER: 1091 /* create ire_cache for ire_addr endpoint */ 1092 if (dst_ill->ill_phys_addr_length != IP_ADDR_LEN && 1093 dst_ill->ill_resolver_mp == NULL) { 1094 ip1dbg(("ire_forward: dst_ill %p " 1095 "for IRE_IF_NORESOLVER ire %p has " 1096 "no ill_resolver_mp\n", 1097 (void *)dst_ill, (void *)ire)); 1098 goto icmp_err_ret; 1099 } 1100 /* FALLTHRU */ 1101 case IRE_IF_RESOLVER: 1102 /* 1103 * We have the IRE_IF_RESOLVER of the nexthop gateway 1104 * and now need to build a IRE_CACHE for it. 1105 * In this case, we have the following : 1106 * 1107 * 1) src_ipif - used for getting a source address. 1108 * 1109 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This 1110 * means packets using the IRE_CACHE that we will build 1111 * here will go out on dst_ill. 1112 * 1113 * 3) sire may or may not be NULL. But, the IRE_CACHE that is 1114 * to be created will only be tied to the IRE_INTERFACE 1115 * that was derived from the ire_ihandle field. 1116 * 1117 * If sire is non-NULL, it means the destination is 1118 * off-link and we will first create the IRE_CACHE for the 1119 * gateway. 1120 */ 1121 res_mp = dst_ill->ill_resolver_mp; 1122 if (ire->ire_type == IRE_IF_RESOLVER && 1123 (!OK_RESOLVER_MP(res_mp))) { 1124 goto icmp_err_ret; 1125 } 1126 /* 1127 * To be at this point in the code with a non-zero gw 1128 * means that dst is reachable through a gateway that 1129 * we have never resolved. By changing dst to the gw 1130 * addr we resolve the gateway first. 1131 */ 1132 if (gw != INADDR_ANY) { 1133 /* 1134 * The source ipif that was determined above was 1135 * relative to the destination address, not the 1136 * gateway's. If src_ipif was not taken out of 1137 * the IRE_IF_RESOLVER entry, we'll need to call 1138 * ipif_select_source() again. 1139 */ 1140 if (src_ipif != ire->ire_ipif) { 1141 ipif_refrele(src_ipif); 1142 src_ipif = ipif_select_source(dst_ill, 1143 gw, zoneid); 1144 if (src_ipif == NULL) 1145 goto icmp_err_ret; 1146 } 1147 dst = gw; 1148 gw = INADDR_ANY; 1149 } 1150 /* 1151 * dst has been set to the address of the nexthop. 1152 * 1153 * TSol note: get security attributes of the nexthop; 1154 * Note that the nexthop may either be a gateway, or the 1155 * packet destination itself; Detailed explanation of 1156 * issues involved is provided in the IRE_IF_NORESOLVER 1157 * logic in ip_newroute(). 1158 */ 1159 ga.ga_af = AF_INET; 1160 IN6_IPADDR_TO_V4MAPPED(dst, &ga.ga_addr); 1161 gcgrp = gcgrp_lookup(&ga, B_FALSE); 1162 1163 if (ire->ire_type == IRE_IF_NORESOLVER) 1164 dst = ire->ire_addr; /* ire_cache for tunnel endpoint */ 1165 1166 save_ire = ire; 1167 /* 1168 * create an incomplete IRE_CACHE. 1169 * An areq_mp will be generated in ire_arpresolve() for 1170 * RESOLVER interfaces. 1171 */ 1172 ire = ire_create( 1173 (uchar_t *)&dst, /* dest address */ 1174 (uchar_t *)&ip_g_all_ones, /* mask */ 1175 (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */ 1176 (uchar_t *)&gw, /* gateway address */ 1177 (save_ire->ire_type == IRE_IF_RESOLVER ? NULL: 1178 &save_ire->ire_max_frag), 1179 NULL, 1180 dst_ill->ill_rq, /* recv-from queue */ 1181 dst_ill->ill_wq, /* send-to queue */ 1182 IRE_CACHE, /* IRE type */ 1183 src_ipif, 1184 ire->ire_mask, /* Parent mask */ 1185 0, 1186 ire->ire_ihandle, /* Interface handle */ 1187 0, 1188 &(ire->ire_uinfo), 1189 NULL, 1190 gcgrp, 1191 ipst); 1192 ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire)); 1193 if (ire != NULL) { 1194 gcgrp = NULL; /* reference now held by IRE */ 1195 ire->ire_marks |= ire_marks; 1196 /* add the incomplete ire: */ 1197 error = ire_add(&ire, NULL, NULL, NULL, B_TRUE); 1198 if (error == 0 && ire != NULL) { 1199 ire->ire_max_frag = save_ire->ire_max_frag; 1200 ip1dbg(("setting max_frag to %d in ire 0x%p\n", 1201 ire->ire_max_frag, (void *)ire)); 1202 } else { 1203 ire_refrele(save_ire); 1204 goto icmp_err_ret; 1205 } 1206 } else { 1207 if (gcgrp != NULL) { 1208 GCGRP_REFRELE(gcgrp); 1209 gcgrp = NULL; 1210 } 1211 } 1212 1213 ire_refrele(save_ire); 1214 break; 1215 default: 1216 break; 1217 } 1218 1219 *ret_action = Forward_ok; 1220 if (sire != NULL) 1221 ire_refrele(sire); 1222 if (dst_ill != NULL) 1223 ill_refrele(dst_ill); 1224 if (src_ipif != NULL) 1225 ipif_refrele(src_ipif); 1226 return (ire); 1227 icmp_err_ret: 1228 *ret_action = Forward_ret_icmp_err; 1229 if (sire != NULL) 1230 ire_refrele(sire); 1231 if (dst_ill != NULL) 1232 ill_refrele(dst_ill); 1233 if (src_ipif != NULL) 1234 ipif_refrele(src_ipif); 1235 if (ire != NULL) { 1236 if (ire->ire_flags & RTF_BLACKHOLE) 1237 *ret_action = Forward_blackhole; 1238 ire_refrele(ire); 1239 } 1240 return (NULL); 1241 } 1242 1243 /* 1244 * Since caller is ip_fast_forward, there is no CGTP or Tsol test 1245 * Also we dont call ftable lookup with MATCH_IRE_PARENT 1246 */ 1247 1248 ire_t * 1249 ire_forward_simple(ipaddr_t dst, enum ire_forward_action *ret_action, 1250 ip_stack_t *ipst) 1251 { 1252 ipaddr_t gw = 0; 1253 ire_t *ire = NULL; 1254 ire_t *sire = NULL, *save_ire; 1255 ill_t *dst_ill = NULL; 1256 int error; 1257 zoneid_t zoneid; 1258 ipif_t *src_ipif = NULL; 1259 mblk_t *res_mp; 1260 ushort_t ire_marks = 0; 1261 1262 zoneid = GLOBAL_ZONEID; 1263 1264 1265 ire = ire_ftable_lookup_simple(dst, &sire, zoneid, 1266 MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | 1267 MATCH_IRE_RJ_BHOLE, ipst); 1268 1269 if (ire == NULL) { 1270 ip_rts_change(RTM_MISS, dst, 0, 0, 0, 0, 0, 0, RTA_DST, ipst); 1271 goto icmp_err_ret; 1272 } 1273 1274 /* 1275 * Verify that the returned IRE does not have either 1276 * the RTF_REJECT or RTF_BLACKHOLE flags set and that the IRE is 1277 * either an IRE_CACHE, IRE_IF_NORESOLVER or IRE_IF_RESOLVER. 1278 */ 1279 if ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE))) { 1280 ASSERT(ire->ire_type & (IRE_CACHE | IRE_INTERFACE)); 1281 ip3dbg(("ire 0x%p is not cache/resolver/noresolver\n", 1282 (void *)ire)); 1283 goto icmp_err_ret; 1284 } 1285 1286 /* 1287 * If we already have a fully resolved IRE CACHE of the 1288 * nexthop router, just hand over the cache entry 1289 * and we are done. 1290 */ 1291 1292 if (ire->ire_type & IRE_CACHE) { 1293 1294 /* 1295 * If we are using this ire cache entry as a 1296 * gateway to forward packets, chances are we 1297 * will be using it again. So turn off 1298 * the temporary flag, thus reducing its 1299 * chances of getting deleted frequently. 1300 */ 1301 if (ire->ire_marks & IRE_MARK_TEMPORARY) { 1302 irb_t *irb = ire->ire_bucket; 1303 rw_enter(&irb->irb_lock, RW_WRITER); 1304 ire->ire_marks &= ~IRE_MARK_TEMPORARY; 1305 irb->irb_tmp_ire_cnt--; 1306 rw_exit(&irb->irb_lock); 1307 } 1308 1309 if (sire != NULL) { 1310 UPDATE_OB_PKT_COUNT(sire); 1311 ire_refrele(sire); 1312 } 1313 *ret_action = Forward_ok; 1314 return (ire); 1315 } 1316 /* 1317 * Increment the ire_ob_pkt_count field for ire if it is an 1318 * INTERFACE (IF_RESOLVER or IF_NORESOLVER) IRE type, and 1319 * increment the same for the parent IRE, sire, if it is some 1320 * sort of prefix IRE (which includes DEFAULT, PREFIX, and HOST). 1321 */ 1322 if ((ire->ire_type & IRE_INTERFACE) != 0) { 1323 UPDATE_OB_PKT_COUNT(ire); 1324 ire->ire_last_used_time = lbolt; 1325 } 1326 1327 /* 1328 * sire must be either IRE_CACHETABLE OR IRE_INTERFACE type 1329 */ 1330 if (sire != NULL) { 1331 gw = sire->ire_gateway_addr; 1332 ASSERT((sire->ire_type & 1333 (IRE_CACHETABLE | IRE_INTERFACE)) == 0); 1334 UPDATE_OB_PKT_COUNT(sire); 1335 } 1336 1337 /* Obtain dst_ill */ 1338 dst_ill = ip_newroute_get_dst_ill(ire->ire_ipif->ipif_ill); 1339 if (dst_ill == NULL) { 1340 ip2dbg(("ire_forward no dst ill; ire 0x%p\n", 1341 (void *)ire)); 1342 goto icmp_err_ret; 1343 } 1344 1345 ASSERT(src_ipif == NULL); 1346 /* Now obtain the src_ipif */ 1347 src_ipif = ire_forward_src_ipif(dst, sire, ire, dst_ill, 1348 zoneid, &ire_marks); 1349 if (src_ipif == NULL) 1350 goto icmp_err_ret; 1351 1352 switch (ire->ire_type) { 1353 case IRE_IF_NORESOLVER: 1354 /* create ire_cache for ire_addr endpoint */ 1355 case IRE_IF_RESOLVER: 1356 /* 1357 * We have the IRE_IF_RESOLVER of the nexthop gateway 1358 * and now need to build a IRE_CACHE for it. 1359 * In this case, we have the following : 1360 * 1361 * 1) src_ipif - used for getting a source address. 1362 * 1363 * 2) dst_ill - from which we derive ire_stq/ire_rfq. This 1364 * means packets using the IRE_CACHE that we will build 1365 * here will go out on dst_ill. 1366 * 1367 * 3) sire may or may not be NULL. But, the IRE_CACHE that is 1368 * to be created will only be tied to the IRE_INTERFACE 1369 * that was derived from the ire_ihandle field. 1370 * 1371 * If sire is non-NULL, it means the destination is 1372 * off-link and we will first create the IRE_CACHE for the 1373 * gateway. 1374 */ 1375 res_mp = dst_ill->ill_resolver_mp; 1376 if (ire->ire_type == IRE_IF_RESOLVER && 1377 (!OK_RESOLVER_MP(res_mp))) { 1378 ire_refrele(ire); 1379 ire = NULL; 1380 goto out; 1381 } 1382 /* 1383 * To be at this point in the code with a non-zero gw 1384 * means that dst is reachable through a gateway that 1385 * we have never resolved. By changing dst to the gw 1386 * addr we resolve the gateway first. 1387 */ 1388 if (gw != INADDR_ANY) { 1389 /* 1390 * The source ipif that was determined above was 1391 * relative to the destination address, not the 1392 * gateway's. If src_ipif was not taken out of 1393 * the IRE_IF_RESOLVER entry, we'll need to call 1394 * ipif_select_source() again. 1395 */ 1396 if (src_ipif != ire->ire_ipif) { 1397 ipif_refrele(src_ipif); 1398 src_ipif = ipif_select_source(dst_ill, 1399 gw, zoneid); 1400 if (src_ipif == NULL) 1401 goto icmp_err_ret; 1402 } 1403 dst = gw; 1404 gw = INADDR_ANY; 1405 } 1406 1407 if (ire->ire_type == IRE_IF_NORESOLVER) 1408 dst = ire->ire_addr; /* ire_cache for tunnel endpoint */ 1409 1410 save_ire = ire; 1411 /* 1412 * create an incomplete IRE_CACHE. 1413 * An areq_mp will be generated in ire_arpresolve() for 1414 * RESOLVER interfaces. 1415 */ 1416 ire = ire_create( 1417 (uchar_t *)&dst, /* dest address */ 1418 (uchar_t *)&ip_g_all_ones, /* mask */ 1419 (uchar_t *)&src_ipif->ipif_src_addr, /* src addr */ 1420 (uchar_t *)&gw, /* gateway address */ 1421 (save_ire->ire_type == IRE_IF_RESOLVER ? NULL: 1422 &save_ire->ire_max_frag), 1423 NULL, 1424 dst_ill->ill_rq, /* recv-from queue */ 1425 dst_ill->ill_wq, /* send-to queue */ 1426 IRE_CACHE, /* IRE type */ 1427 src_ipif, 1428 ire->ire_mask, /* Parent mask */ 1429 0, 1430 ire->ire_ihandle, /* Interface handle */ 1431 0, 1432 &(ire->ire_uinfo), 1433 NULL, 1434 NULL, 1435 ipst); 1436 ip1dbg(("incomplete ire_cache 0x%p\n", (void *)ire)); 1437 if (ire != NULL) { 1438 ire->ire_marks |= ire_marks; 1439 /* add the incomplete ire: */ 1440 error = ire_add(&ire, NULL, NULL, NULL, B_TRUE); 1441 if (error == 0 && ire != NULL) { 1442 ire->ire_max_frag = save_ire->ire_max_frag; 1443 ip1dbg(("setting max_frag to %d in ire 0x%p\n", 1444 ire->ire_max_frag, (void *)ire)); 1445 } else { 1446 ire_refrele(save_ire); 1447 goto icmp_err_ret; 1448 } 1449 } 1450 1451 ire_refrele(save_ire); 1452 break; 1453 default: 1454 break; 1455 } 1456 1457 out: 1458 *ret_action = Forward_ok; 1459 if (sire != NULL) 1460 ire_refrele(sire); 1461 if (dst_ill != NULL) 1462 ill_refrele(dst_ill); 1463 if (src_ipif != NULL) 1464 ipif_refrele(src_ipif); 1465 return (ire); 1466 icmp_err_ret: 1467 *ret_action = Forward_ret_icmp_err; 1468 if (src_ipif != NULL) 1469 ipif_refrele(src_ipif); 1470 if (dst_ill != NULL) 1471 ill_refrele(dst_ill); 1472 if (sire != NULL) 1473 ire_refrele(sire); 1474 if (ire != NULL) { 1475 if (ire->ire_flags & RTF_BLACKHOLE) 1476 *ret_action = Forward_blackhole; 1477 ire_refrele(ire); 1478 } 1479 /* caller needs to send icmp error message */ 1480 return (NULL); 1481 1482 } 1483 1484 /* 1485 * Obtain the rt_entry and rt_irb for the route to be added to 1486 * the ips_ip_ftable. 1487 * First attempt to add a node to the radix tree via rn_addroute. If the 1488 * route already exists, return the bucket for the existing route. 1489 * 1490 * Locking notes: Need to hold the global radix tree lock in write mode to 1491 * add a radix node. To prevent the node from being deleted, ire_get_bucket() 1492 * returns with a ref'ed irb_t. The ire itself is added in ire_add_v4() 1493 * while holding the irb_lock, but not the radix tree lock. 1494 */ 1495 irb_t * 1496 ire_get_bucket(ire_t *ire) 1497 { 1498 struct radix_node *rn; 1499 struct rt_entry *rt; 1500 struct rt_sockaddr rmask, rdst; 1501 irb_t *irb = NULL; 1502 ip_stack_t *ipst = ire->ire_ipst; 1503 1504 ASSERT(ipst->ips_ip_ftable != NULL); 1505 1506 /* first try to see if route exists (based on rtalloc1) */ 1507 (void) memset(&rdst, 0, sizeof (rdst)); 1508 rdst.rt_sin_len = sizeof (rdst); 1509 rdst.rt_sin_family = AF_INET; 1510 rdst.rt_sin_addr.s_addr = ire->ire_addr; 1511 1512 (void) memset(&rmask, 0, sizeof (rmask)); 1513 rmask.rt_sin_len = sizeof (rmask); 1514 rmask.rt_sin_family = AF_INET; 1515 rmask.rt_sin_addr.s_addr = ire->ire_mask; 1516 1517 /* 1518 * add the route. based on BSD's rtrequest1(RTM_ADD) 1519 */ 1520 R_Malloc(rt, rt_entry_cache, sizeof (*rt)); 1521 /* kmem_alloc failed */ 1522 if (rt == NULL) 1523 return (NULL); 1524 1525 (void) memset(rt, 0, sizeof (*rt)); 1526 rt->rt_nodes->rn_key = (char *)&rt->rt_dst; 1527 rt->rt_dst = rdst; 1528 irb = &rt->rt_irb; 1529 irb->irb_marks |= IRB_MARK_FTABLE; /* dynamically allocated/freed */ 1530 irb->irb_ipst = ipst; 1531 rw_init(&irb->irb_lock, NULL, RW_DEFAULT, NULL); 1532 RADIX_NODE_HEAD_WLOCK(ipst->ips_ip_ftable); 1533 rn = ipst->ips_ip_ftable->rnh_addaddr(&rt->rt_dst, &rmask, 1534 ipst->ips_ip_ftable, (struct radix_node *)rt); 1535 if (rn == NULL) { 1536 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 1537 Free(rt, rt_entry_cache); 1538 rt = NULL; 1539 irb = NULL; 1540 RADIX_NODE_HEAD_RLOCK(ipst->ips_ip_ftable); 1541 rn = ipst->ips_ip_ftable->rnh_lookup(&rdst, &rmask, 1542 ipst->ips_ip_ftable); 1543 if (rn != NULL && ((rn->rn_flags & RNF_ROOT) == 0)) { 1544 /* found a non-root match */ 1545 rt = (struct rt_entry *)rn; 1546 } 1547 } 1548 if (rt != NULL) { 1549 irb = &rt->rt_irb; 1550 IRB_REFHOLD(irb); 1551 } 1552 RADIX_NODE_HEAD_UNLOCK(ipst->ips_ip_ftable); 1553 return (irb); 1554 } 1555 1556 /* 1557 * This function is used when the caller wants to know the outbound 1558 * interface for a packet given only the address. 1559 * If this is a offlink IP address and there are multiple 1560 * routes to this destination, this routine will utilise the 1561 * first route it finds to IP address 1562 * Return values: 1563 * 0 - FAILURE 1564 * nonzero - ifindex 1565 */ 1566 uint_t 1567 ifindex_lookup(const struct sockaddr *ipaddr, zoneid_t zoneid) 1568 { 1569 uint_t ifindex = 0; 1570 ire_t *ire; 1571 ill_t *ill; 1572 netstack_t *ns; 1573 ip_stack_t *ipst; 1574 1575 if (zoneid == ALL_ZONES) 1576 ns = netstack_find_by_zoneid(GLOBAL_ZONEID); 1577 else 1578 ns = netstack_find_by_zoneid(zoneid); 1579 ASSERT(ns != NULL); 1580 1581 /* 1582 * For exclusive stacks we set the zoneid to zero 1583 * since IP uses the global zoneid in the exclusive stacks. 1584 */ 1585 if (ns->netstack_stackid != GLOBAL_NETSTACKID) 1586 zoneid = GLOBAL_ZONEID; 1587 ipst = ns->netstack_ip; 1588 1589 ASSERT(ipaddr->sa_family == AF_INET || ipaddr->sa_family == AF_INET6); 1590 1591 if ((ire = route_to_dst(ipaddr, zoneid, ipst)) != NULL) { 1592 ill = ire_to_ill(ire); 1593 if (ill != NULL) 1594 ifindex = ill->ill_phyint->phyint_ifindex; 1595 ire_refrele(ire); 1596 } 1597 netstack_rele(ns); 1598 return (ifindex); 1599 } 1600 1601 /* 1602 * Routine to find the route to a destination. If a ifindex is supplied 1603 * it tries to match the the route to the corresponding ipif for the ifindex 1604 */ 1605 static ire_t * 1606 route_to_dst(const struct sockaddr *dst_addr, zoneid_t zoneid, ip_stack_t *ipst) 1607 { 1608 ire_t *ire = NULL; 1609 int match_flags; 1610 1611 match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT | 1612 MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE); 1613 1614 /* XXX pass NULL tsl for now */ 1615 1616 if (dst_addr->sa_family == AF_INET) { 1617 ire = ire_route_lookup( 1618 ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr, 1619 0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst); 1620 } else { 1621 ire = ire_route_lookup_v6( 1622 &((struct sockaddr_in6 *)dst_addr)->sin6_addr, 1623 0, 0, 0, NULL, NULL, zoneid, NULL, match_flags, ipst); 1624 } 1625 return (ire); 1626 } 1627 1628 /* 1629 * This routine is called by IP Filter to send a packet out on the wire 1630 * to a specified V4 dst (which may be onlink or offlink). The ifindex may or 1631 * may not be 0. A non-null ifindex indicates IP Filter has stipulated 1632 * an outgoing interface and requires the nexthop to be on that interface. 1633 * IP WILL NOT DO the following to the data packet before sending it out: 1634 * a. manipulate ttl 1635 * b. ipsec work 1636 * c. fragmentation 1637 * 1638 * If the packet has been prepared for hardware checksum then it will be 1639 * passed off to ip_send_align_cksum() to check that the flags set on the 1640 * packet are in alignment with the capabilities of the new outgoing NIC. 1641 * 1642 * Return values: 1643 * 0: IP was able to send of the data pkt 1644 * ECOMM: Could not send packet 1645 * ENONET No route to dst. It is up to the caller 1646 * to send icmp unreachable error message, 1647 * EINPROGRESS The macaddr of the onlink dst or that 1648 * of the offlink dst's nexthop needs to get 1649 * resolved before packet can be sent to dst. 1650 * Thus transmission is not guaranteed. 1651 * 1652 */ 1653 1654 int 1655 ipfil_sendpkt(const struct sockaddr *dst_addr, mblk_t *mp, uint_t ifindex, 1656 zoneid_t zoneid) 1657 { 1658 ire_t *ire = NULL, *sire = NULL; 1659 ire_t *ire_cache = NULL; 1660 int value; 1661 int match_flags; 1662 ipaddr_t dst; 1663 netstack_t *ns; 1664 ip_stack_t *ipst; 1665 enum ire_forward_action ret_action; 1666 1667 ASSERT(mp != NULL); 1668 1669 if (zoneid == ALL_ZONES) 1670 ns = netstack_find_by_zoneid(GLOBAL_ZONEID); 1671 else 1672 ns = netstack_find_by_zoneid(zoneid); 1673 ASSERT(ns != NULL); 1674 1675 /* 1676 * For exclusive stacks we set the zoneid to zero 1677 * since IP uses the global zoneid in the exclusive stacks. 1678 */ 1679 if (ns->netstack_stackid != GLOBAL_NETSTACKID) 1680 zoneid = GLOBAL_ZONEID; 1681 ipst = ns->netstack_ip; 1682 1683 ASSERT(dst_addr->sa_family == AF_INET || 1684 dst_addr->sa_family == AF_INET6); 1685 1686 if (dst_addr->sa_family == AF_INET) { 1687 dst = ((struct sockaddr_in *)dst_addr)->sin_addr.s_addr; 1688 } else { 1689 /* 1690 * We dont have support for V6 yet. It will be provided 1691 * once RFE 6399103 has been delivered. 1692 * Until then, for V6 dsts, IP Filter will not call 1693 * this function. Instead the netinfo framework provides 1694 * its own code path, in ip_inject_impl(), to achieve 1695 * what it needs to do, for the time being. 1696 */ 1697 ip1dbg(("ipfil_sendpkt: no V6 support \n")); 1698 value = ECOMM; 1699 freemsg(mp); 1700 goto discard; 1701 } 1702 1703 /* 1704 * Lets get the ire. We might get the ire cache entry, 1705 * or the ire,sire pair needed to create the cache entry. 1706 * XXX pass NULL tsl for now. 1707 */ 1708 1709 if (ifindex == 0) { 1710 /* There is no supplied index. So use the FIB info */ 1711 1712 match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT | 1713 MATCH_IRE_RECURSIVE | MATCH_IRE_RJ_BHOLE); 1714 ire = ire_route_lookup(dst, 1715 0, 0, 0, NULL, &sire, zoneid, MBLK_GETLABEL(mp), 1716 match_flags, ipst); 1717 } else { 1718 ipif_t *supplied_ipif; 1719 ill_t *ill; 1720 1721 match_flags = (MATCH_IRE_DSTONLY | MATCH_IRE_DEFAULT | 1722 MATCH_IRE_RECURSIVE| MATCH_IRE_RJ_BHOLE| 1723 MATCH_IRE_SECATTR); 1724 1725 /* 1726 * If supplied ifindex is non-null, the only valid 1727 * nexthop is one off of the interface or group corresponding 1728 * to the specified ifindex. 1729 */ 1730 ill = ill_lookup_on_ifindex(ifindex, B_FALSE, 1731 NULL, NULL, NULL, NULL, ipst); 1732 if (ill != NULL) { 1733 match_flags |= MATCH_IRE_ILL; 1734 } else { 1735 /* Fallback to group names if hook_emulation set */ 1736 if (ipst->ips_ipmp_hook_emulation) { 1737 ill = ill_group_lookup_on_ifindex(ifindex, 1738 B_FALSE, ipst); 1739 } 1740 if (ill == NULL) { 1741 ip1dbg(("ipfil_sendpkt: Could not find" 1742 " route to dst\n")); 1743 value = ECOMM; 1744 freemsg(mp); 1745 goto discard; 1746 } 1747 match_flags |= MATCH_IRE_ILL_GROUP; 1748 } 1749 supplied_ipif = ipif_get_next_ipif(NULL, ill); 1750 1751 ire = ire_route_lookup(dst, 0, 0, 0, supplied_ipif, 1752 &sire, zoneid, MBLK_GETLABEL(mp), match_flags, ipst); 1753 ipif_refrele(supplied_ipif); 1754 ill_refrele(ill); 1755 } 1756 1757 /* 1758 * Verify that the returned IRE is non-null and does 1759 * not have either the RTF_REJECT or RTF_BLACKHOLE 1760 * flags set and that the IRE is either an IRE_CACHE, 1761 * IRE_IF_NORESOLVER or IRE_IF_RESOLVER. 1762 */ 1763 if (ire == NULL || 1764 ((ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) || 1765 (ire->ire_type & (IRE_CACHE | IRE_INTERFACE)) == 0)) { 1766 /* 1767 * Either ire could not be found or we got 1768 * an invalid one 1769 */ 1770 ip1dbg(("ipfil_sendpkt: Could not find route to dst\n")); 1771 value = ENONET; 1772 freemsg(mp); 1773 goto discard; 1774 } 1775 1776 /* IP Filter and CGTP dont mix. So bail out if CGTP is on */ 1777 if (ipst->ips_ip_cgtp_filter && 1778 ((ire->ire_flags & RTF_MULTIRT) || 1779 ((sire != NULL) && (sire->ire_flags & RTF_MULTIRT)))) { 1780 ip1dbg(("ipfil_sendpkt: IPFilter does not work with CGTP\n")); 1781 value = ECOMM; 1782 freemsg(mp); 1783 goto discard; 1784 } 1785 1786 ASSERT(ire->ire_type != IRE_CACHE || ire->ire_nce != NULL); 1787 1788 /* 1789 * If needed, we will create the ire cache entry for the 1790 * nexthop, resolve its link-layer address and then send 1791 * the packet out without ttl or IPSec processing. 1792 */ 1793 switch (ire->ire_type) { 1794 case IRE_CACHE: 1795 if (sire != NULL) { 1796 UPDATE_OB_PKT_COUNT(sire); 1797 sire->ire_last_used_time = lbolt; 1798 ire_refrele(sire); 1799 } 1800 ire_cache = ire; 1801 break; 1802 case IRE_IF_NORESOLVER: 1803 case IRE_IF_RESOLVER: 1804 /* 1805 * Call ire_forward(). This function 1806 * will, create the ire cache entry of the 1807 * the nexthop and adds this incomplete ire 1808 * to the ire cache table 1809 */ 1810 ire_cache = ire_forward(dst, &ret_action, ire, sire, 1811 MBLK_GETLABEL(mp), ipst); 1812 if (ire_cache == NULL) { 1813 ip1dbg(("ipfil_sendpkt: failed to create the" 1814 " ire cache entry \n")); 1815 value = ENONET; 1816 freemsg(mp); 1817 sire = NULL; 1818 ire = NULL; 1819 goto discard; 1820 } 1821 break; 1822 } 1823 1824 if (DB_CKSUMFLAGS(mp)) { 1825 if (ip_send_align_hcksum_flags(mp, ire_to_ill(ire_cache))) 1826 goto cleanup; 1827 } 1828 1829 /* 1830 * Now that we have the ire cache entry of the nexthop, call 1831 * ip_xmit_v4() to trigger mac addr resolution 1832 * if necessary and send it once ready. 1833 */ 1834 1835 value = ip_xmit_v4(mp, ire_cache, NULL, B_FALSE, NULL); 1836 cleanup: 1837 ire_refrele(ire_cache); 1838 /* 1839 * At this point, the reference for these have already been 1840 * released within ire_forward() and/or ip_xmit_v4(). So we set 1841 * them to NULL to make sure we dont drop the references 1842 * again in case ip_xmit_v4() returns with either SEND_FAILED 1843 * or LLHDR_RESLV_FAILED 1844 */ 1845 sire = NULL; 1846 ire = NULL; 1847 1848 switch (value) { 1849 case SEND_FAILED: 1850 ip1dbg(("ipfil_sendpkt: Send failed\n")); 1851 value = ECOMM; 1852 break; 1853 case LLHDR_RESLV_FAILED: 1854 ip1dbg(("ipfil_sendpkt: Link-layer resolution" 1855 " failed\n")); 1856 value = ECOMM; 1857 break; 1858 case LOOKUP_IN_PROGRESS: 1859 netstack_rele(ns); 1860 return (EINPROGRESS); 1861 case SEND_PASSED: 1862 netstack_rele(ns); 1863 return (0); 1864 } 1865 discard: 1866 if (dst_addr->sa_family == AF_INET) { 1867 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards); 1868 } else { 1869 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards); 1870 } 1871 if (ire != NULL) 1872 ire_refrele(ire); 1873 if (sire != NULL) 1874 ire_refrele(sire); 1875 netstack_rele(ns); 1876 return (value); 1877 } 1878 1879 1880 /* 1881 * We don't check for dohwcksum in here because it should be being used 1882 * elsewhere to control what flags are being set on the mblk. That is, 1883 * if DB_CKSUMFLAGS() is non-zero then we assume dohwcksum to be true 1884 * for this packet. 1885 * 1886 * This function assumes that it is *only* being called for TCP or UDP 1887 * packets and nothing else. 1888 */ 1889 static int 1890 ip_send_align_hcksum_flags(mblk_t *mp, ill_t *ill) 1891 { 1892 int illhckflags; 1893 int mbhckflags; 1894 uint16_t *up; 1895 uint32_t cksum; 1896 ipha_t *ipha; 1897 ip6_t *ip6; 1898 int proto; 1899 int ipversion; 1900 int length; 1901 int start; 1902 ip6_pkt_t ipp; 1903 1904 mbhckflags = DB_CKSUMFLAGS(mp); 1905 ASSERT(mbhckflags != 0); 1906 ASSERT(mp->b_datap->db_type == M_DATA); 1907 /* 1908 * Since this function only knows how to manage the hardware checksum 1909 * issue, reject and packets that have flags set on the aside from 1910 * checksum related attributes as we cannot necessarily safely map 1911 * that packet onto the new NIC. Packets that can be potentially 1912 * dropped here include those marked for LSO. 1913 */ 1914 if ((mbhckflags & 1915 ~(HCK_FULLCKSUM|HCK_PARTIALCKSUM|HCK_IPV4_HDRCKSUM)) != 0) { 1916 DTRACE_PROBE2(pbr__incapable, (mblk_t *), mp, (ill_t *), ill); 1917 freemsg(mp); 1918 return (-1); 1919 } 1920 1921 ipha = (ipha_t *)mp->b_rptr; 1922 1923 /* 1924 * Find out what the new NIC is capable of, if anything, and 1925 * only allow it to be used with M_DATA mblks being sent out. 1926 */ 1927 if (ILL_HCKSUM_CAPABLE(ill)) { 1928 illhckflags = ill->ill_hcksum_capab->ill_hcksum_txflags; 1929 } else { 1930 /* 1931 * No capabilities, so turn off everything. 1932 */ 1933 illhckflags = 0; 1934 (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 0, 0); 1935 mp->b_datap->db_struioflag &= ~STRUIO_IP; 1936 } 1937 1938 DTRACE_PROBE4(pbr__info__a, (mblk_t *), mp, (ill_t *), ill, 1939 uint32_t, illhckflags, uint32_t, mbhckflags); 1940 /* 1941 * This block of code that looks for the position of the TCP/UDP 1942 * checksum is early in this function because we need to know 1943 * what needs to be blanked out for the hardware checksum case. 1944 * 1945 * That we're in this function implies that the packet is either 1946 * TCP or UDP on Solaris, so checks are made for one protocol and 1947 * if that fails, the other is therefore implied. 1948 */ 1949 ipversion = IPH_HDR_VERSION(ipha); 1950 1951 if (ipversion == IPV4_VERSION) { 1952 proto = ipha->ipha_protocol; 1953 if (proto == IPPROTO_TCP) { 1954 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 1955 } else { 1956 up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 1957 } 1958 } else { 1959 uint8_t lasthdr; 1960 1961 /* 1962 * Nothing I've seen indicates that IPv6 checksum'ing 1963 * precludes the presence of extension headers, so we 1964 * can't just look at the next header value in the IPv6 1965 * packet header to see if it is TCP/UDP. 1966 */ 1967 ip6 = (ip6_t *)ipha; 1968 (void) memset(&ipp, 0, sizeof (ipp)); 1969 start = ip_find_hdr_v6(mp, ip6, &ipp, &lasthdr); 1970 proto = lasthdr; 1971 1972 if (proto == IPPROTO_TCP) { 1973 up = IPH_TCPH_CHECKSUMP(ipha, start); 1974 } else { 1975 up = IPH_UDPH_CHECKSUMP(ipha, start); 1976 } 1977 } 1978 1979 /* 1980 * The first case here is easiest: 1981 * mblk hasn't asked for full checksum, but the card supports it. 1982 * 1983 * In addition, check for IPv4 header capability. Note that only 1984 * the mblk flag is checked and not ipversion. 1985 */ 1986 if ((((illhckflags & HCKSUM_INET_FULL_V4) && (ipversion == 4)) || 1987 (((illhckflags & HCKSUM_INET_FULL_V6) && (ipversion == 6)))) && 1988 ((mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) != 0)) { 1989 int newflags = HCK_FULLCKSUM; 1990 1991 if ((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) { 1992 if ((illhckflags & HCKSUM_IPHDRCKSUM) != 0) { 1993 newflags |= HCK_IPV4_HDRCKSUM; 1994 } else { 1995 /* 1996 * Rather than call a function, just inline 1997 * the computation of the basic IPv4 header. 1998 */ 1999 cksum = (ipha->ipha_dst >> 16) + 2000 (ipha->ipha_dst & 0xFFFF) + 2001 (ipha->ipha_src >> 16) + 2002 (ipha->ipha_src & 0xFFFF); 2003 IP_HDR_CKSUM(ipha, cksum, 2004 ((uint32_t *)ipha)[0], 2005 ((uint16_t *)ipha)[4]); 2006 } 2007 } 2008 2009 *up = 0; 2010 (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, 2011 newflags, 0); 2012 return (0); 2013 } 2014 2015 DTRACE_PROBE2(pbr__info__b, int, ipversion, int, proto); 2016 2017 /* 2018 * Start calculating the pseudo checksum over the IP packet header. 2019 * Although the final pseudo checksum used by TCP/UDP consists of 2020 * more than just the address fields, we can use the result of 2021 * adding those together a little bit further down for IPv4. 2022 */ 2023 if (ipversion == IPV4_VERSION) { 2024 cksum = (ipha->ipha_dst >> 16) + (ipha->ipha_dst & 0xFFFF) + 2025 (ipha->ipha_src >> 16) + (ipha->ipha_src & 0xFFFF); 2026 start = IP_SIMPLE_HDR_LENGTH; 2027 length = ntohs(ipha->ipha_length); 2028 DTRACE_PROBE3(pbr__info__e, uint32_t, ipha->ipha_src, 2029 uint32_t, ipha->ipha_dst, int, cksum); 2030 } else { 2031 uint16_t *pseudo; 2032 2033 pseudo = (uint16_t *)&ip6->ip6_src; 2034 2035 /* calculate pseudo-header checksum */ 2036 cksum = pseudo[0] + pseudo[1] + pseudo[2] + pseudo[3] + 2037 pseudo[4] + pseudo[5] + pseudo[6] + pseudo[7] + 2038 pseudo[8] + pseudo[9] + pseudo[10] + pseudo[11] + 2039 pseudo[12] + pseudo[13] + pseudo[14] + pseudo[15]; 2040 2041 length = ntohs(ip6->ip6_plen) + sizeof (ip6_t); 2042 } 2043 2044 /* Fold the initial sum */ 2045 cksum = (cksum & 0xffff) + (cksum >> 16); 2046 2047 /* 2048 * If the packet was asking for an IPv4 header checksum to be 2049 * calculated but the interface doesn't support that, fill it in 2050 * using our pseudo checksum as a starting point. 2051 */ 2052 if (((mbhckflags & HCK_IPV4_HDRCKSUM) != 0) && 2053 ((illhckflags & HCKSUM_IPHDRCKSUM) == 0)) { 2054 /* 2055 * IP_HDR_CKSUM uses the 2rd arg to the macro in a destructive 2056 * way so pass in a copy of the checksum calculated thus far. 2057 */ 2058 uint32_t ipsum = cksum; 2059 2060 DB_CKSUMFLAGS(mp) &= ~HCK_IPV4_HDRCKSUM; 2061 2062 IP_HDR_CKSUM(ipha, ipsum, ((uint32_t *)ipha)[0], 2063 ((uint16_t *)ipha)[4]); 2064 } 2065 2066 DTRACE_PROBE3(pbr__info__c, int, start, int, length, int, cksum); 2067 2068 if (proto == IPPROTO_TCP) { 2069 cksum += IP_TCP_CSUM_COMP; 2070 } else { 2071 cksum += IP_UDP_CSUM_COMP; 2072 } 2073 cksum += htons(length - start); 2074 cksum = (cksum & 0xffff) + (cksum >> 16); 2075 2076 /* 2077 * For TCP/UDP, we either want to setup the packet for partial 2078 * checksum or we want to do it all ourselves because the NIC 2079 * offers no support for either partial or full checksum. 2080 */ 2081 if ((illhckflags & HCKSUM_INET_PARTIAL) != 0) { 2082 /* 2083 * The only case we care about here is if the mblk was 2084 * previously set for full checksum offload. If it was 2085 * marked for partial (and the NIC does partial), then 2086 * we have nothing to do. Similarly if the packet was 2087 * not set for partial or full, we do nothing as this 2088 * is cheaper than more work to set something up. 2089 */ 2090 if ((mbhckflags & HCK_FULLCKSUM) != 0) { 2091 uint32_t offset; 2092 2093 if (proto == IPPROTO_TCP) { 2094 offset = TCP_CHECKSUM_OFFSET; 2095 } else { 2096 offset = UDP_CHECKSUM_OFFSET; 2097 } 2098 *up = cksum; 2099 2100 DTRACE_PROBE3(pbr__info__f, int, length - start, int, 2101 cksum, int, offset); 2102 2103 (void) hcksum_assoc(mp, NULL, NULL, start, 2104 start + offset, length, 0, 2105 DB_CKSUMFLAGS(mp) | HCK_PARTIALCKSUM, 0); 2106 } 2107 2108 } else if (mbhckflags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)) { 2109 DB_CKSUMFLAGS(mp) &= ~(HCK_PARTIALCKSUM|HCK_FULLCKSUM); 2110 2111 *up = 0; 2112 *up = IP_CSUM(mp, start, cksum); 2113 } 2114 2115 DTRACE_PROBE4(pbr__info__d, (mblk_t *), mp, (ipha_t *), ipha, 2116 (uint16_t *), up, int, cksum); 2117 return (0); 2118 } 2119 2120 /* 2121 * callback function provided by ire_ftable_lookup when calling 2122 * rn_match_args(). Invoke ire_match_args on each matching leaf node in 2123 * the radix tree. 2124 */ 2125 boolean_t 2126 ire_find_best_route(struct radix_node *rn, void *arg) 2127 { 2128 struct rt_entry *rt = (struct rt_entry *)rn; 2129 irb_t *irb_ptr; 2130 ire_t *ire; 2131 ire_ftable_args_t *margs = arg; 2132 ipaddr_t match_mask; 2133 2134 ASSERT(rt != NULL); 2135 2136 irb_ptr = &rt->rt_irb; 2137 2138 if (irb_ptr->irb_ire_cnt == 0) 2139 return (B_FALSE); 2140 2141 rw_enter(&irb_ptr->irb_lock, RW_READER); 2142 for (ire = irb_ptr->irb_ire; ire != NULL; ire = ire->ire_next) { 2143 if (ire->ire_marks & IRE_MARK_CONDEMNED) 2144 continue; 2145 if (margs->ift_flags & MATCH_IRE_MASK) 2146 match_mask = margs->ift_mask; 2147 else 2148 match_mask = ire->ire_mask; 2149 2150 if (ire_match_args(ire, margs->ift_addr, match_mask, 2151 margs->ift_gateway, margs->ift_type, margs->ift_ipif, 2152 margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl, 2153 margs->ift_flags, NULL)) { 2154 IRE_REFHOLD(ire); 2155 rw_exit(&irb_ptr->irb_lock); 2156 margs->ift_best_ire = ire; 2157 return (B_TRUE); 2158 } 2159 } 2160 rw_exit(&irb_ptr->irb_lock); 2161 return (B_FALSE); 2162 } 2163 2164 /* 2165 * ftable irb_t structures are dynamically allocated, and we need to 2166 * check if the irb_t (and associated ftable tree attachment) needs to 2167 * be cleaned up when the irb_refcnt goes to 0. The conditions that need 2168 * be verified are: 2169 * - no other walkers of the irebucket, i.e., quiescent irb_refcnt, 2170 * - no other threads holding references to ire's in the bucket, 2171 * i.e., irb_nire == 0 2172 * - no active ire's in the bucket, i.e., irb_ire_cnt == 0 2173 * - need to hold the global tree lock and irb_lock in write mode. 2174 */ 2175 void 2176 irb_refrele_ftable(irb_t *irb) 2177 { 2178 for (;;) { 2179 rw_enter(&irb->irb_lock, RW_WRITER); 2180 ASSERT(irb->irb_refcnt != 0); 2181 if (irb->irb_refcnt != 1) { 2182 /* 2183 * Someone has a reference to this radix node 2184 * or there is some bucket walker. 2185 */ 2186 irb->irb_refcnt--; 2187 rw_exit(&irb->irb_lock); 2188 return; 2189 } else { 2190 /* 2191 * There is no other walker, nor is there any 2192 * other thread that holds a direct ref to this 2193 * radix node. Do the clean up if needed. Call 2194 * to ire_unlink will clear the IRB_MARK_CONDEMNED flag 2195 */ 2196 if (irb->irb_marks & IRB_MARK_CONDEMNED) { 2197 ire_t *ire_list; 2198 2199 ire_list = ire_unlink(irb); 2200 rw_exit(&irb->irb_lock); 2201 2202 if (ire_list != NULL) 2203 ire_cleanup(ire_list); 2204 /* 2205 * more CONDEMNED entries could have 2206 * been added while we dropped the lock, 2207 * so we have to re-check. 2208 */ 2209 continue; 2210 } 2211 2212 /* 2213 * Now check if there are still any ires 2214 * associated with this radix node. 2215 */ 2216 if (irb->irb_nire != 0) { 2217 /* 2218 * someone is still holding on 2219 * to ires in this bucket 2220 */ 2221 irb->irb_refcnt--; 2222 rw_exit(&irb->irb_lock); 2223 return; 2224 } else { 2225 /* 2226 * Everything is clear. Zero walkers, 2227 * Zero threads with a ref to this 2228 * radix node, Zero ires associated with 2229 * this radix node. Due to lock order, 2230 * check the above conditions again 2231 * after grabbing all locks in the right order 2232 */ 2233 rw_exit(&irb->irb_lock); 2234 if (irb_inactive(irb)) 2235 return; 2236 /* 2237 * irb_inactive could not free the irb. 2238 * See if there are any walkers, if not 2239 * try to clean up again. 2240 */ 2241 } 2242 } 2243 } 2244 } 2245 2246 /* 2247 * IRE iterator used by ire_ftable_lookup() to process multiple default 2248 * routes. Given a starting point in the hash list (ire_origin), walk the IREs 2249 * in the bucket skipping default interface routes and deleted entries. 2250 * Returns the next IRE (unheld), or NULL when we're back to the starting point. 2251 * Assumes that the caller holds a reference on the IRE bucket. 2252 * 2253 * In the absence of good IRE_DEFAULT routes, this function will return 2254 * the first IRE_INTERFACE route found (if any). 2255 */ 2256 ire_t * 2257 ire_round_robin(irb_t *irb_ptr, zoneid_t zoneid, ire_ftable_args_t *margs, 2258 ip_stack_t *ipst) 2259 { 2260 ire_t *ire_origin; 2261 ire_t *ire, *maybe_ire = NULL; 2262 2263 rw_enter(&irb_ptr->irb_lock, RW_WRITER); 2264 ire_origin = irb_ptr->irb_rr_origin; 2265 if (ire_origin != NULL) { 2266 ire_origin = ire_origin->ire_next; 2267 IRE_FIND_NEXT_ORIGIN(ire_origin); 2268 } 2269 2270 if (ire_origin == NULL) { 2271 /* 2272 * first time through routine, or we dropped off the end 2273 * of list. 2274 */ 2275 ire_origin = irb_ptr->irb_ire; 2276 IRE_FIND_NEXT_ORIGIN(ire_origin); 2277 } 2278 irb_ptr->irb_rr_origin = ire_origin; 2279 IRB_REFHOLD_LOCKED(irb_ptr); 2280 rw_exit(&irb_ptr->irb_lock); 2281 2282 DTRACE_PROBE2(ire__rr__origin, (irb_t *), irb_ptr, 2283 (ire_t *), ire_origin); 2284 2285 /* 2286 * Round-robin the routers list looking for a route that 2287 * matches the passed in parameters. 2288 * We start with the ire we found above and we walk the hash 2289 * list until we're back where we started. It doesn't matter if 2290 * routes are added or deleted by other threads - we know this 2291 * ire will stay in the list because we hold a reference on the 2292 * ire bucket. 2293 */ 2294 ire = ire_origin; 2295 while (ire != NULL) { 2296 int match_flags = MATCH_IRE_TYPE | MATCH_IRE_SECATTR; 2297 ire_t *rire; 2298 2299 if (ire->ire_marks & IRE_MARK_CONDEMNED) 2300 goto next_ire; 2301 2302 if (!ire_match_args(ire, margs->ift_addr, (ipaddr_t)0, 2303 margs->ift_gateway, margs->ift_type, margs->ift_ipif, 2304 margs->ift_zoneid, margs->ift_ihandle, margs->ift_tsl, 2305 margs->ift_flags, NULL)) 2306 goto next_ire; 2307 2308 if (ire->ire_type & IRE_INTERFACE) { 2309 /* 2310 * keep looking to see if there is a non-interface 2311 * default ire, but save this one as a last resort. 2312 */ 2313 if (maybe_ire == NULL) 2314 maybe_ire = ire; 2315 goto next_ire; 2316 } 2317 2318 if (zoneid == ALL_ZONES) { 2319 IRE_REFHOLD(ire); 2320 IRB_REFRELE(irb_ptr); 2321 return (ire); 2322 } 2323 /* 2324 * When we're in a non-global zone, we're only 2325 * interested in routers that are 2326 * reachable through ipifs within our zone. 2327 */ 2328 if (ire->ire_ipif != NULL) { 2329 match_flags |= MATCH_IRE_ILL_GROUP; 2330 } 2331 rire = ire_route_lookup(ire->ire_gateway_addr, 0, 0, 2332 IRE_INTERFACE, ire->ire_ipif, NULL, zoneid, margs->ift_tsl, 2333 match_flags, ipst); 2334 if (rire != NULL) { 2335 ire_refrele(rire); 2336 IRE_REFHOLD(ire); 2337 IRB_REFRELE(irb_ptr); 2338 return (ire); 2339 } 2340 next_ire: 2341 ire = (ire->ire_next ? ire->ire_next : irb_ptr->irb_ire); 2342 if (ire == ire_origin) 2343 break; 2344 } 2345 if (maybe_ire != NULL) 2346 IRE_REFHOLD(maybe_ire); 2347 IRB_REFRELE(irb_ptr); 2348 return (maybe_ire); 2349 } 2350 2351 void 2352 irb_refhold_rn(struct radix_node *rn) 2353 { 2354 if ((rn->rn_flags & RNF_ROOT) == 0) 2355 IRB_REFHOLD(&((rt_t *)(rn))->rt_irb); 2356 } 2357 2358 void 2359 irb_refrele_rn(struct radix_node *rn) 2360 { 2361 if ((rn->rn_flags & RNF_ROOT) == 0) 2362 irb_refrele_ftable(&((rt_t *)(rn))->rt_irb); 2363 } 2364