/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_INET_IP_IMPL_H
#define	_INET_IP_IMPL_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * IP implementation private declarations.  These interfaces are
 * used to build the IP module and are not meant to be accessed
 * by any modules except IP itself.  They are undocumented and are
 * subject to change without notice.
 */

#ifdef	__cplusplus
extern "C" {
#endif

#ifdef _KERNEL

#include <sys/sdt.h>

#define	IP_MOD_ID		5701

#ifdef	_BIG_ENDIAN
#define	IP_HDR_CSUM_TTL_ADJUST	256
#define	IP_TCP_CSUM_COMP	IPPROTO_TCP
#define	IP_UDP_CSUM_COMP	IPPROTO_UDP
#else
#define	IP_HDR_CSUM_TTL_ADJUST	1
#define	IP_TCP_CSUM_COMP	(IPPROTO_TCP << 8)
#define	IP_UDP_CSUM_COMP	(IPPROTO_UDP << 8)
#endif

#define	TCP_CHECKSUM_OFFSET	16
#define	TCP_CHECKSUM_SIZE	2

#define	UDP_CHECKSUM_OFFSET	6
#define	UDP_CHECKSUM_SIZE	2

#define	IPH_TCPH_CHECKSUMP(ipha, hlen) \
	((uint16_t *)(((uchar_t *)(ipha)) + ((hlen) + TCP_CHECKSUM_OFFSET)))

#define	IPH_UDPH_CHECKSUMP(ipha, hlen) \
	((uint16_t *)(((uchar_t *)(ipha)) + ((hlen) + UDP_CHECKSUM_OFFSET)))

#define	ILL_HCKSUM_CAPABLE(ill) \
	(((ill)->ill_capabilities & ILL_CAPAB_HCKSUM) != 0)
/*
 * Macro that performs software checksum calculation on the IP header.
 */
#define	IP_HDR_CKSUM(ipha, sum, v_hlen_tos_len, ttl_protocol) { \
	(sum) += (ttl_protocol) + (ipha)->ipha_ident + \
	    ((v_hlen_tos_len) >> 16) + \
	    ((v_hlen_tos_len) & 0xFFFF) + \
	    (ipha)->ipha_fragment_offset_and_flags; \
	(sum) = (((sum) & 0xFFFF) + ((sum) >> 16)); \
	(sum) = ~((sum) + ((sum) >> 16)); \
	(ipha)->ipha_hdr_checksum = (uint16_t)(sum); \
}

#define	IS_IP_HDR_HWCKSUM(ipsec, mp, ill) \
	((!ipsec) && (DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM) && \
	ILL_HCKSUM_CAPABLE(ill) && dohwcksum)
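/*
 * Illustrative sketch, not part of the original header: the folding in
 * IP_HDR_CKSUM above is ordinary 16-bit one's-complement arithmetic.
 * A stand-alone, user-level version of the same idea (hypothetical
 * helper, shown only for clarity) would sum 16-bit words and fold the
 * carries back in before complementing:
 *
 *	static uint16_t
 *	ones_complement_sum(const uint16_t *words, int nwords, uint32_t sum)
 *	{
 *		while (nwords-- > 0)
 *			sum += *words++;
 *		sum = (sum & 0xFFFF) + (sum >> 16);	(fold carries once)
 *		sum = (sum & 0xFFFF) + (sum >> 16);	(and once more)
 *		return ((uint16_t)~sum);		(one's complement)
 *	}
 *
 * IP_HDR_CKSUM performs the same folding inline, with the header fields
 * already supplied as 16-bit quantities by the caller.
 */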
/*
 * This macro acts as a wrapper around IP_CKSUM_XMIT_FAST, and it performs
 * several checks on the IRE and ILL (among other things) in order to see
 * whether or not hardware checksum offload is allowed for the outgoing
 * packet.  It assumes that the caller has held a reference to the IRE.
 */
#define	IP_CKSUM_XMIT(ill, ire, mp, ihp, up, proto, start, end, \
	    max_frag, ipsec_len, pseudo) { \
	uint32_t _hck_flags; \
	/* \
	 * We offload checksum calculation to hardware when IPsec isn't \
	 * present and if fragmentation isn't required.  We also check \
	 * if M_DATA fastpath is safe to be used on the corresponding \
	 * IRE; this check is performed without grabbing ire_lock but \
	 * instead by holding a reference to it.  This is sufficient \
	 * for IRE_CACHE; for IRE_BROADCAST on non-Ethernet links, the \
	 * DL_NOTE_FASTPATH_FLUSH indication could come up from the \
	 * driver and trigger the IRE (hence fp_mp) deletion.  This is \
	 * why only IRE_CACHE type is eligible for offload. \
	 * \
	 * The presence of IP options also forces the network stack to \
	 * calculate the checksum in software.  This is because: \
	 * \
	 * Wrap around: certain partial-checksum NICs (eri, ce) limit \
	 * the width of the "start offset" field to 6 bits.  This \
	 * effectively caps the offset at 64 bytes, counted from the \
	 * MAC header.  When the cumulative MAC and IP header length \
	 * exceeds that limit, the offset wraps around and the \
	 * checksum is calculated in the wrong place. \
	 * \
	 * IPv4 source routing: none of the full-checksum capable NICs \
	 * can correctly handle the IPv4 source-routing option for \
	 * purposes of calculating the pseudo-header; the actual \
	 * destination is different from the destination in the \
	 * header, which is that of the next hop.  (This case may not \
	 * be true for NICs which can parse IPv6 extension headers, but \
	 * we choose to simplify the implementation by not offloading \
	 * checksum when they are present.) \
	 * \
	 */ \
	if ((ill) != NULL && ILL_HCKSUM_CAPABLE(ill) && \
	    !((ire)->ire_flags & RTF_MULTIRT) && \
	    (!((ire)->ire_type & IRE_BROADCAST) || \
	    (ill)->ill_type == IFT_ETHER) && \
	    (ipsec_len) == 0 && \
	    (((ire)->ire_ipversion == IPV4_VERSION && \
	    (start) == IP_SIMPLE_HDR_LENGTH && \
	    ((ire)->ire_nce != NULL && \
	    (ire)->ire_nce->nce_fp_mp != NULL && \
	    MBLKHEAD(mp) >= MBLKL((ire)->ire_nce->nce_fp_mp))) || \
	    ((ire)->ire_ipversion == IPV6_VERSION && \
	    (start) == IPV6_HDR_LEN && \
	    (ire)->ire_nce->nce_fp_mp != NULL && \
	    MBLKHEAD(mp) >= MBLKL((ire)->ire_nce->nce_fp_mp))) && \
	    (max_frag) >= (uint_t)((end) + (ipsec_len)) && \
	    dohwcksum) { \
		_hck_flags = (ill)->ill_hcksum_capab->ill_hcksum_txflags; \
	} else { \
		_hck_flags = 0; \
	} \
	IP_CKSUM_XMIT_FAST((ire)->ire_ipversion, _hck_flags, mp, ihp, \
	    up, proto, start, end, pseudo); \
}
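/*
 * Illustrative sketch, not part of the original header: the "pseudo"
 * argument to IP_CKSUM_XMIT and IP_CKSUM_XMIT_FAST is a partial one's-
 * complement sum over the pseudo-header.  Assuming the protocol value
 * is folded in separately via IP_TCP_CSUM_COMP/IP_UDP_CSUM_COMP (as
 * IP_CKSUM_XMIT_FAST below does), an IPv4 contribution built from the
 * addresses and the upper-layer length could look roughly like this
 * (hypothetical helper, shown only for clarity):
 *
 *	static uint32_t
 *	ipv4_pseudo_sum(const ipha_t *ipha, uint16_t ulp_len)
 *	{
 *		const uint16_t *src = (const uint16_t *)&ipha->ipha_src;
 *		const uint16_t *dst = (const uint16_t *)&ipha->ipha_dst;
 *
 *		return ((uint32_t)src[0] + src[1] + dst[0] + dst[1] +
 *		    htons(ulp_len));
 *	}
 *
 * The actual transmit-path callers compute this value inline while
 * building the outbound header.
 */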
/*
 * Based on the device capabilities, this macro either marks an outgoing
 * packet with hardware checksum offload information or calculates the
 * checksum in software.  If the latter is performed, the checksum flags
 * of the dblk are cleared; otherwise they will be non-zero and contain
 * the necessary flag(s) for the driver.
 */
#define	IP_CKSUM_XMIT_FAST(ipver, hck_flags, mp, ihp, up, proto, start, \
	    end, pseudo) { \
	uint32_t _sum; \
	/* \
	 * Underlying interface supports hardware checksum offload for \
	 * the payload; leave the payload checksum for the hardware to \
	 * calculate.  N.B: We only need to set up checksum info on the \
	 * first mblk. \
	 */ \
	DB_CKSUMFLAGS(mp) = 0; \
	if (((ipver) == IPV4_VERSION && \
	    ((hck_flags) & HCKSUM_INET_FULL_V4)) || \
	    ((ipver) == IPV6_VERSION && \
	    ((hck_flags) & HCKSUM_INET_FULL_V6))) { \
		/* \
		 * Hardware calculates pseudo-header, header and the \
		 * payload checksums, so clear the checksum field in \
		 * the protocol header. \
		 */ \
		*(up) = 0; \
		DB_CKSUMFLAGS(mp) |= HCK_FULLCKSUM; \
	} else if ((hck_flags) & HCKSUM_INET_PARTIAL) { \
		/* \
		 * Partial checksum offload has been enabled.  Fill \
		 * the checksum field in the protocol header with the \
		 * pseudo-header checksum value. \
		 */ \
		_sum = ((proto) == IPPROTO_UDP) ? \
		    IP_UDP_CSUM_COMP : IP_TCP_CSUM_COMP; \
		_sum += *(up) + (pseudo); \
		_sum = (_sum & 0xFFFF) + (_sum >> 16); \
		*(up) = (_sum & 0xFFFF) + (_sum >> 16); \
		/* \
		 * Offsets are relative to beginning of IP header. \
		 */ \
		DB_CKSUMSTART(mp) = (start); \
		DB_CKSUMSTUFF(mp) = ((proto) == IPPROTO_UDP) ? \
		    (start) + UDP_CHECKSUM_OFFSET : \
		    (start) + TCP_CHECKSUM_OFFSET; \
		DB_CKSUMEND(mp) = (end); \
		DB_CKSUMFLAGS(mp) |= HCK_PARTIALCKSUM; \
	} else { \
		/* \
		 * Software checksumming. \
		 */ \
		_sum = ((proto) == IPPROTO_UDP) ? \
		    IP_UDP_CSUM_COMP : IP_TCP_CSUM_COMP; \
		_sum += (pseudo); \
		_sum = IP_CSUM(mp, start, _sum); \
		*(up) = (uint16_t)(((proto) == IPPROTO_UDP) ? \
		    (_sum ? _sum : ~_sum) : _sum); \
	} \
	/* \
	 * Hardware supports IP header checksum offload; clear the \
	 * contents of IP header checksum field as expected by NIC. \
	 * Do this only if we offloaded either full or partial sum. \
	 */ \
	if ((ipver) == IPV4_VERSION && DB_CKSUMFLAGS(mp) != 0 && \
	    ((hck_flags) & HCKSUM_IPHDRCKSUM)) { \
		DB_CKSUMFLAGS(mp) |= HCK_IPV4_HDRCKSUM; \
		((ipha_t *)(ihp))->ipha_hdr_checksum = 0; \
	} \
}

/*
 * Macro to inspect the checksum of a fully-reassembled incoming datagram.
 */
#define	IP_CKSUM_RECV_REASS(hck_flags, off, pseudo, sum, err) { \
	(err) = B_FALSE; \
	if ((hck_flags) & HCK_FULLCKSUM) { \
		/* \
		 * The sum of all fragment checksums should \
		 * result in -0 (0xFFFF); anything else means \
		 * the datagram is invalid. \
		 */ \
		if ((sum) != 0xFFFF) \
			(err) = B_TRUE; \
	} else if ((hck_flags) & HCK_PARTIALCKSUM) { \
		(sum) += (pseudo); \
		(sum) = ((sum) & 0xFFFF) + ((sum) >> 16); \
		(sum) = ((sum) & 0xFFFF) + ((sum) >> 16); \
		if (~(sum) & 0xFFFF) \
			(err) = B_TRUE; \
	} else if (((sum) = IP_CSUM(mp, off, pseudo)) != 0) { \
		(err) = B_TRUE; \
	} \
}
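/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * reassembly path that has accumulated the per-fragment hardware
 * checksums in reass_sum and the pseudo-header sum in pseudo (both
 * names are hypothetical) could use the macro above roughly as:
 *
 *	boolean_t bad;
 *
 *	IP_CKSUM_RECV_REASS(DB_CKSUMFLAGS(mp), ulp_hdr_off, pseudo,
 *	    reass_sum, bad);
 *	if (bad) {
 *		freemsg(mp);		(discard the corrupt datagram)
 *		return;
 *	}
 *
 * Note that the software fallback branch of the macro references an
 * mblk named mp from the caller's scope, so such an mblk must be in
 * scope at the call site.  With HCK_FULLCKSUM, the accumulated sum of a
 * correct datagram folds to -0 (0xFFFF) because each fragment's data is
 * summed together with its embedded checksum complement.
 */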
/*
 * This macro inspects an incoming packet to see if the checksum value
 * contained in it is valid; if the hardware has provided the information,
 * the value is verified, otherwise it performs software checksumming.
 * The checksum value is returned to the caller.
 */
#define	IP_CKSUM_RECV(hck_flags, sum, cksum_start, ulph_off, mp, mp1, err) { \
	int32_t _len; \
	\
	(err) = B_FALSE; \
	if ((hck_flags) & HCK_FULLCKSUM) { \
		/* \
		 * Full checksum has been computed by the hardware \
		 * and has been attached.  If the driver wants us to \
		 * verify the correctness of the attached value, in \
		 * order to protect against faulty hardware, compare \
		 * it against -0 (0xFFFF) to see if it's valid. \
		 */ \
		(sum) = DB_CKSUM16(mp); \
		if (!((hck_flags) & HCK_FULLCKSUM_OK) && (sum) != 0xFFFF) \
			(err) = B_TRUE; \
	} else if (((hck_flags) & HCK_PARTIALCKSUM) && \
	    ((mp1) == NULL || (mp1)->b_cont == NULL) && \
	    (ulph_off) >= DB_CKSUMSTART(mp) && \
	    ((_len = (ulph_off) - DB_CKSUMSTART(mp)) & 1) == 0) { \
		uint32_t _adj; \
		/* \
		 * Partial checksum has been calculated by hardware \
		 * and attached to the packet; in addition, any \
		 * prepended extraneous data is even byte aligned, \
		 * and there are at most two mblks associated with \
		 * the packet.  If any such data exists, we adjust \
		 * the checksum; we also take care of any postpended \
		 * data. \
		 */ \
		IP_ADJCKSUM_PARTIAL(cksum_start, mp, mp1, _len, _adj); \
		/* \
		 * One's complement subtract the extraneous checksum. \
		 */ \
		(sum) += DB_CKSUM16(mp); \
		if (_adj >= (sum)) \
			(sum) = ~(_adj - (sum)) & 0xFFFF; \
		else \
			(sum) -= _adj; \
		(sum) = ((sum) & 0xFFFF) + ((int)(sum) >> 16); \
		(sum) = ((sum) & 0xFFFF) + ((int)(sum) >> 16); \
		if (~(sum) & 0xFFFF) \
			(err) = B_TRUE; \
	} else if (((sum) = IP_CSUM(mp, ulph_off, sum)) != 0) { \
		(err) = B_TRUE; \
	} \
}

/*
 * Macro to adjust a given checksum value depending on any prepended
 * or postpended data on the packet.  It expects the start offset to
 * begin at an even boundary and that the packet consists of at most
 * two mblks.
 */
#define	IP_ADJCKSUM_PARTIAL(cksum_start, mp, mp1, len, adj) { \
	/* \
	 * Prepended extraneous data; adjust checksum. \
	 */ \
	if ((len) > 0) \
		(adj) = IP_BCSUM_PARTIAL(cksum_start, len, 0); \
	else \
		(adj) = 0; \
	/* \
	 * len is now the total length of mblk(s) \
	 */ \
	(len) = MBLKL(mp); \
	if ((mp1) == NULL) \
		(mp1) = (mp); \
	else \
		(len) += MBLKL(mp1); \
	/* \
	 * Postpended extraneous data; adjust checksum. \
	 */ \
	if (((len) = (DB_CKSUMEND(mp) - len)) > 0) { \
		uint32_t _pad; \
		\
		_pad = IP_BCSUM_PARTIAL((mp1)->b_wptr, len, 0); \
		/* \
		 * If the postpended extraneous data was odd \
		 * byte aligned, swap resulting checksum bytes. \
		 */ \
		if ((uintptr_t)(mp1)->b_wptr & 1) \
			(adj) += ((_pad << 8) & 0xFFFF) | (_pad >> 8); \
		else \
			(adj) += _pad; \
		(adj) = ((adj) & 0xFFFF) + ((int)(adj) >> 16); \
	} \
}

#define	ILL_MDT_CAPABLE(ill) \
	(((ill)->ill_capabilities & ILL_CAPAB_MDT) != 0)

/*
 * ioctl identifier and structure for Multidata Transmit update
 * private M_CTL communication from IP to ULP.
 */
#define	MDT_IOC_INFO_UPDATE	(('M' << 8) + 1020)

typedef struct ip_mdt_info_s {
	uint_t	mdt_info_id;		/* MDT_IOC_INFO_UPDATE */
	ill_mdt_capab_t	mdt_capab;	/* ILL MDT capabilities */
} ip_mdt_info_t;

/*
 * Macro that determines whether or not a given ILL is allowed for MDT.
 */
#define	ILL_MDT_USABLE(ill) \
	(ILL_MDT_CAPABLE(ill) && \
	ill->ill_mdt_capab != NULL && \
	ill->ill_mdt_capab->ill_mdt_version == MDT_VERSION_2 && \
	ill->ill_mdt_capab->ill_mdt_on != 0)

#define	ILL_LSO_CAPABLE(ill) \
	(((ill)->ill_capabilities & ILL_CAPAB_LSO) != 0)
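/*
 * Illustrative sketch, not part of the original header: a ULP receiving
 * the private M_CTL defined above (MDT_IOC_INFO_UPDATE / ip_mdt_info_t)
 * might validate the message before acting on the advertised MDT
 * capability.  The helper name below is hypothetical.
 *
 *	static boolean_t
 *	is_mdt_info_update(mblk_t *mp)
 *	{
 *		ip_mdt_info_t *info;
 *
 *		if (MBLKL(mp) < sizeof (ip_mdt_info_t))
 *			return (B_FALSE);
 *		info = (ip_mdt_info_t *)mp->b_rptr;
 *		return (info->mdt_info_id == MDT_IOC_INFO_UPDATE);
 *	}
 */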
/*
 * ioctl identifier and structure for Large Segment Offload
 * private M_CTL communication from IP to ULP.
 */
#define	LSO_IOC_INFO_UPDATE	(('L' << 24) + ('S' << 16) + ('O' << 8))

typedef struct ip_lso_info_s {
	uint_t	lso_info_id;		/* LSO_IOC_INFO_UPDATE */
	ill_lso_capab_t	lso_capab;	/* ILL LSO capabilities */
} ip_lso_info_t;

/*
 * Macro that determines whether or not a given ILL is allowed for LSO.
 */
#define	ILL_LSO_USABLE(ill) \
	(ILL_LSO_CAPABLE(ill) && \
	ill->ill_lso_capab != NULL && \
	ill->ill_lso_capab->ill_lso_version == LSO_VERSION_1 && \
	ill->ill_lso_capab->ill_lso_on != 0)

#define	ILL_LSO_TCP_USABLE(ill) \
	(ILL_LSO_USABLE(ill) && \
	ill->ill_lso_capab->ill_lso_flags & LSO_TX_BASIC_TCP_IPV4)

/*
 * Macro that determines whether or not a given CONN may be considered
 * for fast path prior to proceeding further with LSO or Multidata.
 */
#define	CONN_IS_LSO_MD_FASTPATH(connp) \
	((connp)->conn_dontroute == 0 &&	/* SO_DONTROUTE */ \
	!((connp)->conn_nexthop_set) &&		/* IP_NEXTHOP */ \
	(connp)->conn_nofailover_ill == NULL &&	/* IPIF_NOFAILOVER */ \
	(connp)->conn_xmit_if_ill == NULL &&	/* IP_XMIT_IF */ \
	(connp)->conn_outgoing_pill == NULL &&	/* IP{V6}_BOUND_PIF */ \
	(connp)->conn_outgoing_ill == NULL)	/* IP{V6}_BOUND_IF */

/* Definitions for fragmenting IP packets using MDT. */

/*
 * Smaller and private version of pdescinfo_t used specifically for IP,
 * which allows for only a single payload span per packet.
 */
typedef struct ip_pdescinfo_s PDESCINFO_STRUCT(2)	ip_pdescinfo_t;

/*
 * Macro version of ip_can_frag_mdt() which avoids the function call if we
 * only examine a single message block.
 */
#define	IP_CAN_FRAG_MDT(mp, hdr_len, len) \
	(((mp)->b_cont == NULL) ? \
	(MBLKL(mp) >= ((hdr_len) + ip_wput_frag_mdt_min)) : \
	ip_can_frag_mdt((mp), (hdr_len), (len)))

/*
 * Macro that determines whether or not a given IPC requires
 * outbound IPSEC processing.
 */
#define	CONN_IPSEC_OUT_ENCAPSULATED(connp) \
	((connp)->conn_out_enforce_policy || \
	((connp)->conn_latch != NULL && \
	(connp)->conn_latch->ipl_out_policy != NULL))
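/*
 * Illustrative sketch, not part of the original header: a transmit path
 * deciding whether LSO may be attempted would typically combine the
 * predicates above roughly as follows (variable names are hypothetical):
 *
 *	boolean_t use_lso;
 *
 *	use_lso = CONN_IS_LSO_MD_FASTPATH(connp) &&
 *	    !CONN_IPSEC_OUT_ENCAPSULATED(connp) &&
 *	    ILL_LSO_TCP_USABLE(ill);
 *
 * Conns with IPsec output policy, or bound to specific interfaces or
 * next hops, fall back to the ordinary (non-LSO, non-MDT) send path.
 */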
/*
 * These are used by the synchronous streams code in tcp and udp.
 * When we set the flags for a wakeup from a synchronous stream, we
 * always set RSLEEP in sd_wakeq, even if we have a read thread waiting
 * to do the io.  This is in case the read thread gets interrupted
 * before completing the io.  The RSLEEP flag in sd_wakeq is used to
 * indicate that there is data available at the synchronous barrier.
 * The assumption is that subsequent function calls through rwnext()
 * will reset sd_wakeq appropriately.
 */
#define	STR_WAKEUP_CLEAR(stp) { \
	mutex_enter(&stp->sd_lock); \
	stp->sd_wakeq &= ~RSLEEP; \
	mutex_exit(&stp->sd_lock); \
}

#define	STR_WAKEUP_SET(stp) { \
	mutex_enter(&stp->sd_lock); \
	if (stp->sd_flag & RSLEEP) { \
		stp->sd_flag &= ~RSLEEP; \
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait); \
	} \
	stp->sd_wakeq |= RSLEEP; \
	mutex_exit(&stp->sd_lock); \
}

#define	STR_SENDSIG(stp) { \
	int _events; \
	mutex_enter(&stp->sd_lock); \
	if ((_events = stp->sd_sigflags & (S_INPUT | S_RDNORM)) != 0) \
		strsendsig(stp->sd_siglist, _events, 0, 0); \
	if (stp->sd_rput_opt & SR_POLLIN) { \
		stp->sd_rput_opt &= ~SR_POLLIN; \
		mutex_exit(&stp->sd_lock); \
		pollwakeup(&stp->sd_pollist, POLLIN | POLLRDNORM); \
	} else { \
		mutex_exit(&stp->sd_lock); \
	} \
}

#define	CONN_UDP_SYNCSTR(connp) \
	(IPCL_IS_UDP(connp) && (connp)->conn_udp->udp_direct_sockfs)

/*
 * Macro that checks whether or not a particular UDP conn is
 * flow-controlling on the read-side.  If the udp module is directly
 * above ip, check to see if the drain queue is full; note here
 * that we check this without any lock protection because this
 * is a coarse granularity inbound flow-control.  If the module
 * above ip is not udp, then use canputnext to determine the
 * flow-control.
 *
 * Note that these checks are done after the conn is found in
 * the UDP fanout table.  A UDP conn in that table may have its
 * IPCL_UDP bit cleared from the conn_flags when the application
 * pops the udp module without issuing an unbind; in this case
 * IP will still receive packets for the conn and deliver them
 * upstream via putnext.  This is the reason why we have to test
 * against IPCL_UDP.
 */
#define	CONN_UDP_FLOWCTLD(connp) \
	((CONN_UDP_SYNCSTR(connp) && \
	(connp)->conn_udp->udp_drain_qfull) || \
	(!CONN_UDP_SYNCSTR(connp) && !canputnext((connp)->conn_rq)))

/*
 * Macro that delivers a given message upstream; if the udp module
 * is directly above ip, the message is passed directly into
 * the stream-less entry point.  Otherwise putnext is used.
 */
#define	CONN_UDP_RECV(connp, mp) { \
	if (IPCL_IS_UDP(connp)) \
		udp_conn_recv(connp, mp); \
	else \
		putnext((connp)->conn_rq, mp); \
}

#define	ILL_DLS_CAPABLE(ill) \
	(((ill)->ill_capabilities & \
	(ILL_CAPAB_POLL|ILL_CAPAB_SOFT_RING)) != 0)
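/*
 * Illustrative sketch, not part of the original header: an inbound UDP
 * delivery path would typically consult the flow-control check above
 * before handing a datagram up (the drop handling shown here is a
 * hypothetical simplification):
 *
 *	if (CONN_UDP_FLOWCTLD(connp)) {
 *		freemsg(mp);			(receiver is flow-controlled)
 *	} else {
 *		CONN_UDP_RECV(connp, mp);	(deliver to udp or putnext)
 *	}
 *
 * A flow-controlled conn has its datagram dropped rather than queued,
 * matching the coarse-grained inbound flow control described above.
 */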
/*
 * Macro that hands off one or more messages directly to DLD
 * when the interface is marked with ILL_CAPAB_POLL.
 */
#define	IP_DLS_ILL_TX(ill, ipha, mp, ipst) { \
	ill_dls_capab_t *ill_dls = ill->ill_dls_capab; \
	ASSERT(ILL_DLS_CAPABLE(ill)); \
	ASSERT(ill_dls != NULL); \
	ASSERT(ill_dls->ill_tx != NULL); \
	ASSERT(ill_dls->ill_tx_handle != NULL); \
	DTRACE_PROBE4(ip4__physical__out__start, \
	    ill_t *, NULL, ill_t *, ill, \
	    ipha_t *, ipha, mblk_t *, mp); \
	FW_HOOKS(ipst->ips_ip4_physical_out_event, \
	    ipst->ips_ipv4firewall_physical_out, \
	    NULL, ill, ipha, mp, mp, ipst); \
	DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp); \
	if (mp != NULL) \
		ill_dls->ill_tx(ill_dls->ill_tx_handle, mp); \
}

extern int	ip_wput_frag_mdt_min;
extern boolean_t ip_can_frag_mdt(mblk_t *, ssize_t, ssize_t);
extern mblk_t	*ip_prepend_zoneid(mblk_t *, zoneid_t, ip_stack_t *);

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _INET_IP_IMPL_H */