/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016-2021 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/infiniband.h>
#include <net/if_lagg.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_TS_OPTION \
    ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
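/*
 * TCP_LRO_TS_OPTION packs the canonical timestamp option prefix
 * NOP, NOP, TIMESTAMP, length (wire bytes 0x01 0x01 0x08 0x0a) into a
 * single 32-bit word matching network byte order.  Comparing the first
 * option word of a segment against this constant detects the common
 * "timestamp only" option layout (TCPOLEN_TSTAMP_APPA, i.e. 12, bytes
 * of options) with one compare instead of a full option walk.
 */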
static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, bool use_hash);

#ifdef TCPHPTS
static bool	do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
		    struct lro_entry *, struct mbuf **, struct mbuf **, struct mbuf **,
		    bool *, bool, bool, struct ifnet *, bool);

#endif

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static long tcplro_stacks_wanting_mbufq;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_extra_mbuf;
counter_u64_t tcp_would_have_but;
counter_u64_t tcp_comp_total;
counter_u64_t tcp_uncomp_total;
counter_u64_t tcp_bad_csums;

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
    "Number of interrupts in a row on the same CPU that will make us declare an 'affinity' CPU");

static uint32_t tcp_less_accurate_lro_ts = 0;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_less_accurate,
    CTLFLAG_MPSAFE, &tcp_less_accurate_lro_ts, 0,
    "Do we trade off efficiency by doing fewer timestamp operations for time accuracy?");

SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
    &tcp_extra_mbuf, "Number of times we had an extra compressed ack dropped into the tp");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
    &tcp_would_have_but, "Number of times we would have had an extra compressed, but mget failed");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
    &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
    &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lro_badcsum, CTLFLAG_RD,
    &tcp_bad_csums, "Number of packets that the common code saw with bad csums");

void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}

int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
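	/*
	 * Layout note: a single allocation below backs both the mbuf
	 * sort array and the LRO entries, laid out contiguously as
	 *
	 *   lro_mbuf_data[0 .. lro_mbufs - 1] | lro_entry[0 .. lro_entries - 1]
	 *
	 * which is why the entry pointer "le" is later derived by
	 * stepping past the last lro_mbuf_sort element.
	 */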
	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}

struct vxlan_header {
	uint32_t	vxlh_flags;
	uint32_t	vxlh_vni;
};
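/*
 * Parse a contiguous [VXLAN,] Ethernet/VLAN, IPv4/IPv6 and TCP/UDP
 * header chain starting at "ptr".  On success a pointer just past the
 * parsed headers is returned and "parser" is filled in; NULL means the
 * packet is truncated or uses a header combination LRO cannot handle.
 * When "update_data" is false, the addresses and ports are expected to
 * have been filled in already and only the header pointers are
 * refreshed.
 */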
static inline void *
tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan, int mlen)
{
	const struct ether_vlan_header *eh;
	void *old;
	uint16_t eth_type;

	if (update_data)
		memset(parser, 0, sizeof(*parser));

	old = ptr;

	if (is_vxlan) {
		const struct vxlan_header *vxh;
		vxh = ptr;
		ptr = (uint8_t *)ptr + sizeof(*vxh);
		if (update_data) {
			parser->data.vxlan_vni =
			    vxh->vxlh_vni & htonl(0xffffff00);
		}
	}

	eh = ptr;
	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
		eth_type = eh->evl_proto;
		if (update_data) {
			/* strip priority and keep VLAN ID only */
			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
		}
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		mlen -= (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	} else {
		eth_type = eh->evl_encap_proto;
		/* advance to next header */
		mlen -= ETHER_HDR_LEN;
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
	}
	if (__predict_false(mlen <= 0))
		return (NULL);
	switch (eth_type) {
#ifdef INET
	case htons(ETHERTYPE_IP):
		parser->ip4 = ptr;
		if (__predict_false(mlen < sizeof(struct ip)))
			return (NULL);
		/* Ensure there are no IPv4 options. */
		if ((parser->ip4->ip_hl << 2) != sizeof(*parser->ip4))
			break;
		/* .. and the packet is not fragmented. */
		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
			break;
		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
		mlen -= sizeof(struct ip);
		if (update_data) {
			parser->data.s_addr.v4 = parser->ip4->ip_src;
			parser->data.d_addr.v4 = parser->ip4->ip_dst;
		}
		switch (parser->ip4->ip_p) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
			}
			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
#ifdef INET6
	case htons(ETHERTYPE_IPV6):
		parser->ip6 = ptr;
		if (__predict_false(mlen < sizeof(struct ip6_hdr)))
			return (NULL);
		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
		if (update_data) {
			parser->data.s_addr.v6 = parser->ip6->ip6_src;
			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
		}
		mlen -= sizeof(struct ip6_hdr);
		switch (parser->ip6->ip6_nxt) {
		case IPPROTO_UDP:
			if (__predict_false(mlen < sizeof(struct udphdr)))
				return (NULL);
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
			}
			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			if (__predict_false(mlen < sizeof(struct tcphdr)))
				return (NULL);
			parser->tcp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
			}
			if (__predict_false(mlen < (parser->tcp->th_off << 2)))
				return (NULL);
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
	default:
		break;
	}
	/* Invalid packet - cannot parse */
	return (NULL);
}

static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;
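/*
 * Parse the outer headers of "m" and, when the outer flow is UDP and
 * the NIC has validated the inner VXLAN checksums (all vxlan_csum
 * flags set), the inner headers as well.  Returns the parser
 * describing the TCP stream LRO should operate on (outer for plain
 * TCP, inner for VXLAN-encapsulated TCP), or NULL when the packet is
 * not a candidate.
 */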
static inline struct lro_parser *
tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
{
	void *data_ptr;

	/* Try to parse outer headers first. */
	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false, m->m_len);
	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
		return (NULL);

	if (update_data) {
		/* Store VLAN ID, if any. */
		if (__predict_false(m->m_flags & M_VLANTAG)) {
			po->data.vlan_id =
			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
		}
		/* Store decrypted flag, if any. */
		if (__predict_false((m->m_pkthdr.csum_flags &
		    CSUM_TLS_MASK) == CSUM_TLS_DECRYPTED))
			po->data.lro_flags |= LRO_FLAG_DECRYPTED;
	}

	switch (po->data.lro_type) {
	case LRO_TYPE_IPV4_UDP:
	case LRO_TYPE_IPV6_UDP:
		/* Check for VXLAN headers. */
		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
			break;

		/* Try to parse inner headers. */
		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true,
		    (m->m_len - ((caddr_t)data_ptr - m->m_data)));
		if (data_ptr == NULL || (pi->total_hdr_len + po->total_hdr_len) > m->m_len)
			break;

		/* Verify supported header types. */
		switch (pi->data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
		case LRO_TYPE_IPV6_TCP:
			return (pi);
		default:
			break;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
	case LRO_TYPE_IPV6_TCP:
		if (update_data)
			memset(pi, 0, sizeof(*pi));
		return (po);
	default:
		break;
	}
	return (NULL);
}

static inline int
tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
{
	int len;

	switch (po->data.lro_type) {
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
		    ntohs(po->ip4->ip_len);
		break;
#endif
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
		break;
#endif
	default:
		return (TCP_LRO_CANNOT);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet,
	 * then trim the extra bytes off:
	 */
	if (__predict_true(m->m_pkthdr.len == len)) {
		return (0);
	} else if (m->m_pkthdr.len > len) {
		m_adj(m, len - m->m_pkthdr.len);
		return (0);
	}
	return (TCP_LRO_CANNOT);
}

static struct tcphdr *
tcp_lro_get_th(struct mbuf *m)
{
	return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
}

static void
lro_free_mbuf_chain(struct mbuf *m)
{
	struct mbuf *save;

	while (m) {
		save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
		m = save;
	}
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		lro_free_mbuf_chain(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}
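/*
 * Compute the 16-bit one's-complement sum over the TCP header, with
 * the checksum field itself backed out (csum starts at -th_sum).
 * th_off counts 32-bit words, so each loop iteration below adds two
 * 16-bit words.  Worked example of the folding step used throughout
 * this file: 0xffff + 0x0002 = 0x10001, and
 * (0x10001 >> 16) + (0x10001 & 0xffff) folds back to 0x0002.
 */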
static uint16_t
tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
	const uint16_t *ptr;
	uint32_t csum;
	uint16_t len;

	csum = -th->th_sum;	/* exclude checksum field */
	len = th->th_off;
	ptr = (const uint16_t *)th;
	while (len--) {
		csum += *ptr;
		ptr++;
		csum += *ptr;
		ptr++;
	}
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	return (csum);
}
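/*
 * Derive the checksum over the TCP payload only.  "tcp_csum" is the
 * complemented transport checksum, i.e. (up to folding) the
 * one's-complement sum over pseudo-header, TCP header and payload.
 * Subtracting (by adding the complement of) the pseudo-header sum and
 * the TCP header sum leaves just the payload sum, which is what gets
 * merged when segments are appended to an entry.
 */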
static uint16_t
tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
	uint32_t c;
	uint16_t cs;

	c = tcp_csum;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		/* Compute full pseudo IPv6 header checksum. */
		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		/* Compute full pseudo IPv4 header checksum. */
		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
		break;
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
		break;
	}

	/* Complement checksum. */
	cs = ~cs;
	c += cs;

	/* Remove TCP header checksum. */
	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
	c += cs;

	/* Compute checksum remainder. */
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c);
}

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

static void
tcp_lro_flush_active(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	/*
	 * Walk through the list of entries and flush any that hold
	 * packets.  This is called because we have an inbound packet
	 * (e.g. a SYN) that has to have all others flushed in front of
	 * it.  Note we have to do the remove because tcp_lro_flush()
	 * assumes that the entry is being freed.  That is fine; the
	 * entry will just get reallocated again as if it were new.
	 */
	LIST_FOREACH(le, &lc->lro_active, next) {
		if (le->m_head != NULL) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	uint64_t now, tov;
	struct bintime bt;

	NET_EPOCH_ASSERT();
	if (LIST_EMPTY(&lc->lro_active))
		return;

	/* get timeout time and current time in ns */
	binuptime(&bt);
	now = bintime2ns(&bt);
	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
	uint16_t csum;

	/* Legacy IP has a header checksum that needs to be correct. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}
	return (0);
}
#endif

#ifdef TCPHPTS
static void
tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
    const struct lro_entry *le, const struct mbuf *m,
    int frm, int32_t tcp_data_len, uint32_t th_seq,
    uint32_t th_ack, uint16_t th_win)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv, btv;
		uint32_t cts;

		cts = tcp_get_usecs(&tv);
		memset(&log, 0, sizeof(union tcp_log_stackspecific));
		log.u_bbr.flex8 = frm;
		log.u_bbr.flex1 = tcp_data_len;
		if (m)
			log.u_bbr.flex2 = m->m_pkthdr.len;
		else
			log.u_bbr.flex2 = 0;
		if (le->m_head) {
			log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
			log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
			log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
			log.u_bbr.delRate = le->m_head->m_flags;
			log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
		}
		log.u_bbr.inflight = th_seq;
		log.u_bbr.delivered = th_ack;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.epoch = le->next_seq;
		log.u_bbr.lt_epoch = le->ack_seq;
		log.u_bbr.pacing_gain = th_win;
		log.u_bbr.cwnd_gain = le->window;
		log.u_bbr.lost = curcpu;
		log.u_bbr.cur_del_rate = (uintptr_t)m;
		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
		bintime2timeval(&lc->lro_last_queue_time, &btv);
		log.u_bbr.flex6 = tcp_tv_to_usectick(&btv);
		log.u_bbr.flex7 = le->compressed;
		log.u_bbr.pacing_gain = le->uncompressed;
		if (in_epoch(net_epoch_preempt))
			log.u_bbr.inhpts = 1;
		else
			log.u_bbr.inhpts = 0;
		TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv,
		    &tptosocket(tp)->so_snd,
		    TCP_LOG_LRO, 0, 0, &log, false, &tv);
	}
}
#endif
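/*
 * Store "value" at *ptr and return (via *psum) the folded checksum
 * delta ~old + new, in the style of RFC 1624 incremental updates:
 * adding this delta into a containing checksum accounts for the field
 * change without recomputing the whole sum.
 */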
static inline void
tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
	uint32_t csum;

	csum = 0xffff - *ptr + value;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	*ptr = value;
	*psum = csum;
}

static uint16_t
tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
    uint16_t payload_len, uint16_t delta_sum)
{
	uint32_t csum;
	uint16_t tlen;
	uint16_t temp[5] = {};

	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_tcp_header;

	case LRO_TYPE_IPV6_TCP:
		/* Compute new IPv6 length. */
		tlen = (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_tcp_header;

	case LRO_TYPE_IPV4_UDP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_udp_header;

	case LRO_TYPE_IPV6_UDP:
		/* Compute new IPv6 length. */
		tlen = sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_udp_header;

	default:
		return (0);
	}

update_tcp_header:
	/* Compute current TCP header checksum. */
	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Incorporate the latest ACK into the TCP header. */
	pa->tcp->th_ack = le->ack_seq;
	pa->tcp->th_win = le->window;

	/* Incorporate latest timestamp into the TCP header. */
	if (le->timestamp != 0) {
		uint32_t *ts_ptr;

		ts_ptr = (uint32_t *)(pa->tcp + 1);
		ts_ptr[1] = htonl(le->tsval);
		ts_ptr[2] = le->tsecr;
	}

	/* Compute new TCP header checksum. */
	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Compute new TCP checksum. */
	csum = pa->tcp->th_sum + 0xffff - delta_sum +
	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Assign new TCP checksum. */
	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + 0xffff - temp[2] +
	    temp[3] + temp[4] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);

update_udp_header:
	tlen = sizeof(*pa->udp) + payload_len;
	/* Assign new UDP length and compute checksum delta. */
	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);

	/* Check if there is a UDP checksum. */
	if (__predict_false(pa->udp->uh_sum != 0)) {
		/* Compute new UDP checksum. */
		csum = pa->udp->uh_sum + 0xffff - delta_sum +
		    0xffff - temp[0] + 0xffff - temp[2];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		/* Assign new UDP checksum. */
		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
	}

	/* Compute all modifications affecting next checksum. */
	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);
}
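/*
 * Finalize a merged entry and hand it to the network stack: patch the
 * length, ACK, window, timestamp and checksum fields of the merged
 * super-segment (inner headers first, then outer for VXLAN), mark the
 * hardware-checksum flags accordingly, and pass the chain to if_input.
 */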
static void
tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Check if we need to recompute any checksums. */
	if (le->needs_merge) {
		uint16_t csum;

		switch (le->inner.data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_IPV6_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
				le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
			break;
		case LRO_TYPE_NONE:
			switch (le->outer.data.lro_type) {
			case LRO_TYPE_IPV4_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			case LRO_TYPE_IPV6_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				if (__predict_false(le->outer.data.lro_flags & LRO_FLAG_DECRYPTED))
					le->m_head->m_pkthdr.csum_flags |= CSUM_TLS_DECRYPTED;
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Break any remaining chain.  In the singleton case m_nextpkt
	 * may still be set; in the other cases tcp_push_and_replace()
	 * already set m_nextpkt to NULL.
	 */
	le->m_head->m_nextpkt = NULL;
	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
}

static void
tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
    struct mbuf *m, struct tcphdr *th)
{
	uint32_t *ts_ptr;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	ts_ptr = (uint32_t *)(th + 1);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);

	/* Check if there is a timestamp option. */
	if (tcp_opt_len == 0 ||
	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/* We failed to find the timestamp option. */
		le->timestamp = 0;
	} else {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;

	/* Pull out TCP sequence numbers and window size. */
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	le->flags = tcp_get_flags(th);
	le->needs_merge = 0;

	/* Setup new data pointers. */
	le->m_head = m;
	le->m_tail = m_last(m);
}

static void
tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct lro_parser *pa;

	/*
	 * Push up the stack of the current entry
	 * and replace it with "m".
	 */
	struct mbuf *msave;

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;

	/* Now push out the old entry */
	tcp_flush_out_entry(lc, le);

	/* Re-parse new header, should not fail. */
	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
	KASSERT(pa != NULL,
	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));

	/*
	 * Now to replace the data properly in the entry
	 * we have to reset the TCP header and
	 * other fields.
	 */
	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);

	/* Restore the next list */
	m->m_nextpkt = msave;
}
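/*
 * Account an appended segment "p" in the head mbuf's LRO packet
 * header.  The accumulated lro_tcp_d_csum only has to cover payload
 * appended beyond the head segment (whose own checksum already covers
 * itself), so the first append simply takes p's payload sum, and
 * later appends merge with one's-complement addition.
 */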
static void
tcp_lro_mbuf_append_pkthdr(struct lro_entry *le, const struct mbuf *p)
{
	struct mbuf *m;
	uint32_t csum;

	m = le->m_head;
	if (m->m_pkthdr.lro_nsegs == 1) {
		/* Compute relative checksum. */
		csum = p->m_pkthdr.lro_tcp_d_csum;
	} else {
		/* Merge TCP data checksums. */
		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
	}

	/* Update various counters. */
	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_tcp_d_csum = csum;
	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
	le->needs_merge = 1;
}

static void
tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint32_t tcp_data_len_total;
	uint32_t tcp_data_seg_total;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	/*
	 * First we must check the lead (m_head);
	 * we must make sure that it is *not*
	 * something that should be sent up
	 * right away (a SACK, etc.).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just one left. */
		return;
	}

	th = tcp_lro_get_th(m);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);

	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/*
		 * It's not the timestamp option; we can't
		 * use this segment as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	while ((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * Condense m into le; first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
		th = tcp_lro_get_th(m);
		ts_ptr = (uint32_t *)(th + 1);
		tcp_opt_len = (th->th_off << 2);
		tcp_opt_len -= sizeof(*th);
		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;

		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
		    tcp_data_len_total >= lc->lro_length_lim) {
			/* Flush now if appending will result in overflow. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0 &&
		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
		    *ts_ptr != TCP_LRO_TS_OPTION)) {
			/*
			 * Maybe a SACK in the new segment? We must start
			 * over after flushing the current entry: push it up
			 * the stack (possibly calling replace again) and
			 * re-examine from the beginning.
			 */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(lc, le, m);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
		    ((tcp_get_flags(th) & TH_ACK) !=
		    (le->flags & TH_ACK)) ||
		    (tcp_data_len == 0 &&
		    le->ack_seq == th->th_ack &&
		    le->window == th->th_win))) {
			/* Out of order packet, non-ACK + ACK or dup ACK. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_data_len != 0 ||
		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
			le->needs_merge = 1;
		} else if (th->th_ack == le->ack_seq) {
			if (WIN_GT(th->th_win, le->window)) {
				le->window = th->th_win;
				le->needs_merge = 1;
			}
		}

		if (tcp_data_len == 0) {
			m_freem(m);
			continue;
		}

		/* Merge TCP data checksum and length to head mbuf. */
		tcp_lro_mbuf_append_pkthdr(le, m);

		/*
		 * Adjust the mbuf so that m_data points to the first byte
		 * of the ULP payload, demote it to avoid complications,
		 * and append the new segment to the existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}

#ifdef TCPHPTS
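/*
 * Hand an entry's packet chain to the transport: append it to the
 * tcpcb's input queue (t_in_pkt/t_tail_pkt) so the stack can process
 * the mbufs directly from its hpts/input path.
 */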
static void
tcp_queue_pkts(struct inpcb *inp, struct tcpcb *tp, struct lro_entry *le)
{
	INP_WLOCK_ASSERT(inp);
	if (tp->t_in_pkt == NULL) {
		/* Nothing yet there */
		tp->t_in_pkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	} else {
		/* Already some there */
		tp->t_tail_pkt->m_nextpkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	}
	le->m_head = NULL;
	le->m_last_mbuf = NULL;
}
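/*
 * Find an mbuf to hold a compressed ACK entry.  Prefer the last mbuf
 * already queued to the tcpcb when it is an M_ACKCMP mbuf with
 * trailing space; otherwise allocate a fresh one (a cluster once
 * INP_MBUF_L_ACKS records that regular mbufs keep running out of
 * room).  *new_m reports whether an allocation was made.
 */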
static struct mbuf *
tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
    struct inpcb *inp, int32_t *new_m, bool can_append_old_cmp)
{
	struct tcpcb *tp;
	struct mbuf *m;

	tp = intotcpcb(inp);
	if (__predict_false(tp == NULL))
		return (NULL);

	/* Look at the last mbuf, if any, in the queue. */
	if (can_append_old_cmp) {
		m = tp->t_tail_pkt;
		if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
			if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
				tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
				*new_m = 0;
				counter_u64_add(tcp_extra_mbuf, 1);
				return (m);
			} else {
				/* Mark we ran out of space */
				inp->inp_flags2 |= INP_MBUF_L_ACKS;
			}
		}
	}
	/* Decide mbuf size. */
	tcp_lro_log(tp, lc, le, NULL, 21, 0, 0, 0, 0);
	if (inp->inp_flags2 & INP_MBUF_L_ACKS)
		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);

	if (__predict_false(m == NULL)) {
		counter_u64_add(tcp_would_have_but, 1);
		return (NULL);
	}
	counter_u64_add(tcp_comp_total, 1);
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_flags |= M_ACKCMP;
	*new_m = 1;
	return (m);
}

static struct inpcb *
tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
{
	struct inpcb *inp;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		inp = in6_pcblookup(&V_tcbinfo,
		    &pa->data.s_addr.v6,
		    pa->data.s_port,
		    &pa->data.d_addr.v6,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		inp = in_pcblookup(&V_tcbinfo,
		    pa->data.s_addr.v4,
		    pa->data.s_port,
		    pa->data.d_addr.v4,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
	default:
		inp = NULL;
		break;
	}
	return (inp);
}

static inline bool
tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
{
	/*
	 * This function returns two bits of valuable information.
	 * a) Is what is present capable of being ack-compressed?
	 *    We can ack-compress if there are no options or just
	 *    a timestamp option, and of course the th_flags must
	 *    be correct as well.
	 * b) Are other options present, such as SACK? This is
	 *    used to determine if we want to wake up the transport
	 *    or not.
	 */
	bool ret = true;

	switch (th->th_off << 2) {
	case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
		*ppts = (uint32_t *)(th + 1);
		/* Check if we have only one timestamp option. */
		if (**ppts == TCP_LRO_TS_OPTION)
			*other_opts = false;
		else {
			*other_opts = true;
			ret = false;
		}
		break;
	case (sizeof(*th)):
		/* No options. */
		*ppts = NULL;
		*other_opts = false;
		break;
	default:
		*ppts = NULL;
		*other_opts = true;
		ret = false;
		break;
	}
	/* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
	if ((tcp_get_flags(th) & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
		ret = false;
	/* If it has data on it we cannot compress it */
	if (m->m_pkthdr.lro_tcp_d_len)
		ret = false;

	/* ACK flag must be set. */
	if (!(tcp_get_flags(th) & TH_ACK))
		ret = false;
	return (ret);
}
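/*
 * Try to hand the whole entry directly to the transport: look up the
 * (write-locked) inpcb for the stream, verify that the stack supports
 * mbuf queueing or ACK compression, BPF-tap and strip/compress each
 * packet, append the survivors to the tcpcb's input queue, and
 * optionally wake the transport via its queued-segments handler.
 * Returns 0 on success or TCP_LRO_CANNOT to fall back to the classic
 * merge path.
 */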
static int
tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf **pp, *cmp, *mv_to;
	struct ifnet *lagg_ifp;
	bool bpf_req, lagg_bpf_req, should_wake, can_append_old_cmp;

	/* Check if the packet cannot be queued directly to a transport. */
	if ((tcplro_stacks_wanting_mbufq == 0) ||
	    (le->outer.data.vlan_id != 0) ||
	    (le->inner.data.lro_type != LRO_TYPE_NONE))
		return (TCP_LRO_CANNOT);

#ifdef INET6
	/*
	 * Be proactive about unspecified IPv6 address in source. As
	 * we use all-zero to indicate unbound/unconnected pcb,
	 * an unspecified IPv6 address can be used to confuse us.
	 *
	 * Note that packets with an unspecified IPv6 destination are
	 * already dropped in ip6_input.
	 */
	if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);

	if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);
#endif
	/* Lookup inp, if any. */
	inp = tcp_lro_lookup(lc->ifp,
	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
	if (inp == NULL)
		return (TCP_LRO_CANNOT);

	counter_u64_add(tcp_inp_lro_locks_taken, 1);

	/* Get TCP control structure. */
	tp = intotcpcb(inp);

	/* Check if the inp is dead, Jim. */
	if (tp->t_state == TCPS_TIME_WAIT) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}
	if ((inp->inp_irq_cpu_set == 0) && (lc->lro_cpu_is_set == 1)) {
		inp->inp_irq_cpu = lc->lro_last_cpu;
		inp->inp_irq_cpu_set = 1;
	}
	/* Check if the transport doesn't support the needed optimizations. */
	if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}

	if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
		should_wake = false;
	else
		should_wake = true;
	/* Check if packets should be tapped to BPF. */
	bpf_req = bpf_peers_present(lc->ifp->if_bpf);
	lagg_bpf_req = false;
	lagg_ifp = NULL;
	if (lc->ifp->if_type == IFT_IEEE8023ADLAG ||
	    lc->ifp->if_type == IFT_INFINIBANDLAG) {
		struct lagg_port *lp = lc->ifp->if_lagg;
		struct lagg_softc *sc = lp->lp_softc;

		lagg_ifp = sc->sc_ifp;
		if (lagg_ifp != NULL)
			lagg_bpf_req = bpf_peers_present(lagg_ifp->if_bpf);
	}

	/* Strip and compress all the incoming packets. */
	can_append_old_cmp = true;
	cmp = NULL;
	for (pp = &le->m_head; *pp != NULL; ) {
		mv_to = NULL;
		if (do_bpf_strip_and_compress(inp, lc, le, pp,
		    &cmp, &mv_to, &should_wake, bpf_req,
		    lagg_bpf_req, lagg_ifp, can_append_old_cmp) == false) {
			/* Advance to next mbuf. */
			pp = &(*pp)->m_nextpkt;
			/*
			 * Once we have appended we can't look in the pending
			 * inbound packets for a compressed ack to append to.
			 */
			can_append_old_cmp = false;
			/*
			 * Once we append we also need to stop adding to any
			 * compressed ack we were remembering. A new cmp
			 * ack will be required.
			 */
			cmp = NULL;
			tcp_lro_log(tp, lc, le, NULL, 25, 0, 0, 0, 0);
		} else if (mv_to != NULL) {
			/* We are asked to move pp up */
			pp = &mv_to->m_nextpkt;
			tcp_lro_log(tp, lc, le, NULL, 24, 0, 0, 0, 0);
		} else
			tcp_lro_log(tp, lc, le, NULL, 26, 0, 0, 0, 0);
	}
	/* Update "m_last_mbuf", if any. */
	if (pp == &le->m_head)
		le->m_last_mbuf = *pp;
	else
		le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);

	/* Check if any data mbufs left. */
	if (le->m_head != NULL) {
		counter_u64_add(tcp_inp_lro_direct_queue, 1);
		tcp_lro_log(tp, lc, le, NULL, 22, 1, inp->inp_flags2, 0, 1);
		tcp_queue_pkts(inp, tp, le);
	}
	if (should_wake) {
		/* Wakeup */
		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
		if ((*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0))
			inp = NULL;
	}
	if (inp != NULL)
		INP_WUNLOCK(inp);
	return (0);	/* Success. */
}
#endif

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Only optimise if there are multiple packets waiting. */
#ifdef TCPHPTS
	int error;
#endif

	NET_EPOCH_ASSERT();
#ifdef TCPHPTS
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_flush_tcphpts(lc, le);
	CURVNET_RESTORE();
	if (error != 0) {
#endif
		tcp_lro_condense(lc, le);
		tcp_flush_out_entry(lc, le);
#ifdef TCPHPTS
	}
#endif
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}
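/*
 * Isolate the most significant set bit of a 64-bit value.  With
 * flsll() available this is a single shift; the fallback below first
 * smears the top bit into every lower position with an OR-shift
 * cascade and then clears all but the highest bit via x & ~(x >> 1).
 * For example, 0b0101 smears to 0b0111 and reduces to 0b0100.
 */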
#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
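/*
 * Flush everything queued via tcp_lro_queue_mbuf(): sort the pending
 * mbufs so packets of the same stream become adjacent, feed them to
 * the LRO engine stream by stream (flushing active entries whenever
 * the stream key changes), and fall back to direct if_input for
 * anything the engine rejects.
 */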
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	NET_EPOCH_ASSERT();
	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;
	if (lc->lro_cpu_is_set == 0) {
		if (lc->lro_last_cpu == curcpu) {
			lc->lro_cnt_of_same_cpu++;
			/* Have we reached the threshold to declare a cpu? */
			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
				lc->lro_cpu_is_set = 1;
		} else {
			lc->lro_last_cpu = curcpu;
			lc->lro_cnt_of_same_cpu = 0;
		}
	}
	CURVNET_SET(lc->ifp->if_vnet);

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
			/* Flush anything we have accumulated */
			tcp_lro_flush_active(lc);
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
	CURVNET_RESTORE();
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

#ifdef TCPHPTS
	tcp_run_hpts();
#endif
	lc->lro_mbuf_count = 0;
}
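/*
 * Compressed ACKs are stored as an array of struct tcp_ackent records
 * in an M_ACKCMP mbuf; build_ack_entry() below condenses one plain
 * TCP ACK (sequence numbers, window, flags, optional timestamps and
 * the IP TOS/traffic class) into the next free slot of such an array.
 */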
#ifdef TCPHPTS
static void
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
    uint32_t *ts_ptr, uint16_t iptos)
{
	/*
	 * Given a TCP ACK, summarize it down into the small TCP ACK
	 * entry.
	 */
	ae->timestamp = m->m_pkthdr.rcv_tstmp;
	ae->flags = 0;
	if (m->m_flags & M_TSTMP_LRO)
		ae->flags |= TSTMP_LRO;
	else if (m->m_flags & M_TSTMP)
		ae->flags |= TSTMP_HDWR;
	ae->seq = ntohl(th->th_seq);
	ae->ack = ntohl(th->th_ack);
	ae->flags |= tcp_get_flags(th);
	if (ts_ptr != NULL) {
		ae->ts_value = ntohl(ts_ptr[1]);
		ae->ts_echo = ntohl(ts_ptr[2]);
		ae->flags |= HAS_TSTMP;
	}
	ae->win = ntohs(th->th_win);
	ae->codepoint = iptos;
}

/*
 * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
 * and strip everything but the IPv4/IPv6 header.
 */
static bool
do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
    bool *should_wake, bool bpf_req, bool lagg_bpf_req, struct ifnet *lagg_ifp, bool can_append_old_cmp)
{
	union {
		void *ptr;
		struct ip *ip4;
		struct ip6_hdr *ip6;
	} l3;
	struct mbuf *m;
	struct mbuf *nm;
	struct tcphdr *th;
	struct tcp_ackent *ack_ent;
	uint32_t *ts_ptr;
	int32_t n_mbuf;
	bool other_opts, can_compress;
	uint8_t lro_type;
	uint16_t iptos;
	int tcp_hdr_offset;
	int idx;

	/* Get current mbuf. */
	m = *pp;

	/* Let the BPF see the packet */
	if (__predict_false(bpf_req))
		ETHER_BPF_MTAP(lc->ifp, m);

	if (__predict_false(lagg_bpf_req))
		ETHER_BPF_MTAP(lagg_ifp, m);

	tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
	lro_type = le->inner.data.lro_type;
	switch (lro_type) {
	case LRO_TYPE_NONE:
		lro_type = le->outer.data.lro_type;
		switch (lro_type) {
		case LRO_TYPE_IPV4_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip4);
			m->m_pkthdr.lro_etype = ETHERTYPE_IP;
			break;
		case LRO_TYPE_IPV6_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip6);
			m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
			break;
		default:
			goto compressed;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip4);
		m->m_pkthdr.lro_etype = ETHERTYPE_IP;
		break;
	case LRO_TYPE_IPV6_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip6);
		m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
		break;
	default:
		goto compressed;
	}

	MPASS(tcp_hdr_offset >= 0);

	m_adj(m, tcp_hdr_offset);
	m->m_flags |= M_LRO_EHDRSTRP;
	m->m_flags &= ~M_ACKCMP;
	m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;

	th = tcp_lro_get_th(m);

	th->th_sum = 0;		/* TCP checksum is valid. */

	/* Check if ACK can be compressed */
	can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);

	/* Now let's look at the should-wake state */
	if ((other_opts == true) &&
	    ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
		/*
		 * If there are other options (SACK?) and the
		 * tcp endpoint has not expressly told us it does
		 * not care about SACKs, then we should wake up.
		 */
		*should_wake = true;
	}
	/* Is the ack compressible? */
	if (can_compress == false)
		goto done;
	/* Does the TCP endpoint support ACK compression? */
	if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
		goto done;

	/* Let's get the TOS/traffic class field */
	l3.ptr = mtod(m, void *);
	switch (lro_type) {
	case LRO_TYPE_IPV4_TCP:
		iptos = l3.ip4->ip_tos;
		break;
	case LRO_TYPE_IPV6_TCP:
		iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
		break;
	default:
		iptos = 0;	/* Keep compiler happy. */
		break;
	}
	/* Now let's get space if we don't have some already */
	if (*cmp == NULL) {
new_one:
		nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf, can_append_old_cmp);
		if (__predict_false(nm == NULL))
			goto done;
		*cmp = nm;
		if (n_mbuf) {
			/*
			 * Link in the new cmp ack to our in-order place;
			 * first set our cmp ack's next to where we are.
			 */
			nm->m_nextpkt = m;
			(*pp) = nm;
			/*
			 * Set it up so mv_to is advanced to our
			 * compressed ack. This way the caller can
			 * advance pp to the right place.
			 */
			*mv_to = nm;
			/*
			 * Advance it here locally as well.
			 */
			pp = &nm->m_nextpkt;
		}
	} else {
		/* We already have one we are working on */
		nm = *cmp;
		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
			/* We ran out of space */
			inp->inp_flags2 |= INP_MBUF_L_ACKS;
			goto new_one;
		}
	}
	MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
	counter_u64_add(tcp_inp_lro_compressed, 1);
	le->compressed++;
	/* We can add in to the one on the tail */
	ack_ent = mtod(nm, struct tcp_ackent *);
	idx = (nm->m_len / sizeof(struct tcp_ackent));
	build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);

	/* Bump the size of both pkt-hdr and len */
	nm->m_len += sizeof(struct tcp_ackent);
	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
compressed:
	/* Advance to next mbuf before freeing. */
	*pp = m->m_nextpkt;
	m->m_nextpkt = NULL;
	m_freem(m);
	return (true);
done:
	counter_u64_add(tcp_uncomp_total, 1);
	le->uncompressed++;
	return (false);
}
#endif
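/*
 * Select the hash bucket for a parsed packet: use the NIC-provided
 * flow id when present, otherwise sum the raw address/port words of
 * the parser data.
 */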
static struct lro_head *
tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
{
	u_long hash;

	if (M_HASHTYPE_ISHASH(m)) {
		hash = m->m_pkthdr.flowid;
	} else {
		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
			hash += parser->data.raw[i];
	}
	return (&lc->lro_hash[hash % lc->lro_hashsz]);
}
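/*
 * Common LRO receive path: validate checksums, parse and trim the
 * packet, stamp it, and either append it to an existing entry for the
 * same stream or claim a free entry and start a new chain.  Returns
 * non-zero when the caller must hand the packet to the stack itself.
 */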
static int
tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
{
	struct lro_parser pi;	/* inner address data */
	struct lro_parser po;	/* outer address data */
	struct lro_parser *pa;	/* current parser for TCP stream */
	struct lro_entry *le;
	struct lro_head *bucket;
	struct tcphdr *th;
	int tcp_data_len;
	int tcp_opt_len;
	int error;
	uint16_t tcp_data_sum;

#ifdef INET
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ipforwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
#ifdef INET6
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ip6_forwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* We expect a contiguous header [eh, ip, tcp]. */
	pa = tcp_lro_parser(m, &po, &pi, true);
	if (__predict_false(pa == NULL))
		return (TCP_LRO_NOT_SUPPORTED);

	/* We don't expect any padding. */
	error = tcp_lro_trim_mbuf_chain(m, pa);
	if (__predict_false(error != 0))
		return (error);

#ifdef INET
	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
		if (__predict_false(error != 0))
			return (error);
		break;
	default:
		break;
	}
#endif
	/* If the packet has no hardware or arrival timestamp, add one. */
	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		m->m_flags |= M_TSTMP_LRO;
	}

	/* Get pointer to TCP header. */
	th = pa->tcp;

	/* Don't process SYN packets. */
	if (__predict_false(tcp_get_flags(th) & TH_SYN))
		return (TCP_LRO_CANNOT);

	/* Get total TCP header length and compute payload length. */
	tcp_opt_len = (th->th_off << 2);
	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
	    (uint8_t *)m->m_data) - tcp_opt_len;
	tcp_opt_len -= sizeof(*th);

	/* Don't process invalid TCP headers. */
	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
		return (TCP_LRO_CANNOT);

	/* Compute TCP data-only checksum. */
	if (tcp_data_len == 0)
		tcp_data_sum = 0;	/* no data, no checksum */
	else if (__predict_false(csum != 0))
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
	else
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);

	/* Save TCP info in mbuf. */
	m->m_nextpkt = NULL;
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
	m->m_pkthdr.lro_nsegs = 1;

	/* Get hash bucket. */
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else {
		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		/* Compare addresses and ports. */
		if (lro_address_compare(&po.data, &le->outer.data) == false ||
		    lro_address_compare(&pi.data, &le->inner.data) == false)
			continue;

		/* Check if no data and old ACK. */
		if (tcp_data_len == 0 &&
		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			m_freem(m);
			return (0);
		}

		/* Append "m" in the last spot. */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to "m". */
		le->m_last_mbuf = m;
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);

	/* Make sure the headers are set. */
	le->inner = pi;
	le->outer = po;

	/* Store time this entry was allocated. */
	le->alloc_time = lc->lro_last_queue_time;

	tcp_set_entry_to_mbuf(lc, le, m, th);

	/* Now set the tail to "m". */
	le->m_last_mbuf = m;

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	int error;

	if (((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) !=
	    ((CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) ||
	    (m->m_pkthdr.csum_data != 0xffff)) {
		/*
		 * The checksum either did not have hardware offload
		 * or it was a bad checksum. We can't LRO such
		 * a packet.
		 */
		counter_u64_add(tcp_bad_csums, 1);
		return (TCP_LRO_CANNOT);
	}
	/* get current time */
	binuptime(&lc->lro_last_queue_time);
	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_rx_common(lc, m, csum, true);
	if (__predict_false(error != 0)) {
		/*
		 * Flush anything we have accumulated
		 * ahead of this packet that can't
		 * be LRO'd. This preserves order.
		 */
		tcp_lro_flush_active(lc);
	}
	CURVNET_RESTORE();

	return (error);
}
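/*
 * Each queued mbuf gets a 64-bit sort key laid out as
 *
 *   [ hash type: 8 bits | flow id: 32 bits | queue index: 24 bits ]
 *
 * so that tcp_lro_sort() groups packets of the same flow together
 * while the low 24 bits preserve arrival order within a flow.  The
 * "& (-1ULL << 24)" in tcp_lro_flush_all() masks the per-packet index
 * away when detecting stream changes.
 */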
void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	NET_EPOCH_ASSERT();
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if the interface does not have LRO enabled */
	if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* If the packet has no hardware or arrival timestamp, add one. */
	if ((tcplro_stacks_wanting_mbufq > 0) &&
	    (tcp_less_accurate_lro_ts == 0) &&
	    ((mb->m_flags & M_TSTMP) == 0)) {
		/* Add in an LRO time since no hardware */
		binuptime(&lc->lro_last_queue_time);
		mb->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		mb->m_flags |= M_TSTMP_LRO;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}

/* end */