/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016-2021 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/udp.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_TS_OPTION \
    ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
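
/*
 * Illustrative note (a sketch, not part of the build): the constant
 * above matches the 32-bit prefix of the "appendix A" timestamp
 * layout, where the padded 12-byte option area begins with the bytes
 * 0x01 0x01 0x08 0x0a (NOP, NOP, TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP).
 * A hypothetical standalone check over a raw option buffer:
 */
#if 0
#include <stdint.h>
#include <string.h>

/* Return nonzero when the option area starts with NOP,NOP,TS,10. */
static int
has_leading_timestamp(const uint8_t *opts, int optlen)
{
	static const uint8_t ts_prefix[4] = { 0x01, 0x01, 0x08, 0x0a };

	return (optlen >= 12 &&
	    memcmp(opts, ts_prefix, sizeof(ts_prefix)) == 0);
}
#endif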

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, bool use_hash);

#ifdef TCPHPTS
static bool	do_bpf_strip_and_compress(struct inpcb *, struct lro_ctrl *,
		    struct lro_entry *, struct mbuf **, struct mbuf **,
		    struct mbuf **, bool *, bool);
#endif

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static long tcplro_stacks_wanting_mbufq;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_extra_mbuf;
counter_u64_t tcp_would_have_but;
counter_u64_t tcp_comp_total;
counter_u64_t tcp_uncomp_total;

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static uint32_t tcp_lro_cpu_set_thresh = TCP_LRO_CPU_DECLARATION_THRESH;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, lro_cpu_threshold,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_cpu_set_thresh, 0,
    "Number of interrupts in a row on the same CPU that will make us declare an 'affinity' CPU");

SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of LRO entries fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of LRO entries where we woke up the transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of LRO entries compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of LRO inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, extra_mbuf, CTLFLAG_RD,
    &tcp_extra_mbuf, "Number of times we had an extra compressed ACK dropped into the tp");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, would_have_but, CTLFLAG_RD,
    &tcp_would_have_but, "Number of times we would have had an extra compressed ACK, but the mbuf allocation failed");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, with_m_ackcmp, CTLFLAG_RD,
    &tcp_comp_total, "Number of mbufs queued with M_ACKCMP flags set");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, without_m_ackcmp, CTLFLAG_RD,
    &tcp_uncomp_total, "Number of mbufs queued without M_ACKCMP");

void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}

int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}
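
/*
 * Illustrative driver-side usage (a minimal sketch, not compiled in):
 * a NIC RX path typically allocates one lro_ctrl per receive queue,
 * feeds packets through tcp_lro_rx(), and flushes at the end of each
 * interrupt batch.  "my_rxq", "my_rxq_attach" and friends are
 * hypothetical driver names; the LRO calls are the ones in this file.
 */
#if 0
struct my_rxq {
	struct lro_ctrl lro;		/* hypothetical per-queue state */
};

static int
my_rxq_attach(struct my_rxq *q, struct ifnet *ifp)
{
	/* 8 entries, 128-slot mbuf sort array; sizes are examples. */
	return (tcp_lro_init_args(&q->lro, ifp, 8, 128));
}

static void
my_rxq_input(struct my_rxq *q, struct mbuf *m)
{
	/* Hardware already validated checksums in this sketch. */
	if (tcp_lro_rx(&q->lro, m, 0) != 0)
		(*q->lro.ifp->if_input)(q->lro.ifp, m);	/* not LRO-able */
}

static void
my_rxq_done(struct my_rxq *q)
{
	tcp_lro_flush_all(&q->lro);	/* push merged streams upward */
}

static void
my_rxq_detach(struct my_rxq *q)
{
	tcp_lro_free(&q->lro);
}
#endif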

struct vxlan_header {
	uint32_t	vxlh_flags;
	uint32_t	vxlh_vni;
};

static inline void *
tcp_lro_low_level_parser(void *ptr, struct lro_parser *parser, bool update_data, bool is_vxlan)
{
	const struct ether_vlan_header *eh;
	void *old;
	uint16_t eth_type;

	if (update_data)
		memset(parser, 0, sizeof(*parser));

	old = ptr;

	if (is_vxlan) {
		const struct vxlan_header *vxh;
		vxh = ptr;
		ptr = (uint8_t *)ptr + sizeof(*vxh);
		if (update_data) {
			parser->data.vxlan_vni =
			    vxh->vxlh_vni & htonl(0xffffff00);
		}
	}

	eh = ptr;
	if (__predict_false(eh->evl_encap_proto == htons(ETHERTYPE_VLAN))) {
		eth_type = eh->evl_proto;
		if (update_data) {
			/* strip priority and keep VLAN ID only */
			parser->data.vlan_id = eh->evl_tag & htons(EVL_VLID_MASK);
		}
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = eh->evl_encap_proto;
		/* advance to next header */
		ptr = (uint8_t *)ptr + ETHER_HDR_LEN;
	}

	switch (eth_type) {
#ifdef INET
	case htons(ETHERTYPE_IP):
		parser->ip4 = ptr;
		/* Ensure there are no IPv4 options. */
		if ((parser->ip4->ip_hl << 2) != sizeof(*parser->ip4))
			break;
		/* .. and the packet is not fragmented. */
		if (parser->ip4->ip_off & htons(IP_MF|IP_OFFMASK))
			break;
		ptr = (uint8_t *)ptr + (parser->ip4->ip_hl << 2);
		if (update_data) {
			parser->data.s_addr.v4 = parser->ip4->ip_src;
			parser->data.d_addr.v4 = parser->ip4->ip_dst;
		}
		switch (parser->ip4->ip_p) {
		case IPPROTO_UDP:
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_UDP);
			}
			ptr = ((uint8_t *)ptr + sizeof(*parser->udp));
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV4_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV4_TCP);
			}
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
#ifdef INET6
	case htons(ETHERTYPE_IPV6):
		parser->ip6 = ptr;
		ptr = (uint8_t *)ptr + sizeof(*parser->ip6);
		if (update_data) {
			parser->data.s_addr.v6 = parser->ip6->ip6_src;
			parser->data.d_addr.v6 = parser->ip6->ip6_dst;
		}
		switch (parser->ip6->ip6_nxt) {
		case IPPROTO_UDP:
			parser->udp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_UDP;
				parser->data.s_port = parser->udp->uh_sport;
				parser->data.d_port = parser->udp->uh_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_UDP);
			}
			ptr = (uint8_t *)ptr + sizeof(*parser->udp);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		case IPPROTO_TCP:
			parser->tcp = ptr;
			if (update_data) {
				parser->data.lro_type = LRO_TYPE_IPV6_TCP;
				parser->data.s_port = parser->tcp->th_sport;
				parser->data.d_port = parser->tcp->th_dport;
			} else {
				MPASS(parser->data.lro_type == LRO_TYPE_IPV6_TCP);
			}
			ptr = (uint8_t *)ptr + (parser->tcp->th_off << 2);
			parser->total_hdr_len = (uint8_t *)ptr - (uint8_t *)old;
			return (ptr);
		default:
			break;
		}
		break;
#endif
	default:
		break;
	}
	/* Invalid packet - cannot parse */
	return (NULL);
}

static const int vxlan_csum = CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
    CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID;

static inline struct lro_parser *
tcp_lro_parser(struct mbuf *m, struct lro_parser *po, struct lro_parser *pi, bool update_data)
{
	void *data_ptr;

	/* Try to parse outer headers first. */
	data_ptr = tcp_lro_low_level_parser(m->m_data, po, update_data, false);
	if (data_ptr == NULL || po->total_hdr_len > m->m_len)
		return (NULL);

	if (update_data) {
		/* Store VLAN ID, if any. */
		if (__predict_false(m->m_flags & M_VLANTAG)) {
			po->data.vlan_id =
			    htons(m->m_pkthdr.ether_vtag) & htons(EVL_VLID_MASK);
		}
	}

	switch (po->data.lro_type) {
	case LRO_TYPE_IPV4_UDP:
	case LRO_TYPE_IPV6_UDP:
		/* Check for VXLAN headers. */
		if ((m->m_pkthdr.csum_flags & vxlan_csum) != vxlan_csum)
			break;

		/* Try to parse inner headers. */
		data_ptr = tcp_lro_low_level_parser(data_ptr, pi, update_data, true);
		if (data_ptr == NULL || pi->total_hdr_len > m->m_len)
			break;

		/* Verify supported header types. */
		switch (pi->data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
		case LRO_TYPE_IPV6_TCP:
			return (pi);
		default:
			break;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
	case LRO_TYPE_IPV6_TCP:
		if (update_data)
			memset(pi, 0, sizeof(*pi));
		return (po);
	default:
		break;
	}
	return (NULL);
}
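
/*
 * Illustrative use of the parser above (a sketch, not compiled in):
 * the returned lro_parser describes the innermost TCP stream, whether
 * plain or VXLAN-encapsulated.  "example_headers_len" is hypothetical.
 */
#if 0
static int
example_headers_len(struct mbuf *m)
{
	struct lro_parser po, pi, *pa;

	pa = tcp_lro_parser(m, &po, &pi, true);
	if (pa == NULL)
		return (-1);		/* unsupported frame */
	/* pa == &pi for VXLAN-encapsulated TCP, pa == &po otherwise. */
	return (pa == &pi ? po.total_hdr_len + pi.total_hdr_len :
	    po.total_hdr_len);
}
#endif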

static inline int
tcp_lro_trim_mbuf_chain(struct mbuf *m, const struct lro_parser *po)
{
	int len;

	switch (po->data.lro_type) {
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		len = ((uint8_t *)po->ip4 - (uint8_t *)m->m_data) +
		    ntohs(po->ip4->ip_len);
		break;
#endif
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		len = ((uint8_t *)po->ip6 - (uint8_t *)m->m_data) +
		    ntohs(po->ip6->ip6_plen) + sizeof(*po->ip6);
		break;
#endif
	default:
		return (TCP_LRO_CANNOT);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet,
	 * then trim the extra bytes off:
	 */
	if (__predict_true(m->m_pkthdr.len == len)) {
		return (0);
	} else if (m->m_pkthdr.len > len) {
		m_adj(m, len - m->m_pkthdr.len);
		return (0);
	}
	return (TCP_LRO_CANNOT);
}
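
/*
 * Worked example for the trim above (a sketch, not compiled in): a
 * minimum-size 60-byte Ethernet frame carrying a 54-byte TCP segment
 * (14 ether + 20 IP + 20 TCP) arrives with 6 bytes of padding.
 * m_adj() with a negative length trims from the tail, so the
 * "len - m->m_pkthdr.len" argument removes exactly the padding.
 */
#if 0
	/* m->m_pkthdr.len == 60, len == 54 (derived from ip_len) */
	m_adj(m, 54 - 60);		/* trims 6 bytes off the tail */
	/* m->m_pkthdr.len is now 54 */
#endif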

static struct tcphdr *
tcp_lro_get_th(struct mbuf *m)
{
	return ((struct tcphdr *)((uint8_t *)m->m_data + m->m_pkthdr.lro_tcp_h_off));
}

static void
lro_free_mbuf_chain(struct mbuf *m)
{
	struct mbuf *save;

	while (m) {
		save = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
		m = save;
	}
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		lro_free_mbuf_chain(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

static uint16_t
tcp_lro_rx_csum_tcphdr(const struct tcphdr *th)
{
	const uint16_t *ptr;
	uint32_t csum;
	uint16_t len;

	csum = -th->th_sum;	/* exclude checksum field */
	len = th->th_off;
	ptr = (const uint16_t *)th;
	while (len--) {
		csum += *ptr;
		ptr++;
		csum += *ptr;
		ptr++;
	}
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	return (csum);
}
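
/*
 * Worked example of the fold above (a sketch, not compiled in):
 * a 32-bit ones-complement accumulator is reduced to 16 bits by
 * repeatedly adding the carry half back into the low half.
 */
#if 0
#include <stdint.h>

/* Fold a 32-bit ones-complement accumulator into 16 bits. */
static uint16_t
fold16(uint32_t csum)
{
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	return (csum);
}

/* fold16(0x1fffe) == 0xffff and fold16(0x1ffff) == 0x0001 */
#endif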

static uint16_t
tcp_lro_rx_csum_data(const struct lro_parser *pa, uint16_t tcp_csum)
{
	uint32_t c;
	uint16_t cs;

	c = tcp_csum;

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		/* Compute full pseudo IPv6 header checksum. */
		cs = in6_cksum_pseudo(pa->ip6, ntohs(pa->ip6->ip6_plen), pa->ip6->ip6_nxt, 0);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		/* Compute full pseudo IPv4 header checksum. */
		cs = in_addword(ntohs(pa->ip4->ip_len) - sizeof(*pa->ip4), IPPROTO_TCP);
		cs = in_pseudo(pa->ip4->ip_src.s_addr, pa->ip4->ip_dst.s_addr, htons(cs));
		break;
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
		break;
	}

	/* Complement checksum. */
	cs = ~cs;
	c += cs;

	/* Remove TCP header checksum. */
	cs = ~tcp_lro_rx_csum_tcphdr(pa->tcp);
	c += cs;

	/* Compute checksum remainder. */
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c);
}

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	uint64_t now, tov;
	struct bintime bt;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	/* get timeout time and current time in ns */
	binuptime(&bt);
	now = bintime2ns(&bt);
	tov = ((timeout->tv_sec * 1000000000) + (timeout->tv_usec * 1000));
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (now >= (bintime2ns(&le->alloc_time) + tov)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
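
/*
 * Illustrative use (a sketch, not compiled in): a driver may call the
 * routine above from its tick callout to push out LRO entries that
 * have been idle longer than a chosen timeout.  "my_softc" and
 * "my_tick" are hypothetical driver names.
 */
#if 0
struct my_softc {
	struct lro_ctrl lro;
	struct callout tick;
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;
	const struct timeval tv = { .tv_sec = 0, .tv_usec = 100 };

	tcp_lro_flush_inactive(&sc->lro, &tv);	/* flush >100us idle */
	callout_reset(&sc->tick, hz, my_tick, sc);
}
#endif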

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
{
	uint16_t csum;

	/* Legacy IP has a header checksum that needs to be correct. */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}
	return (0);
}
#endif

#ifdef TCPHPTS
static void
tcp_lro_log(struct tcpcb *tp, const struct lro_ctrl *lc,
    const struct lro_entry *le, const struct mbuf *m,
    int frm, int32_t tcp_data_len, uint32_t th_seq,
    uint32_t th_ack, uint16_t th_win)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv, btv;
		uint32_t cts;

		cts = tcp_get_usecs(&tv);
		memset(&log, 0, sizeof(union tcp_log_stackspecific));
		log.u_bbr.flex8 = frm;
		log.u_bbr.flex1 = tcp_data_len;
		if (m)
			log.u_bbr.flex2 = m->m_pkthdr.len;
		else
			log.u_bbr.flex2 = 0;
		log.u_bbr.flex3 = le->m_head->m_pkthdr.lro_nsegs;
		log.u_bbr.flex4 = le->m_head->m_pkthdr.lro_tcp_d_len;
		if (le->m_head) {
			log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
			log.u_bbr.delRate = le->m_head->m_flags;
			log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
		}
		log.u_bbr.inflight = th_seq;
		log.u_bbr.delivered = th_ack;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.epoch = le->next_seq;
		log.u_bbr.lt_epoch = le->ack_seq;
		log.u_bbr.pacing_gain = th_win;
		log.u_bbr.cwnd_gain = le->window;
		log.u_bbr.lost = curcpu;
		log.u_bbr.cur_del_rate = (uintptr_t)m;
		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
		bintime2timeval(&lc->lro_last_queue_time, &btv);
		log.u_bbr.flex6 = tcp_tv_to_usectick(&btv);
		log.u_bbr.flex7 = le->compressed;
		log.u_bbr.pacing_gain = le->uncompressed;
		if (in_epoch(net_epoch_preempt))
			log.u_bbr.inhpts = 1;
		else
			log.u_bbr.inhpts = 0;
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_LOG_LRO, 0,
		    0, &log, false, &tv);
	}
}
#endif

static inline void
tcp_lro_assign_and_checksum_16(uint16_t *ptr, uint16_t value, uint16_t *psum)
{
	uint32_t csum;

	csum = 0xffff - *ptr + value;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);
	*ptr = value;
	*psum = csum;
}
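
/*
 * Worked example for the helper above (a sketch, not compiled in):
 * *psum receives the ones-complement difference between the new and
 * old field values.  Because internet checksum fields store the
 * complement of the sum, a delta "d" over covered data is folded into
 * an existing checksum field "ck" as ck' = fold16(ck + 0xffff - d),
 * which is how the IPv4 header checksum is adjusted below, in the
 * spirit of RFC 1624 incremental updates.
 */
#if 0
#include <stdint.h>

static uint16_t
fold16(uint32_t sum)
{
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	return (sum);
}

/* Apply the helper's delta "d" to a stored (complemented) checksum. */
static uint16_t
cksum_apply_delta(uint16_t ck, uint16_t d)
{
	return (fold16((uint32_t)ck + 0xffff - d));
}
#endif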

static uint16_t
tcp_lro_update_checksum(const struct lro_parser *pa, const struct lro_entry *le,
    uint16_t payload_len, uint16_t delta_sum)
{
	uint32_t csum;
	uint16_t tlen;
	uint16_t temp[5] = {};

	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_tcp_header;

	case LRO_TYPE_IPV6_TCP:
		/* Compute new IPv6 length. */
		tlen = (pa->tcp->th_off << 2) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_tcp_header;

	case LRO_TYPE_IPV4_UDP:
		/* Compute new IPv4 length. */
		tlen = (pa->ip4->ip_hl << 2) + sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_len, htons(tlen), &temp[0]);

		/* Subtract delta from current IPv4 checksum. */
		csum = pa->ip4->ip_sum + 0xffff - temp[0];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		tcp_lro_assign_and_checksum_16(&pa->ip4->ip_sum, csum, &temp[1]);
		goto update_udp_header;

	case LRO_TYPE_IPV6_UDP:
		/* Compute new IPv6 length. */
		tlen = sizeof(*pa->udp) + payload_len;
		tcp_lro_assign_and_checksum_16(&pa->ip6->ip6_plen, htons(tlen), &temp[0]);
		goto update_udp_header;

	default:
		return (0);
	}

update_tcp_header:
	/* Compute current TCP header checksum. */
	temp[2] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Incorporate the latest ACK into the TCP header. */
	pa->tcp->th_ack = le->ack_seq;
	pa->tcp->th_win = le->window;

	/* Incorporate latest timestamp into the TCP header. */
	if (le->timestamp != 0) {
		uint32_t *ts_ptr;

		ts_ptr = (uint32_t *)(pa->tcp + 1);
		ts_ptr[1] = htonl(le->tsval);
		ts_ptr[2] = le->tsecr;
	}

	/* Compute new TCP header checksum. */
	temp[3] = tcp_lro_rx_csum_tcphdr(pa->tcp);

	/* Compute new TCP checksum. */
	csum = pa->tcp->th_sum + 0xffff - delta_sum +
	    0xffff - temp[0] + 0xffff - temp[3] + temp[2];
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Assign new TCP checksum. */
	tcp_lro_assign_and_checksum_16(&pa->tcp->th_sum, csum, &temp[4]);

	/* Compute all modifications affecting the next checksum. */
	csum = temp[0] + temp[1] + 0xffff - temp[2] +
	    temp[3] + temp[4] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);

update_udp_header:
	tlen = sizeof(*pa->udp) + payload_len;
	/* Assign new UDP length and compute checksum delta. */
	tcp_lro_assign_and_checksum_16(&pa->udp->uh_ulen, htons(tlen), &temp[2]);

	/* Check if there is a UDP checksum. */
	if (__predict_false(pa->udp->uh_sum != 0)) {
		/* Compute new UDP checksum. */
		csum = pa->udp->uh_sum + 0xffff - delta_sum +
		    0xffff - temp[0] + 0xffff - temp[2];
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
		/* Assign new UDP checksum. */
		tcp_lro_assign_and_checksum_16(&pa->udp->uh_sum, csum, &temp[3]);
	}

	/* Compute all modifications affecting the next checksum. */
	csum = temp[0] + temp[1] + temp[2] + temp[3] + delta_sum;
	while (csum > 0xffff)
		csum = (csum >> 16) + (csum & 0xffff);

	/* Return delta checksum to next stage, if any. */
	return (csum);
}

static void
tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Check if we need to recompute any checksums. */
	if (le->m_head->m_pkthdr.lro_nsegs > 1) {
		uint16_t csum;

		switch (le->inner.data.lro_type) {
		case LRO_TYPE_IPV4_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			break;
		case LRO_TYPE_IPV6_TCP:
			csum = tcp_lro_update_checksum(&le->inner, le,
			    le->m_head->m_pkthdr.lro_tcp_d_len,
			    le->m_head->m_pkthdr.lro_tcp_d_csum);
			csum = tcp_lro_update_checksum(&le->outer, NULL,
			    le->m_head->m_pkthdr.lro_tcp_d_len +
			    le->inner.total_hdr_len, csum);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->m_head->m_pkthdr.csum_data = 0xffff;
			break;
		case LRO_TYPE_NONE:
			switch (le->outer.data.lro_type) {
			case LRO_TYPE_IPV4_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				break;
			case LRO_TYPE_IPV6_TCP:
				csum = tcp_lro_update_checksum(&le->outer, le,
				    le->m_head->m_pkthdr.lro_tcp_d_len,
				    le->m_head->m_pkthdr.lro_tcp_d_csum);
				le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				le->m_head->m_pkthdr.csum_data = 0xffff;
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Break any chain.  m_nextpkt is not cleared in the singleton
	 * case; in the other cases tcp_push_and_replace() already set
	 * it to NULL.
	 */
	le->m_head->m_nextpkt = NULL;
	lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
}

static void
tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
    struct mbuf *m, struct tcphdr *th)
{
	uint32_t *ts_ptr;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	ts_ptr = (uint32_t *)(th + 1);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);

	/* Check if there is a timestamp option. */
	if (tcp_opt_len == 0 ||
	    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/* We failed to find the timestamp option. */
		le->timestamp = 0;
	} else {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

	tcp_data_len = m->m_pkthdr.lro_tcp_d_len;

	/* Pull out TCP sequence numbers and window size. */
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;

	/* Setup new data pointers. */
	le->m_head = m;
	le->m_tail = m_last(m);
}

static void
tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct lro_parser *pa;
	struct mbuf *msave;

	/*
	 * Push up the stack of the current entry
	 * and replace it with "m".
	 */

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;

	/* Now push out the old entry */
	tcp_flush_out_entry(lc, le);

	/* Re-parse new header, should not fail. */
	pa = tcp_lro_parser(m, &le->outer, &le->inner, false);
	KASSERT(pa != NULL,
	    ("tcp_push_and_replace: LRO parser failed on m=%p\n", m));

	/*
	 * Now to replace the data properly in the entry
	 * we have to reset the TCP header and
	 * other fields.
	 */
	tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);

	/* Restore the next list */
	m->m_nextpkt = msave;
}

static void
tcp_lro_mbuf_append_pkthdr(struct mbuf *m, const struct mbuf *p)
{
	uint32_t csum;

	if (m->m_pkthdr.lro_nsegs == 1) {
		/* Compute relative checksum. */
		csum = p->m_pkthdr.lro_tcp_d_csum;
	} else {
		/* Merge TCP data checksums. */
		csum = (uint32_t)m->m_pkthdr.lro_tcp_d_csum +
		    (uint32_t)p->m_pkthdr.lro_tcp_d_csum;
		while (csum > 0xffff)
			csum = (csum >> 16) + (csum & 0xffff);
	}

	/* Update various counters. */
	m->m_pkthdr.len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_tcp_d_csum = csum;
	m->m_pkthdr.lro_tcp_d_len += p->m_pkthdr.lro_tcp_d_len;
	m->m_pkthdr.lro_nsegs += p->m_pkthdr.lro_nsegs;
}

static void
tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
{
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint32_t tcp_data_len_total;
	uint32_t tcp_data_seg_total;
	uint16_t tcp_data_len;
	uint16_t tcp_opt_len;

	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */

	/*
	 * First we must check the lead (m_head)
	 * and make sure that it is *not*
	 * something that should be sent up
	 * right away (SACK etc).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just one left. */
		return;
	}

	th = tcp_lro_get_th(m);
	tcp_opt_len = (th->th_off << 2);
	tcp_opt_len -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);

	if (tcp_opt_len != 0 && __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
	    *ts_ptr != TCP_LRO_TS_OPTION)) {
		/*
		 * It's not the timestamp.  We can't
		 * use this guy as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(lc, le, m);
		goto again;
	}
	while ((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * condense m into le, first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_tcp_d_len;
		th = tcp_lro_get_th(m);
		ts_ptr = (uint32_t *)(th + 1);
		tcp_opt_len = (th->th_off << 2);
		tcp_opt_len -= sizeof(*th);
		tcp_data_len_total = le->m_head->m_pkthdr.lro_tcp_d_len + tcp_data_len;
		tcp_data_seg_total = le->m_head->m_pkthdr.lro_nsegs + m->m_pkthdr.lro_nsegs;

		if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
		    tcp_data_len_total >= lc->lro_length_lim) {
			/* Flush now if appending will result in overflow. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0 &&
		    __predict_false(tcp_opt_len != TCPOLEN_TSTAMP_APPA ||
		    *ts_ptr != TCP_LRO_TS_OPTION)) {
			/*
			 * Maybe a sack in the new one? We need to
			 * start all over after flushing the
			 * current le. We will go up to the beginning
			 * and flush it (calling the replace again possibly
			 * or just returning).
			 */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_opt_len != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(lc, le, m);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
		    (tcp_data_len == 0 &&
		    le->ack_seq == th->th_ack &&
		    le->window == th->th_win))) {
			/* Out of order packet or duplicate ACK. */
			tcp_push_and_replace(lc, le, m);
			goto again;
		}
		if (tcp_data_len != 0 ||
		    SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
		} else if (th->th_ack == le->ack_seq) {
			le->window = WIN_MAX(le->window, th->th_win);
		}

		if (tcp_data_len == 0) {
			m_freem(m);
			continue;
		}

		/* Merge TCP data checksum and length to head mbuf. */
		tcp_lro_mbuf_append_pkthdr(le->m_head, m);

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}

#ifdef TCPHPTS
static void
tcp_queue_pkts(struct inpcb *inp, struct tcpcb *tp, struct lro_entry *le)
{
	INP_WLOCK_ASSERT(inp);
	if (tp->t_in_pkt == NULL) {
		/* Nothing yet there */
		tp->t_in_pkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	} else {
		/* Already some there */
		tp->t_tail_pkt->m_nextpkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	}
	le->m_head = NULL;
	le->m_last_mbuf = NULL;
}

static struct mbuf *
tcp_lro_get_last_if_ackcmp(struct lro_ctrl *lc, struct lro_entry *le,
    struct inpcb *inp, int32_t *new_m)
{
	struct tcpcb *tp;
	struct mbuf *m;

	tp = intotcpcb(inp);
	if (__predict_false(tp == NULL))
		return (NULL);

	/* Look at the last mbuf if any in queue */
	m = tp->t_tail_pkt;
	if (m != NULL && (m->m_flags & M_ACKCMP) != 0) {
		if (M_TRAILINGSPACE(m) >= sizeof(struct tcp_ackent)) {
			tcp_lro_log(tp, lc, le, NULL, 23, 0, 0, 0, 0);
			*new_m = 0;
			counter_u64_add(tcp_extra_mbuf, 1);
			return (m);
		} else {
			/* Mark we ran out of space */
			inp->inp_flags2 |= INP_MBUF_L_ACKS;
		}
	}
	/* Decide mbuf size. */
	if (inp->inp_flags2 & INP_MBUF_L_ACKS)
		m = m_getcl(M_NOWAIT, MT_DATA, M_ACKCMP | M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);

	if (__predict_false(m == NULL)) {
		counter_u64_add(tcp_would_have_but, 1);
		return (NULL);
	}
	counter_u64_add(tcp_comp_total, 1);
	m->m_flags |= M_ACKCMP;
	*new_m = 1;
	return (m);
}
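
/*
 * Illustrative capacity math (a sketch, not compiled in): compressed
 * ACK entries are appended to the tail mbuf while M_TRAILINGSPACE()
 * permits, so a plain packet-header mbuf holds roughly
 * MHLEN / sizeof(struct tcp_ackent) entries, and once INP_MBUF_L_ACKS
 * is set the cluster allocated above holds roughly
 * MCLBYTES / sizeof(struct tcp_ackent) entries.
 */
#if 0
	int per_hdr_mbuf = MHLEN / sizeof(struct tcp_ackent);
	int per_cluster = MCLBYTES / sizeof(struct tcp_ackent);
#endif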

static struct inpcb *
tcp_lro_lookup(struct ifnet *ifp, struct lro_parser *pa)
{
	struct inpcb *inp;

	NET_EPOCH_ASSERT();

	switch (pa->data.lro_type) {
#ifdef INET6
	case LRO_TYPE_IPV6_TCP:
		inp = in6_pcblookup(&V_tcbinfo,
		    &pa->data.s_addr.v6,
		    pa->data.s_port,
		    &pa->data.d_addr.v6,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
#ifdef INET
	case LRO_TYPE_IPV4_TCP:
		inp = in_pcblookup(&V_tcbinfo,
		    pa->data.s_addr.v4,
		    pa->data.s_port,
		    pa->data.d_addr.v4,
		    pa->data.d_port,
		    INPLOOKUP_WLOCKPCB,
		    ifp);
		break;
#endif
	default:
		inp = NULL;
		break;
	}
	return (inp);
}

static inline bool
tcp_lro_ack_valid(struct mbuf *m, struct tcphdr *th, uint32_t **ppts, bool *other_opts)
{
	/*
	 * This function returns two bits of valuable information.
	 * a) Is what is present capable of being ack-compressed?
	 *    We can ack-compress if there are no options or just
	 *    a timestamp option, and of course the th_flags must
	 *    be correct as well.
	 * b) Are other options, such as SACK, present?  This is
	 *    used to determine if we want to wake up or not.
	 */
	bool ret = true;

	switch (th->th_off << 2) {
	case (sizeof(*th) + TCPOLEN_TSTAMP_APPA):
		*ppts = (uint32_t *)(th + 1);
		/* Check if we have only one timestamp option. */
		if (**ppts == TCP_LRO_TS_OPTION)
			*other_opts = false;
		else {
			*other_opts = true;
			ret = false;
		}
		break;
	case (sizeof(*th)):
		/* No options. */
		*ppts = NULL;
		*other_opts = false;
		break;
	default:
		*ppts = NULL;
		*other_opts = true;
		ret = false;
		break;
	}
	/* For ACKCMP we only accept ACK, PUSH, ECE and CWR. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH | TH_ECE | TH_CWR)) != 0)
		ret = false;
	/* If it has data on it we cannot compress it */
	if (m->m_pkthdr.lro_tcp_d_len)
		ret = false;

	/* ACK flag must be set. */
	if (!(th->th_flags & TH_ACK))
		ret = false;
	return (ret);
}
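
/*
 * Example classification (a sketch, not compiled in): a pure ACK
 * compresses when th_off is 5 (20-byte header, no options) or 8
 * (20-byte header plus the padded 12-byte timestamp option), carries
 * no payload, and sets only ACK, PUSH, ECE or CWR.  A standalone
 * restatement with the standard flag values:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
ack_is_compressible(uint8_t th_flags, int th_off_bytes, int payload_len,
    bool ts_only_option)
{
	if ((th_flags & 0x10) == 0)		/* TH_ACK must be set */
		return (false);
	if ((th_flags & ~(0x10 | 0x08 | 0x40 | 0x80)) != 0)
		return (false);			/* only ACK|PUSH|ECE|CWR */
	if (payload_len != 0)
		return (false);
	return (th_off_bytes == 20 ||
	    (th_off_bytes == 32 && ts_only_option));
}
#endif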

static int
tcp_lro_flush_tcphpts(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct inpcb *inp;
	struct tcpcb *tp;
	struct mbuf **pp, *cmp, *mv_to;
	bool bpf_req, should_wake;

	/* Check if the packet doesn't belong to our network interface. */
	if ((tcplro_stacks_wanting_mbufq == 0) ||
	    (le->outer.data.vlan_id != 0) ||
	    (le->inner.data.lro_type != LRO_TYPE_NONE))
		return (TCP_LRO_CANNOT);

#ifdef INET6
	/*
	 * Be proactive about unspecified IPv6 address in source.  As
	 * we use all-zero to indicate unbounded/unconnected pcb,
	 * unspecified IPv6 address can be used to confuse us.
	 *
	 * Note that packets with unspecified IPv6 destination are
	 * already dropped in ip6_input.
	 */
	if (__predict_false(le->outer.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->outer.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);

	if (__predict_false(le->inner.data.lro_type == LRO_TYPE_IPV6_TCP &&
	    IN6_IS_ADDR_UNSPECIFIED(&le->inner.data.s_addr.v6)))
		return (TCP_LRO_CANNOT);
#endif
	/* Lookup inp, if any. */
	inp = tcp_lro_lookup(lc->ifp,
	    (le->inner.data.lro_type == LRO_TYPE_NONE) ? &le->outer : &le->inner);
	if (inp == NULL)
		return (TCP_LRO_CANNOT);

	counter_u64_add(tcp_inp_lro_locks_taken, 1);

	/* Get TCP control structure. */
	tp = intotcpcb(inp);

	/* Check if the inp is dead, Jim. */
	if (tp == NULL ||
	    (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    (inp->inp_flags2 & INP_FREED)) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}
	if ((inp->inp_irq_cpu_set == 0) && (lc->lro_cpu_is_set == 1)) {
		inp->inp_irq_cpu = lc->lro_last_cpu;
		inp->inp_irq_cpu_set = 1;
	}
	/* Check if the transport doesn't support the needed optimizations. */
	if ((inp->inp_flags2 & (INP_SUPPORTS_MBUFQ | INP_MBUF_ACKCMP)) == 0) {
		INP_WUNLOCK(inp);
		return (TCP_LRO_CANNOT);
	}

	if (inp->inp_flags2 & INP_MBUF_QUEUE_READY)
		should_wake = false;
	else
		should_wake = true;
	/* Check if packets should be tapped to BPF. */
	bpf_req = bpf_peers_present(lc->ifp->if_bpf);

	/* Strip and compress all the incoming packets. */
	cmp = NULL;
	for (pp = &le->m_head; *pp != NULL; ) {
		mv_to = NULL;
		if (do_bpf_strip_and_compress(inp, lc, le, pp,
		    &cmp, &mv_to, &should_wake, bpf_req) == false) {
			/* Advance to next mbuf. */
			pp = &(*pp)->m_nextpkt;
		} else if (mv_to != NULL) {
			/* We are asked to move pp up */
			pp = &mv_to->m_nextpkt;
		}
	}
	/* Update "m_last_mbuf", if any. */
	if (pp == &le->m_head)
		le->m_last_mbuf = *pp;
	else
		le->m_last_mbuf = __containerof(pp, struct mbuf, m_nextpkt);

	/* Check if any data mbufs are left. */
	if (le->m_head != NULL) {
		counter_u64_add(tcp_inp_lro_direct_queue, 1);
		tcp_lro_log(tp, lc, le, NULL, 22, 1,
		    inp->inp_flags2, inp->inp_in_input, 1);
		tcp_queue_pkts(inp, tp, le);
	}
	if (should_wake) {
		/* Wakeup */
		counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
		if ((*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0))
			inp = NULL;
	}
	if (inp != NULL)
		INP_WUNLOCK(inp);
	return (0);	/* Success. */
}
#endif

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Only optimise if there are multiple packets waiting. */
#ifdef TCPHPTS
	int error;

	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_flush_tcphpts(lc, le);
	CURVNET_RESTORE();
	if (error != 0) {
#endif
		tcp_lro_condense(lc, le);
		tcp_flush_out_entry(lc, le);
#ifdef TCPHPTS
	}
#endif
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available.  The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits.  This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
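
/*
 * Illustrative property (a sketch, not compiled in): keys are built by
 * tcp_lro_queue_mbuf() with the arrival index in the low 24 bits, so
 * two packets of the same flow differ only in those bits and the sort
 * preserves their arrival order:
 */
#if 0
	uint64_t a = (0x12345ULL << 24) | 0;	/* flow X, arrived first */
	uint64_t b = (0x12345ULL << 24) | 1;	/* flow X, arrived second */
	/* a < b holds, so within a flow the radix sort is stable. */
#endif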

void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;
	if (lc->lro_cpu_is_set == 0) {
		if (lc->lro_last_cpu == curcpu) {
			lc->lro_cnt_of_same_cpu++;
			/* Have we reached the threshold to declare a cpu? */
			if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
				lc->lro_cpu_is_set = 1;
		} else {
			lc->lro_last_cpu = curcpu;
			lc->lro_cnt_of_same_cpu = 0;
		}
	}
	CURVNET_SET(lc->ifp->if_vnet);

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
	CURVNET_RESTORE();
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

#ifdef TCPHPTS
	tcp_run_hpts();
#endif
	lc->lro_mbuf_count = 0;
}

#ifdef TCPHPTS
static void
build_ack_entry(struct tcp_ackent *ae, struct tcphdr *th, struct mbuf *m,
    uint32_t *ts_ptr, uint16_t iptos)
{
	/*
	 * Given a TCP ACK, summarize it down into the small TCP ACK
	 * entry.
	 */
	ae->timestamp = m->m_pkthdr.rcv_tstmp;
	/* Initialize flags before OR-ing in the timestamp origin. */
	ae->flags = 0;
	if (m->m_flags & M_TSTMP_LRO)
		ae->flags |= TSTMP_LRO;
	else if (m->m_flags & M_TSTMP)
		ae->flags |= TSTMP_HDWR;
	ae->seq = ntohl(th->th_seq);
	ae->ack = ntohl(th->th_ack);
	ae->flags |= th->th_flags;
	if (ts_ptr != NULL) {
		ae->ts_value = ntohl(ts_ptr[1]);
		ae->ts_echo = ntohl(ts_ptr[2]);
		ae->flags |= HAS_TSTMP;
	}
	ae->win = ntohs(th->th_win);
	ae->codepoint = iptos;
}

/*
 * Do BPF tap for either ACK_CMP packets or MBUF QUEUE type packets
 * and strip all, but the IPv4/IPv6 header.
 */
static bool
do_bpf_strip_and_compress(struct inpcb *inp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf **pp, struct mbuf **cmp, struct mbuf **mv_to,
    bool *should_wake, bool bpf_req)
{
	union {
		void *ptr;
		struct ip *ip4;
		struct ip6_hdr *ip6;
	} l3;
	struct mbuf *m;
	struct mbuf *nm;
	struct tcphdr *th;
	struct tcp_ackent *ack_ent;
	uint32_t *ts_ptr;
	int32_t n_mbuf;
	bool other_opts, can_compress;
	uint16_t lro_type;
	uint16_t iptos;
	int tcp_hdr_offset;
	int idx;

	/* Get current mbuf. */
	m = *pp;

	/* Let the BPF see the packet */
	if (__predict_false(bpf_req))
		ETHER_BPF_MTAP(lc->ifp, m);

	tcp_hdr_offset = m->m_pkthdr.lro_tcp_h_off;
	lro_type = le->inner.data.lro_type;
	switch (lro_type) {
	case LRO_TYPE_NONE:
		lro_type = le->outer.data.lro_type;
		switch (lro_type) {
		case LRO_TYPE_IPV4_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip4);
			m->m_pkthdr.lro_etype = ETHERTYPE_IP;
			break;
		case LRO_TYPE_IPV6_TCP:
			tcp_hdr_offset -= sizeof(*le->outer.ip6);
			m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
			break;
		default:
			goto compressed;
		}
		break;
	case LRO_TYPE_IPV4_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip4);
		m->m_pkthdr.lro_etype = ETHERTYPE_IP;
		break;
	case LRO_TYPE_IPV6_TCP:
		tcp_hdr_offset -= sizeof(*le->outer.ip6);
		m->m_pkthdr.lro_etype = ETHERTYPE_IPV6;
		break;
	default:
		goto compressed;
	}

	MPASS(tcp_hdr_offset >= 0);

	m_adj(m, tcp_hdr_offset);
	m->m_flags |= M_LRO_EHDRSTRP;
	m->m_flags &= ~M_ACKCMP;
	m->m_pkthdr.lro_tcp_h_off -= tcp_hdr_offset;

	th = tcp_lro_get_th(m);

	th->th_sum = 0;		/* TCP checksum is valid. */

	/* Check if ACK can be compressed */
	can_compress = tcp_lro_ack_valid(m, th, &ts_ptr, &other_opts);

	/* Now let's look at the should-wake states */
	if ((other_opts == true) &&
	    ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) == 0)) {
		/*
		 * If there are other options (SACK?) and the
		 * TCP endpoint has not expressly told us it does
		 * not care about SACKs, then we should wake up.
		 */
		*should_wake = true;
	}
	/* Is the ACK compressible? */
	if (can_compress == false)
		goto done;
	/* Does the TCP endpoint support ACK compression? */
	if ((inp->inp_flags2 & INP_MBUF_ACKCMP) == 0)
		goto done;

	/* Let's get the TOS/traffic class field */
	l3.ptr = mtod(m, void *);
	switch (lro_type) {
	case LRO_TYPE_IPV4_TCP:
		iptos = l3.ip4->ip_tos;
		break;
	case LRO_TYPE_IPV6_TCP:
		iptos = IPV6_TRAFFIC_CLASS(l3.ip6);
		break;
	default:
		iptos = 0;	/* Keep compiler happy. */
		break;
	}
	/* Now let's get space if we don't have some already */
	if (*cmp == NULL) {
new_one:
		nm = tcp_lro_get_last_if_ackcmp(lc, le, inp, &n_mbuf);
		if (__predict_false(nm == NULL))
			goto done;
		*cmp = nm;
		if (n_mbuf) {
			/*
			 * Link in the new cmp ack to our in-order place,
			 * first set our cmp ack's next to where we are.
			 */
			nm->m_nextpkt = m;
			(*pp) = nm;
			/*
			 * Set it up so mv_to is advanced to our
			 * compressed ack.  This way the caller can
			 * advance pp to the right place.
			 */
			*mv_to = nm;
			/*
			 * Advance it here locally as well.
			 */
			pp = &nm->m_nextpkt;
		}
	} else {
		/* We have one already we are working on */
		nm = *cmp;
		if (M_TRAILINGSPACE(nm) < sizeof(struct tcp_ackent)) {
			/* We ran out of space */
			inp->inp_flags2 |= INP_MBUF_L_ACKS;
			goto new_one;
		}
	}
	MPASS(M_TRAILINGSPACE(nm) >= sizeof(struct tcp_ackent));
	counter_u64_add(tcp_inp_lro_compressed, 1);
	le->compressed++;
	/* We can add in to the one on the tail */
	ack_ent = mtod(nm, struct tcp_ackent *);
	idx = (nm->m_len / sizeof(struct tcp_ackent));
	build_ack_entry(&ack_ent[idx], th, m, ts_ptr, iptos);

	/* Bump the size of both pkt-hdr and len */
	nm->m_len += sizeof(struct tcp_ackent);
	nm->m_pkthdr.len += sizeof(struct tcp_ackent);
compressed:
	/* Advance to next mbuf before freeing. */
	*pp = m->m_nextpkt;
	m->m_nextpkt = NULL;
	m_freem(m);
	return (true);
done:
	counter_u64_add(tcp_uncomp_total, 1);
	le->uncompressed++;
	return (false);
}
#endif

static struct lro_head *
tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
{
	u_long hash;

	if (M_HASHTYPE_ISHASH(m)) {
		hash = m->m_pkthdr.flowid;
	} else {
		for (unsigned i = hash = 0; i != LRO_RAW_ADDRESS_MAX; i++)
			hash += parser->data.raw[i];
	}
	return (&lc->lro_hash[hash % lc->lro_hashsz]);
}
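
/*
 * Worked example (a sketch, not compiled in): with lro_hashsz == 8 and
 * no hardware flowid, a flow whose raw address words sum to 0x2b maps
 * to bucket 0x2b % 8 == 3 on every packet, so all segments of that
 * flow land on one bucket list and lookup walks only that list.
 */
#if 0
	/* hash = 0x2b; lro_hashsz = 8; bucket index = 0x2b % 8 == 3 */
#endif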

static int
tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
{
	struct lro_parser pi;	/* inner address data */
	struct lro_parser po;	/* outer address data */
	struct lro_parser *pa;	/* current parser for TCP stream */
	struct lro_entry *le;
	struct lro_head *bucket;
	struct tcphdr *th;
	int tcp_data_len;
	int tcp_opt_len;
	int error;
	uint16_t tcp_data_sum;

#ifdef INET
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ipforwarding != 0))
		return (TCP_LRO_CANNOT);
#endif
#ifdef INET6
	/* Quickly decide if packet cannot be LRO'ed */
	if (__predict_false(V_ip6_forwarding != 0))
		return (TCP_LRO_CANNOT);
#endif

	/* We expect a contiguous header [eh, ip, tcp]. */
	pa = tcp_lro_parser(m, &po, &pi, true);
	if (__predict_false(pa == NULL))
		return (TCP_LRO_NOT_SUPPORTED);

	/* We don't expect any padding. */
	error = tcp_lro_trim_mbuf_chain(m, pa);
	if (__predict_false(error != 0))
		return (error);

#ifdef INET
	switch (pa->data.lro_type) {
	case LRO_TYPE_IPV4_TCP:
		error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
		if (__predict_false(error != 0))
			return (error);
		break;
	default:
		break;
	}
#endif
	/* If there is no hardware or arrival timestamp on the packet, add one. */
	if ((m->m_flags & (M_TSTMP_LRO | M_TSTMP)) == 0) {
		m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
		m->m_flags |= M_TSTMP_LRO;
	}

	/* Get pointer to TCP header. */
	th = pa->tcp;

	/* Don't process SYN packets. */
	if (__predict_false(th->th_flags & TH_SYN))
		return (TCP_LRO_CANNOT);

	/* Get total TCP header length and compute payload length. */
	tcp_opt_len = (th->th_off << 2);
	tcp_data_len = m->m_pkthdr.len - ((uint8_t *)th -
	    (uint8_t *)m->m_data) - tcp_opt_len;
	tcp_opt_len -= sizeof(*th);

	/* Don't process invalid TCP headers. */
	if (__predict_false(tcp_opt_len < 0 || tcp_data_len < 0))
		return (TCP_LRO_CANNOT);

	/* Compute TCP data only checksum. */
	if (tcp_data_len == 0)
		tcp_data_sum = 0;	/* no data, no checksum */
	else if (__predict_false(csum != 0))
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~csum);
	else
		tcp_data_sum = tcp_lro_rx_csum_data(pa, ~th->th_sum);

	/* Save TCP info in mbuf. */
	m->m_nextpkt = NULL;
	m->m_pkthdr.rcvif = lc->ifp;
	m->m_pkthdr.lro_tcp_d_csum = tcp_data_sum;
	m->m_pkthdr.lro_tcp_d_len = tcp_data_len;
	m->m_pkthdr.lro_tcp_h_off = ((uint8_t *)th - (uint8_t *)m->m_data);
	m->m_pkthdr.lro_nsegs = 1;

	/* Get hash bucket. */
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else {
		bucket = tcp_lro_rx_get_bucket(lc, m, pa);
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		/* Compare addresses and ports. */
		if (lro_address_compare(&po.data, &le->outer.data) == false ||
		    lro_address_compare(&pi.data, &le->inner.data) == false)
			continue;

		/* Check if no data and old ACK. */
		if (tcp_data_len == 0 &&
		    SEQ_LT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			m_freem(m);
			return (0);
		}

		/* Mark "m" in the last spot. */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to "m". */
		le->m_last_mbuf = m;
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);

	/* Make sure the headers are set. */
	le->inner = pi;
	le->outer = po;

	/* Store time this entry was allocated. */
	le->alloc_time = lc->lro_last_queue_time;

	tcp_set_entry_to_mbuf(lc, le, m, th);

	/* Now set the tail to "m". */
	le->m_last_mbuf = m;

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	int error;

	/* get current time */
	binuptime(&lc->lro_last_queue_time);

	CURVNET_SET(lc->ifp->if_vnet);
	error = tcp_lro_rx_common(lc, m, csum, true);
	CURVNET_RESTORE();

	return (error);
}
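
/*
 * Illustrative key layout (a sketch, not compiled in) for
 * tcp_lro_queue_mbuf() below: the 64-bit sort key packs the hash type
 * into bits 63-56, the flowid into bits 55-24 and the queue index into
 * bits 23-0, so tcp_lro_sort() groups packets by flow and keeps
 * arrival order within each flow.
 */
#if 0
#include <stdint.h>

static uint64_t
make_sort_key(uint8_t hash_type, uint32_t flowid, uint32_t index)
{
	return (((uint64_t)hash_type << 56) |
	    ((uint64_t)flowid << 24) | (index & 0xffffffULL));
}
#endif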

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input)(lc->ifp, mb);
		return;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}

/* end */