/*-
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>

#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_UPDATE_CSUM	1
#ifndef	TCP_LRO_UPDATE_CSUM
#define	TCP_LRO_INVALID_CSUM	0x0000
#endif

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, int use_hash);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}
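/*
 * Initialize an LRO context: the hash table is sized for the larger of
 * the two element counts, and a single allocation holds the mbuf sort
 * array followed by the LRO entries, which are then strung onto the
 * free list. Passing lro_mbufs == 0 leaves the tcp_lro_queue_mbuf()
 * interface unusable (queued packets would simply be dropped).
 */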
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_cnt = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		/* Free the hash table before wiping "lc", lest it leak. */
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		m_freem(le->m_head);
	}

	/* free hash table */
	if (lc->lro_hash != NULL) {
		free(lc->lro_hash, M_LRO);
		lc->lro_hash = NULL;
	}
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

#ifdef TCP_LRO_UPDATE_CSUM
static uint16_t
tcp_lro_csum_th(struct tcphdr *th)
{
	uint32_t ch;
	uint16_t *p, l;

	ch = th->th_sum = 0x0000;
	l = th->th_off;
	p = (uint16_t *)th;
	while (l > 0) {
		ch += *p;
		p++;
		ch += *p;
		p++;
		l--;
	}
	while (ch > 0xffff)
		ch = (ch >> 16) + (ch & 0xffff);

	return (ch & 0xffff);
}

static uint16_t
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)
{
	uint32_t c;
	uint16_t cs;

	c = csum;

	/* Remove length from checksum. */
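	/*
	 * Note: the arithmetic below relies on the one's complement
	 * properties of the Internet checksum (RFC 1071): adding the
	 * complement of a value (~x) is equivalent to subtracting x,
	 * provided the carries are folded back into the low 16 bits,
	 * which the while-loops below do.
	 */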
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip6->ip6_plen;
		else {
			uint32_t cx;

			cx = ntohs(ip6->ip6_plen);
			cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
		}
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip4;

		ip4 = (struct ip *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip4->ip_len;
		else {
			cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
			    IPPROTO_TCP);
			cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
			    htons(cs));
		}
		break;
	}
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
	}

	cs = ~cs;
	c += cs;

	/* Remove TCP header csum. */
	cs = ~tcp_lro_csum_th(th);
	c += cs;
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c & 0xffff);
}
#endif

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	struct timeval tv;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	getmicrotime(&tv);
	timevalsub(&tv, timeout);
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (timevalcmp(&tv, &le->mtime, >=)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{

	if (le->append_cnt > 0) {
		struct tcphdr *th;
		uint16_t p_len;

		p_len = htons(le->p_len);
		switch (le->eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			struct ip6_hdr *ip6;

			ip6 = le->le_ip6;
			ip6->ip6_plen = p_len;
			th = (struct tcphdr *)(ip6 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			struct ip *ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			uint32_t cl;
			uint16_t c;
#endif

			ip4 = le->le_ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			/* Fix IP header checksum for new length. */
			c = ~ip4->ip_sum;
			cl = c;
			c = ~ip4->ip_len;
			cl += c + p_len;
			while (cl > 0xffff)
				cl = (cl >> 16) + (cl & 0xffff);
			c = cl;
			ip4->ip_sum = ~c;
#else
			ip4->ip_sum = TCP_LRO_INVALID_CSUM;
#endif
			ip4->ip_len = p_len;
			th = (struct tcphdr *)(ip4 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->p_len += ETHER_HDR_LEN;
			break;
		}
#endif
		default:
			th = NULL;	/* Keep compiler happy. */
		}
		le->m_head->m_pkthdr.csum_data = 0xffff;
		le->m_head->m_pkthdr.len = le->p_len;

		/* Incorporate the latest ACK into the TCP header. */
		th->th_ack = le->ack_seq;
		th->th_win = le->window;
		/* Incorporate latest timestamp into the TCP header. */
		if (le->timestamp != 0) {
			uint32_t *ts_ptr;

			ts_ptr = (uint32_t *)(th + 1);
			ts_ptr[1] = htonl(le->tsval);
			ts_ptr[2] = le->tsecr;
		}
#ifdef TCP_LRO_UPDATE_CSUM
		/* Update the TCP header checksum. */
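		/*
		 * le->ulp_csum holds the saved sum of the addresses, next
		 * header and accumulated TCP payload (see the comment in
		 * tcp_lro_rx2()); add back the final length and the
		 * rewritten TCP header, fold the carries and complement
		 * the result to produce a valid checksum.
		 */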
		le->ulp_csum += p_len;
		le->ulp_csum += tcp_lro_csum_th(th);
		while (le->ulp_csum > 0xffff)
			le->ulp_csum = (le->ulp_csum >> 16) +
			    (le->ulp_csum & 0xffff);
		th->th_sum = (le->ulp_csum & 0xffff);
		th->th_sum = ~th->th_sum;
#else
		th->th_sum = TCP_LRO_INVALID_CSUM;
#endif
	}

	le->m_head->m_pkthdr.lro_nsegs = le->append_cnt + 1;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
	lc->lro_queued += le->append_cnt + 1;
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}

void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
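		/*
		 * Note: "csum" is passed as zero, which makes
		 * tcp_lro_rx2() fall back to the checksum in the TCP
		 * header, and "use_hash" as zero, so all lookups go
		 * through hash bucket zero; the sort above already
		 * grouped packets by stream and active entries are
		 * flushed whenever the stream sequence changes.
		 */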
		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	lc->lro_mbuf_count = 0;
}

#ifdef INET6
static int
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
    struct tcphdr **th)
{

	/* XXX-BZ we should check the flow-label. */

	/* XXX-BZ We do not yet support ext. hdrs. */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Find the TCP header. */
	*th = (struct tcphdr *)(ip6 + 1);

	return (0);
}
#endif

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
    struct tcphdr **th)
{
	int csum_flags;
	uint16_t csum;

	if (ip4->ip_p != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Ensure there are no options. */
	if ((ip4->ip_hl << 2) != sizeof (*ip4))
		return (TCP_LRO_CANNOT);

	/* .. and the packet is not fragmented. */
	if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
		return (TCP_LRO_CANNOT);

	/* Legacy IP has a header checksum that needs to be correct. */
	csum_flags = m->m_pkthdr.csum_flags;
	if (csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false((csum) != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}

	/* Find the TCP header (we assured there are no IP options). */
	*th = (struct tcphdr *)(ip4 + 1);

	return (0);
}
#endif

static int
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
{
	struct lro_entry *le;
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	struct tcphdr *th;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	uint32_t *ts_ptr;
	tcp_seq seq;
	int error, ip_len, l;
	uint16_t eh_type, tcp_data_len;
	struct lro_head *bucket;
	int force_flush = 0;

	/* We expect a contiguous header [eh, ip, tcp]. */

	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ip6_forwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		if (error != 0)
			return (error);
		tcp_data_len = ntohs(ip6->ip6_plen);
		ip_len = sizeof(*ip6) + tcp_data_len;
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ipforwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		if (error != 0)
			return (error);
		ip_len = ntohs(ip4->ip_len);
		tcp_data_len = ip_len - sizeof(*ip4);
		break;
	}
#endif
	/* XXX-BZ what happens in case of VLAN(s)? */
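	/*
	 * Note: a frame carrying an unstripped 802.1Q tag presents
	 * ETHERTYPE_VLAN here and falls into the default case below;
	 * this code assumes the driver strips VLAN tags before
	 * handing frames to LRO.
	 */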
	default:
		return (TCP_LRO_NOT_SUPPORTED);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet, then we must
	 * trim the extra bytes off.
	 */
	l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
	if (l != 0) {
		if (l < 0)
			/* Truncated packet. */
			return (TCP_LRO_CANNOT);

		m_adj(m, -l);
	}

	/*
	 * Check TCP header constraints.
	 */
	/* Ensure no bits set besides ACK or PSH. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		if (th->th_flags & TH_SYN)
			return (TCP_LRO_CANNOT);
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		force_flush = 1;
	}

	/* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
	/* XXX-BZ Ideally we'd flush on PUSH? */

	/*
	 * Check for timestamps.
	 * Since the only option we handle is the timestamp, we only have to
	 * handle the simple case of aligned timestamps.
	 */
	l = (th->th_off << 2);
	tcp_data_len -= l;
	l -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment.
		 */
		force_flush = 1;
	}

	/* If the driver did not pass in the checksum, set it now. */
	if (csum == 0x0000)
		csum = th->th_sum;

	seq = ntohl(th->th_seq);

	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else if (M_HASHTYPE_ISHASH(m)) {
		bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
	} else {
		uint32_t hash;

		switch (eh_type) {
#ifdef INET
		case ETHERTYPE_IP:
			hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			hash = ip6->ip6_src.s6_addr32[0] +
			    ip6->ip6_dst.s6_addr32[0];
			hash += ip6->ip6_src.s6_addr32[1] +
			    ip6->ip6_dst.s6_addr32[1];
			hash += ip6->ip6_src.s6_addr32[2] +
			    ip6->ip6_dst.s6_addr32[2];
			hash += ip6->ip6_src.s6_addr32[3] +
			    ip6->ip6_dst.s6_addr32[3];
			break;
#endif
		default:
			hash = 0;
			break;
		}
		hash += th->th_sport + th->th_dport;
		bucket = &lc->lro_hash[hash % lc->lro_hashsz];
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		if (le->eh_type != eh_type)
			continue;
		if (le->source_port != th->th_sport ||
		    le->dest_port != th->th_dport)
			continue;
		switch (eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (bcmp(&le->source_ip6, &ip6->ip6_src,
			    sizeof(struct in6_addr)) != 0 ||
			    bcmp(&le->dest_ip6, &ip6->ip6_dst,
			    sizeof(struct in6_addr)) != 0)
				continue;
			break;
#endif
#ifdef INET
		case ETHERTYPE_IP:
			if (le->source_ip4 != ip4->ip_src.s_addr ||
			    le->dest_ip4 != ip4->ip_dst.s_addr)
				continue;
			break;
#endif
		}

		if (force_flush) {
			/* Timestamps mismatch; this is a FIN, etc. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		/* Flush now if appending will result in overflow. */
		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			break;
		}

		/* Try to append the new segment. */
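		/*
		 * A segment is eligible for appending only if it is the
		 * next in-sequence segment; a zero-length segment that
		 * repeats the current ack_seq is a duplicate ACK and
		 * forces a flush instead.
		 */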
		if (__predict_false(seq != le->next_seq ||
		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
			/* Out of order packet or duplicate ACK. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		if (l != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			/* XXX-BZ flip and use TSTMP_GEQ macro for this? */
			if (__predict_false(le->tsval > tsval ||
			    *(ts_ptr + 2) == 0))
				return (TCP_LRO_CANNOT);
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}

		le->next_seq += tcp_data_len;
		le->ack_seq = th->th_ack;
		le->window = th->th_win;
		le->append_cnt++;

#ifdef TCP_LRO_UPDATE_CSUM
		le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
		    tcp_data_len, ~csum);
#endif

		if (tcp_data_len == 0) {
			m_freem(m);
			/*
			 * Flush this LRO entry, if this ACK should not
			 * be further delayed.
			 */
			if (le->append_cnt >= lc->lro_ackcnt_lim) {
				tcp_lro_active_remove(le);
				tcp_lro_flush(lc, le);
			}
			return (0);
		}

		le->p_len += tcp_data_len;

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload. Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);

		le->m_tail->m_next = m;
		le->m_tail = m_last(m);

		/*
		 * If a possible next full length packet would cause an
		 * overflow, pro-actively flush now.
		 */
		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		} else
			getmicrotime(&le->mtime);

		return (0);
	}

	if (force_flush) {
		/*
		 * Nothing to flush, but this segment cannot be further
		 * aggregated/delayed.
		 */
		return (TCP_LRO_CANNOT);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);
	getmicrotime(&le->mtime);

	/* Start filling in details. */
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;

	le->next_seq = seq + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	if (l != 0) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

#ifdef TCP_LRO_UPDATE_CSUM
	/*
	 * Do not touch the csum of the first packet. However save the
	 * "adjusted" checksum of just the source and destination addresses,
	 * the next header and the TCP payload. The length and TCP header
	 * parts may change, so we remove those from the saved checksum and
	 * re-add with final values on tcp_lro_flush() if needed.
	 */
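	/*
	 * Entries are zeroed by tcp_lro_flush() before being returned
	 * to the free list (and the initial allocation is M_ZERO), so
	 * a freshly allocated entry must start with a zero ULP
	 * checksum; assert that here.
	 */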
	KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
	    __func__, le, le->ulp_csum));

	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	th->th_sum = csum;	/* Restore checksum on first packet. */
#endif

	le->m_head = m;
	le->m_tail = m_last(m);

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{

	return tcp_lro_rx2(lc, m, csum, 1);
}

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		lc->lro_flushed++;
		lc->lro_queued++;

		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* check if array is full */
	if (__predict_false(lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count++].mb = mb;
}

/* end */
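/*
 * Usage sketch (illustrative only; "sc" and drv_rx_dequeue() are
 * hypothetical driver-side names, not part of this API): a driver
 * using the mbuf queueing interface above would do roughly:
 *
 *	if (tcp_lro_init_args(&sc->lro, ifp, 8, 1024) != 0)
 *		return (ENOMEM);		// allocation failed
 *	...
 *	while ((m = drv_rx_dequeue(sc)) != NULL)
 *		tcp_lro_queue_mbuf(&sc->lro, m);// batch for sorting
 *	tcp_lro_flush_all(&sc->lro);		// end of RX batch
 *	...
 *	tcp_lro_free(&sc->lro);			// on detach
 *
 * Drivers not using the queue call tcp_lro_rx() per packet instead and
 * flush aged entries periodically via tcp_lro_flush_inactive().
 */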