/*-
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>

#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_UPDATE_CSUM	1
#ifndef	TCP_LRO_UPDATE_CSUM
#define	TCP_LRO_INVALID_CSUM	0x0000
#endif

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, int use_hash);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}
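
/*
 * A sketch of typical driver usage of this API; the softc field and
 * hw_dequeue() below are hypothetical names, and error handling is
 * abbreviated:
 *
 *	if (tcp_lro_init(&sc->lro) != 0)
 *		return (ENOMEM);
 *	sc->lro.ifp = ifp;
 *	...
 *	while ((m = hw_dequeue(sc)) != NULL) {
 *		if (tcp_lro_rx(&sc->lro, m, 0) != 0)
 *			(*ifp->if_input)(ifp, m);	// not consumed by LRO
 *	}
 *	tcp_lro_flush_all(&sc->lro);	// at the end of a receive interrupt
 *	...
 *	tcp_lro_free(&sc->lro);		// on detach
 */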
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		m_freem(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

#ifdef TCP_LRO_UPDATE_CSUM
static uint16_t
tcp_lro_csum_th(struct tcphdr *th)
{
	uint32_t ch;
	uint16_t *p, l;

	ch = th->th_sum = 0x0000;
	l = th->th_off;
	p = (uint16_t *)th;
	while (l > 0) {
		ch += *p;
		p++;
		ch += *p;
		p++;
		l--;
	}
	while (ch > 0xffff)
		ch = (ch >> 16) + (ch & 0xffff);

	return (ch & 0xffff);
}
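
/*
 * Worked example of the fold above: a 32-bit intermediate sum of
 * 0x2fffe folds to (0x2 + 0xfffe) = 0x10000 and then to
 * (0x1 + 0x0000) = 0x0001, which is why the fold loops until the
 * value fits in 16 bits instead of folding just once.
 */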
static uint16_t
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)
{
	uint32_t c;
	uint16_t cs;

	c = csum;

	/* Remove length from checksum. */
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip6->ip6_plen;
		else {
			uint32_t cx;

			cx = ntohs(ip6->ip6_plen);
			cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
		}
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip4;

		ip4 = (struct ip *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip4->ip_len;
		else {
			cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
			    IPPROTO_TCP);
			cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
			    htons(cs));
		}
		break;
	}
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
	}

	cs = ~cs;
	c += cs;

	/* Remove TCP header csum. */
	cs = ~tcp_lro_csum_th(th);
	c += cs;
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c & 0xffff);
}
#endif

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	struct timeval tv;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	getmicrotime(&tv);
	timevalsub(&tv, timeout);
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (timevalcmp(&tv, &le->mtime, >=)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
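
/*
 * A sketch of how a driver might bound aggregation latency by calling
 * tcp_lro_flush_inactive() from a periodic callout; the 20ms value is
 * only an example, not a recommendation from this file:
 *
 *	static const struct timeval lro_tv = { 0, 20000 };
 *
 *	tcp_lro_flush_inactive(&sc->lro, &lro_tv);
 */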
void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{

	if (le->append_cnt > 0) {
		struct tcphdr *th;
		uint16_t p_len;

		p_len = htons(le->p_len);
		switch (le->eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			struct ip6_hdr *ip6;

			ip6 = le->le_ip6;
			ip6->ip6_plen = p_len;
			th = (struct tcphdr *)(ip6 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			struct ip *ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			uint32_t cl;
			uint16_t c;
#endif

			ip4 = le->le_ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			/* Fix IP header checksum for new length. */
			c = ~ip4->ip_sum;
			cl = c;
			c = ~ip4->ip_len;
			cl += c + p_len;
			while (cl > 0xffff)
				cl = (cl >> 16) + (cl & 0xffff);
			c = cl;
			ip4->ip_sum = ~c;
#else
			ip4->ip_sum = TCP_LRO_INVALID_CSUM;
#endif
			ip4->ip_len = p_len;
			th = (struct tcphdr *)(ip4 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->p_len += ETHER_HDR_LEN;
			break;
		}
#endif
		default:
			th = NULL;	/* Keep compiler happy. */
		}
		le->m_head->m_pkthdr.csum_data = 0xffff;
		le->m_head->m_pkthdr.len = le->p_len;

		/* Incorporate the latest ACK into the TCP header. */
		th->th_ack = le->ack_seq;
		th->th_win = le->window;
		/* Incorporate latest timestamp into the TCP header. */
		if (le->timestamp != 0) {
			uint32_t *ts_ptr;

			ts_ptr = (uint32_t *)(th + 1);
			ts_ptr[1] = htonl(le->tsval);
			ts_ptr[2] = le->tsecr;
		}
#ifdef TCP_LRO_UPDATE_CSUM
		/* Update the TCP header checksum. */
		le->ulp_csum += p_len;
		le->ulp_csum += tcp_lro_csum_th(th);
		while (le->ulp_csum > 0xffff)
			le->ulp_csum = (le->ulp_csum >> 16) +
			    (le->ulp_csum & 0xffff);
		th->th_sum = (le->ulp_csum & 0xffff);
		th->th_sum = ~th->th_sum;
#else
		th->th_sum = TCP_LRO_INVALID_CSUM;
#endif
	}

	le->m_head->m_pkthdr.lro_nsegs = le->append_cnt + 1;
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
	lc->lro_queued += le->append_cnt + 1;
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available. The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits. This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
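
/*
 * The 64-bit keys sorted above are built by tcp_lro_queue_mbuf() as:
 *
 *	bits 63..56: M_HASHTYPE_GET(mb)
 *	bits 55..24: mb->m_pkthdr.flowid
 *	bits 23..0:  enqueue index
 *
 * Sorting on this key groups mbufs of the same flow together, while
 * the low 24 index bits keep the order of packets within a flow
 * stable; tcp_lro_flush_all() masks the index bits away when it
 * detects stream boundaries.
 */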
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	lc->lro_mbuf_count = 0;
}

#ifdef INET6
static int
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
    struct tcphdr **th)
{

	/* XXX-BZ we should check the flow-label. */

	/* XXX-BZ We do not yet support ext. hdrs. */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Find the TCP header. */
	*th = (struct tcphdr *)(ip6 + 1);

	return (0);
}
#endif

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
    struct tcphdr **th)
{
	int csum_flags;
	uint16_t csum;

	if (ip4->ip_p != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Ensure there are no options. */
	if ((ip4->ip_hl << 2) != sizeof (*ip4))
		return (TCP_LRO_CANNOT);

	/* .. and the packet is not fragmented. */
	if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
		return (TCP_LRO_CANNOT);

	/* Legacy IP has a header checksum that needs to be correct. */
	csum_flags = m->m_pkthdr.csum_flags;
	if (csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false((csum) != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}

	/* Find the TCP header (we assured there are no IP options). */
	*th = (struct tcphdr *)(ip4 + 1);

	return (0);
}
#endif
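
/*
 * tcp_lro_rx2() is the core aggregation routine. In outline it parses
 * the Ethernet/IP/TCP headers, rejects anything that cannot be merged
 * (IP options, fragments, TCP flags other than ACK/PSH, TCP options
 * other than a well-formed timestamp), looks the flow up in the hash
 * table and either appends the segment to an existing entry or starts
 * a new one. A non-zero return value means the mbuf was not consumed
 * and the caller must input or free it.
 */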
static int
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
{
	struct lro_entry *le;
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	struct tcphdr *th;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	uint32_t *ts_ptr;
	tcp_seq seq;
	int error, ip_len, l;
	uint16_t eh_type, tcp_data_len;
	struct lro_head *bucket;
	int force_flush = 0;

	/* We expect a contiguous header [eh, ip, tcp]. */

	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ip6_forwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		if (error != 0)
			return (error);
		tcp_data_len = ntohs(ip6->ip6_plen);
		ip_len = sizeof(*ip6) + tcp_data_len;
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ipforwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		if (error != 0)
			return (error);
		ip_len = ntohs(ip4->ip_len);
		tcp_data_len = ip_len - sizeof(*ip4);
		break;
	}
#endif
	/* XXX-BZ what happens in case of VLAN(s)? */
	default:
		return (TCP_LRO_NOT_SUPPORTED);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet, then we must
	 * trim the extra bytes off.
	 */
	l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
	if (l != 0) {
		if (l < 0)
			/* Truncated packet. */
			return (TCP_LRO_CANNOT);

		m_adj(m, -l);
	}

	/*
	 * Check TCP header constraints.
	 */
	/* Ensure no bits set besides ACK or PSH. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		if (th->th_flags & TH_SYN)
			return (TCP_LRO_CANNOT);
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		force_flush = 1;
	}

	/* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
	/* XXX-BZ Ideally we'd flush on PUSH? */

	/*
	 * Check for timestamps.
	 * Since the only option we handle is the timestamp, we only have to
	 * handle the simple case of aligned timestamps.
	 */
	l = (th->th_off << 2);
	tcp_data_len -= l;
	l -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment.
		 */
		force_flush = 1;
	}

	/* If the driver did not pass in the checksum, set it now. */
	if (csum == 0x0000)
		csum = th->th_sum;

	seq = ntohl(th->th_seq);

	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else if (M_HASHTYPE_ISHASH(m)) {
		bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
	} else {
		uint32_t hash;

		switch (eh_type) {
#ifdef INET
		case ETHERTYPE_IP:
			hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			hash = ip6->ip6_src.s6_addr32[0] +
			    ip6->ip6_dst.s6_addr32[0];
			hash += ip6->ip6_src.s6_addr32[1] +
			    ip6->ip6_dst.s6_addr32[1];
			hash += ip6->ip6_src.s6_addr32[2] +
			    ip6->ip6_dst.s6_addr32[2];
			hash += ip6->ip6_src.s6_addr32[3] +
			    ip6->ip6_dst.s6_addr32[3];
			break;
#endif
		default:
			hash = 0;
			break;
		}
		hash += th->th_sport + th->th_dport;
		bucket = &lc->lro_hash[hash % lc->lro_hashsz];
	}
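
	/*
	 * Note that the additive hash above only selects a bucket;
	 * exact matching on addresses and ports is still done by the
	 * loop below, so hash collisions merely cost extra
	 * comparisons.
	 */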
	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		if (le->eh_type != eh_type)
			continue;
		if (le->source_port != th->th_sport ||
		    le->dest_port != th->th_dport)
			continue;
		switch (eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (bcmp(&le->source_ip6, &ip6->ip6_src,
			    sizeof(struct in6_addr)) != 0 ||
			    bcmp(&le->dest_ip6, &ip6->ip6_dst,
			    sizeof(struct in6_addr)) != 0)
				continue;
			break;
#endif
#ifdef INET
		case ETHERTYPE_IP:
			if (le->source_ip4 != ip4->ip_src.s_addr ||
			    le->dest_ip4 != ip4->ip_dst.s_addr)
				continue;
			break;
#endif
		}

		if (force_flush) {
			/* Timestamps mismatch; this is a FIN, etc */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		/* Flush now if appending will result in overflow. */
		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			break;
		}

		/* Try to append the new segment. */
		if (__predict_false(seq != le->next_seq ||
		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
			/* Out of order packet or duplicate ACK. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		if (l != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			/* XXX-BZ flip and use TSTMP_GEQ macro for this? */
			if (__predict_false(le->tsval > tsval ||
			    *(ts_ptr + 2) == 0))
				return (TCP_LRO_CANNOT);
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}

		le->next_seq += tcp_data_len;
		le->ack_seq = th->th_ack;
		le->window = th->th_win;
		le->append_cnt++;

#ifdef TCP_LRO_UPDATE_CSUM
		le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
		    tcp_data_len, ~csum);
#endif

		if (tcp_data_len == 0) {
			m_freem(m);
			/*
			 * Flush this LRO entry, if this ACK should not
			 * be further delayed.
			 */
			if (le->append_cnt >= lc->lro_ackcnt_lim) {
				tcp_lro_active_remove(le);
				tcp_lro_flush(lc, le);
			}
			return (0);
		}

		le->p_len += tcp_data_len;

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload. Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);

		le->m_tail->m_next = m;
		le->m_tail = m_last(m);

		/*
		 * If a possible next full length packet would cause an
		 * overflow, pro-actively flush now.
		 */
		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		} else
			getmicrotime(&le->mtime);

		return (0);
	}

	if (force_flush) {
		/*
		 * Nothing to flush, but this segment can not be further
		 * aggregated/delayed.
		 */
		return (TCP_LRO_CANNOT);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);
	getmicrotime(&le->mtime);

	/* Start filling in details. */
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;

	le->next_seq = seq + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	if (l != 0) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

#ifdef TCP_LRO_UPDATE_CSUM
	/*
	 * Do not touch the csum of the first packet. However save the
	 * "adjusted" checksum of just the source and destination addresses,
	 * the next header and the TCP payload. The length and TCP header
	 * parts may change, so we remove those from the saved checksum and
	 * re-add with final values on tcp_lro_flush() if needed.
	 */
	KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
	    __func__, le, le->ulp_csum));

	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	th->th_sum = csum;	/* Restore checksum on first packet. */
#endif
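
	/*
	 * Informally: tcp_lro_flush() later re-adds the final payload
	 * length and the final TCP header sum to le->ulp_csum, folds
	 * and complements the result, yielding a checksum that covers
	 * the merged packet as if it had arrived in one piece.
	 */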
	le->m_head = m;
	le->m_tail = m_last(m);

	return (0);
}

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{

	return tcp_lro_rx2(lc, m, csum, 1);
}

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		lc->lro_flushed++;
		lc->lro_queued++;

		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}

	/* check if array is full */
	if (__predict_false(lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count++].mb = mb;
}

/* end */