/*-
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>

static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_UPDATE_CSUM	1
#ifndef	TCP_LRO_UPDATE_CSUM
#define	TCP_LRO_INVALID_CSUM	0x0000
#endif

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, int use_hash);

static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{

	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{

	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}

int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, TCP_LRO_ENTRIES, 0));
}

int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_cnt = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		/* don't leak the hash table allocated above */
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}
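
/*
 * Illustrative driver usage (a sketch, not part of this file; "rxq" is
 * a hypothetical per-queue softc field):
 *
 *	attach:   tcp_lro_init_args(&rxq->lro, ifp, TCP_LRO_ENTRIES, 0);
 *
 *	RX loop:  if (tcp_lro_rx(&rxq->lro, m, 0) != 0)
 *			(*ifp->if_input)(ifp, m);
 *
 *	batch end: tcp_lro_flush_all(&rxq->lro);
 *
 *	detach:   tcp_lro_free(&rxq->lro);
 *
 * Drivers preferring the deferred, sorted path pass a non-zero
 * "lro_mbufs" above and call tcp_lro_queue_mbuf() per packet instead
 * of tcp_lro_rx(), still followed by tcp_lro_flush_all().
 */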

void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		m_freem(le->m_head);
	}

	/* free hash table */
	if (lc->lro_hash != NULL) {
		free(lc->lro_hash, M_LRO);
		lc->lro_hash = NULL;
	}
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}

#ifdef TCP_LRO_UPDATE_CSUM
static uint16_t
tcp_lro_csum_th(struct tcphdr *th)
{
	uint32_t ch;
	uint16_t *p, l;

	ch = th->th_sum = 0x0000;
	l = th->th_off;
	p = (uint16_t *)th;
	while (l > 0) {
		ch += *p;
		p++;
		ch += *p;
		p++;
		l--;
	}
	while (ch > 0xffff)
		ch = (ch >> 16) + (ch & 0xffff);

	return (ch & 0xffff);
}
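
/*
 * A short worked example of the folding loop above: a header whose
 * 16-bit words sum to 0x2fffe in 32 bits reduces as
 * 0x2fffe -> 0x0002 + 0xfffe = 0x10000 -> 0x0001 + 0x0000 = 0x0001,
 * i.e. the usual end-around-carry reduction of a ones'-complement sum.
 */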

static uint16_t
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)
{
	uint32_t c;
	uint16_t cs;

	c = csum;

	/* Remove length from checksum. */
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip6->ip6_plen;
		else {
			uint32_t cx;

			cx = ntohs(ip6->ip6_plen);
			cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
		}
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip4;

		ip4 = (struct ip *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip4->ip_len;
		else {
			cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
			    IPPROTO_TCP);
			cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
			    htons(cs));
		}
		break;
	}
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
	}

	cs = ~cs;
	c += cs;

	/* Remove TCP header csum. */
	cs = ~tcp_lro_csum_th(th);
	c += cs;
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c & 0xffff);
}
#endif

static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}

void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	struct timeval tv;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	getmicrotime(&tv);
	timevalsub(&tv, timeout);
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (timevalcmp(&tv, &le->mtime, >=)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
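
/*
 * A driver watchdog can use this to bound how long a partially
 * assembled chain sits in the engine; a sketch (the timeout value and
 * "rxq" are hypothetical):
 *
 *	static const struct timeval lro_timeout = { 0, 100000 };
 *
 *	tcp_lro_flush_inactive(&rxq->lro, &lro_timeout);
 */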

void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{

	if (le->append_cnt > 0) {
		struct tcphdr *th;
		uint16_t p_len;

		p_len = htons(le->p_len);
		switch (le->eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			struct ip6_hdr *ip6;

			ip6 = le->le_ip6;
			ip6->ip6_plen = p_len;
			th = (struct tcphdr *)(ip6 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			struct ip *ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			uint32_t cl;
			uint16_t c;
#endif

			ip4 = le->le_ip4;
#ifdef TCP_LRO_UPDATE_CSUM
			/* Fix IP header checksum for new length. */
			c = ~ip4->ip_sum;
			cl = c;
			c = ~ip4->ip_len;
			cl += c + p_len;
			while (cl > 0xffff)
				cl = (cl >> 16) + (cl & 0xffff);
			c = cl;
			ip4->ip_sum = ~c;
#else
			ip4->ip_sum = TCP_LRO_INVALID_CSUM;
#endif
			ip4->ip_len = p_len;
			th = (struct tcphdr *)(ip4 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->p_len += ETHER_HDR_LEN;
			break;
		}
#endif
		default:
			th = NULL;	/* Keep compiler happy. */
		}
		le->m_head->m_pkthdr.csum_data = 0xffff;
		le->m_head->m_pkthdr.len = le->p_len;

		/* Incorporate the latest ACK into the TCP header. */
		th->th_ack = le->ack_seq;
		th->th_win = le->window;
		/* Incorporate latest timestamp into the TCP header. */
		if (le->timestamp != 0) {
			uint32_t *ts_ptr;

			ts_ptr = (uint32_t *)(th + 1);
			ts_ptr[1] = htonl(le->tsval);
			ts_ptr[2] = le->tsecr;
		}
#ifdef TCP_LRO_UPDATE_CSUM
		/* Update the TCP header checksum. */
		le->ulp_csum += p_len;
		le->ulp_csum += tcp_lro_csum_th(th);
		while (le->ulp_csum > 0xffff)
			le->ulp_csum = (le->ulp_csum >> 16) +
			    (le->ulp_csum & 0xffff);
		th->th_sum = (le->ulp_csum & 0xffff);
		th->th_sum = ~th->th_sum;
#else
		th->th_sum = TCP_LRO_INVALID_CSUM;
#endif
	}

	(*lc->ifp->if_input)(lc->ifp, le->m_head);
	lc->lro_queued += le->append_cnt + 1;
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}

#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif

/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available.  The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits.  This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
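
/*
 * The 64-bit keys sorted above are built by tcp_lro_queue_mbuf() as:
 *
 *	63           56 55                        24 23             0
 *	+--------------+----------------------------+----------------+
 *	|  hash type   |          flow ID           |  packet index  |
 *	+--------------+----------------------------+----------------+
 *
 * Sorting the full key groups packets by RSS hash type and flow, while
 * the unique low 24 index bits keep arrival order within a flow;
 * tcp_lro_flush_all() masks those index bits away when detecting
 * stream boundaries.
 */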

void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	lc->lro_mbuf_count = 0;
}

#ifdef INET6
static int
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
    struct tcphdr **th)
{

	/* XXX-BZ we should check the flow-label. */

	/* XXX-BZ We do not yet support ext. hdrs. */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Find the TCP header. */
	*th = (struct tcphdr *)(ip6 + 1);

	return (0);
}
#endif

#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
    struct tcphdr **th)
{
	int csum_flags;
	uint16_t csum;

	if (ip4->ip_p != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Ensure there are no options. */
	if ((ip4->ip_hl << 2) != sizeof(*ip4))
		return (TCP_LRO_CANNOT);

	/* .. and the packet is not fragmented. */
	if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
		return (TCP_LRO_CANNOT);

	/* Legacy IP has a header checksum that needs to be correct. */
	csum_flags = m->m_pkthdr.csum_flags;
	if (csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false((csum) != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}

	/* Find the TCP header (we assured there are no IP options). */
	*th = (struct tcphdr *)(ip4 + 1);

	return (0);
}
#endif
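
/*
 * tcp_lro_rx2() is the core of the engine: parse the Ethernet and IP
 * headers, reject anything that cannot be coalesced (forwarding
 * enabled, IP options, fragments, bad checksums, TCP flags other than
 * ACK/PSH, options other than aligned timestamps), then look the flow
 * up in the hash table and either append the segment to an existing
 * chain or start a new one.  "csum" is the TCP checksum as seen by the
 * driver, or 0 to have it read from the header; "use_hash" selects
 * between the flowid-based buckets and bucket 0 (used by the sorted
 * path, which already groups packets by flow).
 */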

static int
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
{
	struct lro_entry *le;
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	struct tcphdr *th;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	uint32_t *ts_ptr;
	tcp_seq seq;
	int error, ip_len, l;
	uint16_t eh_type, tcp_data_len;
	struct lro_head *bucket;
	int force_flush = 0;

	/* We expect a contiguous header [eh, ip, tcp]. */

	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ip6_forwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		if (error != 0)
			return (error);
		tcp_data_len = ntohs(ip6->ip6_plen);
		ip_len = sizeof(*ip6) + tcp_data_len;
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ipforwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		if (error != 0)
			return (error);
		ip_len = ntohs(ip4->ip_len);
		tcp_data_len = ip_len - sizeof(*ip4);
		break;
	}
#endif
	/* XXX-BZ what happens in case of VLAN(s)? */
	default:
		return (TCP_LRO_NOT_SUPPORTED);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet, then we must
	 * trim the extra bytes off.
	 */
	l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
	if (l != 0) {
		if (l < 0)
			/* Truncated packet. */
			return (TCP_LRO_CANNOT);

		m_adj(m, -l);
	}

	/*
	 * Check TCP header constraints.
	 */
	/* Ensure no bits set besides ACK or PSH. */
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		if (th->th_flags & TH_SYN)
			return (TCP_LRO_CANNOT);
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		force_flush = 1;
	}

	/* XXX-BZ We lose an ACK|PUSH flag concatenating multiple segments. */
	/* XXX-BZ Ideally we'd flush on PUSH? */

	/*
	 * Check for timestamps.
	 * Since the only option we handle is timestamps, we only have to
	 * handle the simple case of aligned timestamps.
	 */
	l = (th->th_off << 2);
	tcp_data_len -= l;
	l -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment.
		 */
		force_flush = 1;
	}

	/* If the driver did not pass in the checksum, set it now. */
	if (csum == 0x0000)
		csum = th->th_sum;

	seq = ntohl(th->th_seq);

	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else if (M_HASHTYPE_ISHASH(m)) {
		bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
	} else {
		uint32_t hash;

		switch (eh_type) {
#ifdef INET
		case ETHERTYPE_IP:
			hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			hash = ip6->ip6_src.s6_addr32[0] +
			    ip6->ip6_dst.s6_addr32[0];
			hash += ip6->ip6_src.s6_addr32[1] +
			    ip6->ip6_dst.s6_addr32[1];
			hash += ip6->ip6_src.s6_addr32[2] +
			    ip6->ip6_dst.s6_addr32[2];
			hash += ip6->ip6_src.s6_addr32[3] +
			    ip6->ip6_dst.s6_addr32[3];
			break;
#endif
		default:
			hash = 0;
			break;
		}
		hash += th->th_sport + th->th_dport;
		bucket = &lc->lro_hash[hash % lc->lro_hashsz];
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		if (le->eh_type != eh_type)
			continue;
		if (le->source_port != th->th_sport ||
		    le->dest_port != th->th_dport)
			continue;
		switch (eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (bcmp(&le->source_ip6, &ip6->ip6_src,
			    sizeof(struct in6_addr)) != 0 ||
			    bcmp(&le->dest_ip6, &ip6->ip6_dst,
			    sizeof(struct in6_addr)) != 0)
				continue;
			break;
#endif
#ifdef INET
		case ETHERTYPE_IP:
			if (le->source_ip4 != ip4->ip_src.s_addr ||
			    le->dest_ip4 != ip4->ip_dst.s_addr)
				continue;
			break;
#endif
		}

		if (force_flush) {
			/* Timestamps mismatch; this is a FIN, etc. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		/* Flush now if appending will result in overflow. */
		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			break;
		}

		/* Try to append the new segment. */
		if (__predict_false(seq != le->next_seq ||
		    (tcp_data_len == 0 && le->ack_seq == th->th_ack))) {
			/* Out of order packet or duplicate ACK. */
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
			return (TCP_LRO_CANNOT);
		}

		if (l != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			/* XXX-BZ flip and use TSTMP_GEQ macro for this? */
			if (__predict_false(le->tsval > tsval ||
			    *(ts_ptr + 2) == 0))
				return (TCP_LRO_CANNOT);
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}

		le->next_seq += tcp_data_len;
		le->ack_seq = th->th_ack;
		le->window = th->th_win;
		le->append_cnt++;

#ifdef TCP_LRO_UPDATE_CSUM
		le->ulp_csum += tcp_lro_rx_csum_fixup(le, l3hdr, th,
		    tcp_data_len, ~csum);
#endif

		if (tcp_data_len == 0) {
			m_freem(m);
			/*
			 * Flush this LRO entry, if this ACK should not
			 * be further delayed.
			 */
			if (le->append_cnt >= lc->lro_ackcnt_lim) {
				tcp_lro_active_remove(le);
				tcp_lro_flush(lc, le);
			}
			return (0);
		}

		le->p_len += tcp_data_len;

		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		m_demote_pkthdr(m);

		le->m_tail->m_next = m;
		le->m_tail = m_last(m);

		/*
		 * If a possible next full length packet would cause an
		 * overflow, pro-actively flush now.
		 */
		if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		} else
			getmicrotime(&le->mtime);

		return (0);
	}

	if (force_flush) {
		/*
		 * Nothing to flush, but this segment cannot be further
		 * aggregated/delayed.
		 */
		return (TCP_LRO_CANNOT);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);
	getmicrotime(&le->mtime);

	/* Start filling in details. */
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;

	le->next_seq = seq + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	if (l != 0) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}

#ifdef TCP_LRO_UPDATE_CSUM
	/*
	 * Do not touch the csum of the first packet.  However save the
	 * "adjusted" checksum of just the source and destination addresses,
	 * the next header and the TCP payload.  The length and TCP header
	 * parts may change, so we remove those from the saved checksum and
	 * re-add with final values on tcp_lro_flush() if needed.
	 */
	KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
	    __func__, le, le->ulp_csum));

	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	th->th_sum = csum;	/* Restore checksum on first packet. */
#endif

	le->m_head = m;
	le->m_tail = m_last(m);

	return (0);
}
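
/*
 * Note the ownership contract visible in tcp_lro_flush_all() above: on
 * a non-zero return the mbuf has not been consumed and the caller is
 * responsible for passing it to if_input() (or freeing it); on zero the
 * engine owns it and has either appended it to a chain or freed it
 * (pure ACKs that were merged).
 */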

int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{

	return (tcp_lro_rx2(lc, m, csum, 1));
}

void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		lc->lro_flushed++;
		lc->lro_queued++;

		/* input packet to network layer */
		(*lc->ifp->if_input)(lc->ifp, mb);
		return;
	}

	/* check if array is full */
	if (__predict_false(lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count++].mb = mb;
}

/* end */