1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 #include "opt_inet6.h" 39 #include "opt_tcpdebug.h" 40 41 /* For debugging we want counters and BB logging */ 42 /* #define TCP_REASS_COUNTERS 1 */ 43 /* #define TCP_REASS_LOGGING 1 */ 44 45 #include <sys/param.h> 46 #include <sys/kernel.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/socket.h> 51 #include <sys/socketvar.h> 52 #include <sys/sysctl.h> 53 #include <sys/syslog.h> 54 #include <sys/systm.h> 55 56 #include <vm/uma.h> 57 58 #include <net/if.h> 59 #include <net/if_var.h> 60 #include <net/route.h> 61 #include <net/vnet.h> 62 63 #include <netinet/in.h> 64 #include <netinet/in_pcb.h> 65 #include <netinet/in_systm.h> 66 #include <netinet/in_var.h> 67 #include <netinet/ip.h> 68 #include <netinet/ip_var.h> 69 #include <netinet/ip_options.h> 70 #include <netinet/ip6.h> 71 #include <netinet6/in6_pcb.h> 72 #include <netinet6/ip6_var.h> 73 #include <netinet6/nd6.h> 74 #include <netinet/tcp.h> 75 #include <netinet/tcp_fsm.h> 76 #include <netinet/tcp_seq.h> 77 #include <netinet/tcp_timer.h> 78 #include <netinet/tcp_var.h> 79 #ifdef TCP_REASS_LOGGING 80 #include <netinet/tcp_log_buf.h> 81 #include <netinet/tcp_hpts.h> 82 #endif 83 #include <netinet/tcpip.h> 84 #ifdef TCPDEBUG 85 #include <netinet/tcp_debug.h> 86 #endif /* TCPDEBUG */ 87 88 #define TCP_R_LOG_ADD 1 89 #define TCP_R_LOG_LIMIT_REACHED 2 90 #define TCP_R_LOG_APPEND 3 91 #define TCP_R_LOG_PREPEND 4 92 #define TCP_R_LOG_REPLACE 5 93 #define TCP_R_LOG_MERGE_INTO 6 94 #define TCP_R_LOG_NEW_ENTRY 7 95 #define TCP_R_LOG_READ 8 96 #define TCP_R_LOG_ZERO 9 97 #define TCP_R_LOG_DUMP 10 98 #define TCP_R_LOG_TRIM 11 99 100 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, 101 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 102 "TCP Segment Reassembly Queue"); 103 104 static SYSCTL_NODE(_net_inet_tcp_reass, OID_AUTO, stats, 105 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 106 "TCP Segment Reassembly stats"); 107 108 static int tcp_reass_maxseg = 0; 109 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN, 110 &tcp_reass_maxseg, 0, 111 "Global maximum number of TCP Segments in Reassembly Queue"); 112 113 static uma_zone_t tcp_reass_zone; 114 SYSCTL_UMA_CUR(_net_inet_tcp_reass, OID_AUTO, cursegments, 0, 115 &tcp_reass_zone, 116 "Global number of TCP Segments currently in Reassembly Queue"); 117 118 static u_int tcp_reass_maxqueuelen = 100; 119 SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, maxqueuelen, CTLFLAG_RWTUN, 120 &tcp_reass_maxqueuelen, 0, 121 "Maximum number of TCP Segments per Reassembly Queue"); 122 123 static int tcp_new_limits = 0; 124 SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, new_limit, CTLFLAG_RWTUN, 125 &tcp_new_limits, 0, 126 "Do we use the new limit method we are discussing?"); 127 128 static u_int tcp_reass_queue_guard = 16; 129 SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, queueguard, CTLFLAG_RWTUN, 130 &tcp_reass_queue_guard, 16, 131 "Number of TCP Segments in Reassembly Queue where we flip over to guard mode"); 132 133 #ifdef TCP_REASS_COUNTERS 134 135 counter_u64_t reass_entry; 136 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, entry, CTLFLAG_RD, 137 &reass_entry, "A segment entered reassembly "); 138 139 counter_u64_t reass_path1; 140 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path1, CTLFLAG_RD, 141 &reass_path1, "Took path 1"); 142 143 counter_u64_t reass_path2; 144 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path2, CTLFLAG_RD, 145 &reass_path2, "Took path 2"); 146 147 counter_u64_t reass_path3; 148 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path3, CTLFLAG_RD, 149 &reass_path3, "Took path 3"); 150 151 counter_u64_t reass_path4; 152 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path4, CTLFLAG_RD, 153 &reass_path4, "Took path 4"); 154 155 counter_u64_t reass_path5; 156 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path5, CTLFLAG_RD, 157 &reass_path5, "Took path 5"); 158 159 counter_u64_t reass_path6; 160 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path6, CTLFLAG_RD, 161 &reass_path6, "Took path 6"); 162 163 counter_u64_t reass_path7; 164 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path7, CTLFLAG_RD, 165 &reass_path7, "Took path 7"); 166 167 counter_u64_t reass_fullwalk; 168 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, fullwalk, CTLFLAG_RD, 169 &reass_fullwalk, "Took a full walk "); 170 171 counter_u64_t reass_nospace; 172 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, nospace, CTLFLAG_RD, 173 &reass_nospace, "Had no mbuf capacity "); 174 175 counter_u64_t merge_fwd; 176 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_fwd, CTLFLAG_RD, 177 &merge_fwd, "Ran merge fwd"); 178 179 counter_u64_t merge_into; 180 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_into, CTLFLAG_RD, 181 &merge_into, "Ran merge into"); 182 183 counter_u64_t tcp_zero_input; 184 SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, zero_input, CTLFLAG_RD, 185 &tcp_zero_input, "The reassembly buffer saw a zero len segment etc"); 186 187 #endif 188 189 /* Initialize TCP reassembly queue */ 190 static void 191 tcp_reass_zone_change(void *tag) 192 { 193 194 /* Set the zone limit and read back the effective value. */ 195 tcp_reass_maxseg = nmbclusters / 16; 196 tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone, 197 tcp_reass_maxseg); 198 } 199 200 #ifdef TCP_REASS_LOGGING 201 202 static void 203 tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p, 204 tcp_seq seq, int len, uint8_t action, int instance) 205 { 206 struct socket *so = tptosocket(tp); 207 uint32_t cts; 208 struct timeval tv; 209 210 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 211 union tcp_log_stackspecific log; 212 213 memset(&log, 0, sizeof(log)); 214 cts = tcp_get_usecs(&tv); 215 log.u_bbr.flex1 = seq; 216 log.u_bbr.cur_del_rate = (uint64_t)q; 217 log.u_bbr.delRate = (uint64_t)p; 218 if (q != NULL) { 219 log.u_bbr.flex2 = q->tqe_start; 220 log.u_bbr.flex3 = q->tqe_len; 221 log.u_bbr.flex4 = q->tqe_mbuf_cnt; 222 log.u_bbr.hptsi_gain = q->tqe_flags; 223 } 224 if (p != NULL) { 225 log.u_bbr.flex5 = p->tqe_start; 226 log.u_bbr.pkts_out = p->tqe_len; 227 log.u_bbr.epoch = p->tqe_mbuf_cnt; 228 log.u_bbr.cwnd_gain = p->tqe_flags; 229 } 230 log.u_bbr.flex6 = tp->t_segqmbuflen; 231 log.u_bbr.flex7 = instance; 232 log.u_bbr.flex8 = action; 233 log.u_bbr.timeStamp = cts; 234 TCP_LOG_EVENTP(tp, NULL, &so->so_rcv, &so->so_snd, 235 TCP_LOG_REASS, 0, 236 len, &log, false, &tv); 237 } 238 } 239 240 static void 241 tcp_reass_log_dump(struct tcpcb *tp) 242 { 243 struct tseg_qent *q; 244 245 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 246 TAILQ_FOREACH(q, &tp->t_segq, tqe_q) { 247 tcp_log_reassm(tp, q, NULL, q->tqe_start, q->tqe_len, TCP_R_LOG_DUMP, 0); 248 } 249 }; 250 } 251 252 static void 253 tcp_reass_log_new_in(struct tcpcb *tp, tcp_seq seq, int len, struct mbuf *m, 254 int logval, struct tseg_qent *q) 255 { 256 int cnt; 257 struct mbuf *t; 258 259 cnt = 0; 260 t = m; 261 while (t) { 262 cnt += t->m_len; 263 t = t->m_next; 264 } 265 tcp_log_reassm(tp, q, NULL, seq, len, logval, cnt); 266 } 267 268 #endif 269 270 void 271 tcp_reass_global_init(void) 272 { 273 274 tcp_reass_maxseg = nmbclusters / 16; 275 TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments", 276 &tcp_reass_maxseg); 277 tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent), 278 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 279 /* Set the zone limit and read back the effective value. */ 280 tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone, 281 tcp_reass_maxseg); 282 #ifdef TCP_REASS_COUNTERS 283 reass_path1 = counter_u64_alloc(M_WAITOK); 284 reass_path2 = counter_u64_alloc(M_WAITOK); 285 reass_path3 = counter_u64_alloc(M_WAITOK); 286 reass_path4 = counter_u64_alloc(M_WAITOK); 287 reass_path5 = counter_u64_alloc(M_WAITOK); 288 reass_path6 = counter_u64_alloc(M_WAITOK); 289 reass_path7 = counter_u64_alloc(M_WAITOK); 290 reass_fullwalk = counter_u64_alloc(M_WAITOK); 291 reass_nospace = counter_u64_alloc(M_WAITOK); 292 reass_entry = counter_u64_alloc(M_WAITOK); 293 merge_fwd = counter_u64_alloc(M_WAITOK); 294 merge_into = counter_u64_alloc(M_WAITOK); 295 tcp_zero_input = counter_u64_alloc(M_WAITOK); 296 #endif 297 EVENTHANDLER_REGISTER(nmbclusters_change, 298 tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY); 299 300 } 301 302 void 303 tcp_reass_flush(struct tcpcb *tp) 304 { 305 struct tseg_qent *qe; 306 307 INP_WLOCK_ASSERT(tptoinpcb(tp)); 308 309 while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) { 310 TAILQ_REMOVE(&tp->t_segq, qe, tqe_q); 311 m_freem(qe->tqe_m); 312 uma_zfree(tcp_reass_zone, qe); 313 tp->t_segqlen--; 314 } 315 tp->t_segqmbuflen = 0; 316 KASSERT((tp->t_segqlen == 0), 317 ("TCP reass queue %p segment count is %d instead of 0 after flush.", 318 tp, tp->t_segqlen)); 319 } 320 321 static void 322 tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last, 323 struct mbuf *m, struct tcphdr *th, int tlen, 324 struct mbuf *mlast, int lenofoh) 325 { 326 327 #ifdef TCP_REASS_LOGGING 328 tcp_log_reassm(tp, last, NULL, th->th_seq, tlen, TCP_R_LOG_APPEND, 0); 329 #endif 330 last->tqe_len += tlen; 331 last->tqe_m->m_pkthdr.len += tlen; 332 /* Preserve the FIN bit if its there */ 333 last->tqe_flags |= (tcp_get_flags(th) & TH_FIN); 334 last->tqe_last->m_next = m; 335 last->tqe_last = mlast; 336 last->tqe_mbuf_cnt += lenofoh; 337 tp->t_rcvoopack++; 338 TCPSTAT_INC(tcps_rcvoopack); 339 TCPSTAT_ADD(tcps_rcvoobyte, tlen); 340 #ifdef TCP_REASS_LOGGING 341 tcp_reass_log_new_in(tp, last->tqe_start, lenofoh, last->tqe_m, 342 TCP_R_LOG_APPEND, 343 last); 344 #endif 345 } 346 347 static void 348 tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m, struct tcphdr *th, 349 int tlen, struct mbuf *mlast, int lenofoh) 350 { 351 int i; 352 353 #ifdef TCP_REASS_LOGGING 354 tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0); 355 #endif 356 if (SEQ_GT((th->th_seq + tlen), first->tqe_start)) { 357 /* The new data overlaps into the old */ 358 i = (th->th_seq + tlen) - first->tqe_start; 359 #ifdef TCP_REASS_LOGGING 360 tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 1); 361 #endif 362 m_adj(first->tqe_m, i); 363 first->tqe_len -= i; 364 first->tqe_start += i; 365 } 366 /* Ok now setup our chain to point to the old first */ 367 mlast->m_next = first->tqe_m; 368 first->tqe_m = m; 369 first->tqe_len += tlen; 370 first->tqe_start = th->th_seq; 371 first->tqe_m->m_pkthdr.len = first->tqe_len; 372 first->tqe_mbuf_cnt += lenofoh; 373 tp->t_rcvoopack++; 374 TCPSTAT_INC(tcps_rcvoopack); 375 TCPSTAT_ADD(tcps_rcvoobyte, tlen); 376 #ifdef TCP_REASS_LOGGING 377 tcp_reass_log_new_in(tp, first->tqe_start, lenofoh, first->tqe_m, 378 TCP_R_LOG_PREPEND, 379 first); 380 #endif 381 } 382 383 static void 384 tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m, 385 tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint16_t flags) 386 { 387 /* 388 * Free the data in q, and replace 389 * it with the new segment. 390 */ 391 int len_dif; 392 393 #ifdef TCP_REASS_LOGGING 394 tcp_log_reassm(tp, q, NULL, seq, len, TCP_R_LOG_REPLACE, 0); 395 #endif 396 m_freem(q->tqe_m); 397 KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt, 398 ("Tp:%p seg queue goes negative", tp)); 399 tp->t_segqmbuflen -= q->tqe_mbuf_cnt; 400 q->tqe_mbuf_cnt = mbufoh; 401 q->tqe_m = m; 402 q->tqe_last = mlast; 403 q->tqe_start = seq; 404 if (len > q->tqe_len) 405 len_dif = len - q->tqe_len; 406 else 407 len_dif = 0; 408 tp->t_rcvoopack++; 409 TCPSTAT_INC(tcps_rcvoopack); 410 TCPSTAT_ADD(tcps_rcvoobyte, len_dif); 411 q->tqe_len = len; 412 q->tqe_flags = (flags & TH_FIN); 413 q->tqe_m->m_pkthdr.len = q->tqe_len; 414 tp->t_segqmbuflen += mbufoh; 415 416 } 417 418 static void 419 tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent, 420 struct tseg_qent *q) 421 { 422 /* 423 * Merge q into ent and free q from the list. 424 */ 425 #ifdef TCP_REASS_LOGGING 426 tcp_log_reassm(tp, q, ent, 0, 0, TCP_R_LOG_MERGE_INTO, 0); 427 #endif 428 #ifdef TCP_REASS_COUNTERS 429 counter_u64_add(merge_into, 1); 430 #endif 431 ent->tqe_last->m_next = q->tqe_m; 432 ent->tqe_last = q->tqe_last; 433 ent->tqe_len += q->tqe_len; 434 ent->tqe_mbuf_cnt += q->tqe_mbuf_cnt; 435 ent->tqe_m->m_pkthdr.len += q->tqe_len; 436 ent->tqe_flags |= (q->tqe_flags & TH_FIN); 437 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 438 uma_zfree(tcp_reass_zone, q); 439 tp->t_segqlen--; 440 441 } 442 443 static void 444 tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent) 445 { 446 struct tseg_qent *q, *qtmp; 447 int i; 448 tcp_seq max; 449 /* 450 * Given an entry merge forward anyplace 451 * that ent overlaps forward. 452 */ 453 454 max = ent->tqe_start + ent->tqe_len; 455 q = TAILQ_NEXT(ent, tqe_q); 456 if (q == NULL) { 457 /* Nothing left */ 458 return; 459 } 460 TAILQ_FOREACH_FROM_SAFE(q, &tp->t_segq, tqe_q, qtmp) { 461 if (SEQ_GT(q->tqe_start, max)) { 462 /* Beyond q */ 463 break; 464 } 465 /* We have some or all that are overlapping */ 466 if (SEQ_GEQ(max, (q->tqe_start + q->tqe_len))) { 467 /* It consumes it all */ 468 tp->t_segqmbuflen -= q->tqe_mbuf_cnt; 469 m_freem(q->tqe_m); 470 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 471 uma_zfree(tcp_reass_zone, q); 472 tp->t_segqlen--; 473 continue; 474 } 475 /* 476 * Trim the q entry to dovetail to this one 477 * and then merge q into ent updating max 478 * in the process. 479 */ 480 i = max - q->tqe_start; 481 #ifdef TCP_REASS_LOGGING 482 tcp_log_reassm(tp, q, NULL, 0, i, TCP_R_LOG_TRIM, 2); 483 #endif 484 m_adj(q->tqe_m, i); 485 q->tqe_len -= i; 486 q->tqe_start += i; 487 tcp_reass_merge_into(tp, ent, q); 488 max = ent->tqe_start + ent->tqe_len; 489 } 490 #ifdef TCP_REASS_COUNTERS 491 counter_u64_add(merge_fwd, 1); 492 #endif 493 } 494 495 static int 496 tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast) 497 { 498 int len = MSIZE; 499 500 if (m->m_flags & M_EXT) 501 len += m->m_ext.ext_size; 502 while (m->m_next != NULL) { 503 m = m->m_next; 504 len += MSIZE; 505 if (m->m_flags & M_EXT) 506 len += m->m_ext.ext_size; 507 } 508 *mlast = m; 509 return (len); 510 } 511 512 /* 513 * NOTE!!! the new tcp-reassembly code *must not* use 514 * m_adj() with a negative index. That alters the chain 515 * of mbufs (by possibly chopping trailing mbufs). At 516 * the front of tcp_reass we count the mbuf overhead 517 * and setup the tail pointer. If we use m_adj(m, -5) 518 * we could corrupt the tail pointer. Currently the 519 * code only uses m_adj(m, postive-num). If this 520 * changes appropriate changes to update mlast would 521 * be needed. 522 */ 523 int 524 tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start, 525 int *tlenp, struct mbuf *m) 526 { 527 struct tseg_qent *q, *last, *first; 528 struct tseg_qent *p = NULL; 529 struct tseg_qent *nq = NULL; 530 struct tseg_qent *te = NULL; 531 struct mbuf *mlast = NULL; 532 struct inpcb *inp = tptoinpcb(tp); 533 struct socket *so = tptosocket(tp); 534 struct sockbuf *sb = &so->so_rcv; 535 char *s = NULL; 536 int flags, i, lenofoh; 537 538 INP_WLOCK_ASSERT(inp); 539 /* 540 * XXX: tcp_reass() is rather inefficient with its data structures 541 * and should be rewritten (see NetBSD for optimizations). 542 */ 543 544 KASSERT(th == NULL || (seq_start != NULL && tlenp != NULL), 545 ("tcp_reass called with illegal parameter combination " 546 "(tp=%p, th=%p, seq_start=%p, tlenp=%p, m=%p)", 547 tp, th, seq_start, tlenp, m)); 548 /* 549 * Call with th==NULL after become established to 550 * force pre-ESTABLISHED data up to user socket. 551 */ 552 if (th == NULL) 553 goto present; 554 KASSERT(SEQ_GEQ(th->th_seq, tp->rcv_nxt), 555 ("Attempt to add old entry to reassembly queue (th=%p, tp=%p)", 556 th, tp)); 557 #ifdef TCP_REASS_LOGGING 558 tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_ADD, NULL); 559 #endif 560 #ifdef TCP_REASS_COUNTERS 561 counter_u64_add(reass_entry, 1); 562 #endif 563 /* 564 * Check for zero length data. 565 */ 566 if ((*tlenp == 0) && ((tcp_get_flags(th) & TH_FIN) == 0)) { 567 /* 568 * A zero length segment does no 569 * one any good. We could check 570 * the rcv_nxt <-> rcv_wnd but thats 571 * already done for us by the caller. 572 */ 573 strip_fin: 574 #ifdef TCP_REASS_COUNTERS 575 counter_u64_add(tcp_zero_input, 1); 576 #endif 577 m_freem(m); 578 #ifdef TCP_REASS_LOGGING 579 tcp_reass_log_dump(tp); 580 #endif 581 return (0); 582 } else if ((*tlenp == 0) && 583 (tcp_get_flags(th) & TH_FIN) && 584 !TCPS_HAVEESTABLISHED(tp->t_state)) { 585 /* 586 * We have not established, and we 587 * have a FIN and no data. Lets treat 588 * this as the same as if the FIN were 589 * not present. We don't want to save 590 * the FIN bit in a reassembly buffer 591 * we want to get established first before 592 * we do that (the peer will retransmit). 593 */ 594 goto strip_fin; 595 } 596 /* 597 * Will it fit? 598 */ 599 lenofoh = tcp_reass_overhead_of_chain(m, &mlast); 600 if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) && 601 (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) { 602 /* No room */ 603 TCPSTAT_INC(tcps_rcvreassfull); 604 #ifdef TCP_REASS_COUNTERS 605 counter_u64_add(reass_nospace, 1); 606 #endif 607 #ifdef TCP_REASS_LOGGING 608 tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh, TCP_R_LOG_LIMIT_REACHED, 0); 609 #endif 610 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { 611 log(LOG_DEBUG, "%s; %s: mbuf count limit reached, " 612 "segment dropped\n", s, __func__); 613 free(s, M_TCPLOG); 614 } 615 m_freem(m); 616 *tlenp = 0; 617 #ifdef TCP_REASS_LOGGING 618 tcp_reass_log_dump(tp); 619 #endif 620 return (0); 621 } 622 /* 623 * First lets deal with two common cases, the 624 * segment appends to the back of our collected 625 * segments. Or the segment is the next in line. 626 */ 627 last = TAILQ_LAST_FAST(&tp->t_segq, tseg_qent, tqe_q); 628 if (last != NULL) { 629 if ((tcp_get_flags(th) & TH_FIN) && 630 SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) { 631 /* 632 * Someone is trying to game us, dump 633 * the segment. 634 */ 635 *tlenp = 0; 636 m_freem(m); 637 return (0); 638 } 639 if ((SEQ_GEQ(th->th_seq, last->tqe_start)) && 640 (SEQ_GEQ((last->tqe_start + last->tqe_len), th->th_seq))) { 641 /* Common case, trailing segment is added */ 642 /** 643 * +--last 644 * v 645 * reassembly buffer |---| |---| |---| 646 * new segment |---| 647 */ 648 #ifdef TCP_REASS_COUNTERS 649 counter_u64_add(reass_path1, 1); 650 #endif 651 if (SEQ_GT((last->tqe_start + last->tqe_len), th->th_seq)) { 652 i = (last->tqe_start + last->tqe_len) - th->th_seq; 653 if (i < *tlenp) { 654 #ifdef TCP_REASS_LOGGING 655 tcp_log_reassm(tp, last, NULL, 0, i, TCP_R_LOG_TRIM, 3); 656 th->th_seq += i; 657 #endif 658 m_adj(m, i); 659 *tlenp -= i; 660 } else { 661 /* Complete overlap */ 662 TCPSTAT_INC(tcps_rcvduppack); 663 TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp); 664 m_freem(m); 665 *tlenp = last->tqe_len; 666 *seq_start = last->tqe_start; 667 return (0); 668 } 669 } 670 if (last->tqe_flags & TH_FIN) { 671 /* 672 * We have data after the FIN on the last? 673 */ 674 *tlenp = 0; 675 m_freem(m); 676 return(0); 677 } 678 tcp_reass_append(tp, last, m, th, *tlenp, mlast, lenofoh); 679 tp->t_segqmbuflen += lenofoh; 680 *seq_start = last->tqe_start; 681 *tlenp = last->tqe_len; 682 return (0); 683 } else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) { 684 /* 685 * Second common case, we missed 686 * another one and have something more 687 * for the end. 688 */ 689 /** 690 * +--last 691 * v 692 * reassembly buffer |---| |---| |---| 693 * new segment |---| 694 */ 695 if (last->tqe_flags & TH_FIN) { 696 /* 697 * We have data after the FIN on the last? 698 */ 699 *tlenp = 0; 700 m_freem(m); 701 return(0); 702 } 703 #ifdef TCP_REASS_COUNTERS 704 counter_u64_add(reass_path2, 1); 705 #endif 706 p = last; 707 goto new_entry; 708 } 709 } else { 710 /* First segment (it's NULL). */ 711 goto new_entry; 712 } 713 first = TAILQ_FIRST(&tp->t_segq); 714 if (SEQ_LT(th->th_seq, first->tqe_start) && 715 SEQ_GEQ((th->th_seq + *tlenp),first->tqe_start) && 716 SEQ_LT((th->th_seq + *tlenp), (first->tqe_start + first->tqe_len))) { 717 /* 718 * The head of the queue is prepended by this and 719 * it may be the one I want most. 720 */ 721 /** 722 * first-------+ 723 * v 724 * rea: |---| |---| |---| 725 * new: |---| 726 * Note the case we do not deal with here is: 727 * rea= |---| |---| |---| 728 * new= |----| 729 * Due to the fact that it could be 730 * new |--------------------| 731 * And we might need to merge forward. 732 */ 733 #ifdef INVARIANTS 734 struct mbuf *firstmbuf; 735 #endif 736 737 #ifdef TCP_REASS_COUNTERS 738 counter_u64_add(reass_path3, 1); 739 #endif 740 if (SEQ_LT(th->th_seq, tp->rcv_nxt)) { 741 /* 742 * The resend was even before 743 * what we have. We need to trim it. 744 * Note TSNH (it should be trimmed 745 * before the call to tcp_reass()). 746 */ 747 #ifdef INVARIANTS 748 panic("th->th_seq:%u rcv_nxt:%u tp:%p not pre-trimmed", 749 th->th_seq, tp->rcv_nxt, tp); 750 #else 751 i = tp->rcv_nxt - th->th_seq; 752 #ifdef TCP_REASS_LOGGING 753 tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 4); 754 #endif 755 m_adj(m, i); 756 th->th_seq += i; 757 *tlenp -= i; 758 #endif 759 } 760 #ifdef INVARIANTS 761 firstmbuf = first->tqe_m; 762 #endif 763 tcp_reass_prepend(tp, first, m, th, *tlenp, mlast, lenofoh); 764 #ifdef INVARIANTS 765 if (firstmbuf == first->tqe_m) { 766 panic("First stayed same m:%p foobar:%p first->tqe_m:%p tp:%p first:%p", 767 m, firstmbuf, first->tqe_m, tp, first); 768 } else if (first->tqe_m != m) { 769 panic("First did not change to m:%p foobar:%p first->tqe_m:%p tp:%p first:%p", 770 m, firstmbuf, first->tqe_m, tp, first); 771 } 772 #endif 773 tp->t_segqmbuflen += lenofoh; 774 *seq_start = first->tqe_start; 775 *tlenp = first->tqe_len; 776 goto present; 777 } else if (SEQ_LT((th->th_seq + *tlenp), first->tqe_start)) { 778 /* New segment is before our earliest segment. */ 779 /** 780 * first---->+ 781 * v 782 * rea= |---| .... 783 * new" |---| 784 * 785 */ 786 goto new_entry; 787 } 788 /* 789 * Find a segment which begins after this one does. 790 */ 791 #ifdef TCP_REASS_COUNTERS 792 counter_u64_add(reass_fullwalk, 1); 793 #endif 794 TAILQ_FOREACH(q, &tp->t_segq, tqe_q) { 795 if (SEQ_GT(q->tqe_start, th->th_seq)) 796 break; 797 } 798 p = TAILQ_PREV(q, tsegqe_head, tqe_q); 799 /** 800 * Now is this fit just in-between only? 801 * i.e.: 802 * p---+ +----q 803 * v v 804 * res= |--| |--| |--| 805 * nee |-| 806 */ 807 if (SEQ_LT((th->th_seq + *tlenp), q->tqe_start) && 808 ((p == NULL) || (SEQ_GT(th->th_seq, (p->tqe_start + p->tqe_len))))) { 809 /* Yep no overlap */ 810 goto new_entry; 811 } 812 /** 813 * If we reach here we have some (possibly all) overlap 814 * such as: 815 * res= |--| |--| |--| 816 * new= |----| 817 * or new= |-----------------| 818 * or new= |--------| 819 * or new= |---| 820 * or new= |-----------| 821 */ 822 if ((p != NULL) && 823 (SEQ_LEQ(th->th_seq, (p->tqe_start + p->tqe_len)))) { 824 /* conversion to int (in i) handles seq wraparound */ 825 826 #ifdef TCP_REASS_COUNTERS 827 counter_u64_add(reass_path4, 1); 828 #endif 829 i = p->tqe_start + p->tqe_len - th->th_seq; 830 if (i >= 0) { 831 if (i >= *tlenp) { 832 /** 833 * prev seg---->+ 834 * v 835 * reassembly buffer |---| 836 * new segment |-| 837 */ 838 TCPSTAT_INC(tcps_rcvduppack); 839 TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp); 840 *tlenp = p->tqe_len; 841 *seq_start = p->tqe_start; 842 m_freem(m); 843 /* 844 * Try to present any queued data 845 * at the left window edge to the user. 846 * This is needed after the 3-WHS 847 * completes. Note this probably 848 * will not work and we will return. 849 */ 850 return (0); 851 } 852 if (i > 0) { 853 /** 854 * prev seg---->+ 855 * v 856 * reassembly buffer |---| 857 * new segment |-----| 858 */ 859 #ifdef TCP_REASS_COUNTERS 860 counter_u64_add(reass_path5, 1); 861 #endif 862 #ifdef TCP_REASS_LOGGING 863 tcp_log_reassm(tp, p, NULL, 0, i, TCP_R_LOG_TRIM, 5); 864 #endif 865 m_adj(m, i); 866 *tlenp -= i; 867 th->th_seq += i; 868 } 869 } 870 if (th->th_seq == (p->tqe_start + p->tqe_len)) { 871 /* 872 * If dovetails in with this one 873 * append it. 874 */ 875 /** 876 * prev seg---->+ 877 * v 878 * reassembly buffer |--| |---| 879 * new segment |--| 880 * (note: it was trimmed above if it overlapped) 881 */ 882 tcp_reass_append(tp, p, m, th, *tlenp, mlast, lenofoh); 883 tp->t_segqmbuflen += lenofoh; 884 } else { 885 #ifdef INVARIANTS 886 panic("Impossible cut th_seq:%u p->seq:%u(%d) p:%p tp:%p", 887 th->th_seq, p->tqe_start, p->tqe_len, 888 p, tp); 889 #endif 890 *tlenp = 0; 891 m_freem(m); 892 return (0); 893 } 894 q = p; 895 } else { 896 /* 897 * The new data runs over the 898 * top of previously sack'd data (in q). 899 * It may be partially overlapping, or 900 * it may overlap the entire segment. 901 */ 902 #ifdef TCP_REASS_COUNTERS 903 counter_u64_add(reass_path6, 1); 904 #endif 905 if (SEQ_GEQ((th->th_seq + *tlenp), (q->tqe_start + q->tqe_len))) { 906 /* It consumes it all */ 907 /** 908 * next seg---->+ 909 * v 910 * reassembly buffer |--| |---| 911 * new segment |----------| 912 */ 913 #ifdef TCP_REASS_COUNTERS 914 counter_u64_add(reass_path7, 1); 915 #endif 916 tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast, lenofoh, tcp_get_flags(th)); 917 } else { 918 /* 919 * We just need to prepend the data 920 * to this. It does not overrun 921 * the end. 922 */ 923 /** 924 * next seg---->+ 925 * v 926 * reassembly buffer |--| |---| 927 * new segment |----------| 928 */ 929 tcp_reass_prepend(tp, q, m, th, *tlenp, mlast, lenofoh); 930 tp->t_segqmbuflen += lenofoh; 931 } 932 } 933 /* Now does it go further than that? */ 934 tcp_reass_merge_forward(tp, q); 935 *seq_start = q->tqe_start; 936 *tlenp = q->tqe_len; 937 goto present; 938 939 /* 940 * When we reach here we can't combine it 941 * with any existing segment. 942 * 943 * Limit the number of segments that can be queued to reduce the 944 * potential for mbuf exhaustion. For best performance, we want to be 945 * able to queue a full window's worth of segments. The size of the 946 * socket receive buffer determines our advertised window and grows 947 * automatically when socket buffer autotuning is enabled. Use it as the 948 * basis for our queue limit. 949 * 950 * However, allow the user to specify a ceiling for the number of 951 * segments in each queue. 952 * 953 * Always let the missing segment through which caused this queue. 954 * NB: Access to the socket buffer is left intentionally unlocked as we 955 * can tolerate stale information here. 956 * 957 * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat 958 * should work but causes packets to be dropped when they shouldn't. 959 * Investigate why and re-evaluate the below limit after the behaviour 960 * is understood. 961 */ 962 new_entry: 963 if (th->th_seq == tp->rcv_nxt && TCPS_HAVEESTABLISHED(tp->t_state)) { 964 tp->rcv_nxt += *tlenp; 965 flags = tcp_get_flags(th) & TH_FIN; 966 TCPSTAT_INC(tcps_rcvoopack); 967 TCPSTAT_ADD(tcps_rcvoobyte, *tlenp); 968 SOCKBUF_LOCK(&so->so_rcv); 969 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 970 m_freem(m); 971 } else { 972 sbappendstream_locked(&so->so_rcv, m, 0); 973 } 974 tp->t_flags |= TF_WAKESOR; 975 return (flags); 976 } 977 if (tcp_new_limits) { 978 if ((tp->t_segqlen > tcp_reass_queue_guard) && 979 (*tlenp < MSIZE)) { 980 /* 981 * This is really a lie, we are not full but 982 * are getting a segment that is above 983 * guard threshold. If it is and its below 984 * a mbuf size (256) we drop it if it 985 * can't fill in some place. 986 */ 987 TCPSTAT_INC(tcps_rcvreassfull); 988 *tlenp = 0; 989 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { 990 log(LOG_DEBUG, "%s; %s: queue limit reached, " 991 "segment dropped\n", s, __func__); 992 free(s, M_TCPLOG); 993 } 994 m_freem(m); 995 #ifdef TCP_REASS_LOGGING 996 tcp_reass_log_dump(tp); 997 #endif 998 return (0); 999 } 1000 } else { 1001 if (tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1, 1002 tcp_reass_maxqueuelen)) { 1003 TCPSTAT_INC(tcps_rcvreassfull); 1004 *tlenp = 0; 1005 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { 1006 log(LOG_DEBUG, "%s; %s: queue limit reached, " 1007 "segment dropped\n", s, __func__); 1008 free(s, M_TCPLOG); 1009 } 1010 m_freem(m); 1011 #ifdef TCP_REASS_LOGGING 1012 tcp_reass_log_dump(tp); 1013 #endif 1014 return (0); 1015 } 1016 } 1017 /* 1018 * Allocate a new queue entry. If we can't, or hit the zone limit 1019 * just drop the pkt. 1020 */ 1021 te = uma_zalloc(tcp_reass_zone, M_NOWAIT); 1022 if (te == NULL) { 1023 TCPSTAT_INC(tcps_rcvmemdrop); 1024 m_freem(m); 1025 *tlenp = 0; 1026 if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) { 1027 log(LOG_DEBUG, "%s; %s: global zone limit " 1028 "reached, segment dropped\n", s, __func__); 1029 free(s, M_TCPLOG); 1030 } 1031 return (0); 1032 } 1033 tp->t_segqlen++; 1034 tp->t_rcvoopack++; 1035 TCPSTAT_INC(tcps_rcvoopack); 1036 TCPSTAT_ADD(tcps_rcvoobyte, *tlenp); 1037 /* Insert the new segment queue entry into place. */ 1038 te->tqe_m = m; 1039 te->tqe_flags = tcp_get_flags(th); 1040 te->tqe_len = *tlenp; 1041 te->tqe_start = th->th_seq; 1042 te->tqe_last = mlast; 1043 te->tqe_mbuf_cnt = lenofoh; 1044 tp->t_segqmbuflen += te->tqe_mbuf_cnt; 1045 if (p == NULL) { 1046 TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q); 1047 } else { 1048 TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q); 1049 } 1050 #ifdef TCP_REASS_LOGGING 1051 tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_NEW_ENTRY, te); 1052 #endif 1053 present: 1054 /* 1055 * Present data to user, advancing rcv_nxt through 1056 * completed sequence space. 1057 */ 1058 if (!TCPS_HAVEESTABLISHED(tp->t_state)) 1059 return (0); 1060 q = TAILQ_FIRST(&tp->t_segq); 1061 KASSERT(q == NULL || SEQ_GEQ(q->tqe_start, tp->rcv_nxt), 1062 ("Reassembly queue for %p has stale entry at head", tp)); 1063 if (!q || q->tqe_start != tp->rcv_nxt) { 1064 #ifdef TCP_REASS_LOGGING 1065 tcp_reass_log_dump(tp); 1066 #endif 1067 return (0); 1068 } 1069 SOCKBUF_LOCK(&so->so_rcv); 1070 do { 1071 tp->rcv_nxt += q->tqe_len; 1072 flags = q->tqe_flags & TH_FIN; 1073 nq = TAILQ_NEXT(q, tqe_q); 1074 TAILQ_REMOVE(&tp->t_segq, q, tqe_q); 1075 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1076 m_freem(q->tqe_m); 1077 } else { 1078 #ifdef TCP_REASS_LOGGING 1079 tcp_reass_log_new_in(tp, q->tqe_start, q->tqe_len, q->tqe_m, TCP_R_LOG_READ, q); 1080 if (th != NULL) { 1081 tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 1); 1082 } else { 1083 tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 1); 1084 } 1085 #endif 1086 sbappendstream_locked(&so->so_rcv, q->tqe_m, 0); 1087 } 1088 #ifdef TCP_REASS_LOGGING 1089 if (th != NULL) { 1090 tcp_log_reassm(tp, q, NULL, th->th_seq, *tlenp, TCP_R_LOG_READ, 2); 1091 } else { 1092 tcp_log_reassm(tp, q, NULL, 0, 0, TCP_R_LOG_READ, 2); 1093 } 1094 #endif 1095 KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt, 1096 ("tp:%p seg queue goes negative", tp)); 1097 tp->t_segqmbuflen -= q->tqe_mbuf_cnt; 1098 uma_zfree(tcp_reass_zone, q); 1099 tp->t_segqlen--; 1100 q = nq; 1101 } while (q && q->tqe_start == tp->rcv_nxt); 1102 if (TAILQ_EMPTY(&tp->t_segq) && 1103 (tp->t_segqmbuflen != 0)) { 1104 #ifdef INVARIANTS 1105 panic("tp:%p segq:%p len:%d queue empty", 1106 tp, &tp->t_segq, tp->t_segqmbuflen); 1107 #else 1108 #ifdef TCP_REASS_LOGGING 1109 if (th != NULL) { 1110 tcp_log_reassm(tp, NULL, NULL, th->th_seq, *tlenp, TCP_R_LOG_ZERO, 0); 1111 } else { 1112 tcp_log_reassm(tp, NULL, NULL, 0, 0, TCP_R_LOG_ZERO, 0); 1113 } 1114 #endif 1115 tp->t_segqmbuflen = 0; 1116 #endif 1117 } 1118 #ifdef TCP_REASS_LOGGING 1119 tcp_reass_log_dump(tp); 1120 #endif 1121 tp->t_flags |= TF_WAKESOR; 1122 return (flags); 1123 } 1124