/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

/* For debugging we want counters and BB logging */
/* #define TCP_REASS_COUNTERS 1 */
/* #define TCP_REASS_LOGGING 1 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCP_REASS_LOGGING
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_hpts.h>
#endif
#include <netinet/tcpip.h>

#define TCP_R_LOG_ADD		1
#define TCP_R_LOG_LIMIT_REACHED	2
#define TCP_R_LOG_APPEND	3
#define TCP_R_LOG_PREPEND	4
#define TCP_R_LOG_REPLACE	5
#define TCP_R_LOG_MERGE_INTO	6
#define TCP_R_LOG_NEW_ENTRY	7
#define TCP_R_LOG_READ		8
#define TCP_R_LOG_ZERO		9
#define TCP_R_LOG_DUMP		10
#define TCP_R_LOG_TRIM		11

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Segment Reassembly Queue");

static SYSCTL_NODE(_net_inet_tcp_reass, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Segment Reassembly stats");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

static uma_zone_t tcp_reass_zone;
SYSCTL_UMA_CUR(_net_inet_tcp_reass, OID_AUTO, cursegments, 0,
    &tcp_reass_zone,
    "Global number of TCP Segments currently in Reassembly Queue");

static u_int tcp_reass_maxqueuelen = 100;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, maxqueuelen, CTLFLAG_RWTUN,
    &tcp_reass_maxqueuelen, 0,
    "Maximum number of TCP Segments per Reassembly Queue");

static int tcp_new_limits = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, new_limit, CTLFLAG_RWTUN,
    &tcp_new_limits, 0,
    "Use the new guard-based limit method instead of the queue-length limit");

static u_int tcp_reass_queue_guard = 16;
SYSCTL_UINT(_net_inet_tcp_reass, OID_AUTO, queueguard, CTLFLAG_RWTUN,
    &tcp_reass_queue_guard, 16,
    "Number of TCP Segments in Reassembly Queue where we flip over to guard mode");

#ifdef TCP_REASS_COUNTERS

counter_u64_t reass_entry;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, entry, CTLFLAG_RD,
    &reass_entry, "A segment entered reassembly");

counter_u64_t reass_path1;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path1, CTLFLAG_RD,
    &reass_path1, "Took path 1");

counter_u64_t reass_path2;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path2, CTLFLAG_RD,
    &reass_path2, "Took path 2");

counter_u64_t reass_path3;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path3, CTLFLAG_RD,
    &reass_path3, "Took path 3");

counter_u64_t reass_path4;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path4, CTLFLAG_RD,
    &reass_path4, "Took path 4");

counter_u64_t reass_path5;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path5, CTLFLAG_RD,
    &reass_path5, "Took path 5");

counter_u64_t reass_path6;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path6, CTLFLAG_RD,
    &reass_path6, "Took path 6");

counter_u64_t reass_path7;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, path7, CTLFLAG_RD,
    &reass_path7, "Took path 7");

counter_u64_t reass_fullwalk;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, fullwalk, CTLFLAG_RD,
    &reass_fullwalk, "Took a full walk of the queue");

counter_u64_t reass_nospace;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, nospace, CTLFLAG_RD,
    &reass_nospace, "Had no mbuf capacity");

counter_u64_t merge_fwd;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_fwd, CTLFLAG_RD,
    &merge_fwd, "Ran merge fwd");

counter_u64_t merge_into;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, merge_into, CTLFLAG_RD,
    &merge_into, "Ran merge into");

counter_u64_t tcp_zero_input;
SYSCTL_COUNTER_U64(_net_inet_tcp_reass_stats, OID_AUTO, zero_input, CTLFLAG_RD,
    &tcp_zero_input, "The reassembly buffer saw a zero-length segment or a data-less FIN before ESTABLISHED");

#endif

/* Adjust the reassembly zone limit when nmbclusters changes. */
static void
tcp_reass_zone_change(void *tag)
{

	/* Set the zone limit and read back the effective value. */
	tcp_reass_maxseg = nmbclusters / 16;
	tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
	    tcp_reass_maxseg);
}

#ifdef TCP_REASS_LOGGING

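/*
 * Record one reassembly event (action) in the black box (BB) log,
 * capturing the state of up to two queue entries (q and p).
 */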
static void
tcp_log_reassm(struct tcpcb *tp, struct tseg_qent *q, struct tseg_qent *p,
    tcp_seq seq, int len, uint8_t action, int instance)
{
	struct socket *so = tptosocket(tp);
	uint32_t cts;
	struct timeval tv;

	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;

		memset(&log, 0, sizeof(log));
		cts = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = seq;
		log.u_bbr.cur_del_rate = (uint64_t)q;
		log.u_bbr.delRate = (uint64_t)p;
		if (q != NULL) {
			log.u_bbr.flex2 = q->tqe_start;
			log.u_bbr.flex3 = q->tqe_len;
			log.u_bbr.flex4 = q->tqe_mbuf_cnt;
			log.u_bbr.hptsi_gain = q->tqe_flags;
		}
		if (p != NULL) {
			log.u_bbr.flex5 = p->tqe_start;
			log.u_bbr.pkts_out = p->tqe_len;
			log.u_bbr.epoch = p->tqe_mbuf_cnt;
			log.u_bbr.cwnd_gain = p->tqe_flags;
		}
		log.u_bbr.flex6 = tp->t_segqmbuflen;
		log.u_bbr.flex7 = instance;
		log.u_bbr.flex8 = action;
		log.u_bbr.timeStamp = cts;
		TCP_LOG_EVENTP(tp, NULL, &so->so_rcv, &so->so_snd,
		    TCP_LOG_REASS, 0,
		    len, &log, false, &tv);
	}
}

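/* Dump every entry currently on the reassembly queue into the BB log. */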
static void
tcp_reass_log_dump(struct tcpcb *tp)
{
	struct tseg_qent *q;

	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
			tcp_log_reassm(tp, q, NULL, q->tqe_start,
			    q->tqe_len, TCP_R_LOG_DUMP, 0);
		}
	}
}

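/*
 * Log an arriving segment, first walking its mbuf chain so the logged
 * instance value reflects the total bytes held in the chain.
 */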
static void
tcp_reass_log_new_in(struct tcpcb *tp, tcp_seq seq, int len, struct mbuf *m,
    int logval, struct tseg_qent *q)
{
	int cnt;
	struct mbuf *t;

	cnt = 0;
	t = m;
	while (t) {
		cnt += t->m_len;
		t = t->m_next;
	}
	tcp_log_reassm(tp, q, NULL, seq, len, logval, cnt);
}

#endif

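/*
 * Set up the global reassembly zone and its limit, allocate the
 * optional debug counters, and register for nmbclusters changes.
 */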
void
tcp_reass_global_init(void)
{

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
	tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Set the zone limit and read back the effective value. */
	tcp_reass_maxseg = uma_zone_set_max(tcp_reass_zone,
	    tcp_reass_maxseg);
#ifdef TCP_REASS_COUNTERS
	reass_path1 = counter_u64_alloc(M_WAITOK);
	reass_path2 = counter_u64_alloc(M_WAITOK);
	reass_path3 = counter_u64_alloc(M_WAITOK);
	reass_path4 = counter_u64_alloc(M_WAITOK);
	reass_path5 = counter_u64_alloc(M_WAITOK);
	reass_path6 = counter_u64_alloc(M_WAITOK);
	reass_path7 = counter_u64_alloc(M_WAITOK);
	reass_fullwalk = counter_u64_alloc(M_WAITOK);
	reass_nospace = counter_u64_alloc(M_WAITOK);
	reass_entry = counter_u64_alloc(M_WAITOK);
	merge_fwd = counter_u64_alloc(M_WAITOK);
	merge_into = counter_u64_alloc(M_WAITOK);
	tcp_zero_input = counter_u64_alloc(M_WAITOK);
#endif
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

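/* Free all segments held on the connection's reassembly queue. */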
void
tcp_reass_flush(struct tcpcb *tp)
{
	struct tseg_qent *qe;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	while ((qe = TAILQ_FIRST(&tp->t_segq)) != NULL) {
		TAILQ_REMOVE(&tp->t_segq, qe, tqe_q);
		m_freem(qe->tqe_m);
		uma_zfree(tcp_reass_zone, qe);
		tp->t_segqlen--;
	}
	tp->t_segqmbuflen = 0;
	KASSERT((tp->t_segqlen == 0),
	    ("TCP reass queue %p segment count is %d instead of 0 after flush.",
	    tp, tp->t_segqlen));
}

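/*
 * Append the segment in m to the tail entry "last", which the new data
 * abuts (any overlap has been trimmed by the caller). mlast is the last
 * mbuf of the chain and lenofoh its storage overhead.
 */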
static void
tcp_reass_append(struct tcpcb *tp, struct tseg_qent *last,
    struct mbuf *m, struct tcphdr *th, int tlen,
    struct mbuf *mlast, int lenofoh)
{

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, last, NULL, th->th_seq, tlen, TCP_R_LOG_APPEND, 0);
#endif
	last->tqe_len += tlen;
	last->tqe_m->m_pkthdr.len += tlen;
	/* Preserve the FIN bit if it's there. */
	last->tqe_flags |= (tcp_get_flags(th) & TH_FIN);
	last->tqe_last->m_next = m;
	last->tqe_last = mlast;
	last->tqe_mbuf_cnt += lenofoh;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, last->tqe_start, lenofoh, last->tqe_m,
	    TCP_R_LOG_APPEND,
	    last);
#endif
}

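/*
 * Attach the segment in m in front of the entry "first", trimming the
 * front of "first" wherever the new data overlaps it.
 */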
static void
tcp_reass_prepend(struct tcpcb *tp, struct tseg_qent *first, struct mbuf *m,
    struct tcphdr *th, int tlen, struct mbuf *mlast, int lenofoh)
{
	int i;

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, first, NULL, th->th_seq, tlen, TCP_R_LOG_PREPEND, 0);
#endif
	if (SEQ_GT((th->th_seq + tlen), first->tqe_start)) {
		/* The new data overlaps into the old. */
		i = (th->th_seq + tlen) - first->tqe_start;
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 1);
#endif
		m_adj(first->tqe_m, i);
		first->tqe_len -= i;
		first->tqe_start += i;
	}
	/* OK, now set up our chain to point to the old first. */
	mlast->m_next = first->tqe_m;
	first->tqe_m = m;
	first->tqe_len += tlen;
	first->tqe_start = th->th_seq;
	first->tqe_m->m_pkthdr.len = first->tqe_len;
	first->tqe_mbuf_cnt += lenofoh;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, tlen);
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, first->tqe_start, lenofoh, first->tqe_m,
	    TCP_R_LOG_PREPEND,
	    first);
#endif
}

static void
tcp_reass_replace(struct tcpcb *tp, struct tseg_qent *q, struct mbuf *m,
    tcp_seq seq, int len, struct mbuf *mlast, int mbufoh, uint16_t flags)
{
	/*
	 * Free the data in q, and replace
	 * it with the new segment.
	 */
	int len_dif;

#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, q, NULL, seq, len, TCP_R_LOG_REPLACE, 0);
#endif
	m_freem(q->tqe_m);
	KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
	    ("Tp:%p seg queue goes negative", tp));
	tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
	q->tqe_mbuf_cnt = mbufoh;
	q->tqe_m = m;
	q->tqe_last = mlast;
	q->tqe_start = seq;
	if (len > q->tqe_len)
		len_dif = len - q->tqe_len;
	else
		len_dif = 0;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, len_dif);
	q->tqe_len = len;
	q->tqe_flags = (flags & TH_FIN);
	q->tqe_m->m_pkthdr.len = q->tqe_len;
	tp->t_segqmbuflen += mbufoh;
}

static void
tcp_reass_merge_into(struct tcpcb *tp, struct tseg_qent *ent,
    struct tseg_qent *q)
{
	/*
	 * Merge q into ent and free q from the list.
	 */
#ifdef TCP_REASS_LOGGING
	tcp_log_reassm(tp, q, ent, 0, 0, TCP_R_LOG_MERGE_INTO, 0);
#endif
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(merge_into, 1);
#endif
	ent->tqe_last->m_next = q->tqe_m;
	ent->tqe_last = q->tqe_last;
	ent->tqe_len += q->tqe_len;
	ent->tqe_mbuf_cnt += q->tqe_mbuf_cnt;
	ent->tqe_m->m_pkthdr.len += q->tqe_len;
	ent->tqe_flags |= (q->tqe_flags & TH_FIN);
	TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
	uma_zfree(tcp_reass_zone, q);
	tp->t_segqlen--;
}

static void
tcp_reass_merge_forward(struct tcpcb *tp, struct tseg_qent *ent)
{
	struct tseg_qent *q, *qtmp;
	int i;
	tcp_seq max;
	/*
	 * Given an entry, merge forward into any later
	 * entries that ent overlaps.
	 */

	max = ent->tqe_start + ent->tqe_len;
	q = TAILQ_NEXT(ent, tqe_q);
	if (q == NULL) {
		/* Nothing left. */
		return;
	}
	TAILQ_FOREACH_FROM_SAFE(q, &tp->t_segq, tqe_q, qtmp) {
		if (SEQ_GT(q->tqe_start, max)) {
			/* q starts beyond our end, we are done. */
			break;
		}
		/* We have some or all that are overlapping. */
		if (SEQ_GEQ(max, (q->tqe_start + q->tqe_len))) {
			/* It consumes it all. */
			tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
			m_freem(q->tqe_m);
			TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
			uma_zfree(tcp_reass_zone, q);
			tp->t_segqlen--;
			continue;
		}
		/*
		 * Trim the q entry to dovetail to this one
		 * and then merge q into ent updating max
		 * in the process.
		 */
		i = max - q->tqe_start;
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, q, NULL, 0, i, TCP_R_LOG_TRIM, 2);
#endif
		m_adj(q->tqe_m, i);
		q->tqe_len -= i;
		q->tqe_start += i;
		tcp_reass_merge_into(tp, ent, q);
		max = ent->tqe_start + ent->tqe_len;
	}
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(merge_fwd, 1);
#endif
}

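/*
 * Return the storage footprint of an mbuf chain (MSIZE per mbuf plus
 * any external buffer sizes) and point *mlast at its final mbuf for
 * use as a tail pointer.
 */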
static int
tcp_reass_overhead_of_chain(struct mbuf *m, struct mbuf **mlast)
{
	int len = MSIZE;

	if (m->m_flags & M_EXT)
		len += m->m_ext.ext_size;
	while (m->m_next != NULL) {
		m = m->m_next;
		len += MSIZE;
		if (m->m_flags & M_EXT)
			len += m->m_ext.ext_size;
	}
	*mlast = m;
	return (len);
}

/*
 * NOTE!!! the new tcp-reassembly code *must not* use
 * m_adj() with a negative index. That alters the chain
 * of mbufs (by possibly chopping trailing mbufs). At
 * the front of tcp_reass we count the mbuf overhead
 * and set up the tail pointer. If we use m_adj(m, -5)
 * we could corrupt the tail pointer. Currently the
 * code only uses m_adj(m, positive-num). If this
 * changes, appropriate changes to update mlast would
 * be needed.
 */
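/*
 * Insert the segment (m, th) into tp's reassembly queue, trimming and
 * merging against existing entries, and feed any data that becomes
 * contiguous at rcv_nxt to the socket buffer. On return, *seq_start and
 * *tlenp describe the queue entry the data landed in; the return value
 * carries TH_FIN once a queued FIN has been consumed, otherwise 0.
 */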
int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, tcp_seq *seq_start,
    int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q, *last, *first;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq = NULL;
	struct tseg_qent *te = NULL;
	struct mbuf *mlast = NULL;
	struct inpcb *inp = tptoinpcb(tp);
	struct socket *so = tptosocket(tp);
	struct sockbuf *sb = &so->so_rcv;
	char *s = NULL;
	int flags, i, lenofoh;

	INP_WLOCK_ASSERT(inp);
	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).
	 */

	KASSERT(th == NULL || (seq_start != NULL && tlenp != NULL),
	    ("tcp_reass called with illegal parameter combination "
	    "(tp=%p, th=%p, seq_start=%p, tlenp=%p, m=%p)",
	    tp, th, seq_start, tlenp, m));
	/*
	 * Call with th == NULL after becoming established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;
	KASSERT(SEQ_GEQ(th->th_seq, tp->rcv_nxt),
	    ("Attempt to add old entry to reassembly queue (th=%p, tp=%p)",
	    th, tp));
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_ADD, NULL);
#endif
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(reass_entry, 1);
#endif
	/*
	 * Check for zero length data.
	 */
	if ((*tlenp == 0) && ((tcp_get_flags(th) & TH_FIN) == 0)) {
		/*
		 * A zero-length segment does no
		 * one any good. We could check
		 * the rcv_nxt <-> rcv_wnd but that's
		 * already done for us by the caller.
		 */
strip_fin:
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(tcp_zero_input, 1);
#endif
		m_freem(m);
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	} else if ((*tlenp == 0) &&
	    (tcp_get_flags(th) & TH_FIN) &&
	    !TCPS_HAVEESTABLISHED(tp->t_state)) {
		/*
		 * We have not yet established, and we
		 * have a FIN and no data. Let's treat
		 * this the same as if the FIN were
		 * not present. We don't want to save
		 * the FIN bit in a reassembly buffer;
		 * we want to get established first before
		 * we do that (the peer will retransmit).
		 */
		goto strip_fin;
	}
	/*
	 * Will it fit?
	 */
	lenofoh = tcp_reass_overhead_of_chain(m, &mlast);
	if ((th->th_seq != tp->rcv_nxt || !TCPS_HAVEESTABLISHED(tp->t_state)) &&
	    (sb->sb_mbcnt + tp->t_segqmbuflen + lenofoh) > sb->sb_mbmax) {
		/* No room. */
		TCPSTAT_INC(tcps_rcvreassfull);
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_nospace, 1);
#endif
#ifdef TCP_REASS_LOGGING
		tcp_log_reassm(tp, NULL, NULL, th->th_seq, lenofoh,
		    TCP_R_LOG_LIMIT_REACHED, 0);
#endif
		if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: mbuf count limit reached, "
			    "segment dropped\n", s, __func__);
			free(s, M_TCPLOG);
		}
		m_freem(m);
		*tlenp = 0;
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	}
	/*
	 * First let's deal with two common cases: the
	 * segment appends to the back of our collected
	 * segments, or the segment is the next in line.
	 */
	last = TAILQ_LAST_FAST(&tp->t_segq, tseg_qent, tqe_q);
	if (last != NULL) {
		if ((tcp_get_flags(th) & TH_FIN) &&
		    SEQ_LT((th->th_seq + *tlenp), (last->tqe_start + last->tqe_len))) {
			/*
			 * Someone is trying to game us, dump
			 * the segment.
			 */
			*tlenp = 0;
			m_freem(m);
			return (0);
		}
		if ((SEQ_GEQ(th->th_seq, last->tqe_start)) &&
		    (SEQ_GEQ((last->tqe_start + last->tqe_len), th->th_seq))) {
			/* Common case, trailing segment is added. */
			/**
			 *                                +--last
			 *                                v
			 * reassembly buffer |---|  |---| |---|
			 * new segment                      |---|
			 */
#ifdef TCP_REASS_COUNTERS
			counter_u64_add(reass_path1, 1);
#endif
			if (SEQ_GT((last->tqe_start + last->tqe_len), th->th_seq)) {
				i = (last->tqe_start + last->tqe_len) - th->th_seq;
				if (i < *tlenp) {
#ifdef TCP_REASS_LOGGING
					tcp_log_reassm(tp, last, NULL, 0, i,
					    TCP_R_LOG_TRIM, 3);
#endif
					/* Keep th_seq in sync with the trim. */
					th->th_seq += i;
					m_adj(m, i);
					*tlenp -= i;
				} else {
					/* Complete overlap. */
					TCPSTAT_INC(tcps_rcvduppack);
					TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
					m_freem(m);
					*tlenp = last->tqe_len;
					*seq_start = last->tqe_start;
					return (0);
				}
			}
			if (last->tqe_flags & TH_FIN) {
				/*
				 * We have data after the FIN on the last?
				 */
				*tlenp = 0;
				m_freem(m);
				return (0);
			}
			tcp_reass_append(tp, last, m, th, *tlenp, mlast, lenofoh);
			tp->t_segqmbuflen += lenofoh;
			*seq_start = last->tqe_start;
			*tlenp = last->tqe_len;
			return (0);
		} else if (SEQ_GT(th->th_seq, (last->tqe_start + last->tqe_len))) {
			/*
			 * Second common case, we missed
			 * another one and have something more
			 * for the end.
			 */
			/**
			 *                                +--last
			 *                                v
			 * reassembly buffer |---|  |---| |---|
			 * new segment                           |---|
			 */
			if (last->tqe_flags & TH_FIN) {
				/*
				 * We have data after the FIN on the last?
				 */
				*tlenp = 0;
				m_freem(m);
				return (0);
			}
#ifdef TCP_REASS_COUNTERS
			counter_u64_add(reass_path2, 1);
#endif
			p = last;
			goto new_entry;
		}
	} else {
		/* First segment (it's NULL). */
		goto new_entry;
	}
	first = TAILQ_FIRST(&tp->t_segq);
	if (SEQ_LT(th->th_seq, first->tqe_start) &&
	    SEQ_GEQ((th->th_seq + *tlenp), first->tqe_start) &&
	    SEQ_LT((th->th_seq + *tlenp), (first->tqe_start + first->tqe_len))) {
		/*
		 * This segment prepends data to the head of the
		 * queue, which may be the entry we want most (the
		 * one at the left edge).
		 */
		/**
		 *  first-------+
		 *              v
		 * rea: |---|   |---|  |---|
		 * new:       |---|
		 * Note the case we do not deal with here is:
		 * rea= |---|   |---|  |---|
		 * new=   |----|
		 * due to the fact that it could be
		 * new=   |--------------------|
		 * and we might need to merge forward.
		 */
#ifdef INVARIANTS
		struct mbuf *firstmbuf;
#endif

#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path3, 1);
#endif
		if (SEQ_LT(th->th_seq, tp->rcv_nxt)) {
			/*
			 * The resend was even before
			 * what we have. We need to trim it.
			 * Note TSNH (this should not happen,
			 * as it should be trimmed
			 * before the call to tcp_reass()).
			 */
#ifdef INVARIANTS
			panic("th->th_seq:%u rcv_nxt:%u tp:%p not pre-trimmed",
			    th->th_seq, tp->rcv_nxt, tp);
#else
			i = tp->rcv_nxt - th->th_seq;
#ifdef TCP_REASS_LOGGING
			tcp_log_reassm(tp, first, NULL, 0, i, TCP_R_LOG_TRIM, 4);
#endif
			m_adj(m, i);
			th->th_seq += i;
			*tlenp -= i;
#endif
		}
#ifdef INVARIANTS
		firstmbuf = first->tqe_m;
#endif
		tcp_reass_prepend(tp, first, m, th, *tlenp, mlast, lenofoh);
#ifdef INVARIANTS
		if (firstmbuf == first->tqe_m) {
			panic("First stayed same m:%p firstmbuf:%p first->tqe_m:%p tp:%p first:%p",
			    m, firstmbuf, first->tqe_m, tp, first);
		} else if (first->tqe_m != m) {
			panic("First did not change to m:%p firstmbuf:%p first->tqe_m:%p tp:%p first:%p",
			    m, firstmbuf, first->tqe_m, tp, first);
		}
#endif
		tp->t_segqmbuflen += lenofoh;
		*seq_start = first->tqe_start;
		*tlenp = first->tqe_len;
		goto present;
	} else if (SEQ_LT((th->th_seq + *tlenp), first->tqe_start)) {
		/* New segment is before our earliest segment. */
		/**
		 * first---->+
		 *           v
		 * rea= |---| ....
		 * new= |---|
		 *
		 */
		goto new_entry;
	}
	/*
	 * Find a segment which begins after this one does.
	 */
#ifdef TCP_REASS_COUNTERS
	counter_u64_add(reass_fullwalk, 1);
#endif
	TAILQ_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_start, th->th_seq))
			break;
	}
	p = TAILQ_PREV(q, tsegqe_head, tqe_q);
	/**
	 * Does this fit just in-between only?
	 * i.e.:
	 *      p---+        +----q
	 *          v        v
	 *     res= |--|     |--|    |--|
	 *     new       |-|
	 */
	if (SEQ_LT((th->th_seq + *tlenp), q->tqe_start) &&
	    ((p == NULL) || (SEQ_GT(th->th_seq, (p->tqe_start + p->tqe_len))))) {
		/* Yep, no overlap. */
		goto new_entry;
	}
	/**
	 * If we reach here we have some (possibly all) overlap
	 * such as:
	 *     res=     |--|     |--|    |--|
	 *     new=          |----|
	 *  or new=     |-----------------|
	 *  or new=         |--------|
	 *  or new=               |---|
	 *  or new=     |-----------|
	 */
	if ((p != NULL) &&
	    (SEQ_LEQ(th->th_seq, (p->tqe_start + p->tqe_len)))) {
		/* conversion to int (in i) handles seq wraparound */

#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path4, 1);
#endif
		i = p->tqe_start + p->tqe_len - th->th_seq;
		if (i >= 0) {
			if (i >= *tlenp) {
				/**
				 * prev seg---->+
				 *              v
				 * reassembly buffer |---|
				 * new segment        |-|
				 */
				TCPSTAT_INC(tcps_rcvduppack);
				TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
				*tlenp = p->tqe_len;
				*seq_start = p->tqe_start;
				m_freem(m);
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes. Note this probably
				 * will not work and we will return.
				 */
				return (0);
			}
			if (i > 0) {
				/**
				 * prev seg---->+
				 *              v
				 * reassembly buffer |---|
				 * new segment         |-----|
				 */
#ifdef TCP_REASS_COUNTERS
				counter_u64_add(reass_path5, 1);
#endif
#ifdef TCP_REASS_LOGGING
				tcp_log_reassm(tp, p, NULL, 0, i, TCP_R_LOG_TRIM, 5);
#endif
				m_adj(m, i);
				*tlenp -= i;
				th->th_seq += i;
			}
		}
		if (th->th_seq == (p->tqe_start + p->tqe_len)) {
			/*
			 * If it dovetails in with this one,
			 * append it.
			 */
			/**
			 * prev seg---->+
			 *              v
			 * reassembly buffer |--|     |---|
			 * new segment          |--|
			 * (note: it was trimmed above if it overlapped)
			 */
			tcp_reass_append(tp, p, m, th, *tlenp, mlast, lenofoh);
			tp->t_segqmbuflen += lenofoh;
		} else {
#ifdef INVARIANTS
			panic("Impossible cut th_seq:%u p->seq:%u(%d) p:%p tp:%p",
			    th->th_seq, p->tqe_start, p->tqe_len,
			    p, tp);
#endif
			*tlenp = 0;
			m_freem(m);
			return (0);
		}
		q = p;
	} else {
		/*
		 * The new data runs over the
		 * top of previously sack'd data (in q).
		 * It may be partially overlapping, or
		 * it may overlap the entire segment.
		 */
#ifdef TCP_REASS_COUNTERS
		counter_u64_add(reass_path6, 1);
#endif
		if (SEQ_GEQ((th->th_seq + *tlenp), (q->tqe_start + q->tqe_len))) {
			/* It consumes it all. */
			/**
			 * next seg---->+
			 *              v
			 * reassembly buffer |--|     |---|
			 * new segment       |-----------------|
			 */
#ifdef TCP_REASS_COUNTERS
			counter_u64_add(reass_path7, 1);
#endif
			tcp_reass_replace(tp, q, m, th->th_seq, *tlenp, mlast,
			    lenofoh, tcp_get_flags(th));
		} else {
			/*
			 * We just need to prepend the data
			 * to this. It does not overrun
			 * the end.
			 */
			/**
			 * next seg---->+
			 *              v
			 * reassembly buffer |--|     |---|
			 * new segment       |----------|
			 */
			tcp_reass_prepend(tp, q, m, th, *tlenp, mlast, lenofoh);
			tp->t_segqmbuflen += lenofoh;
		}
	}
	/* Now does it go further than that? */
	tcp_reass_merge_forward(tp, q);
	*seq_start = q->tqe_start;
	*tlenp = q->tqe_len;
	goto present;

	/*
	 * When we reach here we can't combine the new segment
	 * with any existing entry.
	 *
	 * Limit the number of segments that can be queued to reduce the
	 * potential for mbuf exhaustion. For best performance, we want to be
	 * able to queue a full window's worth of segments. The size of the
	 * socket receive buffer determines our advertised window and grows
	 * automatically when socket buffer autotuning is enabled. Use it as the
	 * basis for our queue limit.
	 *
	 * However, allow the user to specify a ceiling for the number of
	 * segments in each queue.
	 *
	 * Always let through the missing segment that caused this queue
	 * to exist in the first place.
	 * NB: Access to the socket buffer is left intentionally unlocked as we
	 * can tolerate stale information here.
	 *
	 * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
	 * should work but causes packets to be dropped when they shouldn't.
	 * Investigate why and re-evaluate the below limit after the behaviour
	 * is understood.
	 */
new_entry:
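	/*
	 * An in-sequence segment on an established connection needs no
	 * queue entry at all: advance rcv_nxt and hand the mbufs
	 * straight to the socket buffer.
	 */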
	if (th->th_seq == tp->rcv_nxt && TCPS_HAVEESTABLISHED(tp->t_state)) {
		tp->rcv_nxt += *tlenp;
		flags = tcp_get_flags(th) & TH_FIN;
		TCPSTAT_INC(tcps_rcvoopack);
		TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
		SOCK_RECVBUF_LOCK(so);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			m_freem(m);
		} else {
			sbappendstream_locked(&so->so_rcv, m, 0);
		}
		tp->t_flags |= TF_WAKESOR;
		return (flags);
	}
	if (tcp_new_limits) {
		if ((tp->t_segqlen > tcp_reass_queue_guard) &&
		    (*tlenp < MSIZE)) {
			/*
			 * This is really a lie; we are not full, but
			 * we are above the guard threshold and the
			 * segment is smaller than a single mbuf
			 * (MSIZE). Since we already know it cannot be
			 * combined with any existing entry, drop it.
			 */
			TCPSTAT_INC(tcps_rcvreassfull);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: queue limit reached, "
				    "segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			m_freem(m);
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_dump(tp);
#endif
			return (0);
		}
	} else {
		if (tp->t_segqlen >= min((so->so_rcv.sb_hiwat / tp->t_maxseg) + 1,
		    tcp_reass_maxqueuelen)) {
			TCPSTAT_INC(tcps_rcvreassfull);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: queue limit reached, "
				    "segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			m_freem(m);
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_dump(tp);
#endif
			return (0);
		}
	}
	/*
	 * Allocate a new queue entry. If we can't (or hit the zone limit),
	 * just drop the packet.
	 */
	te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		TCPSTAT_INC(tcps_rcvmemdrop);
		m_freem(m);
		*tlenp = 0;
		if ((s = tcp_log_addrs(&inp->inp_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: global zone limit "
			    "reached, segment dropped\n", s, __func__);
			free(s, M_TCPLOG);
		}
		return (0);
	}
	tp->t_segqlen++;
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);
	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_flags = tcp_get_flags(th);
	te->tqe_len = *tlenp;
	te->tqe_start = th->th_seq;
	te->tqe_last = mlast;
	te->tqe_mbuf_cnt = lenofoh;
	tp->t_segqmbuflen += te->tqe_mbuf_cnt;
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		TAILQ_INSERT_AFTER(&tp->t_segq, p, te, tqe_q);
	}
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_new_in(tp, th->th_seq, *tlenp, m, TCP_R_LOG_NEW_ENTRY, te);
#endif
present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = TAILQ_FIRST(&tp->t_segq);
	KASSERT(q == NULL || SEQ_GEQ(q->tqe_start, tp->rcv_nxt),
	    ("Reassembly queue for %p has stale entry at head", tp));
	if (!q || q->tqe_start != tp->rcv_nxt) {
#ifdef TCP_REASS_LOGGING
		tcp_reass_log_dump(tp);
#endif
		return (0);
	}
	SOCK_RECVBUF_LOCK(so);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_flags & TH_FIN;
		nq = TAILQ_NEXT(q, tqe_q);
		TAILQ_REMOVE(&tp->t_segq, q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			m_freem(q->tqe_m);
		} else {
#ifdef TCP_REASS_LOGGING
			tcp_reass_log_new_in(tp, q->tqe_start, q->tqe_len,
			    q->tqe_m, TCP_R_LOG_READ, q);
			if (th != NULL) {
				tcp_log_reassm(tp, q, NULL, th->th_seq,
				    *tlenp, TCP_R_LOG_READ, 1);
			} else {
				tcp_log_reassm(tp, q, NULL, 0, 0,
				    TCP_R_LOG_READ, 1);
			}
#endif
			sbappendstream_locked(&so->so_rcv, q->tqe_m, 0);
		}
#ifdef TCP_REASS_LOGGING
		if (th != NULL) {
			tcp_log_reassm(tp, q, NULL, th->th_seq,
			    *tlenp, TCP_R_LOG_READ, 2);
		} else {
			tcp_log_reassm(tp, q, NULL, 0, 0,
			    TCP_R_LOG_READ, 2);
		}
#endif
		KASSERT(tp->t_segqmbuflen >= q->tqe_mbuf_cnt,
		    ("tp:%p seg queue goes negative", tp));
		tp->t_segqmbuflen -= q->tqe_mbuf_cnt;
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	} while (q && q->tqe_start == tp->rcv_nxt);
	if (TAILQ_EMPTY(&tp->t_segq) &&
	    (tp->t_segqmbuflen != 0)) {
#ifdef INVARIANTS
		panic("tp:%p segq:%p len:%d queue empty",
		    tp, &tp->t_segq, tp->t_segqmbuflen);
#else
#ifdef TCP_REASS_LOGGING
		if (th != NULL) {
			tcp_log_reassm(tp, NULL, NULL, th->th_seq,
			    *tlenp, TCP_R_LOG_ZERO, 0);
		} else {
			tcp_log_reassm(tp, NULL, NULL, 0, 0,
			    TCP_R_LOG_ZERO, 0);
		}
#endif
		tp->t_segqmbuflen = 0;
#endif
	}
#ifdef TCP_REASS_LOGGING
	tcp_reass_log_dump(tp);
#endif
	tp->t_flags |= TF_WAKESOR;
	return (flags);
}