/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

static int tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS);
static int tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static VNET_DEFINE(int, tcp_reass_maxseg) = 0;
#define	V_tcp_reass_maxseg	VNET(tcp_reass_maxseg)
SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, maxsegments,
    CTLTYPE_INT | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_reass_maxseg), 0, &tcp_reass_sysctl_maxseg, "I",
    "Global maximum number of TCP Segments in Reassembly Queue");

static VNET_DEFINE(int, tcp_reass_qsize) = 0;
#define	V_tcp_reass_qsize	VNET(tcp_reass_qsize)
SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, cursegments,
    CTLTYPE_INT | CTLFLAG_RD,
    &VNET_NAME(tcp_reass_qsize), 0, &tcp_reass_sysctl_qsize, "I",
    "Global number of TCP Segments currently in Reassembly Queue");

static VNET_DEFINE(int, tcp_reass_overflows) = 0;
#define	V_tcp_reass_overflows	VNET(tcp_reass_overflows)
SYSCTL_VNET_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
    CTLTYPE_INT | CTLFLAG_RD,
    &VNET_NAME(tcp_reass_overflows), 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

static VNET_DEFINE(uma_zone_t, tcp_reass_zone);
#define	V_tcp_reass_zone	VNET(tcp_reass_zone)

/* Adjust the reassembly zone limit when nmbclusters changes. */
static void
tcp_reass_zone_change(void *tag)
{

	V_tcp_reass_maxseg = nmbclusters / 16;
	uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
}

/* Initialize TCP reassembly queue */
void
tcp_reass_init(void)
{

	V_tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &V_tcp_reass_maxseg);
	V_tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
void
tcp_reass_destroy(void)
{

	uma_zdestroy(V_tcp_reass_zone);
}
#endif

void
tcp_reass_flush(struct tcpcb *tp)
{
	struct tseg_qent *qe;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	while ((qe = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(qe, tqe_q);
		m_freem(qe->tqe_m);
		uma_zfree(V_tcp_reass_zone, qe);
		tp->t_segqlen--;
	}

	KASSERT((tp->t_segqlen == 0),
	    ("TCP reass queue %p segment count is %d instead of 0 after flush.",
	    tp, tp->t_segqlen));
}

/* Refresh and report the zone segment limit via the maxsegments sysctl. */
static int
tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS)
{
	V_tcp_reass_maxseg = uma_zone_get_max(V_tcp_reass_zone);
	return (sysctl_handle_int(oidp, arg1, arg2, req));
}

/* Report the current number of queued segments via the cursegments sysctl. */
static int
tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS)
{
	V_tcp_reass_qsize = uma_zone_get_cur(V_tcp_reass_zone);
	return (sysctl_handle_int(oidp, arg1, arg2, req));
}

int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;
	char *s = NULL;
	int flags;
	struct tseg_qent tqs;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).
	 */

	/*
	 * Call with th==NULL after becoming established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments that can be queued to reduce the
	 * potential for mbuf exhaustion.  For best performance, we want to be
	 * able to queue a full window's worth of segments.  The size of the
	 * socket receive buffer determines our advertised window and grows
	 * automatically when socket buffer autotuning is enabled.  Use it as
	 * the basis for our queue limit.
	 * Always let through the missing segment that caused this queueing.
	 * NB: Access to the socket buffer is left intentionally unlocked as
	 * we can tolerate stale information here.
	 *
	 * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
	 * should work but causes packets to be dropped when they shouldn't.
	 * Investigate why and re-evaluate the below limit after the behaviour
	 * is understood.
	 */
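	/*
	 * Illustrative example (editor's addition, numbers assumed): with a
	 * 64 KB receive buffer (so_rcv.sb_hiwat == 65536) and a 1460-byte
	 * maximum segment size (t_maxseg), the limit below evaluates to
	 * 65536 / 1460 + 1 == 45 queued segments before further out-of-order
	 * segments are dropped.
	 */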
	if (th->th_seq != tp->rcv_nxt &&
	    tp->t_segqlen >= (so->so_rcv.sb_hiwat / tp->t_maxseg) + 1) {
		V_tcp_reass_overflows++;
		TCPSTAT_INC(tcps_rcvmemdrop);
		m_freem(m);
		*tlenp = 0;
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
		    NULL))) {
			log(LOG_DEBUG, "%s; %s: queue limit reached, "
			    "segment dropped\n", s, __func__);
			free(s, M_TCPLOG);
		}
		return (0);
	}

	/*
	 * Allocate a new queue entry.  If we can't, or hit the zone limit,
	 * just drop the pkt.
	 *
	 * Use a temporary structure on the stack for the missing segment
	 * when the zone is exhausted.  Otherwise we may get stuck.
	 */
	te = uma_zalloc(V_tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		if (th->th_seq != tp->rcv_nxt) {
			TCPSTAT_INC(tcps_rcvmemdrop);
			m_freem(m);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th,
			    NULL, NULL))) {
				log(LOG_DEBUG, "%s; %s: global zone limit "
				    "reached, segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			return (0);
		} else {
			bzero(&tqs, sizeof(struct tseg_qent));
			te = &tqs;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th,
			    NULL, NULL))) {
				log(LOG_DEBUG,
				    "%s; %s: global zone limit reached, using "
				    "stack for missing segment\n", s, __func__);
				free(s, M_TCPLOG);
			}
		}
	}
	tp->t_segqlen++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				TCPSTAT_INC(tcps_rcvduppack);
				TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
				m_freem(m);
				uma_zfree(V_tcp_reass_zone, te);
				tp->t_segqlen--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(V_tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	}
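
	/*
	 * Illustrative example for the trimming loop above (editor's
	 * addition, numbers assumed): if the new segment carries sequence
	 * numbers 2000-2999 (*tlenp == 1000) and the next queued segment q
	 * starts at 2600 with 800 bytes, then i == (2000 + 1000) - 2600 ==
	 * 400, so the first 400 bytes of q are trimmed and q now covers
	 * 3000-3399.
	 */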

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		KASSERT(te != &tqs, ("%s: temporary stack based entry not "
		    "first element in queue", __func__));
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			m_freem(q->tqe_m);
		else
			sbappendstream_locked(&so->so_rcv, q->tqe_m);
		if (q != &tqs)
			uma_zfree(V_tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	ND6_HINT(tp);
	sorwakeup_locked(so);
	return (flags);
}