/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
const char tcp_version[] = "%Z%%M%	%I%	%E% SMI";


#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/multidata.h>
#include <sys/multidata_impl.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/zone.h>

#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/ipdrop.h>
#include <inet/tcp_trace.h>

#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <sys/squeue.h>
#include <inet/kssl/ksslapi.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <sys/sdt.h>
#include <rpc/pmap_prot.h>

/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in the tcp_t and conn_t structures,
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_CONNTCP as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t.
 * The tcp_t structure is never compressed and packets always land on
 * the correct TCP perimeter from the time the eager is created till
 * the time the tcp_t dies (as such the old mentat TCP global queue is
 * not used for detached state and no IPSEC checking is required). The
 * global queue is still allocated to send out resets for connections
 * which have no listeners, and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind the squeue (via squeue_enter, squeue_enter_nodrain, or
 * squeue_fill). Since the squeues allow a direct function call, the caller
 * can pass any tcp function having a prototype of edesc_t as an argument
 * (different from the traditional STREAMS model where packets come in only
 * through designated entry points). The list of functions that can be
 * directly called via squeue appears before the usual function prototypes.
 *
 * Referencing:
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on the write side or by doing a classify on the read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. For the
 * read side, the classifier itself puts the reference under the fanout lock
 * to make sure that the tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. ip_tcpopen() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on the application CPU, which might be different from the CPU being
 * interrupted by the NIC for incoming packets. An optimal way would be
 * to figure out the NIC <-> CPU binding at listen time, and assign the
 * outgoing connection to the squeue attached to the CPU that will be
 * interrupted for incoming packets (we know the NIC based on the bind IP
 * address). This might seem like a problem if more data is going out, but
 * the fact is that in most cases the transmit is ACK driven, where the
 * outgoing data normally sits on TCP's xmit queue waiting to be
 * transmitted.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of the various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_conn_request(). But briefly, the squeue is picked by
 * ip_tcp_input()/ip_fanout_tcp_v6() based on the interrupted CPU.
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
 * reference but tcp_close() always drops IP's reference. So if the tcp was
 * not killed, it is sitting in the time_wait list with 2 references - 1 for
 * TCP and 1 because it is in the classifier's connected hash. This is the
 * condition we use to determine that it's OK to clean up the tcp outside of
 * the squeue when the time wait expires (check the ref under the fanout and
 * conn_lock and if it is 2, remove it from the fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provision for (AF_INET, SOCK_STREAM) sockets which
 * can't have 'ipv6_recvpktinfo' set and for these types of sockets, IP
 * will never send an M_CTL to TCP. As such, ip_tcp_input() which handles
 * all TCP packets from the wire makes an IPCL_IS_TCP4_CONNECTED_NO_POLICY
 * check to send packets directly to tcp_rput_data via squeue. Everyone
 * else comes through tcp_input() on the read side.
 *
 * We also make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in the acceptor hash since a sockfs listener can never
 * become an acceptor, and also avoids allocating a tcp_t for the acceptor
 * STREAM since the eager has already been allocated and the accept now
 * happens on the acceptor STREAM. There is a big blob of comment on top of
 * tcp_conn_request explaining the new accept. When the socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to the old
 * behaviour. Once tcp_issocket is unset, it's never set for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPsec policies for new connections. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 */
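/*
 * Illustrative sketch (added for this write-up, not part of the original
 * source): the typical read-side pattern implied by the "Referencing"
 * notes above before handing a packet to a tcp function with an edesc_t
 * prototype. The classifier call, tag value and error handling shown here
 * are assumptions made for illustration only.
 *
 *	conn_t *connp = ipcl_classify(mp, zoneid);   // ref held by classifier
 *	if (connp != NULL) {
 *		squeue_enter(connp->conn_sqp, mp, tcp_input,
 *		    connp, SQTAG_TCP_INPUT);
 *		// the squeue drops the reference once tcp_input() has run,
 *		// so no explicit CONN_DEC_REF() is needed here.
 *	}
 */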

extern major_t TCP6_MAJ;

/*
 * Values for squeue switch:
 * 1: squeue_enter_nodrain
 * 2: squeue_enter
 * 3: squeue_fill
 */
int tcp_squeue_close = 2;
int tcp_squeue_wput = 2;

squeue_func_t tcp_squeue_close_proc;
squeue_func_t tcp_squeue_wput_proc;

/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
int tcp_tx_pull_len = 16;

/*
 * TCP Statistics.
 *
 * How TCP statistics work.
 *
 * There are two types of statistics invoked by two macros.
 *
 * TCP_STAT(name) does non-atomic increment of a named stat counter. It is
 * supposed to be used in non MT-hot paths of the code.
 *
 * TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
 * supposed to be used for DEBUG purposes and may be used on a hot path.
 *
 * Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
 * (use "kstat tcp" to get them).
 *
 * There is also an additional debugging facility that marks tcp_clean_death()
 * instances and saves them in the tcp_t structure. It is triggered by the
 * TCP_TAG_CLEAN_DEATH define. Also, there is a global array of counters for
 * tcp_clean_death() calls that counts the number of times each tag was hit. It
 * is triggered by the TCP_CLD_COUNTERS define.
 *
 * How to add new counters.
 *
 * 1) Add a field in the tcp_stat structure describing your counter.
 * 2) Add a line in tcp_statistics with the name of the counter.
 *
 *    IMPORTANT!! - make sure that both are in sync !!
 * 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
 *
 * Please avoid using private counters which are not kstat-exported.
 *
 * TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death() instances
 * in the tcp_t structure.
 *
 * TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death tags.
 */
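/*
 * Illustrative sketch (added for this write-up, not part of the original
 * source): following the three steps above for a hypothetical counter
 * named "tcp_example_cnt".  The field name and code path are made up.
 *
 *	1) In the tcp_stat_t structure definition:
 *		kstat_named_t	tcp_example_cnt;
 *	2) In tcp_statistics below:
 *		{ "tcp_example_cnt",	KSTAT_DATA_UINT64 },
 *	3) At the point of interest:
 *		TCP_STAT(tcp_example_cnt);	// non MT-hot path
 *	   or
 *		TCP_DBGSTAT(tcp_example_cnt);	// DEBUG / hot path (atomic)
 */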

#ifndef	TCP_DEBUG_COUNTER
#ifdef	DEBUG
#define	TCP_DEBUG_COUNTER 1
#else
#define	TCP_DEBUG_COUNTER 0
#endif
#endif

#define	TCP_CLD_COUNTERS 0

#define	TCP_TAG_CLEAN_DEATH 1
#define	TCP_MAX_CLEAN_DEATH_TAG 32

#ifdef lint
static int _lint_dummy_;
#endif

#if TCP_CLD_COUNTERS
static uint_t tcp_clean_death_stat[TCP_MAX_CLEAN_DEATH_TAG];
#define	TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
#elif defined(lint)
#define	TCP_CLD_STAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_CLD_STAT(x)
#endif

#if TCP_DEBUG_COUNTER
#define	TCP_DBGSTAT(x) atomic_add_64(&(tcp_statistics.x.value.ui64), 1)
#elif defined(lint)
#define	TCP_DBGSTAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_DBGSTAT(x)
#endif

tcp_stat_t tcp_statistics = {
	{ "tcp_time_wait",		KSTAT_DATA_UINT64 },
	{ "tcp_time_wait_syn",		KSTAT_DATA_UINT64 },
	{ "tcp_time_wait_success",	KSTAT_DATA_UINT64 },
	{ "tcp_time_wait_fail",		KSTAT_DATA_UINT64 },
	{ "tcp_reinput_syn",		KSTAT_DATA_UINT64 },
	{ "tcp_ip_output",		KSTAT_DATA_UINT64 },
	{ "tcp_detach_non_time_wait",	KSTAT_DATA_UINT64 },
	{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64 },
	{ "tcp_time_wait_reap",		KSTAT_DATA_UINT64 },
	{ "tcp_clean_death_nondetached", KSTAT_DATA_UINT64 },
	{ "tcp_reinit_calls",		KSTAT_DATA_UINT64 },
	{ "tcp_eager_err1",		KSTAT_DATA_UINT64 },
	{ "tcp_eager_err2",		KSTAT_DATA_UINT64 },
	{ "tcp_eager_blowoff_calls",	KSTAT_DATA_UINT64 },
	{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64 },
	{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64 },
	{ "tcp_not_hard_bound",		KSTAT_DATA_UINT64 },
	{ "tcp_no_listener",		KSTAT_DATA_UINT64 },
	{ "tcp_found_eager",		KSTAT_DATA_UINT64 },
	{ "tcp_wrong_queue",		KSTAT_DATA_UINT64 },
	{ "tcp_found_eager_binding1",	KSTAT_DATA_UINT64 },
	{ "tcp_found_eager_bound1",	KSTAT_DATA_UINT64 },
	{ "tcp_eager_has_listener1",	KSTAT_DATA_UINT64 },
	{ "tcp_open_alloc",		KSTAT_DATA_UINT64 },
	{ "tcp_open_detached_alloc",	KSTAT_DATA_UINT64 },
	{ "tcp_rput_time_wait",		KSTAT_DATA_UINT64 },
	{ "tcp_listendrop",		KSTAT_DATA_UINT64 },
	{ "tcp_listendropq0",		KSTAT_DATA_UINT64 },
	{ "tcp_wrong_rq",		KSTAT_DATA_UINT64 },
	{ "tcp_rsrv_calls",		KSTAT_DATA_UINT64 },
	{ "tcp_eagerfree2",		KSTAT_DATA_UINT64 },
	{ "tcp_eagerfree3",		KSTAT_DATA_UINT64 },
	{ "tcp_eagerfree4",		KSTAT_DATA_UINT64 },
	{ "tcp_eagerfree5",		KSTAT_DATA_UINT64 },
	{ "tcp_timewait_syn_fail",	KSTAT_DATA_UINT64 },
	{ "tcp_listen_badflags",	KSTAT_DATA_UINT64 },
	{ "tcp_timeout_calls",		KSTAT_DATA_UINT64 },
	{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64 },
	{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64 },
	{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64 },
	{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
	{ "tcp_timermp_freed",		KSTAT_DATA_UINT64 },
	{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
	{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
	{ "tcp_push_timer_cnt",		KSTAT_DATA_UINT64 },
	{ "tcp_ack_timer_cnt",		KSTAT_DATA_UINT64 },
	{ "tcp_ire_null1",		KSTAT_DATA_UINT64 },
	{ "tcp_ire_null",		KSTAT_DATA_UINT64 },
	{ "tcp_ip_send",		KSTAT_DATA_UINT64 },
	{ "tcp_ip_ire_send",		KSTAT_DATA_UINT64 },
	{ "tcp_wsrv_called",		KSTAT_DATA_UINT64 },
	{ "tcp_flwctl_on",		KSTAT_DATA_UINT64 },
	{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64 },
	{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64 },
	{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
"tcp_rput_v6_error", KSTAT_DATA_UINT64 }, 377 { "tcp_out_sw_cksum", KSTAT_DATA_UINT64 }, 378 { "tcp_out_sw_cksum_bytes", KSTAT_DATA_UINT64 }, 379 { "tcp_zcopy_on", KSTAT_DATA_UINT64 }, 380 { "tcp_zcopy_off", KSTAT_DATA_UINT64 }, 381 { "tcp_zcopy_backoff", KSTAT_DATA_UINT64 }, 382 { "tcp_zcopy_disable", KSTAT_DATA_UINT64 }, 383 { "tcp_mdt_pkt_out", KSTAT_DATA_UINT64 }, 384 { "tcp_mdt_pkt_out_v4", KSTAT_DATA_UINT64 }, 385 { "tcp_mdt_pkt_out_v6", KSTAT_DATA_UINT64 }, 386 { "tcp_mdt_discarded", KSTAT_DATA_UINT64 }, 387 { "tcp_mdt_conn_halted1", KSTAT_DATA_UINT64 }, 388 { "tcp_mdt_conn_halted2", KSTAT_DATA_UINT64 }, 389 { "tcp_mdt_conn_halted3", KSTAT_DATA_UINT64 }, 390 { "tcp_mdt_conn_resumed1", KSTAT_DATA_UINT64 }, 391 { "tcp_mdt_conn_resumed2", KSTAT_DATA_UINT64 }, 392 { "tcp_mdt_legacy_small", KSTAT_DATA_UINT64 }, 393 { "tcp_mdt_legacy_all", KSTAT_DATA_UINT64 }, 394 { "tcp_mdt_legacy_ret", KSTAT_DATA_UINT64 }, 395 { "tcp_mdt_allocfail", KSTAT_DATA_UINT64 }, 396 { "tcp_mdt_addpdescfail", KSTAT_DATA_UINT64 }, 397 { "tcp_mdt_allocd", KSTAT_DATA_UINT64 }, 398 { "tcp_mdt_linked", KSTAT_DATA_UINT64 }, 399 { "tcp_fusion_flowctl", KSTAT_DATA_UINT64 }, 400 { "tcp_fusion_backenabled", KSTAT_DATA_UINT64 }, 401 { "tcp_fusion_urg", KSTAT_DATA_UINT64 }, 402 { "tcp_fusion_putnext", KSTAT_DATA_UINT64 }, 403 { "tcp_fusion_unfusable", KSTAT_DATA_UINT64 }, 404 { "tcp_fusion_aborted", KSTAT_DATA_UINT64 }, 405 { "tcp_fusion_unqualified", KSTAT_DATA_UINT64 }, 406 { "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64 }, 407 { "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64 }, 408 { "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64 }, 409 { "tcp_sock_fallback", KSTAT_DATA_UINT64 }, 410 }; 411 412 static kstat_t *tcp_kstat; 413 414 /* 415 * Call either ip_output or ip_output_v6. This replaces putnext() calls on the 416 * tcp write side. 417 */ 418 #define CALL_IP_WPUT(connp, q, mp) { \ 419 ASSERT(((q)->q_flag & QREADR) == 0); \ 420 TCP_DBGSTAT(tcp_ip_output); \ 421 connp->conn_send(connp, (mp), (q), IP_WPUT); \ 422 } 423 424 /* Macros for timestamp comparisons */ 425 #define TSTMP_GEQ(a, b) ((int32_t)((a)-(b)) >= 0) 426 #define TSTMP_LT(a, b) ((int32_t)((a)-(b)) < 0) 427 428 /* 429 * Parameters for TCP Initial Send Sequence number (ISS) generation. When 430 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated 431 * by adding three components: a time component which grows by 1 every 4096 432 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27); 433 * a per-connection component which grows by 125000 for every new connection; 434 * and an "extra" component that grows by a random amount centered 435 * approximately on 64000. This causes the the ISS generator to cycle every 436 * 4.89 hours if no TCP connections are made, and faster if connections are 437 * made. 438 * 439 * When tcp_strong_iss is set to 0, ISS is calculated by adding two 440 * components: a time component which grows by 250000 every second; and 441 * a per-connection component which grows by 125000 for every new connections. 442 * 443 * A third method, when tcp_strong_iss is set to 2, for generating ISS is 444 * prescribed by Steve Bellovin. This involves adding time, the 125000 per 445 * connection, and a one-way hash (MD5) of the connection ID <sport, dport, 446 * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered 447 * password. 
#define	ISS_INCR	250000
#define	ISS_NSEC_SHT	12

static uint32_t tcp_iss_incr_extra;	/* Incremented for each connection */
static kmutex_t tcp_iss_key_lock;
static MD5_CTX tcp_iss_key;
static sin_t	sin_null;	/* Zero address for quick clears */
static sin6_t	sin6_null;	/* Zero address for quick clears */

/* Packet dropper for TCP IPsec policy drops. */
static ipdropper_t tcp_dropper;

/*
 * This implementation follows the 4.3BSD interpretation of the urgent
 * pointer and not RFC 1122.  Switching to RFC 1122 behavior would cause
 * incompatible changes in protocols like telnet and rlogin.
 */
#define	TCP_OLD_URP_INTERPRETATION	1

#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
	(TCP_IS_DETACHED(tcp) &&	\
	    (!(tcp)->tcp_hard_binding))

/*
 * TCP reassembly macros.  We hide starting and ending sequence numbers in
 * b_next and b_prev of messages on the reassembly queue.  The messages are
 * chained using b_cont.  These macros are used in tcp_reass() so we don't
 * have to see the ugly casts and assignments.
 */
#define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
#define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
					(mblk_t *)(uintptr_t)(u))
#define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
#define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
					(mblk_t *)(uintptr_t)(u))

/*
 * Implementation of TCP Timers.
 * =============================
 *
 * INTERFACE:
 *
 * There are two basic functions dealing with tcp timers:
 *
 *	timeout_id_t	tcp_timeout(connp, func, time)
 *	clock_t		tcp_timeout_cancel(connp, timeout_id)
 *	TCP_TIMER_RESTART(tcp, intvl)
 *
 * tcp_timeout() starts a timer for the 'tcp' instance arranging to call 'func'
 * after 'time' ticks have passed.  The function called by timeout() must
 * adhere to the same restrictions as a driver soft interrupt handler - it
 * must not sleep or call other functions that might sleep.  The value
 * returned is the opaque non-zero timeout identifier that can be passed to
 * tcp_timeout_cancel() to cancel the request.  The call to tcp_timeout()
 * may fail, in which case it returns zero.  This is different from the
 * timeout(9F) function which never fails.
 *
 * The call-back function 'func' always receives 'connp' as its single
 * argument.  It is always executed in the squeue corresponding to the tcp
 * structure.  The tcp structure is guaranteed to be present at the time the
 * call-back is called.
 *
 * NOTE: The call-back function 'func' is never called if tcp is in
 * the TCPS_CLOSED state.
 *
 * tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
 * request.  Locks acquired by the call-back routine should not be held across
 * the call to tcp_timeout_cancel() or a deadlock may result.
 *
 * tcp_timeout_cancel() returns -1 if it cannot cancel the timeout request.
 * Otherwise, it returns an integer value greater than or equal to 0.  In
 * particular, if the call-back function is already placed on the squeue, it
 * cannot be canceled.
 *
 * NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
 *	within squeue context corresponding to the tcp instance.  Since the
 *	call-back is also called via the same squeue, there are no race
 *	conditions described in the untimeout(9F) manual page since all calls
 *	are strictly serialized.
 *
 * TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
 * stored in tcp_timer_tid and starts a new one using
 * MSEC_TO_TICK(intvl).  It always uses the tcp_timer() function as a
 * call-back and stores the return value of tcp_timeout() in the
 * tcp->tcp_timer_tid field.
 *
 * NOTE: since the timeout cancellation is not guaranteed, the cancelled
 *	call-back may still be called, so it is possible tcp_timer() will be
 *	called several times.  This should not be a problem since tcp_timer()
 *	should always check the tcp instance state.
 *
 *
 * IMPLEMENTATION:
 *
 * TCP timers are implemented using a three-stage process.  The call to
 * tcp_timeout() uses the timeout(9F) function to call the
 * tcp_timer_callback() function when the timer expires.  The
 * tcp_timer_callback() arranges the call of the tcp_timer_handler() function
 * via the squeue corresponding to the tcp instance.  The tcp_timer_handler()
 * calls the actual requested timeout call-back and passes the tcp instance
 * as an argument to it.  Information is passed between stages using the
 * tcp_timer_t structure which contains the connp pointer, the tcp call-back
 * to call and the timeout id returned by timeout(9F).
 *
 * The tcp_timer_t structure is not used directly, it is embedded in an
 * mblk_t-like structure that is used to enter an squeue.  The mp->b_rptr of
 * this pseudo mblk points to the beginning of the tcp_timer_t structure.
 * The tcp_timeout() returns the pointer to this mblk.
 *
 * The pseudo mblk is allocated from a special tcp_timer_cache kmem cache.  It
 * looks like a normal mblk without an actual dblk attached to it.
 *
 * To optimize performance each tcp instance holds a small cache of timer
 * mblocks.  In the current implementation it caches up to two timer mblocks
 * per tcp instance.  The cache is preserved over tcp frees and is only freed
 * when the whole tcp structure is destroyed by its kmem destructor.  Since
 * all tcp timer processing happens on a corresponding squeue, the cache
 * manipulation does not require any locks.  Experiments show that the
 * majority of timer mblock allocations are satisfied from the tcp cache
 * and do not involve kmem calls.
 *
 * The tcp_timeout() places a refhold on the connp instance which guarantees
 * that it will be present at the time the call-back function fires.  The
 * tcp_timer_handler() drops the reference after calling the call-back, so the
 * call-back function does not need to manipulate the references explicitly.
 */

typedef struct tcp_timer_s {
	conn_t	*connp;
	void	(*tcpt_proc)(void *);
	timeout_id_t	tcpt_tid;
} tcp_timer_t;

static kmem_cache_t *tcp_timercache;
kmem_cache_t	*tcp_sack_info_cache;
kmem_cache_t	*tcp_iphc_cache;
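/*
 * Illustrative sketch (added for this write-up, not part of the original
 * source): arming and cancelling a timer through the interface documented
 * above.  'intvl' is a placeholder; real callers pick the interval from
 * the tcp_t state.
 *
 *	timeout_id_t tid;
 *
 *	tid = tcp_timeout(tcp->tcp_connp, tcp_keepalive_killer,
 *	    MSEC_TO_TICK(intvl));
 *	if (tid == 0) {
 *		// unlike timeout(9F), tcp_timeout() can fail
 *	}
 *	...
 *	if (tcp_timeout_cancel(tcp->tcp_connp, tid) == -1) {
 *		// call-back already dispatched to the squeue; it still runs
 *	}
 */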

/*
 * For scalability, we must not run a timer for every TCP connection
 * in TIME_WAIT state.  To see why, consider (for time wait interval of
 * 4 minutes):
 *	1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
 *
 * This list is ordered by time, so you need only delete from the head
 * until you get to entries which aren't old enough to delete yet.
 * The list consists of only the detached TIME_WAIT connections.
 *
 * Note that the timer (tcp_time_wait_expire) is started when the tcp_t
 * becomes detached TIME_WAIT (either by changing the state and already
 * being detached or the other way around).  This means that the TIME_WAIT
 * state can be extended (up to doubled) if the connection doesn't become
 * detached for a long time.
 *
 * The list manipulations (including tcp_time_wait_next/prev)
 * are protected by the tcp_time_wait_lock.  The content of the
 * detached TIME_WAIT connections is protected by the normal perimeters.
 */

typedef struct tcp_squeue_priv_s {
	kmutex_t	tcp_time_wait_lock;
				/* Protects the next 3 globals */
	timeout_id_t	tcp_time_wait_tid;
	tcp_t		*tcp_time_wait_head;
	tcp_t		*tcp_time_wait_tail;
	tcp_t		*tcp_free_list;
	uint_t		tcp_free_list_cnt;
} tcp_squeue_priv_t;

/*
 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
 * Running it every 5 seconds seems to give the best results.
 */
#define	TCP_TIME_WAIT_DELAY drv_usectohz(5000000)

/*
 * To prevent memory hogging, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	49152

/*
 *  PAWS needs a timer for 24 days.  This is the number of ticks in 24 days
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Bind hash list size and hash function.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_BIND_FANOUT_SIZE	512
#define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
/*
 * Size of the listen and acceptor hash lists.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_FANOUT_SIZE		256

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_FANOUT_SIZE - 1))
#endif	/* _ILP32 */

#define	IP_ADDR_CACHE_SIZE	2048
#define	IP_ADDR_CACHE_HASH(faddr)					\
	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))

/* Hash for HSPs uses all 32 bits, since both networks and hosts are in table */
#define	TCP_HSP_HASH_SIZE 256

#define	TCP_HSP_HASH(addr)					\
	(((addr>>24) ^ (addr >>16) ^			\
	    (addr>>8) ^ (addr)) % TCP_HSP_HASH_SIZE)

/*
 * TCP options struct returned from tcp_parse_options.
 */
typedef struct tcp_opt_s {
	uint32_t	tcp_opt_mss;
	uint32_t	tcp_opt_wscale;
	uint32_t	tcp_opt_ts_val;
	uint32_t	tcp_opt_ts_ecr;
	tcp_t		*tcp;
} tcp_opt_t;

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif
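/*
 * Added note (illustration only): on the wire this phrasing is the byte
 * sequence 0x01 0x01 0x08 0x0a, i.e. NOP, NOP, kind 8 (TSTAMP), length 10,
 * which RFC 1323 recommends so that the two 4-byte timestamp fields that
 * follow end up 32-bit aligned.
 */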

/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16

/* TCP option length */
#define	TCPOPT_NOP_LEN		1
#define	TCPOPT_MAXSEG_LEN	4
#define	TCPOPT_WS_LEN		3
#define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
#define	TCPOPT_TSTAMP_LEN	10
#define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
#define	TCPOPT_SACK_OK_LEN	2
#define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
#define	TCPOPT_REAL_SACK_LEN	4
#define	TCPOPT_MAX_SACK_LEN	36
#define	TCPOPT_HEADER_LEN	2

/* TCP cwnd burst factor. */
#define	TCP_CWND_INFINITE	65535
#define	TCP_CWND_SS		3
#define	TCP_CWND_NORMAL		5

/* Maximum TCP initial cwin (start/restart). */
#define	TCP_MAX_INIT_CWND	8

/*
 * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
 * either tcp_slow_start_initial or tcp_slow_start_after_idle
 * depending on the caller.  If the upper layer has not used the
 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
 * If the upper layer has set tcp_init_cwnd, just use
 * it to calculate the tcp_cwnd.
 */
#define	SET_TCP_INIT_CWND(tcp, mss, def_max_init_cwnd)			\
{									\
	if ((tcp)->tcp_init_cwnd == 0) {				\
		(tcp)->tcp_cwnd = MIN(def_max_init_cwnd * (mss),	\
		    MIN(4 * (mss), MAX(2 * (mss), 4380 / (mss) * (mss)))); \
	} else {							\
		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss);		\
	}								\
	tcp->tcp_cwnd_cnt = 0;						\
}
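/*
 * Worked example (added for illustration): with an Ethernet-derived mss of
 * 1460 and def_max_init_cwnd of 4 (the tcp_slow_start_initial default), the
 * RFC 3390 branch above computes
 *
 *	inner: MIN(4 * 1460, MAX(2 * 1460, 4380 / 1460 * 1460)) = 4380
 *	outer: MIN(4 * 1460, 4380)				 = 4380
 *
 * i.e. an initial cwnd of three full-sized segments.
 */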

/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;

/* Host Specific Parameter structure */
typedef struct tcp_hsp {
	struct tcp_hsp	*tcp_hsp_next;
	in6_addr_t	tcp_hsp_addr_v6;
	in6_addr_t	tcp_hsp_subnet_v6;
	uint_t		tcp_hsp_vers;	/* IPV4_VERSION | IPV6_VERSION */
	int32_t		tcp_hsp_sendspace;
	int32_t		tcp_hsp_recvspace;
	int32_t		tcp_hsp_tstamp;
} tcp_hsp_t;
#define	tcp_hsp_addr	V4_PART_OF_V6(tcp_hsp_addr_v6)
#define	tcp_hsp_subnet	V4_PART_OF_V6(tcp_hsp_subnet_v6)

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_conn_request(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2);
void		tcp_accept_finish(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_proto(void *arg, mblk_t *mp, void *arg2);
void		tcp_input(void *arg, mblk_t *mp, void *arg2);
void		tcp_rput_data(void *arg, mblk_t *mp, void *arg2);
static void	tcp_close_output(void *arg, mblk_t *mp, void *arg2);
void		tcp_output(void *arg, mblk_t *mp, void *arg2);
static void	tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2);
static void	tcp_timer_handler(void *arg, mblk_t *mp, void *arg2);


/* Prototypes for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static void	tcp_accept(tcp_t *tcp, mblk_t *mp);
static void	tcp_accept_swap(tcp_t *listener, tcp_t *acceptor,
		    tcp_t *eager);
static int	tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp);
static in_port_t tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
    int reuseaddr, boolean_t quick_connect, boolean_t bind_to_req_port_only,
    boolean_t user_specified);
static void	tcp_closei_local(tcp_t *tcp);
static void	tcp_close_detached(tcp_t *tcp);
static boolean_t tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph,
		    mblk_t *idmp, mblk_t **defermp);
static void	tcp_connect(tcp_t *tcp, mblk_t *mp);
static void	tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static void	tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo, uint_t srcid,
		    uint32_t scope_id);
static int	tcp_clean_death(tcp_t *tcp, int err, uint8_t tag);
static void	tcp_def_q_set(tcp_t *tcp, mblk_t *mp);
static void	tcp_disconnect(tcp_t *tcp, mblk_t *mp);
static char	*tcp_display(tcp_t *tcp, char *, char);
static boolean_t tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum);
static void	tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only);
static void	tcp_eager_unlink(tcp_t *tcp);
static void	tcp_err_ack(tcp_t *tcp, mblk_t *mp, int tlierr,
		    int unixerr);
static void	tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
		    int tlierr, int unixerr);
static int	tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_tpistate(tcp_t *tcp);
static void	tcp_bind_hash_insert(tf_t *tf, tcp_t *tcp,
    int caller_holds_lock);
static void	tcp_bind_hash_remove(tcp_t *tcp);
static tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t id);
void		tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp);
static void	tcp_acceptor_hash_remove(tcp_t *tcp);
static void	tcp_capability_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_info_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *mp);
static int	tcp_header_init_ipv4(tcp_t *tcp);
static int	tcp_header_init_ipv6(tcp_t *tcp);
int		tcp_init(tcp_t *tcp, queue_t *q);
static int	tcp_init_values(tcp_t *tcp);
static mblk_t	*tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic);
static mblk_t	*tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim,
		    t_scalar_t addr_length);
static void	tcp_ip_ire_mark_advice(tcp_t *tcp);
static void	tcp_ip_notify(tcp_t *tcp);
static mblk_t	*tcp_ire_mp(mblk_t *mp);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_keepalive_killer(void *arg);
static int	tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt);
static void	tcp_mss_set(tcp_t *tcp, uint32_t size);
static int	tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp,
		    int *do_disconnectp, int *t_errorp, int *sys_errorp);
static boolean_t tcp_allow_connopt_set(int level, int name);
int		tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_set(queue_t *q, uint_t optset_context, int level,
		    int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
		    uchar_t *outvalp, void *thisdg_attrs, cred_t *cr,
		    mblk_t *mblk);
static void	tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha);
static int	tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly,
		    uchar_t *ptr, uint_t len);
static int	tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static boolean_t tcp_param_register(tcpparam_t *tcppa, int cnt);
static int	tcp_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_iss_key_init(uint8_t *phrase, int len);
static int	tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_cnt);
static mblk_t	*tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start);
static void	tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval,
		    tcp_t *thisstream, cred_t *cr);

static uint_t	tcp_rcv_drain(queue_t *q, tcp_t *tcp);
static void	tcp_sack_rxmit(tcp_t *tcp, uint_t *flags);
static boolean_t tcp_send_rst_chk(void);
static void	tcp_ss_rexmit(tcp_t *tcp);
static mblk_t	*tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp);
static void	tcp_process_options(tcp_t *, tcph_t *);
static void	tcp_rput_common(tcp_t *tcp, mblk_t *mp);
static void	tcp_rsrv(queue_t *q);
static int	tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd);
static int	tcp_snmp_state(tcp_t *tcp);
static int	tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_host_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static void	tcp_timer(void *arg);
static void	tcp_timer_callback(void *);
static in_port_t tcp_update_next_port(in_port_t port, const tcp_t *tcp,
    boolean_t random);
static in_port_t tcp_get_next_priv_port(const tcp_t *);
static void	tcp_wput_sock(queue_t *q, mblk_t *mp);
void		tcp_wput_accept(queue_t *q, mblk_t *mp);
static void	tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent);
static void	tcp_wput_flush(tcp_t *tcp, mblk_t *mp);
static void	tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int	tcp_send(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static int	tcp_multisend(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static void	tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now,
		    int num_sack_blk);
static void	tcp_wsrv(queue_t *q);
static int	tcp_xmit_end(tcp_t *tcp);
static mblk_t	*tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send,
		    int32_t *offset, mblk_t **end_mp, uint32_t seq,
		    boolean_t sendall, uint32_t *seg_len, boolean_t rexmit);
static void	tcp_ack_timer(void *arg);
static mblk_t	*tcp_ack_mp(tcp_t *tcp);
static void	tcp_xmit_early_reset(char *str, mblk_t *mp,
		    uint32_t seq, uint32_t ack, int ctl, uint_t ip_hdr_len);
static void	tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq,
		    uint32_t ack, int ctl);
static tcp_hsp_t *tcp_hsp_lookup(ipaddr_t addr);
static tcp_hsp_t *tcp_hsp_lookup_ipv6(in6_addr_t *addr);
static int	setmaxps(queue_t *q, int maxpsz);
static void	tcp_set_rto(tcp_t *, time_t);
static boolean_t tcp_check_policy(tcp_t *, mblk_t *, ipha_t *, ip6_t *,
		    boolean_t, boolean_t);
static void	tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp,
		    boolean_t ipsec_mctl);
static mblk_t	*tcp_setsockopt_mp(int level, int cmd,
		    char *opt, int optlen);
static int	tcp_build_hdrs(queue_t *, tcp_t *);
static void	tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp,
		    uint32_t seg_seq, uint32_t seg_ack, int seg_len,
		    tcph_t *tcph);
boolean_t	tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp);
boolean_t	tcp_reserved_port_add(int, in_port_t *, in_port_t *);
boolean_t	tcp_reserved_port_del(in_port_t, in_port_t);
boolean_t	tcp_reserved_port_check(in_port_t);
static tcp_t	*tcp_alloc_temp_tcp(in_port_t);
static int	tcp_reserved_port_list(queue_t *, mblk_t *, caddr_t, cred_t *);
static mblk_t	*tcp_mdt_info_mp(mblk_t *);
static void	tcp_mdt_update(tcp_t *, ill_mdt_capab_t *, boolean_t);
static int	tcp_mdt_add_attrs(multidata_t *, const mblk_t *,
		    const boolean_t, const uint32_t, const uint32_t,
		    const uint32_t, const uint32_t);
static void	tcp_multisend_data(tcp_t *, ire_t *, const ill_t *, mblk_t *,
		    const uint_t, const uint_t, boolean_t *);
static void	tcp_send_data(tcp_t *, queue_t *, mblk_t *);
extern mblk_t	*tcp_timermp_alloc(int);
extern void	tcp_timermp_free(tcp_t *);
static void	tcp_timer_free(tcp_t *tcp, mblk_t *mp);
static void	tcp_stop_lingering(tcp_t *tcp);
static void	tcp_close_linger_timeout(void *arg);
void		tcp_ddi_init(void);
void		tcp_ddi_destroy(void);
static void	tcp_kstat_init(void);
static void	tcp_kstat_fini(void);
static int	tcp_kstat_update(kstat_t *kp, int rw);
void		tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp);
static int	tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
		    tcph_t *tcph, uint_t ipvers, mblk_t *idmp);
static int	tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha,
		    tcph_t *tcph, mblk_t *idmp);
static squeue_func_t tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_close(queue_t *, int);
static int	tcpclose_accept(queue_t *);
static int	tcp_modclose(queue_t *);
static void	tcp_wput_mod(queue_t *, mblk_t *);

static void	tcp_squeue_add(squeue_t *);
static boolean_t tcp_zcopy_check(tcp_t *);
static void	tcp_zcopy_notify(tcp_t *);
static mblk_t	*tcp_zcopy_disable(tcp_t *, mblk_t *);
static mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, int);
static void	tcp_ire_ill_check(tcp_t *, ire_t *, ill_t *, boolean_t);

extern void	tcp_kssl_input(tcp_t *, mblk_t *);

/*
 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
 *
 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
 * (defined in tcp.h) needs to be filled in and passed into the kernel
 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
 * structure contains the four-tuple of a TCP connection and a range of TCP
 * states (specified by ac_start and ac_end). The use of wildcard addresses
 * and ports is allowed. Connections with a matching four tuple and a state
 * within the specified range will be aborted. The valid states for the
 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to TCPS_TIME_WAIT,
 * inclusive.
 *
 * An application which has its connection aborted by this ioctl will receive
 * an error that is dependent on the connection state at the time of the abort.
 * If the connection state is < TCPS_TIME_WAIT, an application should behave as
 * though a RST packet has been received.  If the connection state is equal to
 * TCPS_TIME_WAIT, the 2MSL timeout will immediately be canceled by the kernel
 * and all resources associated with the connection will be freed.
 */
static mblk_t	*tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
static void	tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
static void	tcp_ioctl_abort_handler(tcp_t *, mblk_t *);
static int	tcp_ioctl_abort(tcp_ioc_abort_conn_t *);
static void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
static int	tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
		    boolean_t);
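/*
 * Illustrative user-level sketch (added for this write-up, not part of the
 * original source) of issuing the ioctl described above.  Only ac_start and
 * ac_end are named in the comment; the remaining tcp_ioc_abort_conn_t
 * fields (the local/remote four-tuple) are filled in the same way and may
 * be left as wildcards:
 *
 *	tcp_ioc_abort_conn_t conn;
 *	struct strioctl ioc;
 *
 *	(void) memset(&conn, 0, sizeof (conn));	  // wildcard addresses/ports
 *	conn.ac_start = TCPS_SYN_SENT;
 *	conn.ac_end = TCPS_TIME_WAIT;
 *
 *	ioc.ic_cmd = TCP_IOC_ABORT_CONN;
 *	ioc.ic_timout = -1;
 *	ioc.ic_len = sizeof (conn);
 *	ioc.ic_dp = (char *)&conn;
 *	if (ioctl(fd, I_STR, &ioc) < 0)
 *		perror("TCP_IOC_ABORT_CONN");
 */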

static struct module_info tcp_rinfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a module.  It only allows SNMP requests
 * to pass through.
 */
struct qinit tcp_mod_rinit = {
	(pfi_t)putnext, NULL, tcp_open, ip_snmpmod_close, NULL, &tcp_rinfo,
};

struct qinit tcp_mod_winit = {
	(pfi_t)ip_snmpmod_wput, NULL, tcp_open, ip_snmpmod_close, NULL,
	&tcp_rinfo
};

/*
 * Entry points for TCP as a device.  The normal case which supports
 * the TCP functionality.
 */
struct qinit tcp_rinit = {
	NULL, (pfi_t)tcp_rsrv, tcp_open, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept.  Avoid allocating data structures since the eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcpclose_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_wput_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP loopback (read side only)
 */
struct qinit tcp_loopback_rinit = {
	(pfi_t)0, (pfi_t)tcp_rsrv, tcp_open, tcp_close, (pfi_t)0,
	&tcp_rinfo, NULL, tcp_fuse_rrw, tcp_fuse_rinfop, STRUIOT_STANDARD
};

struct streamtab tcpinfo = {
	&tcp_rinit, &tcp_winit
};

extern squeue_func_t tcp_squeue_wput_proc;
extern squeue_func_t tcp_squeue_timer_proc;

/* Protected by tcp_g_q_lock */
static queue_t	*tcp_g_q;	/* Default queue used during detached closes */
kmutex_t tcp_g_q_lock;

/* Protected by tcp_hsp_lock */
/*
 * XXX The host param mechanism should go away and instead we should use
 * the metrics associated with the routes to determine the default sndspace
 * and rcvspace.
 */
static tcp_hsp_t	**tcp_hsp_hash;	/* Hash table for HSPs */
krwlock_t tcp_hsp_lock;

/*
 * Extra privileged ports. In host byte order.
 * Protected by tcp_epriv_port_lock.
 */
#define	TCP_NUM_EPRIV_PORTS	64
static int	tcp_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
static uint16_t	tcp_g_epriv_ports[TCP_NUM_EPRIV_PORTS] = { 2049, 4045 };
kmutex_t tcp_epriv_port_lock;

/*
 * The smallest anonymous port in the privileged port range from which TCP
 * looks for a free port.  Used with the TCP_ANONPRIVBIND option.
 */
static in_port_t tcp_min_anonpriv_port = 512;

/* Only modified during _init and _fini thus no locking is needed. */
static caddr_t	tcp_g_nd;	/* Head of 'named dispatch' variable list */

/* Hint not protected by any lock */
static uint_t	tcp_next_port_to_try;


/* TCP bind hash list - all tcp_t with state >= BOUND. */
tf_t	tcp_bind_fanout[TCP_BIND_FANOUT_SIZE];

/* TCP queue hash list - all tcp_t in case they will be an acceptor. */
static tf_t	tcp_acceptor_fanout[TCP_FANOUT_SIZE];

/*
 * TCP has a private interface for other kernel modules to reserve a
 * port range for them to use.  Once reserved, TCP will not use any ports
 * in the range.  This interface relies on the TCP_EXCLBIND feature.  If
 * the semantics of TCP_EXCLBIND are changed, the implementation of this
 * interface has to be verified.
 *
 * There can be TCP_RESERVED_PORTS_ARRAY_MAX_SIZE port ranges.  Each port
 * range can cover at most TCP_RESERVED_PORTS_RANGE_MAX ports.  A port
 * range is [port a, port b] inclusive.  And each port range is between
 * TCP_SMALLEST_RESERVED_PORT and TCP_LARGEST_RESERVED_PORT inclusive.
 *
 * Note that the default anonymous port range starts from 32768.  There is
 * no port "collision" between that and the reserved port range.  If there
 * is port collision (because the default smallest anonymous port is lowered
 * or some apps specifically bind to ports in the reserved port range), the
 * system may not be able to reserve a port range even if there are enough
 * unbound ports, as a reserved port range contains consecutive ports.
 */
#define	TCP_RESERVED_PORTS_ARRAY_MAX_SIZE	5
#define	TCP_RESERVED_PORTS_RANGE_MAX		1000
#define	TCP_SMALLEST_RESERVED_PORT		10240
#define	TCP_LARGEST_RESERVED_PORT		20480

/* Structure to represent those reserved port ranges. */
typedef struct tcp_rport_s {
	in_port_t	lo_port;
	in_port_t	hi_port;
	tcp_t		**temp_tcp_array;
} tcp_rport_t;

/* The reserved port array. */
static tcp_rport_t tcp_reserved_port[TCP_RESERVED_PORTS_ARRAY_MAX_SIZE];

/* Locks to protect the tcp_reserved_ports array. */
static krwlock_t tcp_reserved_port_lock;

/* The number of ranges in the array. */
uint32_t tcp_reserved_port_array_size = 0;
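/*
 * Illustrative sketch (added for this write-up, not part of the original
 * source): how a kernel client of this private interface might obtain and
 * later release a range, using the prototypes declared earlier in this
 * file.  The assumption that the first argument is the requested range
 * size, and the size of 100, are made up for illustration.
 *
 *	in_port_t lo, hi;
 *
 *	if (tcp_reserved_port_add(100, &lo, &hi)) {
 *		// ports [lo, hi] now belong to the caller; TCP won't use them
 *		...
 *		(void) tcp_reserved_port_del(lo, hi);
 *	}
 */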

/*
 * MIB-2 stuff for SNMP
 * Note: tcpInErrs {tcp 15} is accumulated in ip.c
 */
mib2_tcp_t	tcp_mib;	/* SNMP fixed size info */
kstat_t		*tcp_mibkp;	/* kstat exporting tcp_mib data */

boolean_t tcp_icmp_source_quench = B_FALSE;
/*
 * The following assumes that the TPI alignment requirements stay on 32-bit
 * boundaries.
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))

/* Template for response to info request. */
static struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

static struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

#define	MS	1L
#define	SECONDS	(1000 * MS)
#define	MINUTES	(60 * SECONDS)
#define	HOURS	(60 * MINUTES)
#define	DAYS	(24 * HOURS)

#define	PARAM_MAX (~(uint32_t)0)

/* Max size IP datagram is 64k - 1 */
#define	TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (ipha_t) + sizeof (tcph_t)))
#define	TCP_MSS_MAX_IPV6 (IP_MAXPACKET - (sizeof (ip6_t) + sizeof (tcph_t)))
/* Max of the above */
#define	TCP_MSS_MAX	TCP_MSS_MAX_IPV4

/* Largest TCP port number */
#define	TCP_MAX_PORT	(64 * 1024 - 1)

/*
 * tcp_wroff_xtra is the extra space in front of the TCP/IP header for the
 * link layer header.  It has to be a multiple of 4.
 */
static tcpparam_t tcp_wroff_xtra_param = { 0, 256, 32, "tcp_wroff_xtra" };
#define	tcp_wroff_xtra	tcp_wroff_xtra_param.tcp_param_val

/*
 * All of these are alterable, within the min/max values given, at run time.
 * Note that the default value of "tcp_time_wait_interval" is four minutes,
 * per the TCP spec.
 */
/* BEGIN CSTYLED */
tcpparam_t	tcp_param_arr[] = {
 /*min		max		value		name */
 { 1*SECONDS,	10*MINUTES,	1*MINUTES,	"tcp_time_wait_interval"},
 { 1,		PARAM_MAX,	128,		"tcp_conn_req_max_q" },
 { 0,		PARAM_MAX,	1024,		"tcp_conn_req_max_q0" },
 { 1,		1024,		1,		"tcp_conn_req_min" },
 { 0*MS,	20*SECONDS,	0*MS,		"tcp_conn_grace_period" },
 { 128,		(1<<30),	1024*1024,	"tcp_cwnd_max" },
 { 0,		10,		0,		"tcp_debug" },
 { 1024,	(32*1024),	1024,		"tcp_smallest_nonpriv_port"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_cinterval"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_linterval"},
 { 500*MS,	PARAM_MAX,	8*MINUTES,	"tcp_ip_abort_interval"},
 { 1*SECONDS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_cinterval"},
 { 500*MS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_interval"},
 { 1,		255,		64,		"tcp_ipv4_ttl"},
 { 10*SECONDS,	10*DAYS,	2*HOURS,	"tcp_keepalive_interval"},
 { 0,		100,		10,		"tcp_maxpsz_multiplier" },
 { 1,		TCP_MSS_MAX_IPV4, 536,		"tcp_mss_def_ipv4"},
 { 1,		TCP_MSS_MAX_IPV4, TCP_MSS_MAX_IPV4, "tcp_mss_max_ipv4"},
 { 1,		TCP_MSS_MAX,	108,		"tcp_mss_min"},
 { 1,		(64*1024)-1,	(4*1024)-1,	"tcp_naglim_def"},
 { 1*MS,	20*SECONDS,	3*SECONDS,	"tcp_rexmit_interval_initial"},
 { 1*MS,	2*HOURS,	60*SECONDS,	"tcp_rexmit_interval_max"},
 { 1*MS,	2*HOURS,	400*MS,		"tcp_rexmit_interval_min"},
 { 1*MS,	1*MINUTES,	100*MS,		"tcp_deferred_ack_interval" },
 { 0,		16,		0,		"tcp_snd_lowat_fraction" },
 { 0,		128000,		0,		"tcp_sth_rcv_hiwat" },
 { 0,		128000,		0,		"tcp_sth_rcv_lowat" },
 { 1,		10000,		3,		"tcp_dupack_fast_retransmit" },
 { 0,		1,		0,		"tcp_ignore_path_mtu" },
 { 1024,	TCP_MAX_PORT,	32*1024,	"tcp_smallest_anon_port"},
 { 1024,	TCP_MAX_PORT,	TCP_MAX_PORT,	"tcp_largest_anon_port"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_HIWATER,"tcp_xmit_hiwat"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_LOWATER,"tcp_xmit_lowat"},
 { TCP_RECV_LOWATER, (1<<30),	TCP_RECV_HIWATER,"tcp_recv_hiwat"},
 { 1,		65536,		4,		"tcp_recv_hiwat_minmss"},
 { 1*SECONDS,	PARAM_MAX,	675*SECONDS,	"tcp_fin_wait_2_flush_interval"},
 { 0,		TCP_MSS_MAX,	64,		"tcp_co_min"},
 { 8192,	(1<<30),	1024*1024,	"tcp_max_buf"},
/*
 * Question:  What default value should I set for tcp_strong_iss?
 */
 { 0,		2,		1,		"tcp_strong_iss"},
 { 0,		65536,		20,		"tcp_rtt_updates"},
 { 0,		1,		1,		"tcp_wscale_always"},
 { 0,		1,		0,		"tcp_tstamp_always"},
 { 0,		1,		1,		"tcp_tstamp_if_wscale"},
 { 0*MS,	2*HOURS,	0*MS,		"tcp_rexmit_interval_extra"},
 { 0,		16,		2,		"tcp_deferred_acks_max"},
 { 1,		16384,		4,		"tcp_slow_start_after_idle"},
 { 1,		4,		4,		"tcp_slow_start_initial"},
 { 10*MS,	50*MS,		20*MS,		"tcp_co_timer_interval"},
 { 0,		2,		2,		"tcp_sack_permitted"},
 { 0,		1,		0,		"tcp_trace"},
 { 0,		1,		1,		"tcp_compression_enabled"},
 { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS,	"tcp_ipv6_hoplimit"},
 { 1,		TCP_MSS_MAX_IPV6, 1220,		"tcp_mss_def_ipv6"},
 { 1,		TCP_MSS_MAX_IPV6, TCP_MSS_MAX_IPV6, "tcp_mss_max_ipv6"},
 { 0,		1,		0,		"tcp_rev_src_routes"},
 { 10*MS,	500*MS,		50*MS,		"tcp_local_dack_interval"},
 { 100*MS,	60*SECONDS,	1*SECONDS,	"tcp_ndd_get_info_interval"},
 { 0,		16,		8,		"tcp_local_dacks_max"},
 { 0,		2,		1,		"tcp_ecn_permitted"},
 { 0,		1,		1,		"tcp_rst_sent_rate_enabled"},
 { 0,		PARAM_MAX,	40,		"tcp_rst_sent_rate"},
 { 0,		100*MS,		50*MS,		"tcp_push_timer_interval"},
 { 0,		1,		0,		"tcp_use_smss_as_mss_opt"},
 { 0,		PARAM_MAX,	8*MINUTES,	"tcp_keepalive_abort_interval"},
};
/* END CSTYLED */

/*
 * tcp_mdt_hdr_{head,tail}_min are the leading and trailing spaces of
 * each header fragment in the header buffer.  Each parameter value has
 * to be a multiple of 4 (32-bit aligned).
 */
static tcpparam_t tcp_mdt_head_param = { 32, 256, 32, "tcp_mdt_hdr_head_min" };
static tcpparam_t tcp_mdt_tail_param = { 0, 256, 32, "tcp_mdt_hdr_tail_min" };
#define	tcp_mdt_hdr_head_min	tcp_mdt_head_param.tcp_param_val
#define	tcp_mdt_hdr_tail_min	tcp_mdt_tail_param.tcp_param_val

/*
 * tcp_mdt_max_pbufs is the upper limit value that tcp uses to figure out
 * the maximum number of payload buffers associated per Multidata.
 */
static tcpparam_t tcp_mdt_max_pbufs_param =
	{ 1, MULTIDATA_MAX_PBUFS, MULTIDATA_MAX_PBUFS, "tcp_mdt_max_pbufs" };
#define	tcp_mdt_max_pbufs	tcp_mdt_max_pbufs_param.tcp_param_val

/* Round up the value to the nearest mss. */
#define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))

/*
 * Set ECN capable transport (ECT) code point in IP header.
 *
 * Note that there are 2 ECT code points '01' and '10', which are called
 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
 * point ECT(0) for TCP as described in RFC 2481.
 */
#define	SET_ECT(tcp, iph) \
	if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
		/* We need to clear the code point first. */ \
		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
	} else { \
		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
	}

/*
 * The format argument to pass to tcp_display().
 * DISP_PORT_ONLY means that the returned string has only port info.
 * DISP_ADDR_AND_PORT means that the returned string also contains the
 * remote and local IP address.
 */
#define	DISP_PORT_ONLY		1
#define	DISP_ADDR_AND_PORT	2

/*
 * This controls the rate at which some ndd info report functions can be
 * used by non-privileged users.  It stores the last time such info was
 * requested.  When those report functions are called again, this is
 * checked against the current time and compared with the ndd param
 * tcp_ndd_get_info_interval.
 */
When those report functions are called again, this
1361  * is checked against the current time and compared with the ndd param
1362  * tcp_ndd_get_info_interval.
1363  */
1364 static clock_t tcp_last_ndd_get_info_time = 0;
1365 #define	NDD_TOO_QUICK_MSG \
1366 	"ndd get info rate too high for non-privileged users, try again " \
1367 	"later.\n"
1368 #define	NDD_OUT_OF_BUF_MSG	"<< Out of buffer >>\n"
1369 
1370 #define	IS_VMLOANED_MBLK(mp) \
1371 	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)
1372 
1373 /*
1374  * These two variables control the rate for TCP to generate RSTs in
1375  * response to segments not belonging to any connections.  We limit
1376  * TCP to sending out at most tcp_rst_sent_rate (ndd param) RSTs in
1377  * each 1 second interval.  This is to protect TCP against DoS attacks.
1378  */
1379 static clock_t tcp_last_rst_intrvl;
1380 static uint32_t tcp_rst_cnt;
1381 
1382 /* The number of RSTs not sent because of the rate limit. */
1383 static uint32_t tcp_rst_unsent;
1384 
1385 /* Enable or disable b_cont M_MULTIDATA chaining for MDT. */
1386 boolean_t tcp_mdt_chain = B_TRUE;
1387 
1388 /*
1389  * MDT threshold in the form of effective send MSS multiplier; we take
1390  * the MDT path if the amount of unsent data exceeds the threshold value
1391  * (default threshold is 1*SMSS).
1392  */
1393 uint_t tcp_mdt_smss_threshold = 1;
1394 
1395 uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */
1396 
1397 /*
1398  * Forces all connections to obey the value of the tcp_maxpsz_multiplier
1399  * tunable settable via NDD.  Otherwise, the per-connection behavior is
1400  * determined dynamically during tcp_adapt_ire(), which is the default.
1401  */
1402 boolean_t tcp_static_maxpsz = B_FALSE;
1403 
1404 /* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
1405 uint32_t tcp_random_anon_port = 1;
1406 
1407 /*
1408  * If tcp_drop_ack_unsent_cnt is greater than 0, then when TCP receives
1409  * more than tcp_drop_ack_unsent_cnt ACKs which acknowledge unsent
1410  * data, TCP will not respond with an ACK.  RFC 793 requires that
1411  * TCP respond with an ACK for such a bogus ACK.  By not following
1412  * the RFC, we prevent TCP from getting into an ACK storm if somehow
1413  * an attacker successfully spoofs an acceptable segment to our
1414  * peer; or when our peer is "confused."
1415  */
1416 uint32_t tcp_drop_ack_unsent_cnt = 10;
1417 
1418 /*
1419  * Hook functions to enable cluster networking.
1420  * On non-clustered systems these vectors must always be NULL.
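 *
 * (Illustrative sketch, not from the original source: a clustering module
 *  is assumed to register its callbacks simply by assigning these vectors
 *  from its own initialization code, e.g.
 *
 *	extern void my_cl_listen(uint8_t, sa_family_t, uint8_t *, in_port_t);
 *	...
 *	cl_inet_listen = my_cl_listen;
 *
 *  and to reset them to NULL before unloading; "my_cl_listen" is a
 *  hypothetical name used only for illustration.)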
1421 */ 1422 1423 void (*cl_inet_listen)(uint8_t protocol, sa_family_t addr_family, 1424 uint8_t *laddrp, in_port_t lport) = NULL; 1425 void (*cl_inet_unlisten)(uint8_t protocol, sa_family_t addr_family, 1426 uint8_t *laddrp, in_port_t lport) = NULL; 1427 void (*cl_inet_connect)(uint8_t protocol, sa_family_t addr_family, 1428 uint8_t *laddrp, in_port_t lport, 1429 uint8_t *faddrp, in_port_t fport) = NULL; 1430 void (*cl_inet_disconnect)(uint8_t protocol, sa_family_t addr_family, 1431 uint8_t *laddrp, in_port_t lport, 1432 uint8_t *faddrp, in_port_t fport) = NULL; 1433 1434 /* 1435 * The following are defined in ip.c 1436 */ 1437 extern int (*cl_inet_isclusterwide)(uint8_t protocol, sa_family_t addr_family, 1438 uint8_t *laddrp); 1439 extern uint32_t (*cl_inet_ipident)(uint8_t protocol, sa_family_t addr_family, 1440 uint8_t *laddrp, uint8_t *faddrp); 1441 1442 #define CL_INET_CONNECT(tcp) { \ 1443 if (cl_inet_connect != NULL) { \ 1444 /* \ 1445 * Running in cluster mode - register active connection \ 1446 * information \ 1447 */ \ 1448 if ((tcp)->tcp_ipversion == IPV4_VERSION) { \ 1449 if ((tcp)->tcp_ipha->ipha_src != 0) { \ 1450 (*cl_inet_connect)(IPPROTO_TCP, AF_INET,\ 1451 (uint8_t *)(&((tcp)->tcp_ipha->ipha_src)),\ 1452 (in_port_t)(tcp)->tcp_lport, \ 1453 (uint8_t *)(&((tcp)->tcp_ipha->ipha_dst)),\ 1454 (in_port_t)(tcp)->tcp_fport); \ 1455 } \ 1456 } else { \ 1457 if (!IN6_IS_ADDR_UNSPECIFIED( \ 1458 &(tcp)->tcp_ip6h->ip6_src)) {\ 1459 (*cl_inet_connect)(IPPROTO_TCP, AF_INET6,\ 1460 (uint8_t *)(&((tcp)->tcp_ip6h->ip6_src)),\ 1461 (in_port_t)(tcp)->tcp_lport, \ 1462 (uint8_t *)(&((tcp)->tcp_ip6h->ip6_dst)),\ 1463 (in_port_t)(tcp)->tcp_fport); \ 1464 } \ 1465 } \ 1466 } \ 1467 } 1468 1469 #define CL_INET_DISCONNECT(tcp) { \ 1470 if (cl_inet_disconnect != NULL) { \ 1471 /* \ 1472 * Running in cluster mode - deregister active \ 1473 * connection information \ 1474 */ \ 1475 if ((tcp)->tcp_ipversion == IPV4_VERSION) { \ 1476 if ((tcp)->tcp_ip_src != 0) { \ 1477 (*cl_inet_disconnect)(IPPROTO_TCP, \ 1478 AF_INET, \ 1479 (uint8_t *)(&((tcp)->tcp_ip_src)),\ 1480 (in_port_t)(tcp)->tcp_lport, \ 1481 (uint8_t *) \ 1482 (&((tcp)->tcp_ipha->ipha_dst)),\ 1483 (in_port_t)(tcp)->tcp_fport); \ 1484 } \ 1485 } else { \ 1486 if (!IN6_IS_ADDR_UNSPECIFIED( \ 1487 &(tcp)->tcp_ip_src_v6)) { \ 1488 (*cl_inet_disconnect)(IPPROTO_TCP, AF_INET6,\ 1489 (uint8_t *)(&((tcp)->tcp_ip_src_v6)),\ 1490 (in_port_t)(tcp)->tcp_lport, \ 1491 (uint8_t *) \ 1492 (&((tcp)->tcp_ip6h->ip6_dst)),\ 1493 (in_port_t)(tcp)->tcp_fport); \ 1494 } \ 1495 } \ 1496 } \ 1497 } 1498 1499 /* 1500 * Cluster networking hook for traversing current connection list. 1501 * This routine is used to extract the current list of live connections 1502 * which must continue to to be dispatched to this node. 1503 */ 1504 int cl_tcp_walk_list(int (*callback)(cl_tcp_info_t *, void *), void *arg); 1505 1506 /* 1507 * Figure out the value of window scale opton. Note that the rwnd is 1508 * ASSUMED to be rounded up to the nearest MSS before the calculation. 1509 * We cannot find the scale value and then do a round up of tcp_rwnd 1510 * because the scale value may not be correct after that. 1511 * 1512 * Set the compiler flag to make this function inline. 
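 *
 * (Worked example, not from the original source: with TCP_MAXWIN at its
 *  usual value of 65535, a tcp_rwnd of 1 MB (1048576 bytes, already
 *  rounded up to a multiple of the MSS) needs five shifts before it fits,
 *  since 1048576 >> 5 = 32768 <= 65535; tcp_rcv_ws therefore becomes 5
 *  and the peer must scale our advertised window by 2^5.)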
1513 */ 1514 static void 1515 tcp_set_ws_value(tcp_t *tcp) 1516 { 1517 int i; 1518 uint32_t rwnd = tcp->tcp_rwnd; 1519 1520 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT; 1521 i++, rwnd >>= 1) 1522 ; 1523 tcp->tcp_rcv_ws = i; 1524 } 1525 1526 /* 1527 * Remove a connection from the list of detached TIME_WAIT connections. 1528 */ 1529 static void 1530 tcp_time_wait_remove(tcp_t *tcp, tcp_squeue_priv_t *tcp_time_wait) 1531 { 1532 boolean_t locked = B_FALSE; 1533 1534 if (tcp_time_wait == NULL) { 1535 tcp_time_wait = *((tcp_squeue_priv_t **) 1536 squeue_getprivate(tcp->tcp_connp->conn_sqp, SQPRIVATE_TCP)); 1537 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1538 locked = B_TRUE; 1539 } 1540 1541 if (tcp->tcp_time_wait_expire == 0) { 1542 ASSERT(tcp->tcp_time_wait_next == NULL); 1543 ASSERT(tcp->tcp_time_wait_prev == NULL); 1544 if (locked) 1545 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1546 return; 1547 } 1548 ASSERT(TCP_IS_DETACHED(tcp)); 1549 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1550 1551 if (tcp == tcp_time_wait->tcp_time_wait_head) { 1552 ASSERT(tcp->tcp_time_wait_prev == NULL); 1553 tcp_time_wait->tcp_time_wait_head = tcp->tcp_time_wait_next; 1554 if (tcp_time_wait->tcp_time_wait_head != NULL) { 1555 tcp_time_wait->tcp_time_wait_head->tcp_time_wait_prev = 1556 NULL; 1557 } else { 1558 tcp_time_wait->tcp_time_wait_tail = NULL; 1559 } 1560 } else if (tcp == tcp_time_wait->tcp_time_wait_tail) { 1561 ASSERT(tcp != tcp_time_wait->tcp_time_wait_head); 1562 ASSERT(tcp->tcp_time_wait_next == NULL); 1563 tcp_time_wait->tcp_time_wait_tail = tcp->tcp_time_wait_prev; 1564 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1565 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = NULL; 1566 } else { 1567 ASSERT(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp); 1568 ASSERT(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp); 1569 tcp->tcp_time_wait_prev->tcp_time_wait_next = 1570 tcp->tcp_time_wait_next; 1571 tcp->tcp_time_wait_next->tcp_time_wait_prev = 1572 tcp->tcp_time_wait_prev; 1573 } 1574 tcp->tcp_time_wait_next = NULL; 1575 tcp->tcp_time_wait_prev = NULL; 1576 tcp->tcp_time_wait_expire = 0; 1577 1578 if (locked) 1579 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1580 } 1581 1582 /* 1583 * Add a connection to the list of detached TIME_WAIT connections 1584 * and set its time to expire. 1585 */ 1586 static void 1587 tcp_time_wait_append(tcp_t *tcp) 1588 { 1589 tcp_squeue_priv_t *tcp_time_wait = 1590 *((tcp_squeue_priv_t **)squeue_getprivate(tcp->tcp_connp->conn_sqp, 1591 SQPRIVATE_TCP)); 1592 1593 tcp_timers_stop(tcp); 1594 1595 /* Freed above */ 1596 ASSERT(tcp->tcp_timer_tid == 0); 1597 ASSERT(tcp->tcp_ack_tid == 0); 1598 1599 /* must have happened at the time of detaching the tcp */ 1600 ASSERT(tcp->tcp_ptpahn == NULL); 1601 ASSERT(tcp->tcp_flow_stopped == 0); 1602 ASSERT(tcp->tcp_time_wait_next == NULL); 1603 ASSERT(tcp->tcp_time_wait_prev == NULL); 1604 ASSERT(tcp->tcp_time_wait_expire == NULL); 1605 ASSERT(tcp->tcp_listener == NULL); 1606 1607 tcp->tcp_time_wait_expire = ddi_get_lbolt(); 1608 /* 1609 * The value computed below in tcp->tcp_time_wait_expire may 1610 * appear negative or wrap around. That is ok since our 1611 * interest is only in the difference between the current lbolt 1612 * value and tcp->tcp_time_wait_expire. But the value should not 1613 * be zero, since it means the tcp is not in the TIME_WAIT list. 1614 * The corresponding comparison in tcp_time_wait_collector() uses 1615 * modular arithmetic. 
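	 *
	 * (Illustrative sketch, not from the original source: the wrap-safe
	 * test used by the collector amounts to
	 *
	 *	if ((now - tcp->tcp_time_wait_expire) >= 0)
	 *		reap this connection;
	 *
	 * i.e. the signed difference of the two lbolt values, not their
	 * absolute magnitudes, decides whether the interval has elapsed,
	 * so a wraparound is harmless as long as the interval is small
	 * compared to the clock_t range.)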
1616 */ 1617 tcp->tcp_time_wait_expire += 1618 drv_usectohz(tcp_time_wait_interval * 1000); 1619 if (tcp->tcp_time_wait_expire == 0) 1620 tcp->tcp_time_wait_expire = 1; 1621 1622 ASSERT(TCP_IS_DETACHED(tcp)); 1623 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1624 ASSERT(tcp->tcp_time_wait_next == NULL); 1625 ASSERT(tcp->tcp_time_wait_prev == NULL); 1626 TCP_DBGSTAT(tcp_time_wait); 1627 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1628 if (tcp_time_wait->tcp_time_wait_head == NULL) { 1629 ASSERT(tcp_time_wait->tcp_time_wait_tail == NULL); 1630 tcp_time_wait->tcp_time_wait_head = tcp; 1631 } else { 1632 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1633 ASSERT(tcp_time_wait->tcp_time_wait_tail->tcp_state == 1634 TCPS_TIME_WAIT); 1635 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = tcp; 1636 tcp->tcp_time_wait_prev = tcp_time_wait->tcp_time_wait_tail; 1637 } 1638 tcp_time_wait->tcp_time_wait_tail = tcp; 1639 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1640 } 1641 1642 /* ARGSUSED */ 1643 void 1644 tcp_timewait_output(void *arg, mblk_t *mp, void *arg2) 1645 { 1646 conn_t *connp = (conn_t *)arg; 1647 tcp_t *tcp = connp->conn_tcp; 1648 1649 ASSERT(tcp != NULL); 1650 if (tcp->tcp_state == TCPS_CLOSED) { 1651 return; 1652 } 1653 1654 ASSERT((tcp->tcp_family == AF_INET && 1655 tcp->tcp_ipversion == IPV4_VERSION) || 1656 (tcp->tcp_family == AF_INET6 && 1657 (tcp->tcp_ipversion == IPV4_VERSION || 1658 tcp->tcp_ipversion == IPV6_VERSION))); 1659 ASSERT(!tcp->tcp_listener); 1660 1661 TCP_STAT(tcp_time_wait_reap); 1662 ASSERT(TCP_IS_DETACHED(tcp)); 1663 1664 /* 1665 * Because they have no upstream client to rebind or tcp_close() 1666 * them later, we axe the connection here and now. 1667 */ 1668 tcp_close_detached(tcp); 1669 } 1670 1671 void 1672 tcp_cleanup(tcp_t *tcp) 1673 { 1674 mblk_t *mp; 1675 char *tcp_iphc; 1676 int tcp_iphc_len; 1677 int tcp_hdr_grown; 1678 tcp_sack_info_t *tcp_sack_info; 1679 conn_t *connp = tcp->tcp_connp; 1680 1681 tcp_bind_hash_remove(tcp); 1682 tcp_free(tcp); 1683 1684 /* Release any SSL context */ 1685 if (tcp->tcp_kssl_ent != NULL) { 1686 kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY); 1687 tcp->tcp_kssl_ent = NULL; 1688 } 1689 1690 if (tcp->tcp_kssl_ctx != NULL) { 1691 kssl_release_ctx(tcp->tcp_kssl_ctx); 1692 tcp->tcp_kssl_ctx = NULL; 1693 } 1694 tcp->tcp_kssl_pending = B_FALSE; 1695 1696 conn_delete_ire(connp, NULL); 1697 if (connp->conn_flags & IPCL_TCPCONN) { 1698 if (connp->conn_latch != NULL) 1699 IPLATCH_REFRELE(connp->conn_latch); 1700 if (connp->conn_policy != NULL) 1701 IPPH_REFRELE(connp->conn_policy); 1702 } 1703 1704 /* 1705 * Since we will bzero the entire structure, we need to 1706 * remove it and reinsert it in global hash list. We 1707 * know the walkers can't get to this conn because we 1708 * had set CONDEMNED flag earlier and checked reference 1709 * under conn_lock so walker won't pick it and when we 1710 * go the ipcl_globalhash_remove() below, no walker 1711 * can get to it. 
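	 *
	 * (Illustrative sketch, not from the original source: a conn walker
	 * is assumed to follow the usual pattern of
	 *
	 *	mutex_enter(&connp->conn_lock);
	 *	if (connp->conn_state_flags & CONN_CONDEMNED) {
	 *		mutex_exit(&connp->conn_lock);
	 *		skip this conn;
	 *	}
	 *	CONN_INC_REF_LOCKED(connp);
	 *	mutex_exit(&connp->conn_lock);
	 *
	 * so once CONDEMNED has been set under conn_lock, no walker can
	 * take a new reference on this conn.)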
1712 */ 1713 ipcl_globalhash_remove(connp); 1714 1715 /* Save some state */ 1716 mp = tcp->tcp_timercache; 1717 1718 tcp_sack_info = tcp->tcp_sack_info; 1719 tcp_iphc = tcp->tcp_iphc; 1720 tcp_iphc_len = tcp->tcp_iphc_len; 1721 tcp_hdr_grown = tcp->tcp_hdr_grown; 1722 1723 if (connp->conn_cred != NULL) 1724 crfree(connp->conn_cred); 1725 if (connp->conn_peercred != NULL) 1726 crfree(connp->conn_peercred); 1727 bzero(connp, sizeof (conn_t)); 1728 bzero(tcp, sizeof (tcp_t)); 1729 1730 /* restore the state */ 1731 tcp->tcp_timercache = mp; 1732 1733 tcp->tcp_sack_info = tcp_sack_info; 1734 tcp->tcp_iphc = tcp_iphc; 1735 tcp->tcp_iphc_len = tcp_iphc_len; 1736 tcp->tcp_hdr_grown = tcp_hdr_grown; 1737 1738 1739 tcp->tcp_connp = connp; 1740 1741 connp->conn_tcp = tcp; 1742 connp->conn_flags = IPCL_TCPCONN; 1743 connp->conn_state_flags = CONN_INCIPIENT; 1744 connp->conn_ulp = IPPROTO_TCP; 1745 connp->conn_ref = 1; 1746 1747 ipcl_globalhash_insert(connp); 1748 } 1749 1750 /* 1751 * Blows away all tcps whose TIME_WAIT has expired. List traversal 1752 * is done forwards from the head. 1753 */ 1754 /* ARGSUSED */ 1755 void 1756 tcp_time_wait_collector(void *arg) 1757 { 1758 tcp_t *tcp; 1759 clock_t now; 1760 mblk_t *mp; 1761 conn_t *connp; 1762 kmutex_t *lock; 1763 1764 squeue_t *sqp = (squeue_t *)arg; 1765 tcp_squeue_priv_t *tcp_time_wait = 1766 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP)); 1767 1768 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1769 tcp_time_wait->tcp_time_wait_tid = 0; 1770 1771 if (tcp_time_wait->tcp_free_list != NULL && 1772 tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) { 1773 TCP_STAT(tcp_freelist_cleanup); 1774 while ((tcp = tcp_time_wait->tcp_free_list) != NULL) { 1775 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next; 1776 CONN_DEC_REF(tcp->tcp_connp); 1777 } 1778 tcp_time_wait->tcp_free_list_cnt = 0; 1779 } 1780 1781 /* 1782 * In order to reap time waits reliably, we should use a 1783 * source of time that is not adjustable by the user -- hence 1784 * the call to ddi_get_lbolt(). 1785 */ 1786 now = ddi_get_lbolt(); 1787 while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) { 1788 /* 1789 * Compare times using modular arithmetic, since 1790 * lbolt can wrapover. 1791 */ 1792 if ((now - tcp->tcp_time_wait_expire) < 0) { 1793 break; 1794 } 1795 1796 tcp_time_wait_remove(tcp, tcp_time_wait); 1797 1798 connp = tcp->tcp_connp; 1799 ASSERT(connp->conn_fanout != NULL); 1800 lock = &connp->conn_fanout->connf_lock; 1801 /* 1802 * This is essentially a TW reclaim fast path optimization for 1803 * performance where the timewait collector checks under the 1804 * fanout lock (so that no one else can get access to the 1805 * conn_t) that the refcnt is 2 i.e. one for TCP and one for 1806 * the classifier hash list. If ref count is indeed 2, we can 1807 * just remove the conn under the fanout lock and avoid 1808 * cleaning up the conn under the squeue, provided that 1809 * clustering callbacks are not enabled. If clustering is 1810 * enabled, we need to make the clustering callback before 1811 * setting the CONDEMNED flag and after dropping all locks and 1812 * so we forego this optimization and fall back to the slow 1813 * path. Also please see the comments in tcp_closei_local 1814 * regarding the refcnt logic. 1815 * 1816 * Since we are holding the tcp_time_wait_lock, its better 1817 * not to block on the fanout_lock because other connections 1818 * can't add themselves to time_wait list. So we do a 1819 * tryenter instead of mutex_enter. 
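	 *
	 * (Illustrative sketch, not from the original source: this is the
	 * common "try the second lock, back off instead of blocking" idiom,
	 * e.g.
	 *
	 *	mutex_enter(&A);		(tcp_time_wait_lock)
	 *	if (mutex_tryenter(&B)) {	(connf_lock)
	 *		fast path under both locks;
	 *	} else {
	 *		defer the work (here, to the squeue) rather than
	 *		block on B;
	 *	}
	 *
	 * which avoids holding A while waiting on B and so avoids stalling
	 * every other thread that needs A in the meantime.)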
1820 */ 1821 if (mutex_tryenter(lock)) { 1822 mutex_enter(&connp->conn_lock); 1823 if ((connp->conn_ref == 2) && 1824 (cl_inet_disconnect == NULL)) { 1825 ipcl_hash_remove_locked(connp, 1826 connp->conn_fanout); 1827 /* 1828 * Set the CONDEMNED flag now itself so that 1829 * the refcnt cannot increase due to any 1830 * walker. But we have still not cleaned up 1831 * conn_ire_cache. This is still ok since 1832 * we are going to clean it up in tcp_cleanup 1833 * immediately and any interface unplumb 1834 * thread will wait till the ire is blown away 1835 */ 1836 connp->conn_state_flags |= CONN_CONDEMNED; 1837 mutex_exit(lock); 1838 mutex_exit(&connp->conn_lock); 1839 if (tcp_time_wait->tcp_free_list_cnt < 1840 tcp_free_list_max_cnt) { 1841 /* Add to head of tcp_free_list */ 1842 mutex_exit( 1843 &tcp_time_wait->tcp_time_wait_lock); 1844 tcp_cleanup(tcp); 1845 mutex_enter( 1846 &tcp_time_wait->tcp_time_wait_lock); 1847 tcp->tcp_time_wait_next = 1848 tcp_time_wait->tcp_free_list; 1849 tcp_time_wait->tcp_free_list = tcp; 1850 tcp_time_wait->tcp_free_list_cnt++; 1851 continue; 1852 } else { 1853 /* Do not add to tcp_free_list */ 1854 mutex_exit( 1855 &tcp_time_wait->tcp_time_wait_lock); 1856 tcp_bind_hash_remove(tcp); 1857 conn_delete_ire(tcp->tcp_connp, NULL); 1858 CONN_DEC_REF(tcp->tcp_connp); 1859 } 1860 } else { 1861 CONN_INC_REF_LOCKED(connp); 1862 mutex_exit(lock); 1863 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1864 mutex_exit(&connp->conn_lock); 1865 /* 1866 * We can reuse the closemp here since conn has 1867 * detached (otherwise we wouldn't even be in 1868 * time_wait list). 1869 */ 1870 mp = &tcp->tcp_closemp; 1871 squeue_fill(connp->conn_sqp, mp, 1872 tcp_timewait_output, connp, 1873 SQTAG_TCP_TIMEWAIT); 1874 } 1875 } else { 1876 mutex_enter(&connp->conn_lock); 1877 CONN_INC_REF_LOCKED(connp); 1878 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1879 mutex_exit(&connp->conn_lock); 1880 /* 1881 * We can reuse the closemp here since conn has 1882 * detached (otherwise we wouldn't even be in 1883 * time_wait list). 1884 */ 1885 mp = &tcp->tcp_closemp; 1886 squeue_fill(connp->conn_sqp, mp, 1887 tcp_timewait_output, connp, 0); 1888 } 1889 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1890 } 1891 1892 if (tcp_time_wait->tcp_free_list != NULL) 1893 tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE; 1894 1895 tcp_time_wait->tcp_time_wait_tid = 1896 timeout(tcp_time_wait_collector, sqp, TCP_TIME_WAIT_DELAY); 1897 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1898 } 1899 1900 /* 1901 * Reply to a clients T_CONN_RES TPI message. This function 1902 * is used only for TLI/XTI listener. Sockfs sends T_CONN_RES 1903 * on the acceptor STREAM and processed in tcp_wput_accept(). 1904 * Read the block comment on top of tcp_conn_request(). 1905 */ 1906 static void 1907 tcp_accept(tcp_t *listener, mblk_t *mp) 1908 { 1909 tcp_t *acceptor; 1910 tcp_t *eager; 1911 tcp_t *tcp; 1912 struct T_conn_res *tcr; 1913 t_uscalar_t acceptor_id; 1914 t_scalar_t seqnum; 1915 mblk_t *opt_mp = NULL; /* T_OPTMGMT_REQ messages */ 1916 mblk_t *ok_mp; 1917 mblk_t *mp1; 1918 1919 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) { 1920 tcp_err_ack(listener, mp, TPROTO, 0); 1921 return; 1922 } 1923 tcr = (struct T_conn_res *)mp->b_rptr; 1924 1925 /* 1926 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the 1927 * read side queue of the streams device underneath us i.e. the 1928 * read side queue of 'ip'. Since we can't deference QUEUE_ptr we 1929 * look it up in the queue_hash. 
Under LP64 it sends down the 1930 * minor_t of the accepting endpoint. 1931 * 1932 * Once the acceptor/eager are modified (in tcp_accept_swap) the 1933 * fanout hash lock is held. 1934 * This prevents any thread from entering the acceptor queue from 1935 * below (since it has not been hard bound yet i.e. any inbound 1936 * packets will arrive on the listener or default tcp queue and 1937 * go through tcp_lookup). 1938 * The CONN_INC_REF will prevent the acceptor from closing. 1939 * 1940 * XXX It is still possible for a tli application to send down data 1941 * on the accepting stream while another thread calls t_accept. 1942 * This should not be a problem for well-behaved applications since 1943 * the T_OK_ACK is sent after the queue swapping is completed. 1944 * 1945 * If the accepting fd is the same as the listening fd, avoid 1946 * queue hash lookup since that will return an eager listener in a 1947 * already established state. 1948 */ 1949 acceptor_id = tcr->ACCEPTOR_id; 1950 mutex_enter(&listener->tcp_eager_lock); 1951 if (listener->tcp_acceptor_id == acceptor_id) { 1952 eager = listener->tcp_eager_next_q; 1953 /* only count how many T_CONN_INDs so don't count q0 */ 1954 if ((listener->tcp_conn_req_cnt_q != 1) || 1955 (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) { 1956 mutex_exit(&listener->tcp_eager_lock); 1957 tcp_err_ack(listener, mp, TBADF, 0); 1958 return; 1959 } 1960 if (listener->tcp_conn_req_cnt_q0 != 0) { 1961 /* Throw away all the eagers on q0. */ 1962 tcp_eager_cleanup(listener, 1); 1963 } 1964 if (listener->tcp_syn_defense) { 1965 listener->tcp_syn_defense = B_FALSE; 1966 if (listener->tcp_ip_addr_cache != NULL) { 1967 kmem_free(listener->tcp_ip_addr_cache, 1968 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 1969 listener->tcp_ip_addr_cache = NULL; 1970 } 1971 } 1972 /* 1973 * Transfer tcp_conn_req_max to the eager so that when 1974 * a disconnect occurs we can revert the endpoint to the 1975 * listen state. 1976 */ 1977 eager->tcp_conn_req_max = listener->tcp_conn_req_max; 1978 ASSERT(listener->tcp_conn_req_cnt_q0 == 0); 1979 /* 1980 * Get a reference on the acceptor just like the 1981 * tcp_acceptor_hash_lookup below. 1982 */ 1983 acceptor = listener; 1984 CONN_INC_REF(acceptor->tcp_connp); 1985 } else { 1986 acceptor = tcp_acceptor_hash_lookup(acceptor_id); 1987 if (acceptor == NULL) { 1988 if (listener->tcp_debug) { 1989 (void) strlog(TCP_MOD_ID, 0, 1, 1990 SL_ERROR|SL_TRACE, 1991 "tcp_accept: did not find acceptor 0x%x\n", 1992 acceptor_id); 1993 } 1994 mutex_exit(&listener->tcp_eager_lock); 1995 tcp_err_ack(listener, mp, TPROVMISMATCH, 0); 1996 return; 1997 } 1998 /* 1999 * Verify acceptor state. The acceptable states for an acceptor 2000 * include TCPS_IDLE and TCPS_BOUND. 2001 */ 2002 switch (acceptor->tcp_state) { 2003 case TCPS_IDLE: 2004 /* FALLTHRU */ 2005 case TCPS_BOUND: 2006 break; 2007 default: 2008 CONN_DEC_REF(acceptor->tcp_connp); 2009 mutex_exit(&listener->tcp_eager_lock); 2010 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2011 return; 2012 } 2013 } 2014 2015 /* The listener must be in TCPS_LISTEN */ 2016 if (listener->tcp_state != TCPS_LISTEN) { 2017 CONN_DEC_REF(acceptor->tcp_connp); 2018 mutex_exit(&listener->tcp_eager_lock); 2019 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2020 return; 2021 } 2022 2023 /* 2024 * Rendezvous with an eager connection request packet hanging off 2025 * 'tcp' that has the 'seqnum' tag. We tagged the detached open 2026 * tcp structure when the connection packet arrived in 2027 * tcp_conn_request(). 
2028 */ 2029 seqnum = tcr->SEQ_number; 2030 eager = listener; 2031 do { 2032 eager = eager->tcp_eager_next_q; 2033 if (eager == NULL) { 2034 CONN_DEC_REF(acceptor->tcp_connp); 2035 mutex_exit(&listener->tcp_eager_lock); 2036 tcp_err_ack(listener, mp, TBADSEQ, 0); 2037 return; 2038 } 2039 } while (eager->tcp_conn_req_seqnum != seqnum); 2040 mutex_exit(&listener->tcp_eager_lock); 2041 2042 /* 2043 * At this point, both acceptor and listener have 2 ref 2044 * that they begin with. Acceptor has one additional ref 2045 * we placed in lookup while listener has 3 additional 2046 * ref for being behind the squeue (tcp_accept() is 2047 * done on listener's squeue); being in classifier hash; 2048 * and eager's ref on listener. 2049 */ 2050 ASSERT(listener->tcp_connp->conn_ref >= 5); 2051 ASSERT(acceptor->tcp_connp->conn_ref >= 3); 2052 2053 /* 2054 * The eager at this point is set in its own squeue and 2055 * could easily have been killed (tcp_accept_finish will 2056 * deal with that) because of a TH_RST so we can only 2057 * ASSERT for a single ref. 2058 */ 2059 ASSERT(eager->tcp_connp->conn_ref >= 1); 2060 2061 /* Pre allocate the stroptions mblk also */ 2062 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 2063 if (opt_mp == NULL) { 2064 CONN_DEC_REF(acceptor->tcp_connp); 2065 CONN_DEC_REF(eager->tcp_connp); 2066 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2067 return; 2068 } 2069 DB_TYPE(opt_mp) = M_SETOPTS; 2070 opt_mp->b_wptr += sizeof (struct stroptions); 2071 2072 /* 2073 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 2074 * from listener to acceptor. The message is chained on opt_mp 2075 * which will be sent onto eager's squeue. 2076 */ 2077 if (listener->tcp_bound_if != 0) { 2078 /* allocate optmgmt req */ 2079 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2080 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 2081 sizeof (int)); 2082 if (mp1 != NULL) 2083 linkb(opt_mp, mp1); 2084 } 2085 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 2086 uint_t on = 1; 2087 2088 /* allocate optmgmt req */ 2089 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2090 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 2091 if (mp1 != NULL) 2092 linkb(opt_mp, mp1); 2093 } 2094 2095 /* Re-use mp1 to hold a copy of mp, in case reallocb fails */ 2096 if ((mp1 = copymsg(mp)) == NULL) { 2097 CONN_DEC_REF(acceptor->tcp_connp); 2098 CONN_DEC_REF(eager->tcp_connp); 2099 freemsg(opt_mp); 2100 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2101 return; 2102 } 2103 2104 tcr = (struct T_conn_res *)mp1->b_rptr; 2105 2106 /* 2107 * This is an expanded version of mi_tpi_ok_ack_alloc() 2108 * which allocates a larger mblk and appends the new 2109 * local address to the ok_ack. The address is copied by 2110 * soaccept() for getsockname(). 2111 */ 2112 { 2113 int extra; 2114 2115 extra = (eager->tcp_family == AF_INET) ? 2116 sizeof (sin_t) : sizeof (sin6_t); 2117 2118 /* 2119 * Try to re-use mp, if possible. Otherwise, allocate 2120 * an mblk and return it as ok_mp. In any case, mp 2121 * is no longer usable upon return. 
2122 */ 2123 if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) { 2124 CONN_DEC_REF(acceptor->tcp_connp); 2125 CONN_DEC_REF(eager->tcp_connp); 2126 freemsg(opt_mp); 2127 /* Original mp has been freed by now, so use mp1 */ 2128 tcp_err_ack(listener, mp1, TSYSERR, ENOMEM); 2129 return; 2130 } 2131 2132 mp = NULL; /* We should never use mp after this point */ 2133 2134 switch (extra) { 2135 case sizeof (sin_t): { 2136 sin_t *sin = (sin_t *)ok_mp->b_wptr; 2137 2138 ok_mp->b_wptr += extra; 2139 sin->sin_family = AF_INET; 2140 sin->sin_port = eager->tcp_lport; 2141 sin->sin_addr.s_addr = 2142 eager->tcp_ipha->ipha_src; 2143 break; 2144 } 2145 case sizeof (sin6_t): { 2146 sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr; 2147 2148 ok_mp->b_wptr += extra; 2149 sin6->sin6_family = AF_INET6; 2150 sin6->sin6_port = eager->tcp_lport; 2151 if (eager->tcp_ipversion == IPV4_VERSION) { 2152 sin6->sin6_flowinfo = 0; 2153 IN6_IPADDR_TO_V4MAPPED( 2154 eager->tcp_ipha->ipha_src, 2155 &sin6->sin6_addr); 2156 } else { 2157 ASSERT(eager->tcp_ip6h != NULL); 2158 sin6->sin6_flowinfo = 2159 eager->tcp_ip6h->ip6_vcf & 2160 ~IPV6_VERS_AND_FLOW_MASK; 2161 sin6->sin6_addr = 2162 eager->tcp_ip6h->ip6_src; 2163 } 2164 break; 2165 } 2166 default: 2167 break; 2168 } 2169 ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim); 2170 } 2171 2172 /* 2173 * If there are no options we know that the T_CONN_RES will 2174 * succeed. However, we can't send the T_OK_ACK upstream until 2175 * the tcp_accept_swap is done since it would be dangerous to 2176 * let the application start using the new fd prior to the swap. 2177 */ 2178 tcp_accept_swap(listener, acceptor, eager); 2179 2180 /* 2181 * tcp_accept_swap unlinks eager from listener but does not drop 2182 * the eager's reference on the listener. 2183 */ 2184 ASSERT(eager->tcp_listener == NULL); 2185 ASSERT(listener->tcp_connp->conn_ref >= 5); 2186 2187 /* 2188 * The eager is now associated with its own queue. Insert in 2189 * the hash so that the connection can be reused for a future 2190 * T_CONN_RES. 2191 */ 2192 tcp_acceptor_hash_insert(acceptor_id, eager); 2193 2194 /* 2195 * We now do the processing of options with T_CONN_RES. 2196 * We delay till now since we wanted to have queue to pass to 2197 * option processing routines that points back to the right 2198 * instance structure which does not happen until after 2199 * tcp_accept_swap(). 2200 * 2201 * Note: 2202 * The sanity of the logic here assumes that whatever options 2203 * are appropriate to inherit from listner=>eager are done 2204 * before this point, and whatever were to be overridden (or not) 2205 * in transfer logic from eager=>acceptor in tcp_accept_swap(). 2206 * [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it 2207 * before its ACCEPTOR_id comes down in T_CONN_RES ] 2208 * This may not be true at this point in time but can be fixed 2209 * independently. This option processing code starts with 2210 * the instantiated acceptor instance and the final queue at 2211 * this point. 2212 */ 2213 2214 if (tcr->OPT_length != 0) { 2215 /* Options to process */ 2216 int t_error = 0; 2217 int sys_error = 0; 2218 int do_disconnect = 0; 2219 2220 if (tcp_conprim_opt_process(eager, mp1, 2221 &do_disconnect, &t_error, &sys_error) < 0) { 2222 eager->tcp_accept_error = 1; 2223 if (do_disconnect) { 2224 /* 2225 * An option failed which does not allow 2226 * connection to be accepted. 2227 * 2228 * We allow T_CONN_RES to succeed and 2229 * put a T_DISCON_IND on the eager queue. 
2230 */ 2231 ASSERT(t_error == 0 && sys_error == 0); 2232 eager->tcp_send_discon_ind = 1; 2233 } else { 2234 ASSERT(t_error != 0); 2235 freemsg(ok_mp); 2236 /* 2237 * Original mp was either freed or set 2238 * to ok_mp above, so use mp1 instead. 2239 */ 2240 tcp_err_ack(listener, mp1, t_error, sys_error); 2241 goto finish; 2242 } 2243 } 2244 /* 2245 * Most likely success in setting options (except if 2246 * eager->tcp_send_discon_ind set). 2247 * mp1 option buffer represented by OPT_length/offset 2248 * potentially modified and contains results of setting 2249 * options at this point 2250 */ 2251 } 2252 2253 /* We no longer need mp1, since all options processing has passed */ 2254 freemsg(mp1); 2255 2256 putnext(listener->tcp_rq, ok_mp); 2257 2258 mutex_enter(&listener->tcp_eager_lock); 2259 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 2260 tcp_t *tail; 2261 mblk_t *conn_ind; 2262 2263 /* 2264 * This path should not be executed if listener and 2265 * acceptor streams are the same. 2266 */ 2267 ASSERT(listener != acceptor); 2268 2269 tcp = listener->tcp_eager_prev_q0; 2270 /* 2271 * listener->tcp_eager_prev_q0 points to the TAIL of the 2272 * deferred T_conn_ind queue. We need to get to the head of 2273 * the queue in order to send up T_conn_ind the same order as 2274 * how the 3WHS is completed. 2275 */ 2276 while (tcp != listener) { 2277 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0) 2278 break; 2279 else 2280 tcp = tcp->tcp_eager_prev_q0; 2281 } 2282 ASSERT(tcp != listener); 2283 conn_ind = tcp->tcp_conn.tcp_eager_conn_ind; 2284 ASSERT(conn_ind != NULL); 2285 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 2286 2287 /* Move from q0 to q */ 2288 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 2289 listener->tcp_conn_req_cnt_q0--; 2290 listener->tcp_conn_req_cnt_q++; 2291 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 2292 tcp->tcp_eager_prev_q0; 2293 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 2294 tcp->tcp_eager_next_q0; 2295 tcp->tcp_eager_prev_q0 = NULL; 2296 tcp->tcp_eager_next_q0 = NULL; 2297 tcp->tcp_conn_def_q0 = B_FALSE; 2298 2299 /* 2300 * Insert at end of the queue because sockfs sends 2301 * down T_CONN_RES in chronological order. Leaving 2302 * the older conn indications at front of the queue 2303 * helps reducing search time. 2304 */ 2305 tail = listener->tcp_eager_last_q; 2306 if (tail != NULL) 2307 tail->tcp_eager_next_q = tcp; 2308 else 2309 listener->tcp_eager_next_q = tcp; 2310 listener->tcp_eager_last_q = tcp; 2311 tcp->tcp_eager_next_q = NULL; 2312 mutex_exit(&listener->tcp_eager_lock); 2313 putnext(tcp->tcp_rq, conn_ind); 2314 } else { 2315 mutex_exit(&listener->tcp_eager_lock); 2316 } 2317 2318 /* 2319 * Done with the acceptor - free it 2320 * 2321 * Note: from this point on, no access to listener should be made 2322 * as listener can be equal to acceptor. 2323 */ 2324 finish: 2325 ASSERT(acceptor->tcp_detached); 2326 acceptor->tcp_rq = tcp_g_q; 2327 acceptor->tcp_wq = WR(tcp_g_q); 2328 (void) tcp_clean_death(acceptor, 0, 2); 2329 CONN_DEC_REF(acceptor->tcp_connp); 2330 2331 /* 2332 * In case we already received a FIN we have to make tcp_rput send 2333 * the ordrel_ind. This will also send up a window update if the window 2334 * has opened up. 2335 * 2336 * In the normal case of a successful connection acceptance 2337 * we give the O_T_BIND_REQ to the read side put procedure as an 2338 * indication that this was just accepted. This tells tcp_rput to 2339 * pass up any data queued in tcp_rcv_list. 
2340 * 2341 * In the fringe case where options sent with T_CONN_RES failed and 2342 * we required, we would be indicating a T_DISCON_IND to blow 2343 * away this connection. 2344 */ 2345 2346 /* 2347 * XXX: we currently have a problem if XTI application closes the 2348 * acceptor stream in between. This problem exists in on10-gate also 2349 * and is well know but nothing can be done short of major rewrite 2350 * to fix it. Now it is possible to take care of it by assigning TLI/XTI 2351 * eager same squeue as listener (we can distinguish non socket 2352 * listeners at the time of handling a SYN in tcp_conn_request) 2353 * and do most of the work that tcp_accept_finish does here itself 2354 * and then get behind the acceptor squeue to access the acceptor 2355 * queue. 2356 */ 2357 /* 2358 * We already have a ref on tcp so no need to do one before squeue_fill 2359 */ 2360 squeue_fill(eager->tcp_connp->conn_sqp, opt_mp, 2361 tcp_accept_finish, eager->tcp_connp, SQTAG_TCP_ACCEPT_FINISH); 2362 } 2363 2364 /* 2365 * Swap information between the eager and acceptor for a TLI/XTI client. 2366 * The sockfs accept is done on the acceptor stream and control goes 2367 * through tcp_wput_accept() and tcp_accept()/tcp_accept_swap() is not 2368 * called. In either case, both the eager and listener are in their own 2369 * perimeter (squeue) and the code has to deal with potential race. 2370 * 2371 * See the block comment on top of tcp_accept() and tcp_wput_accept(). 2372 */ 2373 static void 2374 tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager) 2375 { 2376 conn_t *econnp, *aconnp; 2377 2378 ASSERT(eager->tcp_rq == listener->tcp_rq); 2379 ASSERT(eager->tcp_detached && !acceptor->tcp_detached); 2380 ASSERT(!eager->tcp_hard_bound); 2381 ASSERT(!TCP_IS_SOCKET(acceptor)); 2382 ASSERT(!TCP_IS_SOCKET(eager)); 2383 ASSERT(!TCP_IS_SOCKET(listener)); 2384 2385 acceptor->tcp_detached = B_TRUE; 2386 /* 2387 * To permit stream re-use by TLI/XTI, the eager needs a copy of 2388 * the acceptor id. 2389 */ 2390 eager->tcp_acceptor_id = acceptor->tcp_acceptor_id; 2391 2392 /* remove eager from listen list... */ 2393 mutex_enter(&listener->tcp_eager_lock); 2394 tcp_eager_unlink(eager); 2395 ASSERT(eager->tcp_eager_next_q == NULL && 2396 eager->tcp_eager_last_q == NULL); 2397 ASSERT(eager->tcp_eager_next_q0 == NULL && 2398 eager->tcp_eager_prev_q0 == NULL); 2399 mutex_exit(&listener->tcp_eager_lock); 2400 eager->tcp_rq = acceptor->tcp_rq; 2401 eager->tcp_wq = acceptor->tcp_wq; 2402 2403 econnp = eager->tcp_connp; 2404 aconnp = acceptor->tcp_connp; 2405 2406 eager->tcp_rq->q_ptr = econnp; 2407 eager->tcp_wq->q_ptr = econnp; 2408 eager->tcp_detached = B_FALSE; 2409 2410 ASSERT(eager->tcp_ack_tid == 0); 2411 2412 econnp->conn_dev = aconnp->conn_dev; 2413 if (eager->tcp_cred != NULL) 2414 crfree(eager->tcp_cred); 2415 eager->tcp_cred = econnp->conn_cred = aconnp->conn_cred; 2416 econnp->conn_zoneid = aconnp->conn_zoneid; 2417 aconnp->conn_cred = NULL; 2418 2419 econnp->conn_mac_exempt = aconnp->conn_mac_exempt; 2420 aconnp->conn_mac_exempt = B_FALSE; 2421 2422 ASSERT(aconnp->conn_peercred == NULL); 2423 2424 /* Do the IPC initialization */ 2425 CONN_INC_REF(econnp); 2426 2427 econnp->conn_multicast_loop = aconnp->conn_multicast_loop; 2428 econnp->conn_af_isv6 = aconnp->conn_af_isv6; 2429 econnp->conn_pkt_isv6 = aconnp->conn_pkt_isv6; 2430 econnp->conn_ulp = aconnp->conn_ulp; 2431 2432 /* Done with old IPC. 
Drop its ref on its connp */
2433 	CONN_DEC_REF(aconnp);
2434 }
2435 
2436 
2437 /*
2438  * Adapt to the information, such as rtt and rtt_sd, provided from the
2439  * ire cached in conn_cache_ire.  If no ire is cached, do an ire lookup.
2440  *
2441  * Checks for multicast and broadcast destination address.
2442  * Returns zero on failure; non-zero if ok.
2443  *
2444  * Note that the MSS calculation here is based on the info given in
2445  * the IRE.  We do not do any calculation based on TCP options.  They
2446  * will be handled in tcp_rput_other() and tcp_rput_data() when TCP
2447  * knows which options to use.
2448  *
2449  * Note on how TCP gets its parameters for a connection.
2450  *
2451  * When a tcp_t structure is allocated, it gets all the default parameters.
2452  * In tcp_adapt_ire(), it gets those metric parameters, like rtt, rtt_sd,
2453  * spipe, rpipe, ... from the route metrics.  Route metrics override the
2454  * defaults.  But if there is an associated tcp_host_param, it will override
2455  * the metrics.
2456  *
2457  * An incoming SYN with a multicast or broadcast destination address is
2458  * dropped in one of two places.
2459  *
2460  * 1. If the packet was received over the wire it is dropped in
2461  * ip_rput_process_broadcast()
2462  *
2463  * 2. If the packet was received through internal IP loopback, i.e. the packet
2464  * was generated and received on the same machine, it is dropped in
2465  * ip_wput_local()
2466  *
2467  * An incoming SYN with a multicast or broadcast source address is always
2468  * dropped in tcp_adapt_ire.  The same logic in tcp_adapt_ire also serves to
2469  * reject an attempt to connect to a broadcast or multicast (destination)
2470  * address.
2471  */
2472 static int
2473 tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp)
2474 {
2475 	tcp_hsp_t	*hsp;
2476 	ire_t		*ire;
2477 	ire_t		*sire = NULL;
2478 	iulp_t		*ire_uinfo = NULL;
2479 	uint32_t	mss_max;
2480 	uint32_t	mss;
2481 	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
2482 	conn_t		*connp = tcp->tcp_connp;
2483 	boolean_t	ire_cacheable = B_FALSE;
2484 	zoneid_t	zoneid = connp->conn_zoneid;
2485 	int		match_flags = MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT |
2486 			    MATCH_IRE_SECATTR;
2487 	ts_label_t	*tsl = crgetlabel(CONN_CRED(connp));
2488 	ill_t		*ill = NULL;
2489 	boolean_t	incoming = (ire_mp == NULL);
2490 
2491 	ASSERT(connp->conn_ire_cache == NULL);
2492 
2493 	if (tcp->tcp_ipversion == IPV4_VERSION) {
2494 
2495 		if (CLASSD(tcp->tcp_connp->conn_rem)) {
2496 			BUMP_MIB(&ip_mib, ipInDiscards);
2497 			return (0);
2498 		}
2499 		/*
2500 		 * If IP_NEXTHOP is set, then look for an IRE_CACHE
2501 		 * for the destination with the nexthop as gateway.
2502 		 * ire_ctable_lookup() is used because this particular
2503 		 * ire, if it exists, will be marked private.
2504 		 * If that is not available, use the interface ire
2505 		 * for the nexthop.
2506 		 *
2507 		 * TSol: tcp_update_label will detect label mismatches based
2508 		 * only on the destination's label, but that would not
2509 		 * detect label mismatches based on the security attributes
2510 		 * of routes or next hop gateway.  Hence we need to pass the
2511 		 * label to ire_ftable_lookup below in order to locate the
2512 		 * right prefix (and/or) ire cache.  Similarly we also need
2513 		 * to pass the label to the ire_cache_lookup below to locate
2514 		 * the right ire that also matches on the label.
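		 *
		 * (Summary sketch, not from the original source, of the IPv4
		 * lookup order implemented just below:
		 *
		 *	conn_nexthop_set:
		 *		ire_ctable_lookup(dst, nexthop)	private cache ire
		 *		else ire_ftable_lookup(nexthop)	nexthop interface ire
		 *	otherwise:
		 *		ire_cache_lookup(dst)
		 *		else ire_ftable_lookup(dst)	only when no ire_mp
		 *						was passed in
		 *
		 * with the label, tsl, passed to each lookup.)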
2515 */ 2516 if (tcp->tcp_connp->conn_nexthop_set) { 2517 ire = ire_ctable_lookup(tcp->tcp_connp->conn_rem, 2518 tcp->tcp_connp->conn_nexthop_v4, 0, NULL, zoneid, 2519 tsl, MATCH_IRE_MARK_PRIVATE_ADDR | MATCH_IRE_GW); 2520 if (ire == NULL) { 2521 ire = ire_ftable_lookup( 2522 tcp->tcp_connp->conn_nexthop_v4, 2523 0, 0, IRE_INTERFACE, NULL, NULL, zoneid, 0, 2524 tsl, match_flags); 2525 if (ire == NULL) 2526 return (0); 2527 } else { 2528 ire_uinfo = &ire->ire_uinfo; 2529 } 2530 } else { 2531 ire = ire_cache_lookup(tcp->tcp_connp->conn_rem, 2532 zoneid, tsl); 2533 if (ire != NULL) { 2534 ire_cacheable = B_TRUE; 2535 ire_uinfo = (ire_mp != NULL) ? 2536 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2537 &ire->ire_uinfo; 2538 2539 } else { 2540 if (ire_mp == NULL) { 2541 ire = ire_ftable_lookup( 2542 tcp->tcp_connp->conn_rem, 2543 0, 0, 0, NULL, &sire, zoneid, 0, 2544 tsl, (MATCH_IRE_RECURSIVE | 2545 MATCH_IRE_DEFAULT)); 2546 if (ire == NULL) 2547 return (0); 2548 ire_uinfo = (sire != NULL) ? 2549 &sire->ire_uinfo : 2550 &ire->ire_uinfo; 2551 } else { 2552 ire = (ire_t *)ire_mp->b_rptr; 2553 ire_uinfo = 2554 &((ire_t *) 2555 ire_mp->b_rptr)->ire_uinfo; 2556 } 2557 } 2558 } 2559 ASSERT(ire != NULL); 2560 2561 if ((ire->ire_src_addr == INADDR_ANY) || 2562 (ire->ire_type & IRE_BROADCAST)) { 2563 /* 2564 * ire->ire_mp is non null when ire_mp passed in is used 2565 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2566 */ 2567 if (ire->ire_mp == NULL) 2568 ire_refrele(ire); 2569 if (sire != NULL) 2570 ire_refrele(sire); 2571 return (0); 2572 } 2573 2574 if (tcp->tcp_ipha->ipha_src == INADDR_ANY) { 2575 ipaddr_t src_addr; 2576 2577 /* 2578 * ip_bind_connected() has stored the correct source 2579 * address in conn_src. 2580 */ 2581 src_addr = tcp->tcp_connp->conn_src; 2582 tcp->tcp_ipha->ipha_src = src_addr; 2583 /* 2584 * Copy of the src addr. in tcp_t is needed 2585 * for the lookup funcs. 2586 */ 2587 IN6_IPADDR_TO_V4MAPPED(src_addr, &tcp->tcp_ip_src_v6); 2588 } 2589 /* 2590 * Set the fragment bit so that IP will tell us if the MTU 2591 * should change. IP tells us the latest setting of 2592 * ip_path_mtu_discovery through ire_frag_flag. 2593 */ 2594 if (ip_path_mtu_discovery) { 2595 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 2596 htons(IPH_DF); 2597 } 2598 /* 2599 * If ire_uinfo is NULL, this is the IRE_INTERFACE case 2600 * for IP_NEXTHOP. No cache ire has been found for the 2601 * destination and we are working with the nexthop's 2602 * interface ire. Since we need to forward all packets 2603 * to the nexthop first, we "blindly" set tcp_localnet 2604 * to false, eventhough the destination may also be 2605 * onlink. 2606 */ 2607 if (ire_uinfo == NULL) 2608 tcp->tcp_localnet = 0; 2609 else 2610 tcp->tcp_localnet = (ire->ire_gateway_addr == 0); 2611 } else { 2612 /* 2613 * For incoming connection ire_mp = NULL 2614 * For outgoing connection ire_mp != NULL 2615 * Technically we should check conn_incoming_ill 2616 * when ire_mp is NULL and conn_outgoing_ill when 2617 * ire_mp is non-NULL. But this is performance 2618 * critical path and for IPV*_BOUND_IF, outgoing 2619 * and incoming ill are always set to the same value. 
2620 */ 2621 ill_t *dst_ill = NULL; 2622 ipif_t *dst_ipif = NULL; 2623 2624 ASSERT(connp->conn_outgoing_ill == connp->conn_incoming_ill); 2625 2626 if (connp->conn_outgoing_ill != NULL) { 2627 /* Outgoing or incoming path */ 2628 int err; 2629 2630 dst_ill = conn_get_held_ill(connp, 2631 &connp->conn_outgoing_ill, &err); 2632 if (err == ILL_LOOKUP_FAILED || dst_ill == NULL) { 2633 ip1dbg(("tcp_adapt_ire: ill_lookup failed\n")); 2634 return (0); 2635 } 2636 match_flags |= MATCH_IRE_ILL; 2637 dst_ipif = dst_ill->ill_ipif; 2638 } 2639 ire = ire_ctable_lookup_v6(&tcp->tcp_connp->conn_remv6, 2640 0, 0, dst_ipif, zoneid, tsl, match_flags); 2641 2642 if (ire != NULL) { 2643 ire_cacheable = B_TRUE; 2644 ire_uinfo = (ire_mp != NULL) ? 2645 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2646 &ire->ire_uinfo; 2647 } else { 2648 if (ire_mp == NULL) { 2649 ire = ire_ftable_lookup_v6( 2650 &tcp->tcp_connp->conn_remv6, 2651 0, 0, 0, dst_ipif, &sire, zoneid, 2652 0, tsl, match_flags); 2653 if (ire == NULL) { 2654 if (dst_ill != NULL) 2655 ill_refrele(dst_ill); 2656 return (0); 2657 } 2658 ire_uinfo = (sire != NULL) ? &sire->ire_uinfo : 2659 &ire->ire_uinfo; 2660 } else { 2661 ire = (ire_t *)ire_mp->b_rptr; 2662 ire_uinfo = 2663 &((ire_t *)ire_mp->b_rptr)->ire_uinfo; 2664 } 2665 } 2666 if (dst_ill != NULL) 2667 ill_refrele(dst_ill); 2668 2669 ASSERT(ire != NULL); 2670 ASSERT(ire_uinfo != NULL); 2671 2672 if (IN6_IS_ADDR_UNSPECIFIED(&ire->ire_src_addr_v6) || 2673 IN6_IS_ADDR_MULTICAST(&ire->ire_addr_v6)) { 2674 /* 2675 * ire->ire_mp is non null when ire_mp passed in is used 2676 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2677 */ 2678 if (ire->ire_mp == NULL) 2679 ire_refrele(ire); 2680 if (sire != NULL) 2681 ire_refrele(sire); 2682 return (0); 2683 } 2684 2685 if (IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 2686 in6_addr_t src_addr; 2687 2688 /* 2689 * ip_bind_connected_v6() has stored the correct source 2690 * address per IPv6 addr. selection policy in 2691 * conn_src_v6. 2692 */ 2693 src_addr = tcp->tcp_connp->conn_srcv6; 2694 2695 tcp->tcp_ip6h->ip6_src = src_addr; 2696 /* 2697 * Copy of the src addr. in tcp_t is needed 2698 * for the lookup funcs. 2699 */ 2700 tcp->tcp_ip_src_v6 = src_addr; 2701 ASSERT(IN6_ARE_ADDR_EQUAL(&tcp->tcp_ip6h->ip6_src, 2702 &connp->conn_srcv6)); 2703 } 2704 tcp->tcp_localnet = 2705 IN6_IS_ADDR_UNSPECIFIED(&ire->ire_gateway_addr_v6); 2706 } 2707 2708 /* 2709 * This allows applications to fail quickly when connections are made 2710 * to dead hosts. Hosts can be labeled dead by adding a reject route 2711 * with both the RTF_REJECT and RTF_PRIVATE flags set. 2712 */ 2713 if ((ire->ire_flags & RTF_REJECT) && 2714 (ire->ire_flags & RTF_PRIVATE)) 2715 goto error; 2716 2717 /* 2718 * Make use of the cached rtt and rtt_sd values to calculate the 2719 * initial RTO. Note that they are already initialized in 2720 * tcp_init_values(). 2721 * If ire_uinfo is NULL, i.e., we do not have a cache ire for 2722 * IP_NEXTHOP, but instead are using the interface ire for the 2723 * nexthop, then we do not use the ire_uinfo from that ire to 2724 * do any initializations. 
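	 *
	 * (Worked example, not from the original source: with cached values
	 * of, say, tcp_rtt_sa = 800 and tcp_rtt_sd = 100 (in whatever scaled
	 * units the cache was saved in) and tcp_rexmit_interval_extra = 0,
	 * the formula below gives
	 *
	 *	rto = (800 >> 3) + 100 + 0 + (800 >> 5) = 100 + 100 + 25 = 225
	 *
	 * which is then clamped to the [tcp_rexmit_interval_min,
	 * tcp_rexmit_interval_max] range before being stored in tcp_rto.)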
2725 */ 2726 if (ire_uinfo != NULL) { 2727 if (ire_uinfo->iulp_rtt != 0) { 2728 clock_t rto; 2729 2730 tcp->tcp_rtt_sa = ire_uinfo->iulp_rtt; 2731 tcp->tcp_rtt_sd = ire_uinfo->iulp_rtt_sd; 2732 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 2733 tcp_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5); 2734 2735 if (rto > tcp_rexmit_interval_max) { 2736 tcp->tcp_rto = tcp_rexmit_interval_max; 2737 } else if (rto < tcp_rexmit_interval_min) { 2738 tcp->tcp_rto = tcp_rexmit_interval_min; 2739 } else { 2740 tcp->tcp_rto = rto; 2741 } 2742 } 2743 if (ire_uinfo->iulp_ssthresh != 0) 2744 tcp->tcp_cwnd_ssthresh = ire_uinfo->iulp_ssthresh; 2745 else 2746 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 2747 if (ire_uinfo->iulp_spipe > 0) { 2748 tcp->tcp_xmit_hiwater = MIN(ire_uinfo->iulp_spipe, 2749 tcp_max_buf); 2750 if (tcp_snd_lowat_fraction != 0) 2751 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2752 tcp_snd_lowat_fraction; 2753 (void) tcp_maxpsz_set(tcp, B_TRUE); 2754 } 2755 /* 2756 * Note that up till now, acceptor always inherits receive 2757 * window from the listener. But if there is a metrics 2758 * associated with a host, we should use that instead of 2759 * inheriting it from listener. Thus we need to pass this 2760 * info back to the caller. 2761 */ 2762 if (ire_uinfo->iulp_rpipe > 0) { 2763 tcp->tcp_rwnd = MIN(ire_uinfo->iulp_rpipe, tcp_max_buf); 2764 } 2765 2766 if (ire_uinfo->iulp_rtomax > 0) { 2767 tcp->tcp_second_timer_threshold = 2768 ire_uinfo->iulp_rtomax; 2769 } 2770 2771 /* 2772 * Use the metric option settings, iulp_tstamp_ok and 2773 * iulp_wscale_ok, only for active open. What this means 2774 * is that if the other side uses timestamp or window 2775 * scale option, TCP will also use those options. That 2776 * is for passive open. If the application sets a 2777 * large window, window scale is enabled regardless of 2778 * the value in iulp_wscale_ok. This is the behavior 2779 * since 2.6. So we keep it. 2780 * The only case left in passive open processing is the 2781 * check for SACK. 2782 * For ECN, it should probably be like SACK. But the 2783 * current value is binary, so we treat it like the other 2784 * cases. The metric only controls active open.For passive 2785 * open, the ndd param, tcp_ecn_permitted, controls the 2786 * behavior. 2787 */ 2788 if (!tcp_detached) { 2789 /* 2790 * The if check means that the following can only 2791 * be turned on by the metrics only IRE, but not off. 2792 */ 2793 if (ire_uinfo->iulp_tstamp_ok) 2794 tcp->tcp_snd_ts_ok = B_TRUE; 2795 if (ire_uinfo->iulp_wscale_ok) 2796 tcp->tcp_snd_ws_ok = B_TRUE; 2797 if (ire_uinfo->iulp_sack == 2) 2798 tcp->tcp_snd_sack_ok = B_TRUE; 2799 if (ire_uinfo->iulp_ecn_ok) 2800 tcp->tcp_ecn_ok = B_TRUE; 2801 } else { 2802 /* 2803 * Passive open. 2804 * 2805 * As above, the if check means that SACK can only be 2806 * turned on by the metric only IRE. 2807 */ 2808 if (ire_uinfo->iulp_sack > 0) { 2809 tcp->tcp_snd_sack_ok = B_TRUE; 2810 } 2811 } 2812 } 2813 2814 2815 /* 2816 * XXX: Note that currently, ire_max_frag can be as small as 68 2817 * because of PMTUd. So tcp_mss may go to negative if combined 2818 * length of all those options exceeds 28 bytes. But because 2819 * of the tcp_mss_min check below, we may not have a problem if 2820 * tcp_mss_min is of a reasonable value. The default is 1 so 2821 * the negative problem still exists. And the check defeats PMTUd. 2822 * In fact, if PMTUd finds that the MSS should be smaller than 2823 * tcp_mss_min, TCP should turn off PMUTd and use the tcp_mss_min 2824 * value. 
2825 * 2826 * We do not deal with that now. All those problems related to 2827 * PMTUd will be fixed later. 2828 */ 2829 ASSERT(ire->ire_max_frag != 0); 2830 mss = tcp->tcp_if_mtu = ire->ire_max_frag; 2831 if (tcp->tcp_ipp_fields & IPPF_USE_MIN_MTU) { 2832 if (tcp->tcp_ipp_use_min_mtu == IPV6_USE_MIN_MTU_NEVER) { 2833 mss = MIN(mss, IPV6_MIN_MTU); 2834 } 2835 } 2836 2837 /* Sanity check for MSS value. */ 2838 if (tcp->tcp_ipversion == IPV4_VERSION) 2839 mss_max = tcp_mss_max_ipv4; 2840 else 2841 mss_max = tcp_mss_max_ipv6; 2842 2843 if (tcp->tcp_ipversion == IPV6_VERSION && 2844 (ire->ire_frag_flag & IPH_FRAG_HDR)) { 2845 /* 2846 * After receiving an ICMPv6 "packet too big" message with a 2847 * MTU < 1280, and for multirouted IPv6 packets, the IP layer 2848 * will insert a 8-byte fragment header in every packet; we 2849 * reduce the MSS by that amount here. 2850 */ 2851 mss -= sizeof (ip6_frag_t); 2852 } 2853 2854 if (tcp->tcp_ipsec_overhead == 0) 2855 tcp->tcp_ipsec_overhead = conn_ipsec_length(connp); 2856 2857 mss -= tcp->tcp_ipsec_overhead; 2858 2859 if (mss < tcp_mss_min) 2860 mss = tcp_mss_min; 2861 if (mss > mss_max) 2862 mss = mss_max; 2863 2864 /* Note that this is the maximum MSS, excluding all options. */ 2865 tcp->tcp_mss = mss; 2866 2867 /* 2868 * Initialize the ISS here now that we have the full connection ID. 2869 * The RFC 1948 method of initial sequence number generation requires 2870 * knowledge of the full connection ID before setting the ISS. 2871 */ 2872 2873 tcp_iss_init(tcp); 2874 2875 if (ire->ire_type & (IRE_LOOPBACK | IRE_LOCAL)) 2876 tcp->tcp_loopback = B_TRUE; 2877 2878 if (tcp->tcp_ipversion == IPV4_VERSION) { 2879 hsp = tcp_hsp_lookup(tcp->tcp_remote); 2880 } else { 2881 hsp = tcp_hsp_lookup_ipv6(&tcp->tcp_remote_v6); 2882 } 2883 2884 if (hsp != NULL) { 2885 /* Only modify if we're going to make them bigger */ 2886 if (hsp->tcp_hsp_sendspace > tcp->tcp_xmit_hiwater) { 2887 tcp->tcp_xmit_hiwater = hsp->tcp_hsp_sendspace; 2888 if (tcp_snd_lowat_fraction != 0) 2889 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2890 tcp_snd_lowat_fraction; 2891 } 2892 2893 if (hsp->tcp_hsp_recvspace > tcp->tcp_rwnd) { 2894 tcp->tcp_rwnd = hsp->tcp_hsp_recvspace; 2895 } 2896 2897 /* Copy timestamp flag only for active open */ 2898 if (!tcp_detached) 2899 tcp->tcp_snd_ts_ok = hsp->tcp_hsp_tstamp; 2900 } 2901 2902 if (sire != NULL) 2903 IRE_REFRELE(sire); 2904 2905 /* 2906 * If we got an IRE_CACHE and an ILL, go through their properties; 2907 * otherwise, this is deferred until later when we have an IRE_CACHE. 2908 */ 2909 if (tcp->tcp_loopback || 2910 (ire_cacheable && (ill = ire_to_ill(ire)) != NULL)) { 2911 /* 2912 * For incoming, see if this tcp may be MDT-capable. For 2913 * outgoing, this process has been taken care of through 2914 * tcp_rput_other. 2915 */ 2916 tcp_ire_ill_check(tcp, ire, ill, incoming); 2917 tcp->tcp_ire_ill_check_done = B_TRUE; 2918 } 2919 2920 mutex_enter(&connp->conn_lock); 2921 /* 2922 * Make sure that conn is not marked incipient 2923 * for incoming connections. A blind 2924 * removal of incipient flag is cheaper than 2925 * check and removal. 2926 */ 2927 connp->conn_state_flags &= ~CONN_INCIPIENT; 2928 2929 /* Must not cache forwarding table routes. 
*/ 2930 if (ire_cacheable) { 2931 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 2932 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 2933 connp->conn_ire_cache = ire; 2934 IRE_UNTRACE_REF(ire); 2935 rw_exit(&ire->ire_bucket->irb_lock); 2936 mutex_exit(&connp->conn_lock); 2937 return (1); 2938 } 2939 rw_exit(&ire->ire_bucket->irb_lock); 2940 } 2941 mutex_exit(&connp->conn_lock); 2942 2943 if (ire->ire_mp == NULL) 2944 ire_refrele(ire); 2945 return (1); 2946 2947 error: 2948 if (ire->ire_mp == NULL) 2949 ire_refrele(ire); 2950 if (sire != NULL) 2951 ire_refrele(sire); 2952 return (0); 2953 } 2954 2955 /* 2956 * tcp_bind is called (holding the writer lock) by tcp_wput_proto to process a 2957 * O_T_BIND_REQ/T_BIND_REQ message. 2958 */ 2959 static void 2960 tcp_bind(tcp_t *tcp, mblk_t *mp) 2961 { 2962 sin_t *sin; 2963 sin6_t *sin6; 2964 mblk_t *mp1; 2965 in_port_t requested_port; 2966 in_port_t allocated_port; 2967 struct T_bind_req *tbr; 2968 boolean_t bind_to_req_port_only; 2969 boolean_t backlog_update = B_FALSE; 2970 boolean_t user_specified; 2971 in6_addr_t v6addr; 2972 ipaddr_t v4addr; 2973 uint_t origipversion; 2974 int err; 2975 queue_t *q = tcp->tcp_wq; 2976 conn_t *connp; 2977 mlp_type_t addrtype, mlptype; 2978 zone_t *zone; 2979 cred_t *cr; 2980 in_port_t mlp_port; 2981 2982 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 2983 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) { 2984 if (tcp->tcp_debug) { 2985 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 2986 "tcp_bind: bad req, len %u", 2987 (uint_t)(mp->b_wptr - mp->b_rptr)); 2988 } 2989 tcp_err_ack(tcp, mp, TPROTO, 0); 2990 return; 2991 } 2992 /* Make sure the largest address fits */ 2993 mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1); 2994 if (mp1 == NULL) { 2995 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 2996 return; 2997 } 2998 mp = mp1; 2999 tbr = (struct T_bind_req *)mp->b_rptr; 3000 if (tcp->tcp_state >= TCPS_BOUND) { 3001 if ((tcp->tcp_state == TCPS_BOUND || 3002 tcp->tcp_state == TCPS_LISTEN) && 3003 tcp->tcp_conn_req_max != tbr->CONIND_number && 3004 tbr->CONIND_number > 0) { 3005 /* 3006 * Handle listen() increasing CONIND_number. 3007 * This is more "liberal" then what the TPI spec 3008 * requires but is needed to avoid a t_unbind 3009 * when handling listen() since the port number 3010 * might be "stolen" between the unbind and bind. 
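			 *
			 * (Illustrative, not from the original source: an
			 * application that calls
			 *
			 *	listen(fd, 5);		first bind, CONIND_number = 5
			 *	listen(fd, 128);	later call, larger backlog
			 *
			 * arrives here in TCPS_BOUND or TCPS_LISTEN with a new,
			 * non-zero CONIND_number; we simply take the do_bind path
			 * with backlog_update set rather than forcing an
			 * unbind/bind cycle that could lose the port.)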
3011 */ 3012 backlog_update = B_TRUE; 3013 goto do_bind; 3014 } 3015 if (tcp->tcp_debug) { 3016 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3017 "tcp_bind: bad state, %d", tcp->tcp_state); 3018 } 3019 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 3020 return; 3021 } 3022 origipversion = tcp->tcp_ipversion; 3023 3024 switch (tbr->ADDR_length) { 3025 case 0: /* request for a generic port */ 3026 tbr->ADDR_offset = sizeof (struct T_bind_req); 3027 if (tcp->tcp_family == AF_INET) { 3028 tbr->ADDR_length = sizeof (sin_t); 3029 sin = (sin_t *)&tbr[1]; 3030 *sin = sin_null; 3031 sin->sin_family = AF_INET; 3032 mp->b_wptr = (uchar_t *)&sin[1]; 3033 tcp->tcp_ipversion = IPV4_VERSION; 3034 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &v6addr); 3035 } else { 3036 ASSERT(tcp->tcp_family == AF_INET6); 3037 tbr->ADDR_length = sizeof (sin6_t); 3038 sin6 = (sin6_t *)&tbr[1]; 3039 *sin6 = sin6_null; 3040 sin6->sin6_family = AF_INET6; 3041 mp->b_wptr = (uchar_t *)&sin6[1]; 3042 tcp->tcp_ipversion = IPV6_VERSION; 3043 V6_SET_ZERO(v6addr); 3044 } 3045 requested_port = 0; 3046 break; 3047 3048 case sizeof (sin_t): /* Complete IPv4 address */ 3049 sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset, 3050 sizeof (sin_t)); 3051 if (sin == NULL || !OK_32PTR((char *)sin)) { 3052 if (tcp->tcp_debug) { 3053 (void) strlog(TCP_MOD_ID, 0, 1, 3054 SL_ERROR|SL_TRACE, 3055 "tcp_bind: bad address parameter, " 3056 "offset %d, len %d", 3057 tbr->ADDR_offset, tbr->ADDR_length); 3058 } 3059 tcp_err_ack(tcp, mp, TPROTO, 0); 3060 return; 3061 } 3062 /* 3063 * With sockets sockfs will accept bogus sin_family in 3064 * bind() and replace it with the family used in the socket 3065 * call. 3066 */ 3067 if (sin->sin_family != AF_INET || 3068 tcp->tcp_family != AF_INET) { 3069 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3070 return; 3071 } 3072 requested_port = ntohs(sin->sin_port); 3073 tcp->tcp_ipversion = IPV4_VERSION; 3074 v4addr = sin->sin_addr.s_addr; 3075 IN6_IPADDR_TO_V4MAPPED(v4addr, &v6addr); 3076 break; 3077 3078 case sizeof (sin6_t): /* Complete IPv6 address */ 3079 sin6 = (sin6_t *)mi_offset_param(mp, 3080 tbr->ADDR_offset, sizeof (sin6_t)); 3081 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 3082 if (tcp->tcp_debug) { 3083 (void) strlog(TCP_MOD_ID, 0, 1, 3084 SL_ERROR|SL_TRACE, 3085 "tcp_bind: bad IPv6 address parameter, " 3086 "offset %d, len %d", tbr->ADDR_offset, 3087 tbr->ADDR_length); 3088 } 3089 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 3090 return; 3091 } 3092 if (sin6->sin6_family != AF_INET6 || 3093 tcp->tcp_family != AF_INET6) { 3094 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3095 return; 3096 } 3097 requested_port = ntohs(sin6->sin6_port); 3098 tcp->tcp_ipversion = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) ? 3099 IPV4_VERSION : IPV6_VERSION; 3100 v6addr = sin6->sin6_addr; 3101 break; 3102 3103 default: 3104 if (tcp->tcp_debug) { 3105 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3106 "tcp_bind: bad address length, %d", 3107 tbr->ADDR_length); 3108 } 3109 tcp_err_ack(tcp, mp, TBADADDR, 0); 3110 return; 3111 } 3112 tcp->tcp_bound_source_v6 = v6addr; 3113 3114 /* Check for change in ipversion */ 3115 if (origipversion != tcp->tcp_ipversion) { 3116 ASSERT(tcp->tcp_family == AF_INET6); 3117 err = tcp->tcp_ipversion == IPV6_VERSION ? 3118 tcp_header_init_ipv6(tcp) : tcp_header_init_ipv4(tcp); 3119 if (err) { 3120 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3121 return; 3122 } 3123 } 3124 3125 /* 3126 * Initialize family specific fields. Copy of the src addr. 3127 * in tcp_t is needed for the lookup funcs. 
*/ 3129 if (tcp->tcp_ipversion == IPV6_VERSION) { 3130 tcp->tcp_ip6h->ip6_src = v6addr; 3131 } else { 3132 IN6_V4MAPPED_TO_IPADDR(&v6addr, tcp->tcp_ipha->ipha_src); 3133 } 3134 tcp->tcp_ip_src_v6 = v6addr; 3135 3136 /* 3137 * For O_T_BIND_REQ: 3138 * Verify that the target port/addr is available, or choose 3139 * another. 3140 * For T_BIND_REQ: 3141 * Verify that the target port/addr is available or fail. 3142 * In both cases when it succeeds the tcp is inserted in the 3143 * bind hash table. This ensures that the operation is atomic 3144 * under the lock on the hash bucket. 3145 */ 3146 bind_to_req_port_only = requested_port != 0 && 3147 tbr->PRIM_type != O_T_BIND_REQ; 3148 /* 3149 * Get a valid port (within the anonymous range and not 3150 * a privileged one) to use if the user has not given a port. 3151 * If multiple threads are here, they may all start with 3152 * the same initial port. But it should be fine as long as 3153 * tcp_bindi will ensure that no two threads will be assigned 3154 * the same port. 3155 * 3156 * NOTE: XXX If a privileged process asks for an anonymous port, we 3157 * still check for ports only in the range > tcp_smallest_non_priv_port, 3158 * unless TCP_ANONPRIVBIND option is set. 3159 */ 3160 mlptype = mlptSingle; 3161 mlp_port = requested_port; 3162 if (requested_port == 0) { 3163 requested_port = tcp->tcp_anon_priv_bind ? 3164 tcp_get_next_priv_port(tcp) : 3165 tcp_update_next_port(tcp_next_port_to_try, tcp, B_TRUE); 3166 if (requested_port == 0) { 3167 tcp_err_ack(tcp, mp, TNOADDR, 0); 3168 return; 3169 } 3170 user_specified = B_FALSE; 3171 3172 /* 3173 * If the user went through one of the RPC interfaces to create 3174 * this socket and RPC is MLP in this zone, then give him an 3175 * anonymous MLP. 3176 */ 3177 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3178 connp = tcp->tcp_connp; 3179 if (connp->conn_anon_mlp && is_system_labeled()) { 3180 zone = crgetzone(cr); 3181 addrtype = tsol_mlp_addr_type(zone->zone_id, 3182 IPV6_VERSION, &v6addr); 3183 if (addrtype == mlptSingle) { 3184 tcp_err_ack(tcp, mp, TNOADDR, 0); 3185 return; 3186 } 3187 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3188 PMAPPORT, addrtype); 3189 mlp_port = PMAPPORT; 3190 } 3191 } else { 3192 int i; 3193 boolean_t priv = B_FALSE; 3194 3195 /* 3196 * If the requested_port is in the well-known privileged range, 3197 * verify that the stream was opened by a privileged user.
3198 * Note: No locks are held when inspecting tcp_g_*epriv_ports 3199 * but instead the code relies on: 3200 * - the fact that the address of the array and its size never 3201 * changes 3202 * - the atomic assignment of the elements of the array 3203 */ 3204 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3205 if (requested_port < tcp_smallest_nonpriv_port) { 3206 priv = B_TRUE; 3207 } else { 3208 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 3209 if (requested_port == 3210 tcp_g_epriv_ports[i]) { 3211 priv = B_TRUE; 3212 break; 3213 } 3214 } 3215 } 3216 if (priv) { 3217 if (secpolicy_net_privaddr(cr, requested_port) != 0) { 3218 if (tcp->tcp_debug) { 3219 (void) strlog(TCP_MOD_ID, 0, 1, 3220 SL_ERROR|SL_TRACE, 3221 "tcp_bind: no priv for port %d", 3222 requested_port); 3223 } 3224 tcp_err_ack(tcp, mp, TACCES, 0); 3225 return; 3226 } 3227 } 3228 user_specified = B_TRUE; 3229 3230 connp = tcp->tcp_connp; 3231 if (is_system_labeled()) { 3232 zone = crgetzone(cr); 3233 addrtype = tsol_mlp_addr_type(zone->zone_id, 3234 IPV6_VERSION, &v6addr); 3235 if (addrtype == mlptSingle) { 3236 tcp_err_ack(tcp, mp, TNOADDR, 0); 3237 return; 3238 } 3239 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3240 requested_port, addrtype); 3241 } 3242 } 3243 3244 if (mlptype != mlptSingle) { 3245 if (secpolicy_net_bindmlp(cr) != 0) { 3246 if (tcp->tcp_debug) { 3247 (void) strlog(TCP_MOD_ID, 0, 1, 3248 SL_ERROR|SL_TRACE, 3249 "tcp_bind: no priv for multilevel port %d", 3250 requested_port); 3251 } 3252 tcp_err_ack(tcp, mp, TACCES, 0); 3253 return; 3254 } 3255 3256 /* 3257 * If we're specifically binding a shared IP address and the 3258 * port is MLP on shared addresses, then check to see if this 3259 * zone actually owns the MLP. Reject if not. 3260 */ 3261 if (mlptype == mlptShared && addrtype == mlptShared) { 3262 zoneid_t mlpzone; 3263 3264 mlpzone = tsol_mlp_findzone(IPPROTO_TCP, 3265 htons(mlp_port)); 3266 if (connp->conn_zoneid != mlpzone) { 3267 if (tcp->tcp_debug) { 3268 (void) strlog(TCP_MOD_ID, 0, 1, 3269 SL_ERROR|SL_TRACE, 3270 "tcp_bind: attempt to bind port " 3271 "%d on shared addr in zone %d " 3272 "(should be %d)", 3273 mlp_port, connp->conn_zoneid, 3274 mlpzone); 3275 } 3276 tcp_err_ack(tcp, mp, TACCES, 0); 3277 return; 3278 } 3279 } 3280 3281 if (!user_specified) { 3282 err = tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3283 requested_port, B_TRUE); 3284 if (err != 0) { 3285 if (tcp->tcp_debug) { 3286 (void) strlog(TCP_MOD_ID, 0, 1, 3287 SL_ERROR|SL_TRACE, 3288 "tcp_bind: cannot establish anon " 3289 "MLP for port %d", 3290 requested_port); 3291 } 3292 tcp_err_ack(tcp, mp, TSYSERR, err); 3293 return; 3294 } 3295 connp->conn_anon_port = B_TRUE; 3296 } 3297 connp->conn_mlp_type = mlptype; 3298 } 3299 3300 allocated_port = tcp_bindi(tcp, requested_port, &v6addr, 3301 tcp->tcp_reuseaddr, B_FALSE, bind_to_req_port_only, user_specified); 3302 3303 if (allocated_port == 0) { 3304 connp->conn_mlp_type = mlptSingle; 3305 if (connp->conn_anon_port) { 3306 connp->conn_anon_port = B_FALSE; 3307 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3308 requested_port, B_FALSE); 3309 } 3310 if (bind_to_req_port_only) { 3311 if (tcp->tcp_debug) { 3312 (void) strlog(TCP_MOD_ID, 0, 1, 3313 SL_ERROR|SL_TRACE, 3314 "tcp_bind: requested addr busy"); 3315 } 3316 tcp_err_ack(tcp, mp, TADDRBUSY, 0); 3317 } else { 3318 /* If we are out of ports, fail the bind. 
*/ 3319 if (tcp->tcp_debug) { 3320 (void) strlog(TCP_MOD_ID, 0, 1, 3321 SL_ERROR|SL_TRACE, 3322 "tcp_bind: out of ports?"); 3323 } 3324 tcp_err_ack(tcp, mp, TNOADDR, 0); 3325 } 3326 return; 3327 } 3328 ASSERT(tcp->tcp_state == TCPS_BOUND); 3329 do_bind: 3330 if (!backlog_update) { 3331 if (tcp->tcp_family == AF_INET) 3332 sin->sin_port = htons(allocated_port); 3333 else 3334 sin6->sin6_port = htons(allocated_port); 3335 } 3336 if (tcp->tcp_family == AF_INET) { 3337 if (tbr->CONIND_number != 0) { 3338 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3339 sizeof (sin_t)); 3340 } else { 3341 /* Just verify the local IP address */ 3342 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, IP_ADDR_LEN); 3343 } 3344 } else { 3345 if (tbr->CONIND_number != 0) { 3346 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3347 sizeof (sin6_t)); 3348 } else { 3349 /* Just verify the local IP address */ 3350 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3351 IPV6_ADDR_LEN); 3352 } 3353 } 3354 if (mp1 == NULL) { 3355 if (connp->conn_anon_port) { 3356 connp->conn_anon_port = B_FALSE; 3357 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3358 requested_port, B_FALSE); 3359 } 3360 connp->conn_mlp_type = mlptSingle; 3361 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3362 return; 3363 } 3364 3365 tbr->PRIM_type = T_BIND_ACK; 3366 mp->b_datap->db_type = M_PCPROTO; 3367 3368 /* Chain in the reply mp for tcp_rput() */ 3369 mp1->b_cont = mp; 3370 mp = mp1; 3371 3372 tcp->tcp_conn_req_max = tbr->CONIND_number; 3373 if (tcp->tcp_conn_req_max) { 3374 if (tcp->tcp_conn_req_max < tcp_conn_req_min) 3375 tcp->tcp_conn_req_max = tcp_conn_req_min; 3376 if (tcp->tcp_conn_req_max > tcp_conn_req_max_q) 3377 tcp->tcp_conn_req_max = tcp_conn_req_max_q; 3378 /* 3379 * If this is a listener, do not reset the eager list 3380 * and other stuffs. Note that we don't check if the 3381 * existing eager list meets the new tcp_conn_req_max 3382 * requirement. 3383 */ 3384 if (tcp->tcp_state != TCPS_LISTEN) { 3385 tcp->tcp_state = TCPS_LISTEN; 3386 /* Initialize the chain. Don't need the eager_lock */ 3387 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 3388 tcp->tcp_second_ctimer_threshold = 3389 tcp_ip_abort_linterval; 3390 } 3391 } 3392 3393 /* 3394 * We can call ip_bind directly which returns a T_BIND_ACK mp. The 3395 * processing continues in tcp_rput_other(). 3396 */ 3397 if (tcp->tcp_family == AF_INET6) { 3398 ASSERT(tcp->tcp_connp->conn_af_isv6); 3399 mp = ip_bind_v6(q, mp, tcp->tcp_connp, &tcp->tcp_sticky_ipp); 3400 } else { 3401 ASSERT(!tcp->tcp_connp->conn_af_isv6); 3402 mp = ip_bind_v4(q, mp, tcp->tcp_connp); 3403 } 3404 /* 3405 * If the bind cannot complete immediately 3406 * IP will arrange to call tcp_rput_other 3407 * when the bind completes. 3408 */ 3409 if (mp != NULL) { 3410 tcp_rput_other(tcp, mp); 3411 } else { 3412 /* 3413 * Bind will be resumed later. Need to ensure 3414 * that conn doesn't disappear when that happens. 3415 * This will be decremented in ip_resume_tcp_bind(). 3416 */ 3417 CONN_INC_REF(tcp->tcp_connp); 3418 } 3419 } 3420 3421 3422 /* 3423 * If the "bind_to_req_port_only" parameter is set, if the requested port 3424 * number is available, return it, If not return 0 3425 * 3426 * If "bind_to_req_port_only" parameter is not set and 3427 * If the requested port number is available, return it. If not, return 3428 * the first anonymous port we happen across. If no anonymous ports are 3429 * available, return 0. addr is the requested local address, if any. 
3430 * 3431 * In either case, when succeeding update the tcp_t to record the port number 3432 * and insert it in the bind hash table. 3433 * 3434 * Note that TCP over IPv4 and IPv6 sockets can use the same port number 3435 * without setting SO_REUSEADDR. This is needed so that they 3436 * can be viewed as two independent transport protocols. 3437 */ 3438 static in_port_t 3439 tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr, 3440 int reuseaddr, boolean_t quick_connect, 3441 boolean_t bind_to_req_port_only, boolean_t user_specified) 3442 { 3443 /* number of times we have run around the loop */ 3444 int count = 0; 3445 /* maximum number of times to run around the loop */ 3446 int loopmax; 3447 conn_t *connp = tcp->tcp_connp; 3448 zoneid_t zoneid = connp->conn_zoneid; 3449 3450 /* 3451 * Lookup for free addresses is done in a loop and "loopmax" 3452 * influences how long we spin in the loop 3453 */ 3454 if (bind_to_req_port_only) { 3455 /* 3456 * If the requested port is busy, don't bother to look 3457 * for a new one. Setting loop maximum count to 1 has 3458 * that effect. 3459 */ 3460 loopmax = 1; 3461 } else { 3462 /* 3463 * If the requested port is busy, look for a free one 3464 * in the anonymous port range. 3465 * Set loopmax appropriately so that one does not look 3466 * forever in the case all of the anonymous ports are in use. 3467 */ 3468 if (tcp->tcp_anon_priv_bind) { 3469 /* 3470 * loopmax = 3471 * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1 3472 */ 3473 loopmax = IPPORT_RESERVED - tcp_min_anonpriv_port; 3474 } else { 3475 loopmax = (tcp_largest_anon_port - 3476 tcp_smallest_anon_port + 1); 3477 } 3478 } 3479 do { 3480 uint16_t lport; 3481 tf_t *tbf; 3482 tcp_t *ltcp; 3483 conn_t *lconnp; 3484 3485 lport = htons(port); 3486 3487 /* 3488 * Ensure that the tcp_t is not currently in the bind hash. 3489 * Hold the lock on the hash bucket to ensure that 3490 * the duplicate check plus the insertion is an atomic 3491 * operation. 3492 * 3493 * This function does an inline lookup on the bind hash list 3494 * Make sure that we access only members of tcp_t 3495 * and that we don't look at tcp_tcp, since we are not 3496 * doing a CONN_INC_REF. 3497 */ 3498 tcp_bind_hash_remove(tcp); 3499 tbf = &tcp_bind_fanout[TCP_BIND_HASH(lport)]; 3500 mutex_enter(&tbf->tf_lock); 3501 for (ltcp = tbf->tf_tcp; ltcp != NULL; 3502 ltcp = ltcp->tcp_bind_hash) { 3503 if (lport != ltcp->tcp_lport) 3504 continue; 3505 3506 lconnp = ltcp->tcp_connp; 3507 3508 /* 3509 * On a labeled system, we must treat bindings to ports 3510 * on shared IP addresses by sockets with MAC exemption 3511 * privilege as being in all zones, as there's 3512 * otherwise no way to identify the right receiver. 3513 */ 3514 if (!IPCL_ZONE_MATCH(ltcp->tcp_connp, zoneid) && 3515 !lconnp->conn_mac_exempt && 3516 !connp->conn_mac_exempt) 3517 continue; 3518 3519 /* 3520 * If TCP_EXCLBIND is set for either the bound or 3521 * binding endpoint, the semantics of bind 3522 * is changed according to the following. 3523 * 3524 * spec = specified address (v4 or v6) 3525 * unspec = unspecified address (v4 or v6) 3526 * A = specified addresses are different for endpoints 3527 * 3528 * bound bind to allowed 3529 * ------------------------------------- 3530 * unspec unspec no 3531 * unspec spec no 3532 * spec unspec no 3533 * spec spec yes if A 3534 * 3535 * For labeled systems, SO_MAC_EXEMPT behaves the same 3536 * as UDP_EXCLBIND, except that zoneid is ignored. 3537 * 3538 * Note: 3539 * 3540 * 1. 
Because of TLI semantics, an endpoint can go 3541 * back from, say TCPS_ESTABLISHED to TCPS_LISTEN or 3542 * TCPS_BOUND, depending on whether it is originally 3543 * a listener or not. That is why we need to check 3544 * for states greater than or equal to TCPS_BOUND 3545 * here. 3546 * 3547 * 2. Ideally, we should only check for state equal 3548 * to TCPS_LISTEN. And the following check should be 3549 * added. 3550 * 3551 * if (ltcp->tcp_state == TCPS_LISTEN || 3552 * !reuseaddr || !ltcp->tcp_reuseaddr) { 3553 * ... 3554 * } 3555 * 3556 * The semantics will be changed to this. If the 3557 * endpoint on the list is in state not equal to 3558 * TCPS_LISTEN and both endpoints have SO_REUSEADDR 3559 * set, let the bind succeed. 3560 * 3561 * But because of (1), we cannot do that now. If 3562 * in future, we can change this going back semantics, 3563 * we can add the above check. 3564 */ 3565 if (ltcp->tcp_exclbind || tcp->tcp_exclbind || 3566 lconnp->conn_mac_exempt || connp->conn_mac_exempt) { 3567 if (V6_OR_V4_INADDR_ANY( 3568 ltcp->tcp_bound_source_v6) || 3569 V6_OR_V4_INADDR_ANY(*laddr) || 3570 IN6_ARE_ADDR_EQUAL(laddr, 3571 &ltcp->tcp_bound_source_v6)) { 3572 break; 3573 } 3574 continue; 3575 } 3576 3577 /* 3578 * Check ipversion to allow IPv4 and IPv6 sockets to 3579 * have disjoint port number spaces, if *_EXCLBIND 3580 * is not set and only if the application binds to a 3581 * specific port. We use the same autoassigned port 3582 * number space for IPv4 and IPv6 sockets. 3583 */ 3584 if (tcp->tcp_ipversion != ltcp->tcp_ipversion && 3585 bind_to_req_port_only) 3586 continue; 3587 3588 /* 3589 * Ideally, we should make sure that the source 3590 * address, remote address, and remote port in the 3591 * four tuple for this tcp-connection is unique. 3592 * However, trying to find out the local source 3593 * address would require too much code duplication 3594 * with IP, since IP needs to have that code 3595 * to support userland TCP implementations. 3596 */ 3597 if (quick_connect && 3598 (ltcp->tcp_state > TCPS_LISTEN) && 3599 ((tcp->tcp_fport != ltcp->tcp_fport) || 3600 !IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6, 3601 &ltcp->tcp_remote_v6))) 3602 continue; 3603 3604 if (!reuseaddr) { 3605 /* 3606 * No socket option SO_REUSEADDR. 3607 * If existing port is bound to 3608 * a non-wildcard IP address 3609 * and the requesting stream is 3610 * bound to a distinct 3611 * IP address 3612 * (non-wildcard, also), keep 3613 * going. 3614 */ 3615 if (!V6_OR_V4_INADDR_ANY(*laddr) && 3616 !V6_OR_V4_INADDR_ANY( 3617 ltcp->tcp_bound_source_v6) && 3618 !IN6_ARE_ADDR_EQUAL(laddr, 3619 &ltcp->tcp_bound_source_v6)) 3620 continue; 3621 if (ltcp->tcp_state >= TCPS_BOUND) { 3622 /* 3623 * This port is being used and 3624 * its state is >= TCPS_BOUND, 3625 * so we can't bind to it. 3626 */ 3627 break; 3628 } 3629 } else { 3630 /* 3631 * socket option SO_REUSEADDR is set on the 3632 * binding tcp_t. 3633 * 3634 * If two streams are bound to 3635 * the same IP address or both addr 3636 * and bound source are wildcards 3637 * (INADDR_ANY), we want to stop 3638 * searching. 3639 * We have found a match of IP source 3640 * address and source port, which is 3641 * refused regardless of the 3642 * SO_REUSEADDR setting, so we break.
3643 */ 3644 if (IN6_ARE_ADDR_EQUAL(laddr, 3645 &ltcp->tcp_bound_source_v6) && 3646 (ltcp->tcp_state == TCPS_LISTEN || 3647 ltcp->tcp_state == TCPS_BOUND)) 3648 break; 3649 } 3650 } 3651 if (ltcp != NULL) { 3652 /* The port number is busy */ 3653 mutex_exit(&tbf->tf_lock); 3654 } else { 3655 /* 3656 * This port is ours. Insert in fanout and mark as 3657 * bound to prevent others from getting the port 3658 * number. 3659 */ 3660 tcp->tcp_state = TCPS_BOUND; 3661 tcp->tcp_lport = htons(port); 3662 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 3663 3664 ASSERT(&tcp_bind_fanout[TCP_BIND_HASH( 3665 tcp->tcp_lport)] == tbf); 3666 tcp_bind_hash_insert(tbf, tcp, 1); 3667 3668 mutex_exit(&tbf->tf_lock); 3669 3670 /* 3671 * We don't want tcp_next_port_to_try to "inherit" 3672 * a port number supplied by the user in a bind. 3673 */ 3674 if (user_specified) 3675 return (port); 3676 3677 /* 3678 * This is the only place where tcp_next_port_to_try 3679 * is updated. After the update, it may or may not 3680 * be in the valid range. 3681 */ 3682 if (!tcp->tcp_anon_priv_bind) 3683 tcp_next_port_to_try = port + 1; 3684 return (port); 3685 } 3686 3687 if (tcp->tcp_anon_priv_bind) { 3688 port = tcp_get_next_priv_port(tcp); 3689 } else { 3690 if (count == 0 && user_specified) { 3691 /* 3692 * We may have to return an anonymous port. So 3693 * get one to start with. 3694 */ 3695 port = 3696 tcp_update_next_port(tcp_next_port_to_try, 3697 tcp, B_TRUE); 3698 user_specified = B_FALSE; 3699 } else { 3700 port = tcp_update_next_port(port + 1, tcp, 3701 B_FALSE); 3702 } 3703 } 3704 if (port == 0) 3705 break; 3706 3707 /* 3708 * Don't let this loop run forever in the case where 3709 * all of the anonymous ports are in use. 3710 */ 3711 } while (++count < loopmax); 3712 return (0); 3713 } 3714 3715 /* 3716 * We are dying for some reason. Try to do it gracefully. (May be called 3717 * as writer.) 3718 * 3719 * Return -1 if the structure was not cleaned up (if the cleanup had to be 3720 * done by a service procedure). 3721 * TBD - Should the return value distinguish between the tcp_t being 3722 * freed and it being reinitialized? 3723 */ 3724 static int 3725 tcp_clean_death(tcp_t *tcp, int err, uint8_t tag) 3726 { 3727 mblk_t *mp; 3728 queue_t *q; 3729 3730 TCP_CLD_STAT(tag); 3731 3732 #if TCP_TAG_CLEAN_DEATH 3733 tcp->tcp_cleandeathtag = tag; 3734 #endif 3735 3736 if (tcp->tcp_linger_tid != 0 && 3737 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 3738 tcp_stop_lingering(tcp); 3739 } 3740 3741 ASSERT(tcp != NULL); 3742 ASSERT((tcp->tcp_family == AF_INET && 3743 tcp->tcp_ipversion == IPV4_VERSION) || 3744 (tcp->tcp_family == AF_INET6 && 3745 (tcp->tcp_ipversion == IPV4_VERSION || 3746 tcp->tcp_ipversion == IPV6_VERSION))); 3747 3748 if (TCP_IS_DETACHED(tcp)) { 3749 if (tcp->tcp_hard_binding) { 3750 /* 3751 * It's an eager that we are dealing with. We close the 3752 * eager but in case a conn_ind has already gone to the 3753 * listener, let tcp_accept_finish() send a discon_ind 3754 * to the listener and drop the last reference. If the 3755 * listener doesn't even know about the eager i.e. the 3756 * conn_ind hasn't gone up, blow away the eager and drop 3757 * the last reference as well. If the conn_ind has gone 3758 * up, state should be BOUND. tcp_accept_finish 3759 * will figure out that the connection has received a 3760 * RST and will send a DISCON_IND to the application.
3761 */ 3762 tcp_closei_local(tcp); 3763 if (tcp->tcp_conn.tcp_eager_conn_ind != NULL) { 3764 CONN_DEC_REF(tcp->tcp_connp); 3765 } else { 3766 tcp->tcp_state = TCPS_BOUND; 3767 } 3768 } else { 3769 tcp_close_detached(tcp); 3770 } 3771 return (0); 3772 } 3773 3774 TCP_STAT(tcp_clean_death_nondetached); 3775 3776 /* 3777 * If T_ORDREL_IND has not been sent yet (done when service routine 3778 * is run) postpone cleaning up the endpoint until service routine 3779 * has sent up the T_ORDREL_IND. Avoid clearing out an existing 3780 * client_errno since tcp_close uses the client_errno field. 3781 */ 3782 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 3783 if (err != 0) 3784 tcp->tcp_client_errno = err; 3785 3786 tcp->tcp_deferred_clean_death = B_TRUE; 3787 return (-1); 3788 } 3789 3790 q = tcp->tcp_rq; 3791 3792 /* Trash all inbound data */ 3793 flushq(q, FLUSHALL); 3794 3795 /* 3796 * If we are at least part way open and there is error 3797 * (err==0 implies no error) 3798 * notify our client by a T_DISCON_IND. 3799 */ 3800 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) { 3801 if (tcp->tcp_state >= TCPS_ESTABLISHED && 3802 !TCP_IS_SOCKET(tcp)) { 3803 /* 3804 * Send M_FLUSH according to TPI. Because sockets will 3805 * (and must) ignore FLUSHR we do that only for TPI 3806 * endpoints and sockets in STREAMS mode. 3807 */ 3808 (void) putnextctl1(q, M_FLUSH, FLUSHR); 3809 } 3810 if (tcp->tcp_debug) { 3811 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 3812 "tcp_clean_death: discon err %d", err); 3813 } 3814 mp = mi_tpi_discon_ind(NULL, err, 0); 3815 if (mp != NULL) { 3816 putnext(q, mp); 3817 } else { 3818 if (tcp->tcp_debug) { 3819 (void) strlog(TCP_MOD_ID, 0, 1, 3820 SL_ERROR|SL_TRACE, 3821 "tcp_clean_death, sending M_ERROR"); 3822 } 3823 (void) putnextctl1(q, M_ERROR, EPROTO); 3824 } 3825 if (tcp->tcp_state <= TCPS_SYN_RCVD) { 3826 /* SYN_SENT or SYN_RCVD */ 3827 BUMP_MIB(&tcp_mib, tcpAttemptFails); 3828 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) { 3829 /* ESTABLISHED or CLOSE_WAIT */ 3830 BUMP_MIB(&tcp_mib, tcpEstabResets); 3831 } 3832 } 3833 3834 tcp_reinit(tcp); 3835 return (-1); 3836 } 3837 3838 /* 3839 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout 3840 * to expire, stop the wait and finish the close. 3841 */ 3842 static void 3843 tcp_stop_lingering(tcp_t *tcp) 3844 { 3845 clock_t delta = 0; 3846 3847 tcp->tcp_linger_tid = 0; 3848 if (tcp->tcp_state > TCPS_LISTEN) { 3849 tcp_acceptor_hash_remove(tcp); 3850 if (tcp->tcp_flow_stopped) { 3851 tcp_clrqfull(tcp); 3852 } 3853 3854 if (tcp->tcp_timer_tid != 0) { 3855 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 3856 tcp->tcp_timer_tid = 0; 3857 } 3858 /* 3859 * Need to cancel those timers which will not be used when 3860 * TCP is detached. This has to be done before the tcp_wq 3861 * is set to the global queue. 3862 */ 3863 tcp_timers_stop(tcp); 3864 3865 3866 tcp->tcp_detached = B_TRUE; 3867 tcp->tcp_rq = tcp_g_q; 3868 tcp->tcp_wq = WR(tcp_g_q); 3869 3870 if (tcp->tcp_state == TCPS_TIME_WAIT) { 3871 tcp_time_wait_append(tcp); 3872 TCP_DBGSTAT(tcp_detach_time_wait); 3873 goto finish; 3874 } 3875 3876 /* 3877 * If delta is zero the timer event wasn't executed and was 3878 * successfully canceled. In this case we need to restart it 3879 * with the minimal delta possible. 3880 */ 3881 if (delta >= 0) { 3882 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 3883 delta ? 
delta : 1); 3884 } 3885 } else { 3886 tcp_closei_local(tcp); 3887 CONN_DEC_REF(tcp->tcp_connp); 3888 } 3889 finish: 3890 /* Signal closing thread that it can complete close */ 3891 mutex_enter(&tcp->tcp_closelock); 3892 tcp->tcp_detached = B_TRUE; 3893 tcp->tcp_rq = tcp_g_q; 3894 tcp->tcp_wq = WR(tcp_g_q); 3895 tcp->tcp_closed = 1; 3896 cv_signal(&tcp->tcp_closecv); 3897 mutex_exit(&tcp->tcp_closelock); 3898 } 3899 3900 /* 3901 * Handle lingering timeouts. This function is called when the SO_LINGER timeout 3902 * expires. 3903 */ 3904 static void 3905 tcp_close_linger_timeout(void *arg) 3906 { 3907 conn_t *connp = (conn_t *)arg; 3908 tcp_t *tcp = connp->conn_tcp; 3909 3910 tcp->tcp_client_errno = ETIMEDOUT; 3911 tcp_stop_lingering(tcp); 3912 } 3913 3914 static int 3915 tcp_close(queue_t *q, int flags) 3916 { 3917 conn_t *connp = Q_TO_CONN(q); 3918 tcp_t *tcp = connp->conn_tcp; 3919 mblk_t *mp = &tcp->tcp_closemp; 3920 boolean_t conn_ioctl_cleanup_reqd = B_FALSE; 3921 3922 ASSERT(WR(q)->q_next == NULL); 3923 ASSERT(connp->conn_ref >= 2); 3924 ASSERT((connp->conn_flags & IPCL_TCPMOD) == 0); 3925 3926 /* 3927 * We are being closed as /dev/tcp or /dev/tcp6. 3928 * 3929 * Mark the conn as closing. ill_pending_mp_add will not 3930 * add any mp to the pending mp list, after this conn has 3931 * started closing. Same for sq_pending_mp_add 3932 */ 3933 mutex_enter(&connp->conn_lock); 3934 connp->conn_state_flags |= CONN_CLOSING; 3935 if (connp->conn_oper_pending_ill != NULL) 3936 conn_ioctl_cleanup_reqd = B_TRUE; 3937 CONN_INC_REF_LOCKED(connp); 3938 mutex_exit(&connp->conn_lock); 3939 tcp->tcp_closeflags = (uint8_t)flags; 3940 ASSERT(connp->conn_ref >= 3); 3941 3942 (*tcp_squeue_close_proc)(connp->conn_sqp, mp, 3943 tcp_close_output, connp, SQTAG_IP_TCP_CLOSE); 3944 3945 mutex_enter(&tcp->tcp_closelock); 3946 3947 while (!tcp->tcp_closed) 3948 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock); 3949 mutex_exit(&tcp->tcp_closelock); 3950 /* 3951 * In the case of listener streams that have eagers in the q or q0 3952 * we wait for the eagers to drop their reference to us. tcp_rq and 3953 * tcp_wq of the eagers point to our queues. By waiting for the 3954 * refcnt to drop to 1, we are sure that the eagers have cleaned 3955 * up their queue pointers and also dropped their references to us. 3956 */ 3957 if (tcp->tcp_wait_for_eagers) { 3958 mutex_enter(&connp->conn_lock); 3959 while (connp->conn_ref != 1) { 3960 cv_wait(&connp->conn_cv, &connp->conn_lock); 3961 } 3962 mutex_exit(&connp->conn_lock); 3963 } 3964 /* 3965 * ioctl cleanup. The mp is queued in the 3966 * ill_pending_mp or in the sq_pending_mp. 3967 */ 3968 if (conn_ioctl_cleanup_reqd) 3969 conn_ioctl_cleanup(connp); 3970 3971 qprocsoff(q); 3972 inet_minor_free(ip_minor_arena, connp->conn_dev); 3973 3974 tcp->tcp_cpid = -1; 3975 3976 /* 3977 * Drop IP's reference on the conn. This is the last reference 3978 * on the connp if the state was less than established. If the 3979 * connection has gone into timewait state, then we will have 3980 * one ref for the TCP and one more ref (total of two) for the 3981 * classifier connected hash list (a timewait connections stays 3982 * in connected hash till closed). 3983 * 3984 * We can't assert the references because there might be other 3985 * transient reference places because of some walkers or queued 3986 * packets in squeue for the timewait state. 
3987 */ 3988 CONN_DEC_REF(connp); 3989 q->q_ptr = WR(q)->q_ptr = NULL; 3990 return (0); 3991 } 3992 3993 static int 3994 tcpclose_accept(queue_t *q) 3995 { 3996 ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit); 3997 3998 /* 3999 * We had opened an acceptor STREAM for sockfs which is 4000 * now being closed due to some error. 4001 */ 4002 qprocsoff(q); 4003 inet_minor_free(ip_minor_arena, (dev_t)q->q_ptr); 4004 q->q_ptr = WR(q)->q_ptr = NULL; 4005 return (0); 4006 } 4007 4008 4009 /* 4010 * Called by streams close routine via squeues when our client blows off her 4011 * descriptor, we take this to mean: "close the stream state NOW, close the tcp 4012 * connection politely" When SO_LINGER is set (with a non-zero linger time and 4013 * it is not a nonblocking socket) then this routine sleeps until the FIN is 4014 * acked. 4015 * 4016 * NOTE: tcp_close potentially returns error when lingering. 4017 * However, the stream head currently does not pass these errors 4018 * to the application. 4.4BSD only returns EINTR and EWOULDBLOCK 4019 * errors to the application (from tsleep()) and not errors 4020 * like ECONNRESET caused by receiving a reset packet. 4021 */ 4022 4023 /* ARGSUSED */ 4024 static void 4025 tcp_close_output(void *arg, mblk_t *mp, void *arg2) 4026 { 4027 char *msg; 4028 conn_t *connp = (conn_t *)arg; 4029 tcp_t *tcp = connp->conn_tcp; 4030 clock_t delta = 0; 4031 4032 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 4033 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 4034 4035 /* Cancel any pending timeout */ 4036 if (tcp->tcp_ordrelid != 0) { 4037 if (tcp->tcp_timeout) { 4038 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ordrelid); 4039 } 4040 tcp->tcp_ordrelid = 0; 4041 tcp->tcp_timeout = B_FALSE; 4042 } 4043 4044 mutex_enter(&tcp->tcp_eager_lock); 4045 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 4046 /* Cleanup for listener */ 4047 tcp_eager_cleanup(tcp, 0); 4048 tcp->tcp_wait_for_eagers = 1; 4049 } 4050 mutex_exit(&tcp->tcp_eager_lock); 4051 4052 connp->conn_mdt_ok = B_FALSE; 4053 tcp->tcp_mdt = B_FALSE; 4054 4055 msg = NULL; 4056 switch (tcp->tcp_state) { 4057 case TCPS_CLOSED: 4058 case TCPS_IDLE: 4059 case TCPS_BOUND: 4060 case TCPS_LISTEN: 4061 break; 4062 case TCPS_SYN_SENT: 4063 msg = "tcp_close, during connect"; 4064 break; 4065 case TCPS_SYN_RCVD: 4066 /* 4067 * Close during the connect 3-way handshake 4068 * but here there may or may not be pending data 4069 * already on queue. Process almost same as in 4070 * the ESTABLISHED state. 4071 */ 4072 /* FALLTHRU */ 4073 default: 4074 if (tcp->tcp_fused) 4075 tcp_unfuse(tcp); 4076 4077 /* 4078 * If SO_LINGER has set a zero linger time, abort the 4079 * connection with a reset. 4080 */ 4081 if (tcp->tcp_linger && tcp->tcp_lingertime == 0) { 4082 msg = "tcp_close, zero lingertime"; 4083 break; 4084 } 4085 4086 ASSERT(tcp->tcp_hard_bound || tcp->tcp_hard_binding); 4087 /* 4088 * Abort connection if there is unread data queued. 4089 */ 4090 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) { 4091 msg = "tcp_close, unread data"; 4092 break; 4093 } 4094 /* 4095 * tcp_hard_bound is now cleared thus all packets go through 4096 * tcp_lookup. This fact is used by tcp_detach below. 4097 * 4098 * We have done a qwait() above which could have possibly 4099 * drained more messages in turn causing transition to a 4100 * different state. Check whether we have to do the rest 4101 * of the processing or not. 
4102 */ 4103 if (tcp->tcp_state <= TCPS_LISTEN) 4104 break; 4105 4106 /* 4107 * Transmit the FIN before detaching the tcp_t. 4108 * After tcp_detach returns this queue/perimeter 4109 * no longer owns the tcp_t thus others can modify it. 4110 */ 4111 (void) tcp_xmit_end(tcp); 4112 4113 /* 4114 * If lingering on close then wait until the fin is acked, 4115 * the SO_LINGER time passes, or a reset is sent/received. 4116 */ 4117 if (tcp->tcp_linger && tcp->tcp_lingertime > 0 && 4118 !(tcp->tcp_fin_acked) && 4119 tcp->tcp_state >= TCPS_ESTABLISHED) { 4120 if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) { 4121 tcp->tcp_client_errno = EWOULDBLOCK; 4122 } else if (tcp->tcp_client_errno == 0) { 4123 4124 ASSERT(tcp->tcp_linger_tid == 0); 4125 4126 tcp->tcp_linger_tid = TCP_TIMER(tcp, 4127 tcp_close_linger_timeout, 4128 tcp->tcp_lingertime * hz); 4129 4130 /* tcp_close_linger_timeout will finish close */ 4131 if (tcp->tcp_linger_tid == 0) 4132 tcp->tcp_client_errno = ENOSR; 4133 else 4134 return; 4135 } 4136 4137 /* 4138 * Check if we need to detach or just close 4139 * the instance. 4140 */ 4141 if (tcp->tcp_state <= TCPS_LISTEN) 4142 break; 4143 } 4144 4145 /* 4146 * Make sure that no other thread will access the tcp_rq of 4147 * this instance (through lookups etc.) as tcp_rq will go 4148 * away shortly. 4149 */ 4150 tcp_acceptor_hash_remove(tcp); 4151 4152 if (tcp->tcp_flow_stopped) { 4153 tcp_clrqfull(tcp); 4154 } 4155 4156 if (tcp->tcp_timer_tid != 0) { 4157 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4158 tcp->tcp_timer_tid = 0; 4159 } 4160 /* 4161 * Need to cancel those timers which will not be used when 4162 * TCP is detached. This has to be done before the tcp_wq 4163 * is set to the global queue. 4164 */ 4165 tcp_timers_stop(tcp); 4166 4167 tcp->tcp_detached = B_TRUE; 4168 if (tcp->tcp_state == TCPS_TIME_WAIT) { 4169 tcp_time_wait_append(tcp); 4170 TCP_DBGSTAT(tcp_detach_time_wait); 4171 ASSERT(connp->conn_ref >= 3); 4172 goto finish; 4173 } 4174 4175 /* 4176 * If delta is zero the timer event wasn't executed and was 4177 * successfully canceled. In this case we need to restart it 4178 * with the minimal delta possible. 4179 */ 4180 if (delta >= 0) 4181 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 4182 delta ? delta : 1); 4183 4184 ASSERT(connp->conn_ref >= 3); 4185 goto finish; 4186 } 4187 4188 /* Detach did not complete. Still need to remove q from stream. */ 4189 if (msg) { 4190 if (tcp->tcp_state == TCPS_ESTABLISHED || 4191 tcp->tcp_state == TCPS_CLOSE_WAIT) 4192 BUMP_MIB(&tcp_mib, tcpEstabResets); 4193 if (tcp->tcp_state == TCPS_SYN_SENT || 4194 tcp->tcp_state == TCPS_SYN_RCVD) 4195 BUMP_MIB(&tcp_mib, tcpAttemptFails); 4196 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST); 4197 } 4198 4199 tcp_closei_local(tcp); 4200 CONN_DEC_REF(connp); 4201 ASSERT(connp->conn_ref >= 2); 4202 4203 finish: 4204 /* 4205 * Although packets are always processed on the correct 4206 * tcp's perimeter and access is serialized via squeue's, 4207 * IP still needs a queue when sending packets in time_wait 4208 * state so use WR(tcp_g_q) till ip_output() can be 4209 * changed to deal with just connp. For read side, we 4210 * could have set tcp_rq to NULL but there are some cases 4211 * in tcp_rput_data() from early days of this code which 4212 * do a putnext without checking if tcp is closed. Those 4213 * need to be identified before both tcp_rq and tcp_wq 4214 * can be set to NULL and tcp_q_q can disappear forever. 
4215 */ 4216 mutex_enter(&tcp->tcp_closelock); 4217 /* 4218 * Don't change the queues in the case of a listener that has 4219 * eagers in its q or q0. It could surprise the eagers. 4220 * Instead wait for the eagers outside the squeue. 4221 */ 4222 if (!tcp->tcp_wait_for_eagers) { 4223 tcp->tcp_detached = B_TRUE; 4224 tcp->tcp_rq = tcp_g_q; 4225 tcp->tcp_wq = WR(tcp_g_q); 4226 } 4227 4228 /* Signal tcp_close() to finish closing. */ 4229 tcp->tcp_closed = 1; 4230 cv_signal(&tcp->tcp_closecv); 4231 mutex_exit(&tcp->tcp_closelock); 4232 } 4233 4234 4235 /* 4236 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp. 4237 * Some stream heads get upset if they see these later on as anything but NULL. 4238 */ 4239 static void 4240 tcp_close_mpp(mblk_t **mpp) 4241 { 4242 mblk_t *mp; 4243 4244 if ((mp = *mpp) != NULL) { 4245 do { 4246 mp->b_next = NULL; 4247 mp->b_prev = NULL; 4248 } while ((mp = mp->b_cont) != NULL); 4249 4250 mp = *mpp; 4251 *mpp = NULL; 4252 freemsg(mp); 4253 } 4254 } 4255 4256 /* Do detached close. */ 4257 static void 4258 tcp_close_detached(tcp_t *tcp) 4259 { 4260 if (tcp->tcp_fused) 4261 tcp_unfuse(tcp); 4262 4263 /* 4264 * Clustering code serializes TCP disconnect callbacks and 4265 * cluster tcp list walks by blocking a TCP disconnect callback 4266 * if a cluster tcp list walk is in progress. This ensures 4267 * accurate accounting of TCPs in the cluster code even though 4268 * the TCP list walk itself is not atomic. 4269 */ 4270 tcp_closei_local(tcp); 4271 CONN_DEC_REF(tcp->tcp_connp); 4272 } 4273 4274 /* 4275 * Stop all TCP timers, and free the timer mblks if requested. 4276 */ 4277 void 4278 tcp_timers_stop(tcp_t *tcp) 4279 { 4280 if (tcp->tcp_timer_tid != 0) { 4281 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4282 tcp->tcp_timer_tid = 0; 4283 } 4284 if (tcp->tcp_ka_tid != 0) { 4285 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ka_tid); 4286 tcp->tcp_ka_tid = 0; 4287 } 4288 if (tcp->tcp_ack_tid != 0) { 4289 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4290 tcp->tcp_ack_tid = 0; 4291 } 4292 if (tcp->tcp_push_tid != 0) { 4293 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 4294 tcp->tcp_push_tid = 0; 4295 } 4296 } 4297 4298 /* 4299 * The tcp_t is going away. Remove it from all lists and set it 4300 * to TCPS_CLOSED. The freeing up of memory is deferred until 4301 * tcp_inactive. This is needed since a thread in tcp_rput might have 4302 * done a CONN_INC_REF on this structure before it was removed from the 4303 * hashes. 4304 */ 4305 static void 4306 tcp_closei_local(tcp_t *tcp) 4307 { 4308 ire_t *ire; 4309 conn_t *connp = tcp->tcp_connp; 4310 4311 if (!TCP_IS_SOCKET(tcp)) 4312 tcp_acceptor_hash_remove(tcp); 4313 4314 UPDATE_MIB(&tcp_mib, tcpInSegs, tcp->tcp_ibsegs); 4315 tcp->tcp_ibsegs = 0; 4316 UPDATE_MIB(&tcp_mib, tcpOutSegs, tcp->tcp_obsegs); 4317 tcp->tcp_obsegs = 0; 4318 4319 /* 4320 * If we are an eager connection hanging off a listener that 4321 * hasn't formally accepted the connection yet, get off his 4322 * list and blow off any data that we have accumulated. 4323 */ 4324 if (tcp->tcp_listener != NULL) { 4325 tcp_t *listener = tcp->tcp_listener; 4326 mutex_enter(&listener->tcp_eager_lock); 4327 /* 4328 * tcp_eager_conn_ind == NULL means that the 4329 * conn_ind has already gone to listener. At 4330 * this point, eager will be closed but we 4331 * leave it in listeners eager list so that 4332 * if listener decides to close without doing 4333 * accept, we can clean this up. 
In tcp_wput_accept 4334 * we take care of the case of accept on closed 4335 * eager. 4336 */ 4337 if (tcp->tcp_conn.tcp_eager_conn_ind != NULL) { 4338 tcp_eager_unlink(tcp); 4339 mutex_exit(&listener->tcp_eager_lock); 4340 /* 4341 * We don't want to have any pointers to the 4342 * listener queue, after we have released our 4343 * reference on the listener. 4344 */ 4345 tcp->tcp_rq = tcp_g_q; 4346 tcp->tcp_wq = WR(tcp_g_q); 4347 CONN_DEC_REF(listener->tcp_connp); 4348 } else { 4349 mutex_exit(&listener->tcp_eager_lock); 4350 } 4351 } 4352 4353 /* Stop all the timers */ 4354 tcp_timers_stop(tcp); 4355 4356 if (tcp->tcp_state == TCPS_LISTEN) { 4357 if (tcp->tcp_ip_addr_cache) { 4358 kmem_free((void *)tcp->tcp_ip_addr_cache, 4359 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 4360 tcp->tcp_ip_addr_cache = NULL; 4361 } 4362 } 4363 if (tcp->tcp_flow_stopped) 4364 tcp_clrqfull(tcp); 4365 4366 tcp_bind_hash_remove(tcp); 4367 /* 4368 * If the tcp_time_wait_collector (which runs outside the squeue) 4369 * is trying to remove this tcp from the time wait list, we will 4370 * block in tcp_time_wait_remove while trying to acquire the 4371 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also 4372 * requires the ipcl_hash_remove to be ordered after the 4373 * tcp_time_wait_remove for the refcnt checks to work correctly. 4374 */ 4375 if (tcp->tcp_state == TCPS_TIME_WAIT) 4376 tcp_time_wait_remove(tcp, NULL); 4377 CL_INET_DISCONNECT(tcp); 4378 ipcl_hash_remove(connp); 4379 4380 /* 4381 * Delete the cached ire in conn_ire_cache and also mark 4382 * the conn as CONDEMNED 4383 */ 4384 mutex_enter(&connp->conn_lock); 4385 connp->conn_state_flags |= CONN_CONDEMNED; 4386 ire = connp->conn_ire_cache; 4387 connp->conn_ire_cache = NULL; 4388 mutex_exit(&connp->conn_lock); 4389 if (ire != NULL) 4390 IRE_REFRELE_NOTR(ire); 4391 4392 /* Need to cleanup any pending ioctls */ 4393 ASSERT(tcp->tcp_time_wait_next == NULL); 4394 ASSERT(tcp->tcp_time_wait_prev == NULL); 4395 ASSERT(tcp->tcp_time_wait_expire == 0); 4396 tcp->tcp_state = TCPS_CLOSED; 4397 4398 /* Release any SSL context */ 4399 if (tcp->tcp_kssl_ent != NULL) { 4400 kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY); 4401 tcp->tcp_kssl_ent = NULL; 4402 } 4403 if (tcp->tcp_kssl_ctx != NULL) { 4404 kssl_release_ctx(tcp->tcp_kssl_ctx); 4405 tcp->tcp_kssl_ctx = NULL; 4406 } 4407 tcp->tcp_kssl_pending = B_FALSE; 4408 } 4409 4410 /* 4411 * tcp is dying (called from ipcl_conn_destroy and error cases). 4412 * Free the tcp_t in either case.
4413 */ 4414 void 4415 tcp_free(tcp_t *tcp) 4416 { 4417 mblk_t *mp; 4418 ip6_pkt_t *ipp; 4419 4420 ASSERT(tcp != NULL); 4421 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL); 4422 4423 tcp->tcp_rq = NULL; 4424 tcp->tcp_wq = NULL; 4425 4426 tcp_close_mpp(&tcp->tcp_xmit_head); 4427 tcp_close_mpp(&tcp->tcp_reass_head); 4428 if (tcp->tcp_rcv_list != NULL) { 4429 /* Free b_next chain */ 4430 tcp_close_mpp(&tcp->tcp_rcv_list); 4431 } 4432 if ((mp = tcp->tcp_urp_mp) != NULL) { 4433 freemsg(mp); 4434 } 4435 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 4436 freemsg(mp); 4437 } 4438 4439 if (tcp->tcp_fused_sigurg_mp != NULL) { 4440 freeb(tcp->tcp_fused_sigurg_mp); 4441 tcp->tcp_fused_sigurg_mp = NULL; 4442 } 4443 4444 if (tcp->tcp_sack_info != NULL) { 4445 if (tcp->tcp_notsack_list != NULL) { 4446 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 4447 } 4448 bzero(tcp->tcp_sack_info, sizeof (tcp_sack_info_t)); 4449 } 4450 4451 if (tcp->tcp_hopopts != NULL) { 4452 mi_free(tcp->tcp_hopopts); 4453 tcp->tcp_hopopts = NULL; 4454 tcp->tcp_hopoptslen = 0; 4455 } 4456 ASSERT(tcp->tcp_hopoptslen == 0); 4457 if (tcp->tcp_dstopts != NULL) { 4458 mi_free(tcp->tcp_dstopts); 4459 tcp->tcp_dstopts = NULL; 4460 tcp->tcp_dstoptslen = 0; 4461 } 4462 ASSERT(tcp->tcp_dstoptslen == 0); 4463 if (tcp->tcp_rtdstopts != NULL) { 4464 mi_free(tcp->tcp_rtdstopts); 4465 tcp->tcp_rtdstopts = NULL; 4466 tcp->tcp_rtdstoptslen = 0; 4467 } 4468 ASSERT(tcp->tcp_rtdstoptslen == 0); 4469 if (tcp->tcp_rthdr != NULL) { 4470 mi_free(tcp->tcp_rthdr); 4471 tcp->tcp_rthdr = NULL; 4472 tcp->tcp_rthdrlen = 0; 4473 } 4474 ASSERT(tcp->tcp_rthdrlen == 0); 4475 4476 ipp = &tcp->tcp_sticky_ipp; 4477 if (ipp->ipp_fields & (IPPF_HOPOPTS | IPPF_RTDSTOPTS | IPPF_DSTOPTS | 4478 IPPF_RTHDR)) 4479 ip6_pkt_free(ipp); 4480 4481 /* 4482 * Free memory associated with the tcp/ip header template. 4483 */ 4484 4485 if (tcp->tcp_iphc != NULL) 4486 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4487 4488 /* 4489 * Following is really a blowing away a union. 4490 * It happens to have exactly two members of identical size 4491 * the following code is enough. 4492 */ 4493 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 4494 4495 if (tcp->tcp_tracebuf != NULL) { 4496 kmem_free(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 4497 tcp->tcp_tracebuf = NULL; 4498 } 4499 } 4500 4501 4502 /* 4503 * Put a connection confirmation message upstream built from the 4504 * address information within 'iph' and 'tcph'. Report our success or failure. 4505 */ 4506 static boolean_t 4507 tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph, mblk_t *idmp, 4508 mblk_t **defermp) 4509 { 4510 sin_t sin; 4511 sin6_t sin6; 4512 mblk_t *mp; 4513 char *optp = NULL; 4514 int optlen = 0; 4515 cred_t *cr; 4516 4517 if (defermp != NULL) 4518 *defermp = NULL; 4519 4520 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) { 4521 /* 4522 * Return in T_CONN_CON results of option negotiation through 4523 * the T_CONN_REQ. Note: If there is an real end-to-end option 4524 * negotiation, then what is received from remote end needs 4525 * to be taken into account but there is no such thing (yet?) 4526 * in our TCP/IP. 4527 * Note: We do not use mi_offset_param() here as 4528 * tcp_opts_conn_req contents do not directly come from 4529 * an application and are either generated in kernel or 4530 * from user input that was already verified. 
4531 */ 4532 mp = tcp->tcp_conn.tcp_opts_conn_req; 4533 optp = (char *)(mp->b_rptr + 4534 ((struct T_conn_req *)mp->b_rptr)->OPT_offset); 4535 optlen = (int) 4536 ((struct T_conn_req *)mp->b_rptr)->OPT_length; 4537 } 4538 4539 if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) { 4540 ipha_t *ipha = (ipha_t *)iphdr; 4541 4542 /* packet is IPv4 */ 4543 if (tcp->tcp_family == AF_INET) { 4544 sin = sin_null; 4545 sin.sin_addr.s_addr = ipha->ipha_src; 4546 sin.sin_port = *(uint16_t *)tcph->th_lport; 4547 sin.sin_family = AF_INET; 4548 mp = mi_tpi_conn_con(NULL, (char *)&sin, 4549 (int)sizeof (sin_t), optp, optlen); 4550 } else { 4551 sin6 = sin6_null; 4552 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4553 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4554 sin6.sin6_family = AF_INET6; 4555 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4556 (int)sizeof (sin6_t), optp, optlen); 4557 4558 } 4559 } else { 4560 ip6_t *ip6h = (ip6_t *)iphdr; 4561 4562 ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION); 4563 ASSERT(tcp->tcp_family == AF_INET6); 4564 sin6 = sin6_null; 4565 sin6.sin6_addr = ip6h->ip6_src; 4566 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4567 sin6.sin6_family = AF_INET6; 4568 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4569 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4570 (int)sizeof (sin6_t), optp, optlen); 4571 } 4572 4573 if (!mp) 4574 return (B_FALSE); 4575 4576 if ((cr = DB_CRED(idmp)) != NULL) { 4577 mblk_setcred(mp, cr); 4578 DB_CPID(mp) = DB_CPID(idmp); 4579 } 4580 4581 if (defermp == NULL) 4582 putnext(tcp->tcp_rq, mp); 4583 else 4584 *defermp = mp; 4585 4586 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 4587 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 4588 return (B_TRUE); 4589 } 4590 4591 /* 4592 * Defense for the SYN attack - 4593 * 1. When q0 is full, drop from the tail (tcp_eager_prev_q0) the oldest 4594 * one that doesn't have the dontdrop bit set. 4595 * 2. Don't drop a SYN request before its first timeout. This gives every 4596 * request at least til the first timeout to complete its 3-way handshake. 4597 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many 4598 * requests currently on the queue that has timed out. This will be used 4599 * as an indicator of whether an attack is under way, so that appropriate 4600 * actions can be taken. (It's incremented in tcp_timer() and decremented 4601 * either when eager goes into ESTABLISHED, or gets freed up.) 4602 * 4. 
The current threshold is - # of timeout > q0len/4 => SYN alert on 4603 * # of timeout drops back to <= q0len/32 => SYN alert off 4604 */ 4605 static boolean_t 4606 tcp_drop_q0(tcp_t *tcp) 4607 { 4608 tcp_t *eager; 4609 4610 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock)); 4611 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 4612 /* 4613 * New one is added after next_q0 so prev_q0 points to the oldest 4614 * Also do not drop any established connections that are deferred on 4615 * q0 due to q being full 4616 */ 4617 4618 eager = tcp->tcp_eager_prev_q0; 4619 while (eager->tcp_dontdrop || eager->tcp_conn_def_q0) { 4620 eager = eager->tcp_eager_prev_q0; 4621 if (eager == tcp) { 4622 eager = tcp->tcp_eager_prev_q0; 4623 break; 4624 } 4625 } 4626 if (eager->tcp_syn_rcvd_timeout == 0) 4627 return (B_FALSE); 4628 4629 if (tcp->tcp_debug) { 4630 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 4631 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 4632 " (%d pending) on %s, drop one", tcp_conn_req_max_q0, 4633 tcp->tcp_conn_req_cnt_q0, 4634 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4635 } 4636 4637 BUMP_MIB(&tcp_mib, tcpHalfOpenDrop); 4638 4639 /* 4640 * need to do refhold here because the selected eager could 4641 * be removed by someone else if we release the eager lock. 4642 */ 4643 CONN_INC_REF(eager->tcp_connp); 4644 mutex_exit(&tcp->tcp_eager_lock); 4645 4646 /* Mark the IRE created for this SYN request temporary */ 4647 tcp_ip_ire_mark_advice(eager); 4648 (void) tcp_clean_death(eager, ETIMEDOUT, 5); 4649 CONN_DEC_REF(eager->tcp_connp); 4650 4651 mutex_enter(&tcp->tcp_eager_lock); 4652 return (B_TRUE); 4653 } 4654 4655 int 4656 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 4657 tcph_t *tcph, uint_t ipvers, mblk_t *idmp) 4658 { 4659 tcp_t *ltcp = lconnp->conn_tcp; 4660 tcp_t *tcp = connp->conn_tcp; 4661 mblk_t *tpi_mp; 4662 ipha_t *ipha; 4663 ip6_t *ip6h; 4664 sin6_t sin6; 4665 in6_addr_t v6dst; 4666 int err; 4667 int ifindex = 0; 4668 cred_t *cr; 4669 4670 if (ipvers == IPV4_VERSION) { 4671 ipha = (ipha_t *)mp->b_rptr; 4672 4673 connp->conn_send = ip_output; 4674 connp->conn_recv = tcp_input; 4675 4676 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 4677 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 4678 4679 sin6 = sin6_null; 4680 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4681 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &v6dst); 4682 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4683 sin6.sin6_family = AF_INET6; 4684 sin6.__sin6_src_id = ip_srcid_find_addr(&v6dst, 4685 lconnp->conn_zoneid); 4686 if (tcp->tcp_recvdstaddr) { 4687 sin6_t sin6d; 4688 4689 sin6d = sin6_null; 4690 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, 4691 &sin6d.sin6_addr); 4692 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4693 sin6d.sin6_family = AF_INET; 4694 tpi_mp = mi_tpi_extconn_ind(NULL, 4695 (char *)&sin6d, sizeof (sin6_t), 4696 (char *)&tcp, 4697 (t_scalar_t)sizeof (intptr_t), 4698 (char *)&sin6d, sizeof (sin6_t), 4699 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4700 } else { 4701 tpi_mp = mi_tpi_conn_ind(NULL, 4702 (char *)&sin6, sizeof (sin6_t), 4703 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4704 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4705 } 4706 } else { 4707 ip6h = (ip6_t *)mp->b_rptr; 4708 4709 connp->conn_send = ip_output_v6; 4710 connp->conn_recv = tcp_input; 4711 4712 connp->conn_srcv6 = ip6h->ip6_dst; 4713 connp->conn_remv6 = ip6h->ip6_src; 4714 4715 /* db_cksumstuff is set at ip_fanout_tcp_v6 */ 4716 ifindex = (int)DB_CKSUMSTUFF(mp); 4717 DB_CKSUMSTUFF(mp) 
= 0; 4718 4719 sin6 = sin6_null; 4720 sin6.sin6_addr = ip6h->ip6_src; 4721 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4722 sin6.sin6_family = AF_INET6; 4723 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4724 sin6.__sin6_src_id = ip_srcid_find_addr(&ip6h->ip6_dst, 4725 lconnp->conn_zoneid); 4726 4727 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4728 /* Pass up the scope_id of remote addr */ 4729 sin6.sin6_scope_id = ifindex; 4730 } else { 4731 sin6.sin6_scope_id = 0; 4732 } 4733 if (tcp->tcp_recvdstaddr) { 4734 sin6_t sin6d; 4735 4736 sin6d = sin6_null; 4737 sin6.sin6_addr = ip6h->ip6_dst; 4738 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4739 sin6d.sin6_family = AF_INET; 4740 tpi_mp = mi_tpi_extconn_ind(NULL, 4741 (char *)&sin6d, sizeof (sin6_t), 4742 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4743 (char *)&sin6d, sizeof (sin6_t), 4744 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4745 } else { 4746 tpi_mp = mi_tpi_conn_ind(NULL, 4747 (char *)&sin6, sizeof (sin6_t), 4748 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4749 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4750 } 4751 } 4752 4753 if (tpi_mp == NULL) 4754 return (ENOMEM); 4755 4756 connp->conn_fport = *(uint16_t *)tcph->th_lport; 4757 connp->conn_lport = *(uint16_t *)tcph->th_fport; 4758 connp->conn_flags |= (IPCL_TCP6|IPCL_EAGER); 4759 connp->conn_fully_bound = B_FALSE; 4760 4761 if (tcp_trace) 4762 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 4763 4764 /* Inherit information from the "parent" */ 4765 tcp->tcp_ipversion = ltcp->tcp_ipversion; 4766 tcp->tcp_family = ltcp->tcp_family; 4767 tcp->tcp_wq = ltcp->tcp_wq; 4768 tcp->tcp_rq = ltcp->tcp_rq; 4769 tcp->tcp_mss = tcp_mss_def_ipv6; 4770 tcp->tcp_detached = B_TRUE; 4771 if ((err = tcp_init_values(tcp)) != 0) { 4772 freemsg(tpi_mp); 4773 return (err); 4774 } 4775 4776 if (ipvers == IPV4_VERSION) { 4777 if ((err = tcp_header_init_ipv4(tcp)) != 0) { 4778 freemsg(tpi_mp); 4779 return (err); 4780 } 4781 ASSERT(tcp->tcp_ipha != NULL); 4782 } else { 4783 /* ifindex must be already set */ 4784 ASSERT(ifindex != 0); 4785 4786 if (ltcp->tcp_bound_if != 0) { 4787 /* 4788 * Set newtcp's bound_if equal to 4789 * listener's value. If ifindex is 4790 * not the same as ltcp->tcp_bound_if, 4791 * it must be a packet for the ipmp group 4792 * of interfaces 4793 */ 4794 tcp->tcp_bound_if = ltcp->tcp_bound_if; 4795 } else if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4796 tcp->tcp_bound_if = ifindex; 4797 } 4798 4799 tcp->tcp_ipv6_recvancillary = ltcp->tcp_ipv6_recvancillary; 4800 tcp->tcp_recvifindex = 0; 4801 tcp->tcp_recvhops = 0xffffffffU; 4802 ASSERT(tcp->tcp_ip6h != NULL); 4803 } 4804 4805 tcp->tcp_lport = ltcp->tcp_lport; 4806 4807 if (ltcp->tcp_ipversion == tcp->tcp_ipversion) { 4808 if (tcp->tcp_iphc_len != ltcp->tcp_iphc_len) { 4809 /* 4810 * Listener had options of some sort; eager inherits. 4811 * Free up the eager template and allocate one 4812 * of the right size. 
4813 */ 4814 if (tcp->tcp_hdr_grown) { 4815 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 4816 } else { 4817 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4818 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 4819 } 4820 tcp->tcp_iphc = kmem_zalloc(ltcp->tcp_iphc_len, 4821 KM_NOSLEEP); 4822 if (tcp->tcp_iphc == NULL) { 4823 tcp->tcp_iphc_len = 0; 4824 freemsg(tpi_mp); 4825 return (ENOMEM); 4826 } 4827 tcp->tcp_iphc_len = ltcp->tcp_iphc_len; 4828 tcp->tcp_hdr_grown = B_TRUE; 4829 } 4830 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 4831 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 4832 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 4833 tcp->tcp_ip6_hops = ltcp->tcp_ip6_hops; 4834 tcp->tcp_ip6_vcf = ltcp->tcp_ip6_vcf; 4835 4836 /* 4837 * Copy the IP+TCP header template from listener to eager 4838 */ 4839 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 4840 if (tcp->tcp_ipversion == IPV6_VERSION) { 4841 if (((ip6i_t *)(tcp->tcp_iphc))->ip6i_nxt == 4842 IPPROTO_RAW) { 4843 tcp->tcp_ip6h = 4844 (ip6_t *)(tcp->tcp_iphc + 4845 sizeof (ip6i_t)); 4846 } else { 4847 tcp->tcp_ip6h = 4848 (ip6_t *)(tcp->tcp_iphc); 4849 } 4850 tcp->tcp_ipha = NULL; 4851 } else { 4852 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 4853 tcp->tcp_ip6h = NULL; 4854 } 4855 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 4856 tcp->tcp_ip_hdr_len); 4857 } else { 4858 /* 4859 * only valid case when ipversion of listener and 4860 * eager differ is when listener is IPv6 and 4861 * eager is IPv4. 4862 * Eager header template has been initialized to the 4863 * maximum v4 header sizes, which includes space for 4864 * TCP and IP options. 4865 */ 4866 ASSERT((ltcp->tcp_ipversion == IPV6_VERSION) && 4867 (tcp->tcp_ipversion == IPV4_VERSION)); 4868 ASSERT(tcp->tcp_iphc_len >= 4869 TCP_MAX_COMBINED_HEADER_LENGTH); 4870 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 4871 /* copy IP header fields individually */ 4872 tcp->tcp_ipha->ipha_ttl = 4873 ltcp->tcp_ip6h->ip6_hops; 4874 bcopy(ltcp->tcp_tcph->th_lport, 4875 tcp->tcp_tcph->th_lport, sizeof (ushort_t)); 4876 } 4877 4878 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 4879 bcopy(tcp->tcp_tcph->th_fport, &tcp->tcp_fport, 4880 sizeof (in_port_t)); 4881 4882 if (ltcp->tcp_lport == 0) { 4883 tcp->tcp_lport = *(in_port_t *)tcph->th_fport; 4884 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, 4885 sizeof (in_port_t)); 4886 } 4887 4888 if (tcp->tcp_ipversion == IPV4_VERSION) { 4889 ASSERT(ipha != NULL); 4890 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 4891 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 4892 4893 /* Source routing option copyover (reverse it) */ 4894 if (tcp_rev_src_routes) 4895 tcp_opt_reverse(tcp, ipha); 4896 } else { 4897 ASSERT(ip6h != NULL); 4898 tcp->tcp_ip6h->ip6_dst = ip6h->ip6_src; 4899 tcp->tcp_ip6h->ip6_src = ip6h->ip6_dst; 4900 } 4901 4902 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 4903 /* 4904 * If the SYN contains a credential, it's a loopback packet; attach 4905 * the credential to the TPI message. 
4906 */ 4907 if ((cr = DB_CRED(idmp)) != NULL) { 4908 mblk_setcred(tpi_mp, cr); 4909 DB_CPID(tpi_mp) = DB_CPID(idmp); 4910 } 4911 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 4912 4913 /* Inherit the listener's SSL protection state */ 4914 4915 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 4916 kssl_hold_ent(tcp->tcp_kssl_ent); 4917 tcp->tcp_kssl_pending = B_TRUE; 4918 } 4919 4920 return (0); 4921 } 4922 4923 4924 int 4925 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha, 4926 tcph_t *tcph, mblk_t *idmp) 4927 { 4928 tcp_t *ltcp = lconnp->conn_tcp; 4929 tcp_t *tcp = connp->conn_tcp; 4930 sin_t sin; 4931 mblk_t *tpi_mp = NULL; 4932 int err; 4933 cred_t *cr; 4934 4935 sin = sin_null; 4936 sin.sin_addr.s_addr = ipha->ipha_src; 4937 sin.sin_port = *(uint16_t *)tcph->th_lport; 4938 sin.sin_family = AF_INET; 4939 if (ltcp->tcp_recvdstaddr) { 4940 sin_t sind; 4941 4942 sind = sin_null; 4943 sind.sin_addr.s_addr = ipha->ipha_dst; 4944 sind.sin_port = *(uint16_t *)tcph->th_fport; 4945 sind.sin_family = AF_INET; 4946 tpi_mp = mi_tpi_extconn_ind(NULL, 4947 (char *)&sind, sizeof (sin_t), (char *)&tcp, 4948 (t_scalar_t)sizeof (intptr_t), (char *)&sind, 4949 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4950 } else { 4951 tpi_mp = mi_tpi_conn_ind(NULL, 4952 (char *)&sin, sizeof (sin_t), 4953 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4954 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4955 } 4956 4957 if (tpi_mp == NULL) { 4958 return (ENOMEM); 4959 } 4960 4961 connp->conn_flags |= (IPCL_TCP4|IPCL_EAGER); 4962 connp->conn_send = ip_output; 4963 connp->conn_recv = tcp_input; 4964 connp->conn_fully_bound = B_FALSE; 4965 4966 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 4967 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 4968 connp->conn_fport = *(uint16_t *)tcph->th_lport; 4969 connp->conn_lport = *(uint16_t *)tcph->th_fport; 4970 4971 if (tcp_trace) { 4972 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 4973 } 4974 4975 /* Inherit information from the "parent" */ 4976 tcp->tcp_ipversion = ltcp->tcp_ipversion; 4977 tcp->tcp_family = ltcp->tcp_family; 4978 tcp->tcp_wq = ltcp->tcp_wq; 4979 tcp->tcp_rq = ltcp->tcp_rq; 4980 tcp->tcp_mss = tcp_mss_def_ipv4; 4981 tcp->tcp_detached = B_TRUE; 4982 if ((err = tcp_init_values(tcp)) != 0) { 4983 freemsg(tpi_mp); 4984 return (err); 4985 } 4986 4987 /* 4988 * Let's make sure that eager tcp template has enough space to 4989 * copy IPv4 listener's tcp template. Since the conn_t structure is 4990 * preserved and tcp_iphc_len is also preserved, an eager conn_t may 4991 * have a tcp_template of total len TCP_MAX_COMBINED_HEADER_LENGTH or 4992 * more (in case of re-allocation of conn_t with tcp-IPv6 template with 4993 * extension headers or with ip6i_t struct). Note that bcopy() below 4994 * copies listener tcp's hdr_len which cannot be greater than TCP_MAX_ 4995 * COMBINED_HEADER_LENGTH as this listener must be a IPv4 listener. 
4996 */ 4997 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 4998 ASSERT(ltcp->tcp_hdr_len <= TCP_MAX_COMBINED_HEADER_LENGTH); 4999 5000 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 5001 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 5002 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5003 tcp->tcp_ttl = ltcp->tcp_ttl; 5004 tcp->tcp_tos = ltcp->tcp_tos; 5005 5006 /* Copy the IP+TCP header template from listener to eager */ 5007 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 5008 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5009 tcp->tcp_ip6h = NULL; 5010 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5011 tcp->tcp_ip_hdr_len); 5012 5013 /* Initialize the IP addresses and Ports */ 5014 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5015 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5016 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5017 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, sizeof (in_port_t)); 5018 5019 /* Source routing option copyover (reverse it) */ 5020 if (tcp_rev_src_routes) 5021 tcp_opt_reverse(tcp, ipha); 5022 5023 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5024 5025 /* 5026 * If the SYN contains a credential, it's a loopback packet; attach 5027 * the credential to the TPI message. 5028 */ 5029 if ((cr = DB_CRED(idmp)) != NULL) { 5030 mblk_setcred(tpi_mp, cr); 5031 DB_CPID(tpi_mp) = DB_CPID(idmp); 5032 } 5033 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 5034 5035 /* Inherit the listener's SSL protection state */ 5036 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 5037 kssl_hold_ent(tcp->tcp_kssl_ent); 5038 tcp->tcp_kssl_pending = B_TRUE; 5039 } 5040 5041 return (0); 5042 } 5043 5044 /* 5045 * sets up conn for ipsec. 5046 * if the first mblk is M_CTL it is consumed and mpp is updated. 5047 * in case of error mpp is freed. 
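 *
 * Illustrative shapes of *mpp on entry (inferred from the checks below,
 * not a contract):
 *
 *	M_CTL(ipsec_in_t) --> M_DATA(IP + TCP SYN)	(mctl_present case)
 *	M_DATA(IP + TCP SYN)				(no M_CTL case)
 *
 * In the first case the M_CTL is freed on success and *mpp is advanced
 * to point at the M_DATA block.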
5048 */ 5049 conn_t * 5050 tcp_get_ipsec_conn(tcp_t *tcp, squeue_t *sqp, mblk_t **mpp) 5051 { 5052 conn_t *connp = tcp->tcp_connp; 5053 conn_t *econnp; 5054 squeue_t *new_sqp; 5055 mblk_t *first_mp = *mpp; 5056 mblk_t *mp = *mpp; 5057 boolean_t mctl_present = B_FALSE; 5058 uint_t ipvers; 5059 5060 econnp = tcp_get_conn(sqp); 5061 if (econnp == NULL) { 5062 freemsg(first_mp); 5063 return (NULL); 5064 } 5065 if (DB_TYPE(mp) == M_CTL) { 5066 if (mp->b_cont == NULL || 5067 mp->b_cont->b_datap->db_type != M_DATA) { 5068 freemsg(first_mp); 5069 return (NULL); 5070 } 5071 mp = mp->b_cont; 5072 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) == 0) { 5073 freemsg(first_mp); 5074 return (NULL); 5075 } 5076 5077 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 5078 first_mp->b_datap->db_struioflag &= ~STRUIO_POLICY; 5079 mctl_present = B_TRUE; 5080 } else { 5081 ASSERT(mp->b_datap->db_struioflag & STRUIO_POLICY); 5082 mp->b_datap->db_struioflag &= ~STRUIO_POLICY; 5083 } 5084 5085 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5086 DB_CKSUMSTART(mp) = 0; 5087 5088 ASSERT(OK_32PTR(mp->b_rptr)); 5089 ipvers = IPH_HDR_VERSION(mp->b_rptr); 5090 if (ipvers == IPV4_VERSION) { 5091 uint16_t *up; 5092 uint32_t ports; 5093 ipha_t *ipha; 5094 5095 ipha = (ipha_t *)mp->b_rptr; 5096 up = (uint16_t *)((uchar_t *)ipha + 5097 IPH_HDR_LENGTH(ipha) + TCP_PORTS_OFFSET); 5098 ports = *(uint32_t *)up; 5099 IPCL_TCP_EAGER_INIT(econnp, IPPROTO_TCP, 5100 ipha->ipha_dst, ipha->ipha_src, ports); 5101 } else { 5102 uint16_t *up; 5103 uint32_t ports; 5104 uint16_t ip_hdr_len; 5105 uint8_t *nexthdrp; 5106 ip6_t *ip6h; 5107 tcph_t *tcph; 5108 5109 ip6h = (ip6_t *)mp->b_rptr; 5110 if (ip6h->ip6_nxt == IPPROTO_TCP) { 5111 ip_hdr_len = IPV6_HDR_LEN; 5112 } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip_hdr_len, 5113 &nexthdrp) || *nexthdrp != IPPROTO_TCP) { 5114 CONN_DEC_REF(econnp); 5115 freemsg(first_mp); 5116 return (NULL); 5117 } 5118 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5119 up = (uint16_t *)tcph->th_lport; 5120 ports = *(uint32_t *)up; 5121 IPCL_TCP_EAGER_INIT_V6(econnp, IPPROTO_TCP, 5122 ip6h->ip6_dst, ip6h->ip6_src, ports); 5123 } 5124 5125 /* 5126 * The caller already ensured that there is a sqp present. 5127 */ 5128 econnp->conn_sqp = new_sqp; 5129 5130 if (connp->conn_policy != NULL) { 5131 ipsec_in_t *ii; 5132 ii = (ipsec_in_t *)(first_mp->b_rptr); 5133 ASSERT(ii->ipsec_in_policy == NULL); 5134 IPPH_REFHOLD(connp->conn_policy); 5135 ii->ipsec_in_policy = connp->conn_policy; 5136 5137 first_mp->b_datap->db_type = IPSEC_POLICY_SET; 5138 if (!ip_bind_ipsec_policy_set(econnp, first_mp)) { 5139 CONN_DEC_REF(econnp); 5140 freemsg(first_mp); 5141 return (NULL); 5142 } 5143 } 5144 5145 if (ipsec_conn_cache_policy(econnp, ipvers == IPV4_VERSION) != 0) { 5146 CONN_DEC_REF(econnp); 5147 freemsg(first_mp); 5148 return (NULL); 5149 } 5150 5151 /* 5152 * If we know we have some policy, pass the "IPSEC" 5153 * options size TCP uses this adjust the MSS. 5154 */ 5155 econnp->conn_tcp->tcp_ipsec_overhead = conn_ipsec_length(econnp); 5156 if (mctl_present) { 5157 freeb(first_mp); 5158 *mpp = mp; 5159 } 5160 5161 return (econnp); 5162 } 5163 5164 /* 5165 * tcp_get_conn/tcp_free_conn 5166 * 5167 * tcp_get_conn is used to get a clean tcp connection structure. 5168 * It tries to reuse the connections put on the freelist by the 5169 * time_wait_collector failing which it goes to kmem_cache. This 5170 * way has two benefits compared to just allocating from and 5171 * freeing to kmem_cache. 
5172 * 1) The time_wait_collector can free (which includes the cleanup) 5173 * outside the squeue. So when the interrupt comes, we have a clean 5174 * connection sitting in the freelist. Obviously, this buys us 5175 * performance. 5176 * 5177 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_conn_request 5178 * has multiple disadvantages - tying up the squeue during alloc, and the 5179 * fact that IPSec policy initialization has to happen here which 5180 * requires us sending a M_CTL and checking for it i.e. real ugliness. 5181 * But allocating the conn/tcp in IP land is also not the best since 5182 * we can't check the 'q' and 'q0' which are protected by squeue and 5183 * blindly allocate memory which might have to be freed here if we are 5184 * not allowed to accept the connection. By using the freelist and 5185 * putting the conn/tcp back in freelist, we don't pay a penalty for 5186 * allocating memory without checking 'q/q0' and freeing it if we can't 5187 * accept the connection. 5188 * 5189 * Care should be taken to put the conn back in the same squeue's freelist 5190 * from which it was allocated. Best results are obtained if conn is 5191 * allocated from listener's squeue and freed to the same. Time wait 5192 * collector will free up the freelist is the connection ends up sitting 5193 * there for too long. 5194 */ 5195 void * 5196 tcp_get_conn(void *arg) 5197 { 5198 tcp_t *tcp = NULL; 5199 conn_t *connp = NULL; 5200 squeue_t *sqp = (squeue_t *)arg; 5201 tcp_squeue_priv_t *tcp_time_wait; 5202 5203 tcp_time_wait = 5204 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP)); 5205 5206 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 5207 tcp = tcp_time_wait->tcp_free_list; 5208 ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0)); 5209 if (tcp != NULL) { 5210 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next; 5211 tcp_time_wait->tcp_free_list_cnt--; 5212 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 5213 tcp->tcp_time_wait_next = NULL; 5214 connp = tcp->tcp_connp; 5215 connp->conn_flags |= IPCL_REUSED; 5216 return ((void *)connp); 5217 } 5218 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 5219 if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP)) == NULL) 5220 return (NULL); 5221 return ((void *)connp); 5222 } 5223 5224 /* 5225 * Update the cached label for the given tcp_t. This should be called once per 5226 * connection, and before any packets are sent or tcp_process_options is 5227 * invoked. Returns B_FALSE if the correct label could not be constructed. 
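 *
 * A minimal usage sketch, mirroring the call made from
 * tcp_conn_request() and shown here only for illustration:
 *
 *	if (is_system_labeled() && !tcp_update_label(eager, cr))
 *		goto error;	(drop the embryonic connection)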
5228 */ 5229 static boolean_t 5230 tcp_update_label(tcp_t *tcp, const cred_t *cr) 5231 { 5232 conn_t *connp = tcp->tcp_connp; 5233 5234 if (tcp->tcp_ipversion == IPV4_VERSION) { 5235 uchar_t optbuf[IP_MAX_OPT_LENGTH]; 5236 int added; 5237 5238 if (tsol_compute_label(cr, tcp->tcp_remote, optbuf, 5239 connp->conn_mac_exempt) != 0) 5240 return (B_FALSE); 5241 5242 added = tsol_remove_secopt(tcp->tcp_ipha, tcp->tcp_hdr_len); 5243 if (added == -1) 5244 return (B_FALSE); 5245 tcp->tcp_hdr_len += added; 5246 tcp->tcp_tcph = (tcph_t *)((uchar_t *)tcp->tcp_tcph + added); 5247 tcp->tcp_ip_hdr_len += added; 5248 if ((tcp->tcp_label_len = optbuf[IPOPT_OLEN]) != 0) { 5249 tcp->tcp_label_len = (tcp->tcp_label_len + 3) & ~3; 5250 added = tsol_prepend_option(optbuf, tcp->tcp_ipha, 5251 tcp->tcp_hdr_len); 5252 if (added == -1) 5253 return (B_FALSE); 5254 tcp->tcp_hdr_len += added; 5255 tcp->tcp_tcph = (tcph_t *) 5256 ((uchar_t *)tcp->tcp_tcph + added); 5257 tcp->tcp_ip_hdr_len += added; 5258 } 5259 } else { 5260 uchar_t optbuf[TSOL_MAX_IPV6_OPTION]; 5261 5262 if (tsol_compute_label_v6(cr, &tcp->tcp_remote_v6, optbuf, 5263 connp->conn_mac_exempt) != 0) 5264 return (B_FALSE); 5265 if (tsol_update_sticky(&tcp->tcp_sticky_ipp, 5266 &tcp->tcp_label_len, optbuf) != 0) 5267 return (B_FALSE); 5268 if (tcp_build_hdrs(tcp->tcp_rq, tcp) != 0) 5269 return (B_FALSE); 5270 } 5271 5272 connp->conn_ulp_labeled = 1; 5273 5274 return (B_TRUE); 5275 } 5276 5277 /* BEGIN CSTYLED */ 5278 /* 5279 * 5280 * The sockfs ACCEPT path: 5281 * ======================= 5282 * 5283 * The eager is now established in its own perimeter as soon as SYN is 5284 * received in tcp_conn_request(). When sockfs receives conn_ind, it 5285 * completes the accept processing on the acceptor STREAM. The sending 5286 * of conn_ind part is common for both sockfs listener and a TLI/XTI 5287 * listener but a TLI/XTI listener completes the accept processing 5288 * on the listener perimeter. 5289 * 5290 * Common control flow for 3 way handshake: 5291 * ---------------------------------------- 5292 * 5293 * incoming SYN (listener perimeter) -> tcp_rput_data() 5294 * -> tcp_conn_request() 5295 * 5296 * incoming SYN-ACK-ACK (eager perim) -> tcp_rput_data() 5297 * send T_CONN_IND (listener perim) -> tcp_send_conn_ind() 5298 * 5299 * Sockfs ACCEPT Path: 5300 * ------------------- 5301 * 5302 * open acceptor stream (ip_tcpopen allocates tcp_wput_accept() 5303 * as STREAM entry point) 5304 * 5305 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_wput_accept() 5306 * 5307 * tcp_wput_accept() extracts the eager and makes the q->q_ptr <-> eager 5308 * association (we are not behind eager's squeue but sockfs is protecting us 5309 * and no one knows about this stream yet. The STREAMS entry point q->q_info 5310 * is changed to point at tcp_wput(). 5311 * 5312 * tcp_wput_accept() sends any deferred eagers via tcp_send_pending() to 5313 * listener (done on listener's perimeter). 5314 * 5315 * tcp_wput_accept() calls tcp_accept_finish() on eagers perimeter to finish 5316 * accept. 5317 * 5318 * TLI/XTI client ACCEPT path: 5319 * --------------------------- 5320 * 5321 * soaccept() sends T_CONN_RES on the listener STREAM. 5322 * 5323 * tcp_accept() -> tcp_accept_swap() complete the processing and send 5324 * the bind_mp to eager perimeter to finish accept (tcp_rput_other()). 5325 * 5326 * Locks: 5327 * ====== 5328 * 5329 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 and 5330 * and listeners->tcp_eager_next_q. 
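 *
 * A typical traversal of those lists therefore looks like this
 * (illustrative only):
 *
 *	mutex_enter(&listener->tcp_eager_lock);
 *	for (eager = listener->tcp_eager_next_q; eager != NULL;
 *	    eager = eager->tcp_eager_next_q)
 *		...;
 *	mutex_exit(&listener->tcp_eager_lock);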
5331 * 5332 * Referencing: 5333 * ============ 5334 * 5335 * 1) We start out in tcp_conn_request by eager placing a ref on 5336 * listener and listener adding eager to listeners->tcp_eager_next_q0. 5337 * 5338 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before 5339 * doing so we place a ref on the eager. This ref is finally dropped at the 5340 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the 5341 * reference is dropped by the squeue framework. 5342 * 5343 * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish 5344 * 5345 * The reference must be released by the same entity that added the reference 5346 * In the above scheme, the eager is the entity that adds and releases the 5347 * references. Note that tcp_accept_finish executes in the squeue of the eager 5348 * (albeit after it is attached to the acceptor stream). Though 1. executes 5349 * in the listener's squeue, the eager is nascent at this point and the 5350 * reference can be considered to have been added on behalf of the eager. 5351 * 5352 * Eager getting a Reset or listener closing: 5353 * ========================================== 5354 * 5355 * Once the listener and eager are linked, the listener never does the unlink. 5356 * If the listener needs to close, tcp_eager_cleanup() is called which queues 5357 * a message on all eager perimeter. The eager then does the unlink, clears 5358 * any pointers to the listener's queue and drops the reference to the 5359 * listener. The listener waits in tcp_close outside the squeue until its 5360 * refcount has dropped to 1. This ensures that the listener has waited for 5361 * all eagers to clear their association with the listener. 5362 * 5363 * Similarly, if eager decides to go away, it can unlink itself and close. 5364 * When the T_CONN_RES comes down, we check if eager has closed. Note that 5365 * the reference to eager is still valid because of the extra ref we put 5366 * in tcp_send_conn_ind. 5367 * 5368 * Listener can always locate the eager under the protection 5369 * of the listener->tcp_eager_lock, and then do a refhold 5370 * on the eager during the accept processing. 5371 * 5372 * The acceptor stream accesses the eager in the accept processing 5373 * based on the ref placed on eager before sending T_conn_ind. 5374 * The only entity that can negate this refhold is a listener close 5375 * which is mutually exclusive with an active acceptor stream. 5376 * 5377 * Eager's reference on the listener 5378 * =================================== 5379 * 5380 * If the accept happens (even on a closed eager) the eager drops its 5381 * reference on the listener at the start of tcp_accept_finish. If the 5382 * eager is killed due to an incoming RST before the T_conn_ind is sent up, 5383 * the reference is dropped in tcp_closei_local. If the listener closes, 5384 * the reference is dropped in tcp_eager_kill. In all cases the reference 5385 * is dropped while executing in the eager's context (squeue). 5386 */ 5387 /* END CSTYLED */ 5388 5389 /* Process the SYN packet, mp, directed at the listener 'tcp' */ 5390 5391 /* 5392 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN. 5393 * tcp_rput_data will not see any SYN packets. 
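 *
 * (Entry contract, summarized for illustration: 'arg' is the listener's
 * conn_t, 'mp' is the inbound SYN -- an M_DATA mblk, possibly preceded
 * by an IPsec M_CTL -- and 'arg2' is assumed to be the squeue on which
 * the SYN is being processed; see the tcp_get_conn(arg2) call below.)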
5394 */ 5395 /* ARGSUSED */ 5396 void 5397 tcp_conn_request(void *arg, mblk_t *mp, void *arg2) 5398 { 5399 tcph_t *tcph; 5400 uint32_t seg_seq; 5401 tcp_t *eager; 5402 uint_t ipvers; 5403 ipha_t *ipha; 5404 ip6_t *ip6h; 5405 int err; 5406 conn_t *econnp = NULL; 5407 squeue_t *new_sqp; 5408 mblk_t *mp1; 5409 uint_t ip_hdr_len; 5410 conn_t *connp = (conn_t *)arg; 5411 tcp_t *tcp = connp->conn_tcp; 5412 ire_t *ire; 5413 cred_t *credp; 5414 5415 if (tcp->tcp_state != TCPS_LISTEN) 5416 goto error2; 5417 5418 ASSERT((tcp->tcp_connp->conn_flags & IPCL_BOUND) != 0); 5419 5420 mutex_enter(&tcp->tcp_eager_lock); 5421 if (tcp->tcp_conn_req_cnt_q >= tcp->tcp_conn_req_max) { 5422 mutex_exit(&tcp->tcp_eager_lock); 5423 TCP_STAT(tcp_listendrop); 5424 BUMP_MIB(&tcp_mib, tcpListenDrop); 5425 if (tcp->tcp_debug) { 5426 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 5427 "tcp_conn_request: listen backlog (max=%d) " 5428 "overflow (%d pending) on %s", 5429 tcp->tcp_conn_req_max, tcp->tcp_conn_req_cnt_q, 5430 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 5431 } 5432 goto error2; 5433 } 5434 5435 if (tcp->tcp_conn_req_cnt_q0 >= 5436 tcp->tcp_conn_req_max + tcp_conn_req_max_q0) { 5437 /* 5438 * Q0 is full. Drop a pending half-open req from the queue 5439 * to make room for the new SYN req. Also mark the time we 5440 * drop a SYN. 5441 * 5442 * A more aggressive defense against SYN attack will 5443 * be to set the "tcp_syn_defense" flag now. 5444 */ 5445 TCP_STAT(tcp_listendropq0); 5446 tcp->tcp_last_rcv_lbolt = lbolt64; 5447 if (!tcp_drop_q0(tcp)) { 5448 mutex_exit(&tcp->tcp_eager_lock); 5449 BUMP_MIB(&tcp_mib, tcpListenDropQ0); 5450 if (tcp->tcp_debug) { 5451 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 5452 "tcp_conn_request: listen half-open queue " 5453 "(max=%d) full (%d pending) on %s", 5454 tcp_conn_req_max_q0, 5455 tcp->tcp_conn_req_cnt_q0, 5456 tcp_display(tcp, NULL, 5457 DISP_PORT_ONLY)); 5458 } 5459 goto error2; 5460 } 5461 } 5462 mutex_exit(&tcp->tcp_eager_lock); 5463 5464 /* 5465 * IP adds STRUIO_EAGER and ensures that the received packet is 5466 * M_DATA even if conn_ipv6_recvpktinfo is enabled or for ip6 5467 * link local address. If IPSec is enabled, db_struioflag has 5468 * STRUIO_POLICY set (mutually exclusive from STRUIO_EAGER); 5469 * otherwise an error case if neither of them is set. 5470 */ 5471 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 5472 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5473 DB_CKSUMSTART(mp) = 0; 5474 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 5475 econnp = (conn_t *)tcp_get_conn(arg2); 5476 if (econnp == NULL) 5477 goto error2; 5478 econnp->conn_sqp = new_sqp; 5479 } else if ((mp->b_datap->db_struioflag & STRUIO_POLICY) != 0) { 5480 /* 5481 * mp is updated in tcp_get_ipsec_conn(). 5482 */ 5483 econnp = tcp_get_ipsec_conn(tcp, arg2, &mp); 5484 if (econnp == NULL) { 5485 /* 5486 * mp freed by tcp_get_ipsec_conn. 
5487 */ 5488 return; 5489 } 5490 } else { 5491 goto error2; 5492 } 5493 5494 ASSERT(DB_TYPE(mp) == M_DATA); 5495 5496 ipvers = IPH_HDR_VERSION(mp->b_rptr); 5497 ASSERT(ipvers == IPV6_VERSION || ipvers == IPV4_VERSION); 5498 ASSERT(OK_32PTR(mp->b_rptr)); 5499 if (ipvers == IPV4_VERSION) { 5500 ipha = (ipha_t *)mp->b_rptr; 5501 ip_hdr_len = IPH_HDR_LENGTH(ipha); 5502 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5503 } else { 5504 ip6h = (ip6_t *)mp->b_rptr; 5505 ip_hdr_len = ip_hdr_length_v6(mp, ip6h); 5506 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5507 } 5508 5509 if (tcp->tcp_family == AF_INET) { 5510 ASSERT(ipvers == IPV4_VERSION); 5511 err = tcp_conn_create_v4(connp, econnp, ipha, tcph, mp); 5512 } else { 5513 err = tcp_conn_create_v6(connp, econnp, mp, tcph, ipvers, mp); 5514 } 5515 5516 if (err) 5517 goto error3; 5518 5519 eager = econnp->conn_tcp; 5520 5521 /* Inherit various TCP parameters from the listener */ 5522 eager->tcp_naglim = tcp->tcp_naglim; 5523 eager->tcp_first_timer_threshold = 5524 tcp->tcp_first_timer_threshold; 5525 eager->tcp_second_timer_threshold = 5526 tcp->tcp_second_timer_threshold; 5527 5528 eager->tcp_first_ctimer_threshold = 5529 tcp->tcp_first_ctimer_threshold; 5530 eager->tcp_second_ctimer_threshold = 5531 tcp->tcp_second_ctimer_threshold; 5532 5533 /* 5534 * tcp_adapt_ire() may change tcp_rwnd according to the ire metrics. 5535 * If it does not, the eager's receive window will be set to the 5536 * listener's receive window later in this function. 5537 */ 5538 eager->tcp_rwnd = 0; 5539 5540 /* 5541 * Inherit listener's tcp_init_cwnd. Need to do this before 5542 * calling tcp_process_options() where tcp_mss_set() is called 5543 * to set the initial cwnd. 5544 */ 5545 eager->tcp_init_cwnd = tcp->tcp_init_cwnd; 5546 5547 /* 5548 * Zones: tcp_adapt_ire() and tcp_send_data() both need the 5549 * zone id before the accept is completed in tcp_wput_accept(). 5550 */ 5551 econnp->conn_zoneid = connp->conn_zoneid; 5552 5553 /* Copy nexthop information from listener to eager */ 5554 if (connp->conn_nexthop_set) { 5555 econnp->conn_nexthop_set = connp->conn_nexthop_set; 5556 econnp->conn_nexthop_v4 = connp->conn_nexthop_v4; 5557 } 5558 5559 /* 5560 * TSOL: tsol_input_proc() needs the eager's cred before the 5561 * eager is accepted 5562 */ 5563 econnp->conn_cred = eager->tcp_cred = credp = connp->conn_cred; 5564 crhold(credp); 5565 5566 /* 5567 * If the caller has the process-wide flag set, then default to MAC 5568 * exempt mode. This allows read-down to unlabeled hosts. 
5569 */ 5570 if (getpflags(NET_MAC_AWARE, credp) != 0) 5571 econnp->conn_mac_exempt = B_TRUE; 5572 5573 if (is_system_labeled()) { 5574 cred_t *cr; 5575 5576 if (connp->conn_mlp_type != mlptSingle) { 5577 cr = econnp->conn_peercred = DB_CRED(mp); 5578 if (cr != NULL) 5579 crhold(cr); 5580 else 5581 cr = econnp->conn_cred; 5582 DTRACE_PROBE2(mlp_syn_accept, conn_t *, 5583 econnp, cred_t *, cr) 5584 } else { 5585 cr = econnp->conn_cred; 5586 DTRACE_PROBE2(syn_accept, conn_t *, 5587 econnp, cred_t *, cr) 5588 } 5589 5590 if (!tcp_update_label(eager, cr)) { 5591 DTRACE_PROBE3( 5592 tx__ip__log__error__connrequest__tcp, 5593 char *, "eager connp(1) label on SYN mp(2) failed", 5594 conn_t *, econnp, mblk_t *, mp); 5595 goto error3; 5596 } 5597 } 5598 5599 eager->tcp_hard_binding = B_TRUE; 5600 5601 tcp_bind_hash_insert(&tcp_bind_fanout[ 5602 TCP_BIND_HASH(eager->tcp_lport)], eager, 0); 5603 5604 CL_INET_CONNECT(eager); 5605 5606 /* 5607 * No need to check for multicast destination since ip will only pass 5608 * up multicasts to those that have expressed interest 5609 * TODO: what about rejecting broadcasts? 5610 * Also check that source is not a multicast or broadcast address. 5611 */ 5612 eager->tcp_state = TCPS_SYN_RCVD; 5613 5614 5615 /* 5616 * There should be no ire in the mp as we are being called after 5617 * receiving the SYN. 5618 */ 5619 ASSERT(tcp_ire_mp(mp) == NULL); 5620 5621 /* 5622 * Adapt our mss, ttl, ... according to information provided in IRE. 5623 */ 5624 5625 if (tcp_adapt_ire(eager, NULL) == 0) { 5626 /* Undo the bind_hash_insert */ 5627 tcp_bind_hash_remove(eager); 5628 goto error3; 5629 } 5630 5631 /* Process all TCP options. */ 5632 tcp_process_options(eager, tcph); 5633 5634 /* Is the other end ECN capable? */ 5635 if (tcp_ecn_permitted >= 1 && 5636 (tcph->th_flags[0] & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 5637 eager->tcp_ecn_ok = B_TRUE; 5638 } 5639 5640 /* 5641 * listener->tcp_rq->q_hiwat should be the default window size or a 5642 * window size changed via SO_RCVBUF option. First round up the 5643 * eager's tcp_rwnd to the nearest MSS. Then find out the window 5644 * scale option value if needed. Call tcp_rwnd_set() to finish the 5645 * setting. 5646 * 5647 * Note if there is a rpipe metric associated with the remote host, 5648 * we should not inherit receive window size from listener. 5649 */ 5650 eager->tcp_rwnd = MSS_ROUNDUP( 5651 (eager->tcp_rwnd == 0 ? tcp->tcp_rq->q_hiwat : 5652 eager->tcp_rwnd), eager->tcp_mss); 5653 if (eager->tcp_snd_ws_ok) 5654 tcp_set_ws_value(eager); 5655 /* 5656 * Note that this is the only place tcp_rwnd_set() is called for 5657 * accepting a connection. We need to call it here instead of 5658 * after the 3-way handshake because we need to tell the other 5659 * side our rwnd in the SYN-ACK segment. 5660 */ 5661 (void) tcp_rwnd_set(eager, eager->tcp_rwnd); 5662 5663 /* 5664 * We eliminate the need for sockfs to send down a T_SVR4_OPTMGMT_REQ 5665 * via soaccept()->soinheritoptions() which essentially applies 5666 * all the listener options to the new STREAM. The options that we 5667 * need to take care of are: 5668 * SO_DEBUG, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE, SO_BROADCAST, 5669 * SO_USELOOPBACK, SO_OOBINLINE, SO_DGRAM_ERRIND, SO_LINGER, 5670 * SO_SNDBUF, SO_RCVBUF. 5671 * 5672 * SO_RCVBUF: tcp_rwnd_set() above takes care of it. 5673 * SO_SNDBUF: Set the tcp_xmit_hiwater for the eager. When 5674 * tcp_maxpsz_set() gets called later from 5675 * tcp_accept_finish(), the option takes effect. 
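 *
 * Illustrative mapping of the remaining options onto the fields copied
 * below (a summary, not an exhaustive contract):
 *	SO_OOBINLINE	-> tcp_oobinline
 *	SO_REUSEADDR	-> tcp_reuseaddr / conn_reuseaddr
 *	SO_DONTROUTE	-> tcp_dontroute / conn_dontroute
 *	SO_LINGER	-> tcp_linger, tcp_lingertime
 *	SO_KEEPALIVE	-> tcp_ka_enabled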
5676 * 5677 */ 5678 /* Set the TCP options */ 5679 eager->tcp_xmit_hiwater = tcp->tcp_xmit_hiwater; 5680 eager->tcp_dgram_errind = tcp->tcp_dgram_errind; 5681 eager->tcp_oobinline = tcp->tcp_oobinline; 5682 eager->tcp_reuseaddr = tcp->tcp_reuseaddr; 5683 eager->tcp_broadcast = tcp->tcp_broadcast; 5684 eager->tcp_useloopback = tcp->tcp_useloopback; 5685 eager->tcp_dontroute = tcp->tcp_dontroute; 5686 eager->tcp_linger = tcp->tcp_linger; 5687 eager->tcp_lingertime = tcp->tcp_lingertime; 5688 if (tcp->tcp_ka_enabled) 5689 eager->tcp_ka_enabled = 1; 5690 5691 /* Set the IP options */ 5692 econnp->conn_broadcast = connp->conn_broadcast; 5693 econnp->conn_loopback = connp->conn_loopback; 5694 econnp->conn_dontroute = connp->conn_dontroute; 5695 econnp->conn_reuseaddr = connp->conn_reuseaddr; 5696 5697 /* Put a ref on the listener for the eager. */ 5698 CONN_INC_REF(connp); 5699 mutex_enter(&tcp->tcp_eager_lock); 5700 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = eager; 5701 eager->tcp_eager_next_q0 = tcp->tcp_eager_next_q0; 5702 tcp->tcp_eager_next_q0 = eager; 5703 eager->tcp_eager_prev_q0 = tcp; 5704 5705 /* Set tcp_listener before adding it to tcp_conn_fanout */ 5706 eager->tcp_listener = tcp; 5707 eager->tcp_saved_listener = tcp; 5708 5709 /* 5710 * Tag this detached tcp vector for later retrieval 5711 * by our listener client in tcp_accept(). 5712 */ 5713 eager->tcp_conn_req_seqnum = tcp->tcp_conn_req_seqnum; 5714 tcp->tcp_conn_req_cnt_q0++; 5715 if (++tcp->tcp_conn_req_seqnum == -1) { 5716 /* 5717 * -1 is "special" and defined in TPI as something 5718 * that should never be used in T_CONN_IND 5719 */ 5720 ++tcp->tcp_conn_req_seqnum; 5721 } 5722 mutex_exit(&tcp->tcp_eager_lock); 5723 5724 if (tcp->tcp_syn_defense) { 5725 /* Don't drop the SYN that comes from a good IP source */ 5726 ipaddr_t *addr_cache = (ipaddr_t *)(tcp->tcp_ip_addr_cache); 5727 if (addr_cache != NULL && eager->tcp_remote == 5728 addr_cache[IP_ADDR_CACHE_HASH(eager->tcp_remote)]) { 5729 eager->tcp_dontdrop = B_TRUE; 5730 } 5731 } 5732 5733 /* 5734 * We need to insert the eager in its own perimeter but as soon 5735 * as we do that, we expose the eager to the classifier and 5736 * should not touch any field outside the eager's perimeter. 5737 * So do all the work necessary before inserting the eager 5738 * in its own perimeter. Be optimistic that ipcl_conn_insert() 5739 * will succeed but undo everything if it fails. 5740 */ 5741 seg_seq = ABE32_TO_U32(tcph->th_seq); 5742 eager->tcp_irs = seg_seq; 5743 eager->tcp_rack = seg_seq; 5744 eager->tcp_rnxt = seg_seq + 1; 5745 U32_TO_ABE32(eager->tcp_rnxt, eager->tcp_tcph->th_ack); 5746 BUMP_MIB(&tcp_mib, tcpPassiveOpens); 5747 eager->tcp_state = TCPS_SYN_RCVD; 5748 mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss, 5749 NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE); 5750 if (mp1 == NULL) 5751 goto error1; 5752 DB_CPID(mp1) = tcp->tcp_cpid; 5753 5754 /* 5755 * We need to start the rto timer. In normal case, we start 5756 * the timer after sending the packet on the wire (or at 5757 * least believing that packet was sent by waiting for 5758 * CALL_IP_WPUT() to return). Since this is the first packet 5759 * being sent on the wire for the eager, our initial tcp_rto 5760 * is at least tcp_rexmit_interval_min which is a fairly 5761 * large value to allow the algorithm to adjust slowly to large 5762 * fluctuations of RTT during first few transmissions. 
5763 * 5764 * Starting the timer first and then sending the packet in this 5765 * case shouldn't make much difference since tcp_rexmit_interval_min 5766 * is of the order of several 100ms and starting the timer 5767 * first and then sending the packet will result in difference 5768 * of few micro seconds. 5769 * 5770 * Without this optimization, we are forced to hold the fanout 5771 * lock across the ipcl_bind_insert() and sending the packet 5772 * so that we don't race against an incoming packet (maybe RST) 5773 * for this eager. 5774 */ 5775 5776 TCP_RECORD_TRACE(eager, mp1, TCP_TRACE_SEND_PKT); 5777 TCP_TIMER_RESTART(eager, eager->tcp_rto); 5778 5779 5780 /* 5781 * Insert the eager in its own perimeter now. We are ready to deal 5782 * with any packets on eager. 5783 */ 5784 if (eager->tcp_ipversion == IPV4_VERSION) { 5785 if (ipcl_conn_insert(econnp, IPPROTO_TCP, 0, 0, 0) != 0) { 5786 goto error; 5787 } 5788 } else { 5789 if (ipcl_conn_insert_v6(econnp, IPPROTO_TCP, 0, 0, 0, 0) != 0) { 5790 goto error; 5791 } 5792 } 5793 5794 /* mark conn as fully-bound */ 5795 econnp->conn_fully_bound = B_TRUE; 5796 5797 /* Send the SYN-ACK */ 5798 tcp_send_data(eager, eager->tcp_wq, mp1); 5799 freemsg(mp); 5800 5801 return; 5802 error: 5803 (void) TCP_TIMER_CANCEL(eager, eager->tcp_timer_tid); 5804 freemsg(mp1); 5805 error1: 5806 /* Undo what we did above */ 5807 mutex_enter(&tcp->tcp_eager_lock); 5808 tcp_eager_unlink(eager); 5809 mutex_exit(&tcp->tcp_eager_lock); 5810 /* Drop eager's reference on the listener */ 5811 CONN_DEC_REF(connp); 5812 5813 /* 5814 * Delete the cached ire in conn_ire_cache and also mark 5815 * the conn as CONDEMNED 5816 */ 5817 mutex_enter(&econnp->conn_lock); 5818 econnp->conn_state_flags |= CONN_CONDEMNED; 5819 ire = econnp->conn_ire_cache; 5820 econnp->conn_ire_cache = NULL; 5821 mutex_exit(&econnp->conn_lock); 5822 if (ire != NULL) 5823 IRE_REFRELE_NOTR(ire); 5824 5825 /* 5826 * tcp_accept_comm inserts the eager to the bind_hash 5827 * we need to remove it from the hash if ipcl_conn_insert 5828 * fails. 5829 */ 5830 tcp_bind_hash_remove(eager); 5831 /* Drop the eager ref placed in tcp_open_detached */ 5832 CONN_DEC_REF(econnp); 5833 5834 /* 5835 * If a connection already exists, send the mp to that connections so 5836 * that it can be appropriately dealt with. 5837 */ 5838 if ((econnp = ipcl_classify(mp, connp->conn_zoneid)) != NULL) { 5839 if (!IPCL_IS_CONNECTED(econnp)) { 5840 /* 5841 * Something bad happened. ipcl_conn_insert() 5842 * failed because a connection already existed 5843 * in connected hash but we can't find it 5844 * anymore (someone blew it away). Just 5845 * free this message and hopefully remote 5846 * will retransmit at which time the SYN can be 5847 * treated as a new connection or dealth with 5848 * a TH_RST if a connection already exists. 5849 */ 5850 freemsg(mp); 5851 } else { 5852 squeue_fill(econnp->conn_sqp, mp, tcp_input, 5853 econnp, SQTAG_TCP_CONN_REQ); 5854 } 5855 } else { 5856 /* Nobody wants this packet */ 5857 freemsg(mp); 5858 } 5859 return; 5860 error2: 5861 freemsg(mp); 5862 return; 5863 error3: 5864 CONN_DEC_REF(econnp); 5865 freemsg(mp); 5866 } 5867 5868 /* 5869 * In an ideal case of vertical partition in NUMA architecture, its 5870 * beneficial to have the listener and all the incoming connections 5871 * tied to the same squeue. 
The other constraint is that incoming 5872 * connections should be tied to the squeue attached to interrupted 5873 * CPU for obvious locality reason so this leaves the listener to 5874 * be tied to the same squeue. Our only problem is that when listener 5875 * is binding, the CPU that will get interrupted by the NIC whose 5876 * IP address the listener is binding to is not even known. So 5877 * the code below allows us to change that binding at the time the 5878 * CPU is interrupted by virtue of incoming connection's squeue. 5879 * 5880 * This is usefull only in case of a listener bound to a specific IP 5881 * address. For other kind of listeners, they get bound the 5882 * very first time and there is no attempt to rebind them. 5883 */ 5884 void 5885 tcp_conn_request_unbound(void *arg, mblk_t *mp, void *arg2) 5886 { 5887 conn_t *connp = (conn_t *)arg; 5888 squeue_t *sqp = (squeue_t *)arg2; 5889 squeue_t *new_sqp; 5890 uint32_t conn_flags; 5891 5892 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 5893 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5894 } else { 5895 goto done; 5896 } 5897 5898 if (connp->conn_fanout == NULL) 5899 goto done; 5900 5901 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 5902 mutex_enter(&connp->conn_fanout->connf_lock); 5903 mutex_enter(&connp->conn_lock); 5904 /* 5905 * No one from read or write side can access us now 5906 * except for already queued packets on this squeue. 5907 * But since we haven't changed the squeue yet, they 5908 * can't execute. If they are processed after we have 5909 * changed the squeue, they are sent back to the 5910 * correct squeue down below. 5911 */ 5912 if (connp->conn_sqp != new_sqp) { 5913 while (connp->conn_sqp != new_sqp) 5914 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 5915 } 5916 5917 do { 5918 conn_flags = connp->conn_flags; 5919 conn_flags |= IPCL_FULLY_BOUND; 5920 (void) cas32(&connp->conn_flags, connp->conn_flags, 5921 conn_flags); 5922 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 5923 5924 mutex_exit(&connp->conn_fanout->connf_lock); 5925 mutex_exit(&connp->conn_lock); 5926 } 5927 5928 done: 5929 if (connp->conn_sqp != sqp) { 5930 CONN_INC_REF(connp); 5931 squeue_fill(connp->conn_sqp, mp, 5932 connp->conn_recv, connp, SQTAG_TCP_CONN_REQ_UNBOUND); 5933 } else { 5934 tcp_conn_request(connp, mp, sqp); 5935 } 5936 } 5937 5938 /* 5939 * Successful connect request processing begins when our client passes 5940 * a T_CONN_REQ message into tcp_wput() and ends when tcp_rput() passes 5941 * our T_OK_ACK reply message upstream. The control flow looks like this: 5942 * upstream -> tcp_wput() -> tcp_wput_proto() -> tcp_connect() -> IP 5943 * upstream <- tcp_rput() <- IP 5944 * After various error checks are completed, tcp_connect() lays 5945 * the target address and port into the composite header template, 5946 * preallocates the T_OK_ACK reply message, construct a full 12 byte bind 5947 * request followed by an IRE request, and passes the three mblk message 5948 * down to IP looking like this: 5949 * O_T_BIND_REQ for IP --> IRE req --> T_OK_ACK for our client 5950 * Processing continues in tcp_rput() when we receive the following message: 5951 * T_BIND_ACK from IP --> IRE ack --> T_OK_ACK for our client 5952 * After consuming the first two mblks, tcp_rput() calls tcp_timer(), 5953 * to fire off the connection request, and then passes the T_OK_ACK mblk 5954 * upstream that we filled in below. There are, of course, numerous 5955 * error conditions along the way which truncate the processing described 5956 * above. 
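 *
 * For illustration only: a TLI/XTI client connecting an AF_INET endpoint
 * to 192.0.2.1 port 80 sends down a T_CONN_REQ whose DEST_offset and
 * DEST_length describe a sin_t with
 *	sin_family = AF_INET, sin_port = htons(80), sin_addr = 192.0.2.1
 * tcp_connect() validates that address, lays it into the composite
 * header template and then follows the bind/IRE exchange sketched above.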
5957 */ 5958 static void 5959 tcp_connect(tcp_t *tcp, mblk_t *mp) 5960 { 5961 sin_t *sin; 5962 sin6_t *sin6; 5963 queue_t *q = tcp->tcp_wq; 5964 struct T_conn_req *tcr; 5965 ipaddr_t *dstaddrp; 5966 in_port_t dstport; 5967 uint_t srcid; 5968 5969 tcr = (struct T_conn_req *)mp->b_rptr; 5970 5971 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 5972 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) { 5973 tcp_err_ack(tcp, mp, TPROTO, 0); 5974 return; 5975 } 5976 5977 /* 5978 * Determine packet type based on type of address passed in 5979 * the request should contain an IPv4 or IPv6 address. 5980 * Make sure that address family matches the type of 5981 * family of the the address passed down 5982 */ 5983 switch (tcr->DEST_length) { 5984 default: 5985 tcp_err_ack(tcp, mp, TBADADDR, 0); 5986 return; 5987 5988 case (sizeof (sin_t) - sizeof (sin->sin_zero)): { 5989 /* 5990 * XXX: The check for valid DEST_length was not there 5991 * in earlier releases and some buggy 5992 * TLI apps (e.g Sybase) got away with not feeding 5993 * in sin_zero part of address. 5994 * We allow that bug to keep those buggy apps humming. 5995 * Test suites require the check on DEST_length. 5996 * We construct a new mblk with valid DEST_length 5997 * free the original so the rest of the code does 5998 * not have to keep track of this special shorter 5999 * length address case. 6000 */ 6001 mblk_t *nmp; 6002 struct T_conn_req *ntcr; 6003 sin_t *nsin; 6004 6005 nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) + 6006 tcr->OPT_length, BPRI_HI); 6007 if (nmp == NULL) { 6008 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 6009 return; 6010 } 6011 ntcr = (struct T_conn_req *)nmp->b_rptr; 6012 bzero(ntcr, sizeof (struct T_conn_req)); /* zero fill */ 6013 ntcr->PRIM_type = T_CONN_REQ; 6014 ntcr->DEST_length = sizeof (sin_t); 6015 ntcr->DEST_offset = sizeof (struct T_conn_req); 6016 6017 nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset); 6018 *nsin = sin_null; 6019 /* Get pointer to shorter address to copy from original mp */ 6020 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6021 tcr->DEST_length); /* extract DEST_length worth of sin_t */ 6022 if (sin == NULL || !OK_32PTR((char *)sin)) { 6023 freemsg(nmp); 6024 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6025 return; 6026 } 6027 nsin->sin_family = sin->sin_family; 6028 nsin->sin_port = sin->sin_port; 6029 nsin->sin_addr = sin->sin_addr; 6030 /* Note:nsin->sin_zero zero-fill with sin_null assign above */ 6031 nmp->b_wptr = (uchar_t *)&nsin[1]; 6032 if (tcr->OPT_length != 0) { 6033 ntcr->OPT_length = tcr->OPT_length; 6034 ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr; 6035 bcopy((uchar_t *)tcr + tcr->OPT_offset, 6036 (uchar_t *)ntcr + ntcr->OPT_offset, 6037 tcr->OPT_length); 6038 nmp->b_wptr += tcr->OPT_length; 6039 } 6040 freemsg(mp); /* original mp freed */ 6041 mp = nmp; /* re-initialize original variables */ 6042 tcr = ntcr; 6043 } 6044 /* FALLTHRU */ 6045 6046 case sizeof (sin_t): 6047 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6048 sizeof (sin_t)); 6049 if (sin == NULL || !OK_32PTR((char *)sin)) { 6050 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6051 return; 6052 } 6053 if (tcp->tcp_family != AF_INET || 6054 sin->sin_family != AF_INET) { 6055 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6056 return; 6057 } 6058 if (sin->sin_port == 0) { 6059 tcp_err_ack(tcp, mp, TBADADDR, 0); 6060 return; 6061 } 6062 if (tcp->tcp_connp && tcp->tcp_connp->conn_ipv6_v6only) { 6063 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6064 return; 6065 } 6066 6067 break; 6068 6069 
case sizeof (sin6_t): 6070 sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset, 6071 sizeof (sin6_t)); 6072 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 6073 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6074 return; 6075 } 6076 if (tcp->tcp_family != AF_INET6 || 6077 sin6->sin6_family != AF_INET6) { 6078 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6079 return; 6080 } 6081 if (sin6->sin6_port == 0) { 6082 tcp_err_ack(tcp, mp, TBADADDR, 0); 6083 return; 6084 } 6085 break; 6086 } 6087 /* 6088 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we 6089 * should key on their sequence number and cut them loose. 6090 */ 6091 6092 /* 6093 * If options passed in, feed it for verification and handling 6094 */ 6095 if (tcr->OPT_length != 0) { 6096 mblk_t *ok_mp; 6097 mblk_t *discon_mp; 6098 mblk_t *conn_opts_mp; 6099 int t_error, sys_error, do_disconnect; 6100 6101 conn_opts_mp = NULL; 6102 6103 if (tcp_conprim_opt_process(tcp, mp, 6104 &do_disconnect, &t_error, &sys_error) < 0) { 6105 if (do_disconnect) { 6106 ASSERT(t_error == 0 && sys_error == 0); 6107 discon_mp = mi_tpi_discon_ind(NULL, 6108 ECONNREFUSED, 0); 6109 if (!discon_mp) { 6110 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, 6111 TSYSERR, ENOMEM); 6112 return; 6113 } 6114 ok_mp = mi_tpi_ok_ack_alloc(mp); 6115 if (!ok_mp) { 6116 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6117 TSYSERR, ENOMEM); 6118 return; 6119 } 6120 qreply(q, ok_mp); 6121 qreply(q, discon_mp); /* no flush! */ 6122 } else { 6123 ASSERT(t_error != 0); 6124 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error, 6125 sys_error); 6126 } 6127 return; 6128 } 6129 /* 6130 * Success in setting options, the mp option buffer represented 6131 * by OPT_length/offset has been potentially modified and 6132 * contains results of option processing. We copy it in 6133 * another mp to save it for potentially influencing returning 6134 * it in T_CONN_CONN. 6135 */ 6136 if (tcr->OPT_length != 0) { /* there are resulting options */ 6137 conn_opts_mp = copyb(mp); 6138 if (!conn_opts_mp) { 6139 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, 6140 TSYSERR, ENOMEM); 6141 return; 6142 } 6143 ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL); 6144 tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp; 6145 /* 6146 * Note: 6147 * These resulting option negotiation can include any 6148 * end-to-end negotiation options but there no such 6149 * thing (yet?) in our TCP/IP. 6150 */ 6151 } 6152 } 6153 6154 /* 6155 * If we're connecting to an IPv4-mapped IPv6 address, we need to 6156 * make sure that the template IP header in the tcp structure is an 6157 * IPv4 header, and that the tcp_ipversion is IPV4_VERSION. We 6158 * need to this before we call tcp_bindi() so that the port lookup 6159 * code will look for ports in the correct port space (IPv4 and 6160 * IPv6 have separate port spaces). 
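 *
 * (Example, for illustration: an AF_INET6 socket connecting to the
 * IPv4-mapped address ::ffff:192.0.2.1 is really making an IPv4
 * connection, so the header template is switched to IPv4 by
 * tcp_header_init_ipv4() below and the port lookup is done in the
 * IPv4 port space.)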
6161 */ 6162 if (tcp->tcp_family == AF_INET6 && tcp->tcp_ipversion == IPV6_VERSION && 6163 IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6164 int err = 0; 6165 6166 err = tcp_header_init_ipv4(tcp); 6167 if (err != 0) { 6168 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6169 goto connect_failed; 6170 } 6171 if (tcp->tcp_lport != 0) 6172 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 6173 } 6174 6175 switch (tcp->tcp_state) { 6176 case TCPS_IDLE: 6177 /* 6178 * We support quick connect, refer to comments in 6179 * tcp_connect_*() 6180 */ 6181 /* FALLTHRU */ 6182 case TCPS_BOUND: 6183 case TCPS_LISTEN: 6184 if (tcp->tcp_family == AF_INET6) { 6185 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6186 tcp_connect_ipv6(tcp, mp, 6187 &sin6->sin6_addr, 6188 sin6->sin6_port, sin6->sin6_flowinfo, 6189 sin6->__sin6_src_id, sin6->sin6_scope_id); 6190 return; 6191 } 6192 /* 6193 * Destination adress is mapped IPv6 address. 6194 * Source bound address should be unspecified or 6195 * IPv6 mapped address as well. 6196 */ 6197 if (!IN6_IS_ADDR_UNSPECIFIED( 6198 &tcp->tcp_bound_source_v6) && 6199 !IN6_IS_ADDR_V4MAPPED(&tcp->tcp_bound_source_v6)) { 6200 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, 6201 EADDRNOTAVAIL); 6202 break; 6203 } 6204 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr)); 6205 dstport = sin6->sin6_port; 6206 srcid = sin6->__sin6_src_id; 6207 } else { 6208 dstaddrp = &sin->sin_addr.s_addr; 6209 dstport = sin->sin_port; 6210 srcid = 0; 6211 } 6212 6213 tcp_connect_ipv4(tcp, mp, dstaddrp, dstport, srcid); 6214 return; 6215 default: 6216 mp = mi_tpi_err_ack_alloc(mp, TOUTSTATE, 0); 6217 break; 6218 } 6219 /* 6220 * Note: Code below is the "failure" case 6221 */ 6222 /* return error ack and blow away saved option results if any */ 6223 connect_failed: 6224 if (mp != NULL) 6225 putnext(tcp->tcp_rq, mp); 6226 else { 6227 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6228 TSYSERR, ENOMEM); 6229 } 6230 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6231 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6232 } 6233 6234 /* 6235 * Handle connect to IPv4 destinations, including connections for AF_INET6 6236 * sockets connecting to IPv4 mapped IPv6 destinations. 6237 */ 6238 static void 6239 tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp, in_port_t dstport, 6240 uint_t srcid) 6241 { 6242 tcph_t *tcph; 6243 mblk_t *mp1; 6244 ipaddr_t dstaddr = *dstaddrp; 6245 int32_t oldstate; 6246 uint16_t lport; 6247 6248 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 6249 6250 /* Check for attempt to connect to INADDR_ANY */ 6251 if (dstaddr == INADDR_ANY) { 6252 /* 6253 * SunOS 4.x and 4.3 BSD allow an application 6254 * to connect a TCP socket to INADDR_ANY. 6255 * When they do this, the kernel picks the 6256 * address of one interface and uses it 6257 * instead. The kernel usually ends up 6258 * picking the address of the loopback 6259 * interface. This is an undocumented feature. 6260 * However, we provide the same thing here 6261 * in order to have source and binary 6262 * compatibility with SunOS 4.x. 6263 * Update the T_CONN_REQ (sin/sin6) since it is used to 6264 * generate the T_CONN_CON. 
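 *
 * (In other words, purely as an illustration: connect() to 0.0.0.0 is
 * quietly treated as connect() to 127.0.0.1, and the T_CONN_CON sent
 * back to the client reports the loopback address.)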
6265 */ 6266 dstaddr = htonl(INADDR_LOOPBACK); 6267 *dstaddrp = dstaddr; 6268 } 6269 6270 /* Handle __sin6_src_id if socket not bound to an IP address */ 6271 if (srcid != 0 && tcp->tcp_ipha->ipha_src == INADDR_ANY) { 6272 ip_srcid_find_id(srcid, &tcp->tcp_ip_src_v6, 6273 tcp->tcp_connp->conn_zoneid); 6274 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_ip_src_v6, 6275 tcp->tcp_ipha->ipha_src); 6276 } 6277 6278 /* 6279 * Don't let an endpoint connect to itself. Note that 6280 * the test here does not catch the case where the 6281 * source IP addr was left unspecified by the user. In 6282 * this case, the source addr is set in tcp_adapt_ire() 6283 * using the reply to the T_BIND message that we send 6284 * down to IP here and the check is repeated in tcp_rput_other. 6285 */ 6286 if (dstaddr == tcp->tcp_ipha->ipha_src && 6287 dstport == tcp->tcp_lport) { 6288 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6289 goto failed; 6290 } 6291 6292 tcp->tcp_ipha->ipha_dst = dstaddr; 6293 IN6_IPADDR_TO_V4MAPPED(dstaddr, &tcp->tcp_remote_v6); 6294 6295 /* 6296 * Massage a source route if any putting the first hop 6297 * in iph_dst. Compute a starting value for the checksum which 6298 * takes into account that the original iph_dst should be 6299 * included in the checksum but that ip will include the 6300 * first hop in the source route in the tcp checksum. 6301 */ 6302 tcp->tcp_sum = ip_massage_options(tcp->tcp_ipha); 6303 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 6304 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 6305 (tcp->tcp_ipha->ipha_dst & 0xffff)); 6306 if ((int)tcp->tcp_sum < 0) 6307 tcp->tcp_sum--; 6308 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 6309 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 6310 (tcp->tcp_sum >> 16)); 6311 tcph = tcp->tcp_tcph; 6312 *(uint16_t *)tcph->th_fport = dstport; 6313 tcp->tcp_fport = dstport; 6314 6315 oldstate = tcp->tcp_state; 6316 /* 6317 * At this point the remote destination address and remote port fields 6318 * in the tcp-four-tuple have been filled in the tcp structure. Now we 6319 * have to see which state tcp was in so we can take apropriate action. 6320 */ 6321 if (oldstate == TCPS_IDLE) { 6322 /* 6323 * We support a quick connect capability here, allowing 6324 * clients to transition directly from IDLE to SYN_SENT 6325 * tcp_bindi will pick an unused port, insert the connection 6326 * in the bind hash and transition to BOUND state. 6327 */ 6328 lport = tcp_update_next_port(tcp_next_port_to_try, tcp, B_TRUE); 6329 lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE, 6330 B_FALSE, B_FALSE); 6331 if (lport == 0) { 6332 mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0); 6333 goto failed; 6334 } 6335 } 6336 tcp->tcp_state = TCPS_SYN_SENT; 6337 6338 /* 6339 * TODO: allow data with connect requests 6340 * by unlinking M_DATA trailers here and 6341 * linking them in behind the T_OK_ACK mblk. 6342 * The tcp_rput() bind ack handler would then 6343 * feed them to tcp_wput_data() rather than call 6344 * tcp_timer(). 6345 */ 6346 mp = mi_tpi_ok_ack_alloc(mp); 6347 if (!mp) { 6348 tcp->tcp_state = oldstate; 6349 goto failed; 6350 } 6351 if (tcp->tcp_family == AF_INET) { 6352 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 6353 sizeof (ipa_conn_t)); 6354 } else { 6355 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 6356 sizeof (ipa6_conn_t)); 6357 } 6358 if (mp1) { 6359 /* Hang onto the T_OK_ACK for later. 
*/ 6360 linkb(mp1, mp); 6361 mblk_setcred(mp1, tcp->tcp_cred); 6362 if (tcp->tcp_family == AF_INET) 6363 mp1 = ip_bind_v4(tcp->tcp_wq, mp1, tcp->tcp_connp); 6364 else { 6365 mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp, 6366 &tcp->tcp_sticky_ipp); 6367 } 6368 BUMP_MIB(&tcp_mib, tcpActiveOpens); 6369 tcp->tcp_active_open = 1; 6370 /* 6371 * If the bind cannot complete immediately 6372 * IP will arrange to call tcp_rput_other 6373 * when the bind completes. 6374 */ 6375 if (mp1 != NULL) 6376 tcp_rput_other(tcp, mp1); 6377 return; 6378 } 6379 /* Error case */ 6380 tcp->tcp_state = oldstate; 6381 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6382 6383 failed: 6384 /* return error ack and blow away saved option results if any */ 6385 if (mp != NULL) 6386 putnext(tcp->tcp_rq, mp); 6387 else { 6388 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6389 TSYSERR, ENOMEM); 6390 } 6391 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6392 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6393 6394 } 6395 6396 /* 6397 * Handle connect to IPv6 destinations. 6398 */ 6399 static void 6400 tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp, 6401 in_port_t dstport, uint32_t flowinfo, uint_t srcid, uint32_t scope_id) 6402 { 6403 tcph_t *tcph; 6404 mblk_t *mp1; 6405 ip6_rthdr_t *rth; 6406 int32_t oldstate; 6407 uint16_t lport; 6408 6409 ASSERT(tcp->tcp_family == AF_INET6); 6410 6411 /* 6412 * If we're here, it means that the destination address is a native 6413 * IPv6 address. Return an error if tcp_ipversion is not IPv6. A 6414 * reason why it might not be IPv6 is if the socket was bound to an 6415 * IPv4-mapped IPv6 address. 6416 */ 6417 if (tcp->tcp_ipversion != IPV6_VERSION) { 6418 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6419 goto failed; 6420 } 6421 6422 /* 6423 * Interpret a zero destination to mean loopback. 6424 * Update the T_CONN_REQ (sin/sin6) since it is used to 6425 * generate the T_CONN_CON. 6426 */ 6427 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp)) { 6428 *dstaddrp = ipv6_loopback; 6429 } 6430 6431 /* Handle __sin6_src_id if socket not bound to an IP address */ 6432 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 6433 ip_srcid_find_id(srcid, &tcp->tcp_ip6h->ip6_src, 6434 tcp->tcp_connp->conn_zoneid); 6435 tcp->tcp_ip_src_v6 = tcp->tcp_ip6h->ip6_src; 6436 } 6437 6438 /* 6439 * Take care of the scope_id now and add ip6i_t 6440 * if ip6i_t is not already allocated through TCP 6441 * sticky options. At this point tcp_ip6h does not 6442 * have dst info, thus use dstaddrp. 6443 */ 6444 if (scope_id != 0 && 6445 IN6_IS_ADDR_LINKSCOPE(dstaddrp)) { 6446 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 6447 ip6i_t *ip6i; 6448 6449 ipp->ipp_ifindex = scope_id; 6450 ip6i = (ip6i_t *)tcp->tcp_iphc; 6451 6452 if ((ipp->ipp_fields & IPPF_HAS_IP6I) && 6453 ip6i != NULL && (ip6i->ip6i_nxt == IPPROTO_RAW)) { 6454 /* Already allocated */ 6455 ip6i->ip6i_flags |= IP6I_IFINDEX; 6456 ip6i->ip6i_ifindex = ipp->ipp_ifindex; 6457 ipp->ipp_fields |= IPPF_SCOPE_ID; 6458 } else { 6459 int reterr; 6460 6461 ipp->ipp_fields |= IPPF_SCOPE_ID; 6462 if (ipp->ipp_fields & IPPF_HAS_IP6I) 6463 ip2dbg(("tcp_connect_v6: SCOPE_ID set\n")); 6464 reterr = tcp_build_hdrs(tcp->tcp_rq, tcp); 6465 if (reterr != 0) 6466 goto failed; 6467 ip1dbg(("tcp_connect_ipv6: tcp_bld_hdrs returned\n")); 6468 } 6469 } 6470 6471 /* 6472 * Don't let an endpoint connect to itself. Note that 6473 * the test here does not catch the case where the 6474 * source IP addr was left unspecified by the user. 
In 6475 * this case, the source addr is set in tcp_adapt_ire() 6476 * using the reply to the T_BIND message that we send 6477 * down to IP here and the check is repeated in tcp_rput_other. 6478 */ 6479 if (IN6_ARE_ADDR_EQUAL(dstaddrp, &tcp->tcp_ip6h->ip6_src) && 6480 (dstport == tcp->tcp_lport)) { 6481 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6482 goto failed; 6483 } 6484 6485 tcp->tcp_ip6h->ip6_dst = *dstaddrp; 6486 tcp->tcp_remote_v6 = *dstaddrp; 6487 tcp->tcp_ip6h->ip6_vcf = 6488 (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) | 6489 (flowinfo & ~IPV6_VERS_AND_FLOW_MASK); 6490 6491 6492 /* 6493 * Massage a routing header (if present) putting the first hop 6494 * in ip6_dst. Compute a starting value for the checksum which 6495 * takes into account that the original ip6_dst should be 6496 * included in the checksum but that ip will include the 6497 * first hop in the source route in the tcp checksum. 6498 */ 6499 rth = ip_find_rthdr_v6(tcp->tcp_ip6h, (uint8_t *)tcp->tcp_tcph); 6500 if (rth != NULL) { 6501 6502 tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, rth); 6503 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 6504 (tcp->tcp_sum >> 16)); 6505 } else { 6506 tcp->tcp_sum = 0; 6507 } 6508 6509 tcph = tcp->tcp_tcph; 6510 *(uint16_t *)tcph->th_fport = dstport; 6511 tcp->tcp_fport = dstport; 6512 6513 oldstate = tcp->tcp_state; 6514 /* 6515 * At this point the remote destination address and remote port fields 6516 * in the tcp-four-tuple have been filled in the tcp structure. Now we 6517 * have to see which state tcp was in so we can take apropriate action. 6518 */ 6519 if (oldstate == TCPS_IDLE) { 6520 /* 6521 * We support a quick connect capability here, allowing 6522 * clients to transition directly from IDLE to SYN_SENT 6523 * tcp_bindi will pick an unused port, insert the connection 6524 * in the bind hash and transition to BOUND state. 6525 */ 6526 lport = tcp_update_next_port(tcp_next_port_to_try, tcp, B_TRUE); 6527 lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE, 6528 B_FALSE, B_FALSE); 6529 if (lport == 0) { 6530 mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0); 6531 goto failed; 6532 } 6533 } 6534 tcp->tcp_state = TCPS_SYN_SENT; 6535 /* 6536 * TODO: allow data with connect requests 6537 * by unlinking M_DATA trailers here and 6538 * linking them in behind the T_OK_ACK mblk. 6539 * The tcp_rput() bind ack handler would then 6540 * feed them to tcp_wput_data() rather than call 6541 * tcp_timer(). 6542 */ 6543 mp = mi_tpi_ok_ack_alloc(mp); 6544 if (!mp) { 6545 tcp->tcp_state = oldstate; 6546 goto failed; 6547 } 6548 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, sizeof (ipa6_conn_t)); 6549 if (mp1) { 6550 /* Hang onto the T_OK_ACK for later. 
*/ 6551 linkb(mp1, mp); 6552 mblk_setcred(mp1, tcp->tcp_cred); 6553 mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp, 6554 &tcp->tcp_sticky_ipp); 6555 BUMP_MIB(&tcp_mib, tcpActiveOpens); 6556 tcp->tcp_active_open = 1; 6557 /* ip_bind_v6() may return ACK or ERROR */ 6558 if (mp1 != NULL) 6559 tcp_rput_other(tcp, mp1); 6560 return; 6561 } 6562 /* Error case */ 6563 tcp->tcp_state = oldstate; 6564 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6565 6566 failed: 6567 /* return error ack and blow away saved option results if any */ 6568 if (mp != NULL) 6569 putnext(tcp->tcp_rq, mp); 6570 else { 6571 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6572 TSYSERR, ENOMEM); 6573 } 6574 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6575 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6576 } 6577 6578 /* 6579 * We need a stream q for detached closing tcp connections 6580 * to use. Our client hereby indicates that this q is the 6581 * one to use. 6582 */ 6583 static void 6584 tcp_def_q_set(tcp_t *tcp, mblk_t *mp) 6585 { 6586 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 6587 queue_t *q = tcp->tcp_wq; 6588 6589 mp->b_datap->db_type = M_IOCACK; 6590 iocp->ioc_count = 0; 6591 mutex_enter(&tcp_g_q_lock); 6592 if (tcp_g_q != NULL) { 6593 mutex_exit(&tcp_g_q_lock); 6594 iocp->ioc_error = EALREADY; 6595 } else { 6596 mblk_t *mp1; 6597 6598 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 0); 6599 if (mp1 == NULL) { 6600 mutex_exit(&tcp_g_q_lock); 6601 iocp->ioc_error = ENOMEM; 6602 } else { 6603 tcp_g_q = tcp->tcp_rq; 6604 mutex_exit(&tcp_g_q_lock); 6605 iocp->ioc_error = 0; 6606 iocp->ioc_rval = 0; 6607 /* 6608 * We are passing tcp_sticky_ipp as NULL 6609 * as it is not useful for tcp_default queue 6610 */ 6611 mp1 = ip_bind_v6(q, mp1, tcp->tcp_connp, NULL); 6612 if (mp1 != NULL) 6613 tcp_rput_other(tcp, mp1); 6614 } 6615 } 6616 qreply(q, mp); 6617 } 6618 6619 /* 6620 * Our client hereby directs us to reject the connection request 6621 * that tcp_conn_request() marked with 'seqnum'. Rejection consists 6622 * of sending the appropriate RST, not an ICMP error. 6623 */ 6624 static void 6625 tcp_disconnect(tcp_t *tcp, mblk_t *mp) 6626 { 6627 tcp_t *ltcp = NULL; 6628 t_scalar_t seqnum; 6629 conn_t *connp; 6630 6631 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 6632 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) { 6633 tcp_err_ack(tcp, mp, TPROTO, 0); 6634 return; 6635 } 6636 6637 /* 6638 * Right now, upper modules pass down a T_DISCON_REQ to TCP, 6639 * when the stream is in BOUND state. Do not send a reset, 6640 * since the destination IP address is not valid, and it can 6641 * be the initialized value of all zeros (broadcast address). 6642 * 6643 * If TCP has sent down a bind request to IP and has not 6644 * received the reply, reject the request. Otherwise, TCP 6645 * will be confused. 6646 */ 6647 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_hard_binding) { 6648 if (tcp->tcp_debug) { 6649 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 6650 "tcp_disconnect: bad state, %d", tcp->tcp_state); 6651 } 6652 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 6653 return; 6654 } 6655 6656 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number; 6657 6658 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) { 6659 6660 /* 6661 * According to TPI, for non-listeners, ignore seqnum 6662 * and disconnect. 6663 * Following interpretation of -1 seqnum is historical 6664 * and implied TPI ? (TPI only states that for T_CONN_IND, 6665 * a valid seqnum should not be -1). 
6666 * 6667 * -1 means disconnect everything 6668 * regardless even on a listener. 6669 */ 6670 6671 int old_state = tcp->tcp_state; 6672 6673 /* 6674 * The connection can't be on the tcp_time_wait_head list 6675 * since it is not detached. 6676 */ 6677 ASSERT(tcp->tcp_time_wait_next == NULL); 6678 ASSERT(tcp->tcp_time_wait_prev == NULL); 6679 ASSERT(tcp->tcp_time_wait_expire == 0); 6680 ltcp = NULL; 6681 /* 6682 * If it used to be a listener, check to make sure no one else 6683 * has taken the port before switching back to LISTEN state. 6684 */ 6685 if (tcp->tcp_ipversion == IPV4_VERSION) { 6686 connp = ipcl_lookup_listener_v4(tcp->tcp_lport, 6687 tcp->tcp_ipha->ipha_src, 6688 tcp->tcp_connp->conn_zoneid); 6689 if (connp != NULL) 6690 ltcp = connp->conn_tcp; 6691 } else { 6692 /* Allow tcp_bound_if listeners? */ 6693 connp = ipcl_lookup_listener_v6(tcp->tcp_lport, 6694 &tcp->tcp_ip6h->ip6_src, 0, 6695 tcp->tcp_connp->conn_zoneid); 6696 if (connp != NULL) 6697 ltcp = connp->conn_tcp; 6698 } 6699 if (tcp->tcp_conn_req_max && ltcp == NULL) { 6700 tcp->tcp_state = TCPS_LISTEN; 6701 } else if (old_state > TCPS_BOUND) { 6702 tcp->tcp_conn_req_max = 0; 6703 tcp->tcp_state = TCPS_BOUND; 6704 } 6705 if (ltcp != NULL) 6706 CONN_DEC_REF(ltcp->tcp_connp); 6707 if (old_state == TCPS_SYN_SENT || old_state == TCPS_SYN_RCVD) { 6708 BUMP_MIB(&tcp_mib, tcpAttemptFails); 6709 } else if (old_state == TCPS_ESTABLISHED || 6710 old_state == TCPS_CLOSE_WAIT) { 6711 BUMP_MIB(&tcp_mib, tcpEstabResets); 6712 } 6713 6714 if (tcp->tcp_fused) 6715 tcp_unfuse(tcp); 6716 6717 mutex_enter(&tcp->tcp_eager_lock); 6718 if ((tcp->tcp_conn_req_cnt_q0 != 0) || 6719 (tcp->tcp_conn_req_cnt_q != 0)) { 6720 tcp_eager_cleanup(tcp, 0); 6721 } 6722 mutex_exit(&tcp->tcp_eager_lock); 6723 6724 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt, 6725 tcp->tcp_rnxt, TH_RST | TH_ACK); 6726 6727 tcp_reinit(tcp); 6728 6729 if (old_state >= TCPS_ESTABLISHED) { 6730 /* Send M_FLUSH according to TPI */ 6731 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6732 } 6733 mp = mi_tpi_ok_ack_alloc(mp); 6734 if (mp) 6735 putnext(tcp->tcp_rq, mp); 6736 return; 6737 } else if (!tcp_eager_blowoff(tcp, seqnum)) { 6738 tcp_err_ack(tcp, mp, TBADSEQ, 0); 6739 return; 6740 } 6741 if (tcp->tcp_state >= TCPS_ESTABLISHED) { 6742 /* Send M_FLUSH according to TPI */ 6743 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6744 } 6745 mp = mi_tpi_ok_ack_alloc(mp); 6746 if (mp) 6747 putnext(tcp->tcp_rq, mp); 6748 } 6749 6750 /* 6751 * Diagnostic routine used to return a string associated with the tcp state. 6752 * Note that if the caller does not supply a buffer, it will use an internal 6753 * static string. This means that if multiple threads call this function at 6754 * the same time, output can be corrupted... Note also that this function 6755 * does not check the size of the supplied buffer. The caller has to make 6756 * sure that it is big enough. 
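 *
 * A minimal usage sketch (illustrative; it mirrors the strlog() call
 * sites elsewhere in this file):
 *
 *	char dbuf[INET6_ADDRSTRLEN * 2 + 80];
 *	(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, "connection %s",
 *	    tcp_display(tcp, dbuf, DISP_ADDR_AND_PORT));
 *
 * Passing a NULL buffer is only safe for occasional debug output since
 * the shared static buffer may be clobbered by a concurrent caller.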
6757 */ 6758 static char * 6759 tcp_display(tcp_t *tcp, char *sup_buf, char format) 6760 { 6761 char buf1[30]; 6762 static char priv_buf[INET6_ADDRSTRLEN * 2 + 80]; 6763 char *buf; 6764 char *cp; 6765 in6_addr_t local, remote; 6766 char local_addrbuf[INET6_ADDRSTRLEN]; 6767 char remote_addrbuf[INET6_ADDRSTRLEN]; 6768 6769 if (sup_buf != NULL) 6770 buf = sup_buf; 6771 else 6772 buf = priv_buf; 6773 6774 if (tcp == NULL) 6775 return ("NULL_TCP"); 6776 switch (tcp->tcp_state) { 6777 case TCPS_CLOSED: 6778 cp = "TCP_CLOSED"; 6779 break; 6780 case TCPS_IDLE: 6781 cp = "TCP_IDLE"; 6782 break; 6783 case TCPS_BOUND: 6784 cp = "TCP_BOUND"; 6785 break; 6786 case TCPS_LISTEN: 6787 cp = "TCP_LISTEN"; 6788 break; 6789 case TCPS_SYN_SENT: 6790 cp = "TCP_SYN_SENT"; 6791 break; 6792 case TCPS_SYN_RCVD: 6793 cp = "TCP_SYN_RCVD"; 6794 break; 6795 case TCPS_ESTABLISHED: 6796 cp = "TCP_ESTABLISHED"; 6797 break; 6798 case TCPS_CLOSE_WAIT: 6799 cp = "TCP_CLOSE_WAIT"; 6800 break; 6801 case TCPS_FIN_WAIT_1: 6802 cp = "TCP_FIN_WAIT_1"; 6803 break; 6804 case TCPS_CLOSING: 6805 cp = "TCP_CLOSING"; 6806 break; 6807 case TCPS_LAST_ACK: 6808 cp = "TCP_LAST_ACK"; 6809 break; 6810 case TCPS_FIN_WAIT_2: 6811 cp = "TCP_FIN_WAIT_2"; 6812 break; 6813 case TCPS_TIME_WAIT: 6814 cp = "TCP_TIME_WAIT"; 6815 break; 6816 default: 6817 (void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state); 6818 cp = buf1; 6819 break; 6820 } 6821 switch (format) { 6822 case DISP_ADDR_AND_PORT: 6823 if (tcp->tcp_ipversion == IPV4_VERSION) { 6824 /* 6825 * Note that we use the remote address in the tcp_b 6826 * structure. This means that it will print out 6827 * the real destination address, not the next hop's 6828 * address if source routing is used. 6829 */ 6830 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ip_src, &local); 6831 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &remote); 6832 6833 } else { 6834 local = tcp->tcp_ip_src_v6; 6835 remote = tcp->tcp_remote_v6; 6836 } 6837 (void) inet_ntop(AF_INET6, &local, local_addrbuf, 6838 sizeof (local_addrbuf)); 6839 (void) inet_ntop(AF_INET6, &remote, remote_addrbuf, 6840 sizeof (remote_addrbuf)); 6841 (void) mi_sprintf(buf, "[%s.%u, %s.%u] %s", 6842 local_addrbuf, ntohs(tcp->tcp_lport), remote_addrbuf, 6843 ntohs(tcp->tcp_fport), cp); 6844 break; 6845 case DISP_PORT_ONLY: 6846 default: 6847 (void) mi_sprintf(buf, "[%u, %u] %s", 6848 ntohs(tcp->tcp_lport), ntohs(tcp->tcp_fport), cp); 6849 break; 6850 } 6851 6852 return (buf); 6853 } 6854 6855 /* 6856 * Called via squeue to get on to eager's perimeter to send a 6857 * TH_RST. The listener wants the eager to disappear either 6858 * by means of tcp_eager_blowoff() or tcp_eager_cleanup() 6859 * being called. 6860 */ 6861 /* ARGSUSED */ 6862 void 6863 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2) 6864 { 6865 conn_t *econnp = (conn_t *)arg; 6866 tcp_t *eager = econnp->conn_tcp; 6867 tcp_t *listener = eager->tcp_listener; 6868 6869 /* 6870 * We could be called because listener is closing. Since 6871 * the eager is using listener's queue's, its not safe. 6872 * Better use the default queue just to send the TH_RST 6873 * out. 
6874 */ 6875 eager->tcp_rq = tcp_g_q; 6876 eager->tcp_wq = WR(tcp_g_q); 6877 6878 if (eager->tcp_state > TCPS_LISTEN) { 6879 tcp_xmit_ctl("tcp_eager_kill, can't wait", 6880 eager, eager->tcp_snxt, 0, TH_RST); 6881 } 6882 6883 /* We are here because listener wants this eager gone */ 6884 if (listener != NULL) { 6885 mutex_enter(&listener->tcp_eager_lock); 6886 tcp_eager_unlink(eager); 6887 if (eager->tcp_conn.tcp_eager_conn_ind == NULL) { 6888 /* 6889 * The eager has sent a conn_ind up to the 6890 * listener but listener decides to close 6891 * instead. We need to drop the extra ref 6892 * placed on eager in tcp_rput_data() before 6893 * sending the conn_ind to listener. 6894 */ 6895 CONN_DEC_REF(econnp); 6896 } 6897 mutex_exit(&listener->tcp_eager_lock); 6898 CONN_DEC_REF(listener->tcp_connp); 6899 } 6900 6901 if (eager->tcp_state > TCPS_BOUND) 6902 tcp_close_detached(eager); 6903 } 6904 6905 /* 6906 * Reset any eager connection hanging off this listener marked 6907 * with 'seqnum' and then reclaim it's resources. 6908 */ 6909 static boolean_t 6910 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum) 6911 { 6912 tcp_t *eager; 6913 mblk_t *mp; 6914 6915 TCP_STAT(tcp_eager_blowoff_calls); 6916 eager = listener; 6917 mutex_enter(&listener->tcp_eager_lock); 6918 do { 6919 eager = eager->tcp_eager_next_q; 6920 if (eager == NULL) { 6921 mutex_exit(&listener->tcp_eager_lock); 6922 return (B_FALSE); 6923 } 6924 } while (eager->tcp_conn_req_seqnum != seqnum); 6925 CONN_INC_REF(eager->tcp_connp); 6926 mutex_exit(&listener->tcp_eager_lock); 6927 mp = &eager->tcp_closemp; 6928 squeue_fill(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill, 6929 eager->tcp_connp, SQTAG_TCP_EAGER_BLOWOFF); 6930 return (B_TRUE); 6931 } 6932 6933 /* 6934 * Reset any eager connection hanging off this listener 6935 * and then reclaim it's resources. 6936 */ 6937 static void 6938 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 6939 { 6940 tcp_t *eager; 6941 mblk_t *mp; 6942 6943 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 6944 6945 if (!q0_only) { 6946 /* First cleanup q */ 6947 TCP_STAT(tcp_eager_blowoff_q); 6948 eager = listener->tcp_eager_next_q; 6949 while (eager != NULL) { 6950 CONN_INC_REF(eager->tcp_connp); 6951 mp = &eager->tcp_closemp; 6952 squeue_fill(eager->tcp_connp->conn_sqp, mp, 6953 tcp_eager_kill, eager->tcp_connp, 6954 SQTAG_TCP_EAGER_CLEANUP); 6955 eager = eager->tcp_eager_next_q; 6956 } 6957 } 6958 /* Then cleanup q0 */ 6959 TCP_STAT(tcp_eager_blowoff_q0); 6960 eager = listener->tcp_eager_next_q0; 6961 while (eager != listener) { 6962 CONN_INC_REF(eager->tcp_connp); 6963 mp = &eager->tcp_closemp; 6964 squeue_fill(eager->tcp_connp->conn_sqp, mp, 6965 tcp_eager_kill, eager->tcp_connp, 6966 SQTAG_TCP_EAGER_CLEANUP_Q0); 6967 eager = eager->tcp_eager_next_q0; 6968 } 6969 } 6970 6971 /* 6972 * If we are an eager connection hanging off a listener that hasn't 6973 * formally accepted the connection yet, get off his list and blow off 6974 * any data that we have accumulated. 
6975 */ 6976 static void 6977 tcp_eager_unlink(tcp_t *tcp) 6978 { 6979 tcp_t *listener = tcp->tcp_listener; 6980 6981 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 6982 ASSERT(listener != NULL); 6983 if (tcp->tcp_eager_next_q0 != NULL) { 6984 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 6985 6986 /* Remove the eager tcp from q0 */ 6987 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 6988 tcp->tcp_eager_prev_q0; 6989 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 6990 tcp->tcp_eager_next_q0; 6991 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 6992 listener->tcp_conn_req_cnt_q0--; 6993 6994 tcp->tcp_eager_next_q0 = NULL; 6995 tcp->tcp_eager_prev_q0 = NULL; 6996 6997 if (tcp->tcp_syn_rcvd_timeout != 0) { 6998 /* we have timed out before */ 6999 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 7000 listener->tcp_syn_rcvd_timeout--; 7001 } 7002 } else { 7003 tcp_t **tcpp = &listener->tcp_eager_next_q; 7004 tcp_t *prev = NULL; 7005 7006 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 7007 if (tcpp[0] == tcp) { 7008 if (listener->tcp_eager_last_q == tcp) { 7009 /* 7010 * If we are unlinking the last 7011 * element on the list, adjust 7012 * tail pointer. Set tail pointer 7013 * to nil when list is empty. 7014 */ 7015 ASSERT(tcp->tcp_eager_next_q == NULL); 7016 if (listener->tcp_eager_last_q == 7017 listener->tcp_eager_next_q) { 7018 listener->tcp_eager_last_q = 7019 NULL; 7020 } else { 7021 /* 7022 * We won't get here if there 7023 * is only one eager in the 7024 * list. 7025 */ 7026 ASSERT(prev != NULL); 7027 listener->tcp_eager_last_q = 7028 prev; 7029 } 7030 } 7031 tcpp[0] = tcp->tcp_eager_next_q; 7032 tcp->tcp_eager_next_q = NULL; 7033 tcp->tcp_eager_last_q = NULL; 7034 ASSERT(listener->tcp_conn_req_cnt_q > 0); 7035 listener->tcp_conn_req_cnt_q--; 7036 break; 7037 } 7038 prev = tcpp[0]; 7039 } 7040 } 7041 tcp->tcp_listener = NULL; 7042 } 7043 7044 /* Shorthand to generate and send TPI error acks to our client */ 7045 static void 7046 tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error) 7047 { 7048 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 7049 putnext(tcp->tcp_rq, mp); 7050 } 7051 7052 /* Shorthand to generate and send TPI error acks to our client */ 7053 static void 7054 tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive, 7055 int t_error, int sys_error) 7056 { 7057 struct T_error_ack *teackp; 7058 7059 if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 7060 M_PCPROTO, T_ERROR_ACK)) != NULL) { 7061 teackp = (struct T_error_ack *)mp->b_rptr; 7062 teackp->ERROR_prim = primitive; 7063 teackp->TLI_error = t_error; 7064 teackp->UNIX_error = sys_error; 7065 putnext(tcp->tcp_rq, mp); 7066 } 7067 } 7068 7069 /* 7070 * Note: No locks are held when inspecting tcp_g_*epriv_ports 7071 * but instead the code relies on: 7072 * - the fact that the address of the array and its size never changes 7073 * - the atomic assignment of the elements of the array 7074 */ 7075 /* ARGSUSED */ 7076 static int 7077 tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 7078 { 7079 int i; 7080 7081 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 7082 if (tcp_g_epriv_ports[i] != 0) 7083 (void) mi_mpprintf(mp, "%d ", tcp_g_epriv_ports[i]); 7084 } 7085 return (0); 7086 } 7087 7088 /* 7089 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7090 * threads from changing it at the same time. 
7091 */ 7092 /* ARGSUSED */ 7093 static int 7094 tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7095 cred_t *cr) 7096 { 7097 long new_value; 7098 int i; 7099 7100 /* 7101 * Fail the request if the new value does not lie within the 7102 * port number limits. 7103 */ 7104 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 7105 new_value <= 0 || new_value >= 65536) { 7106 return (EINVAL); 7107 } 7108 7109 mutex_enter(&tcp_epriv_port_lock); 7110 /* Check if the value is already in the list */ 7111 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 7112 if (new_value == tcp_g_epriv_ports[i]) { 7113 mutex_exit(&tcp_epriv_port_lock); 7114 return (EEXIST); 7115 } 7116 } 7117 /* Find an empty slot */ 7118 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 7119 if (tcp_g_epriv_ports[i] == 0) 7120 break; 7121 } 7122 if (i == tcp_g_num_epriv_ports) { 7123 mutex_exit(&tcp_epriv_port_lock); 7124 return (EOVERFLOW); 7125 } 7126 /* Set the new value */ 7127 tcp_g_epriv_ports[i] = (uint16_t)new_value; 7128 mutex_exit(&tcp_epriv_port_lock); 7129 return (0); 7130 } 7131 7132 /* 7133 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7134 * threads from changing it at the same time. 7135 */ 7136 /* ARGSUSED */ 7137 static int 7138 tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7139 cred_t *cr) 7140 { 7141 long new_value; 7142 int i; 7143 7144 /* 7145 * Fail the request if the new value does not lie within the 7146 * port number limits. 7147 */ 7148 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || new_value <= 0 || 7149 new_value >= 65536) { 7150 return (EINVAL); 7151 } 7152 7153 mutex_enter(&tcp_epriv_port_lock); 7154 /* Check that the value is already in the list */ 7155 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 7156 if (tcp_g_epriv_ports[i] == new_value) 7157 break; 7158 } 7159 if (i == tcp_g_num_epriv_ports) { 7160 mutex_exit(&tcp_epriv_port_lock); 7161 return (ESRCH); 7162 } 7163 /* Clear the value */ 7164 tcp_g_epriv_ports[i] = 0; 7165 mutex_exit(&tcp_epriv_port_lock); 7166 return (0); 7167 } 7168 7169 /* Return the TPI/TLI equivalent of our current tcp_state */ 7170 static int 7171 tcp_tpistate(tcp_t *tcp) 7172 { 7173 switch (tcp->tcp_state) { 7174 case TCPS_IDLE: 7175 return (TS_UNBND); 7176 case TCPS_LISTEN: 7177 /* 7178 * Return whether there are outstanding T_CONN_IND waiting 7179 * for the matching T_CONN_RES. Therefore don't count q0. 7180 */ 7181 if (tcp->tcp_conn_req_cnt_q > 0) 7182 return (TS_WRES_CIND); 7183 else 7184 return (TS_IDLE); 7185 case TCPS_BOUND: 7186 return (TS_IDLE); 7187 case TCPS_SYN_SENT: 7188 return (TS_WCON_CREQ); 7189 case TCPS_SYN_RCVD: 7190 /* 7191 * Note: assumption: this has to the active open SYN_RCVD. 7192 * The passive instance is detached in SYN_RCVD stage of 7193 * incoming connection processing so we cannot get request 7194 * for T_info_ack on it. 7195 */ 7196 return (TS_WACK_CRES); 7197 case TCPS_ESTABLISHED: 7198 return (TS_DATA_XFER); 7199 case TCPS_CLOSE_WAIT: 7200 return (TS_WREQ_ORDREL); 7201 case TCPS_FIN_WAIT_1: 7202 return (TS_WIND_ORDREL); 7203 case TCPS_FIN_WAIT_2: 7204 return (TS_WIND_ORDREL); 7205 7206 case TCPS_CLOSING: 7207 case TCPS_LAST_ACK: 7208 case TCPS_TIME_WAIT: 7209 case TCPS_CLOSED: 7210 /* 7211 * Following TS_WACK_DREQ7 is a rendition of "not 7212 * yet TS_IDLE" TPI state. 
There is no best match to any 7213 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT} but we 7214 * choose a value that will map to the TLI/XTI level 7215 * state of TSTATECHNG (state is in the process of changing) which 7216 * captures what this dummy state represents. 7217 */ 7218 return (TS_WACK_DREQ7); 7219 default: 7220 cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s", 7221 tcp->tcp_state, tcp_display(tcp, NULL, 7222 DISP_PORT_ONLY)); 7223 return (TS_UNBND); 7224 } 7225 } 7226 7227 static void 7228 tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp) 7229 { 7230 if (tcp->tcp_family == AF_INET6) 7231 *tia = tcp_g_t_info_ack_v6; 7232 else 7233 *tia = tcp_g_t_info_ack; 7234 tia->CURRENT_state = tcp_tpistate(tcp); 7235 tia->OPT_size = tcp_max_optsize; 7236 if (tcp->tcp_mss == 0) { 7237 /* Not yet set - tcp_open does not set mss */ 7238 if (tcp->tcp_ipversion == IPV4_VERSION) 7239 tia->TIDU_size = tcp_mss_def_ipv4; 7240 else 7241 tia->TIDU_size = tcp_mss_def_ipv6; 7242 } else { 7243 tia->TIDU_size = tcp->tcp_mss; 7244 } 7245 /* TODO: Default ETSDU is 1. Is that correct for tcp? */ 7246 } 7247 7248 /* 7249 * This routine responds to T_CAPABILITY_REQ messages. It is called by 7250 * tcp_wput. Much of the T_CAPABILITY_ACK information is copied from 7251 * tcp_g_t_info_ack. The current state of the stream is copied from 7252 * tcp_state. 7253 */ 7254 static void 7255 tcp_capability_req(tcp_t *tcp, mblk_t *mp) 7256 { 7257 t_uscalar_t cap_bits1; 7258 struct T_capability_ack *tcap; 7259 7260 if (MBLKL(mp) < sizeof (struct T_capability_req)) { 7261 freemsg(mp); 7262 return; 7263 } 7264 7265 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 7266 7267 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 7268 mp->b_datap->db_type, T_CAPABILITY_ACK); 7269 if (mp == NULL) 7270 return; 7271 7272 tcap = (struct T_capability_ack *)mp->b_rptr; 7273 tcap->CAP_bits1 = 0; 7274 7275 if (cap_bits1 & TC1_INFO) { 7276 tcp_copy_info(&tcap->INFO_ack, tcp); 7277 tcap->CAP_bits1 |= TC1_INFO; 7278 } 7279 7280 if (cap_bits1 & TC1_ACCEPTOR_ID) { 7281 tcap->ACCEPTOR_id = tcp->tcp_acceptor_id; 7282 tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 7283 } 7284 7285 putnext(tcp->tcp_rq, mp); 7286 } 7287 7288 /* 7289 * This routine responds to T_INFO_REQ messages. It is called by tcp_wput. 7290 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack. 7291 * The current state of the stream is copied from tcp_state.
7292 */ 7293 static void 7294 tcp_info_req(tcp_t *tcp, mblk_t *mp) 7295 { 7296 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 7297 T_INFO_ACK); 7298 if (!mp) { 7299 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7300 return; 7301 } 7302 tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp); 7303 putnext(tcp->tcp_rq, mp); 7304 } 7305 7306 /* Respond to the TPI addr request */ 7307 static void 7308 tcp_addr_req(tcp_t *tcp, mblk_t *mp) 7309 { 7310 sin_t *sin; 7311 mblk_t *ackmp; 7312 struct T_addr_ack *taa; 7313 7314 /* Make it large enough for worst case */ 7315 ackmp = reallocb(mp, sizeof (struct T_addr_ack) + 7316 2 * sizeof (sin6_t), 1); 7317 if (ackmp == NULL) { 7318 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7319 return; 7320 } 7321 7322 if (tcp->tcp_ipversion == IPV6_VERSION) { 7323 tcp_addr_req_ipv6(tcp, ackmp); 7324 return; 7325 } 7326 taa = (struct T_addr_ack *)ackmp->b_rptr; 7327 7328 bzero(taa, sizeof (struct T_addr_ack)); 7329 ackmp->b_wptr = (uchar_t *)&taa[1]; 7330 7331 taa->PRIM_type = T_ADDR_ACK; 7332 ackmp->b_datap->db_type = M_PCPROTO; 7333 7334 /* 7335 * Note: Following code assumes 32 bit alignment of basic 7336 * data structures like sin_t and struct T_addr_ack. 7337 */ 7338 if (tcp->tcp_state >= TCPS_BOUND) { 7339 /* 7340 * Fill in local address 7341 */ 7342 taa->LOCADDR_length = sizeof (sin_t); 7343 taa->LOCADDR_offset = sizeof (*taa); 7344 7345 sin = (sin_t *)&taa[1]; 7346 7347 /* Fill zeroes and then intialize non-zero fields */ 7348 *sin = sin_null; 7349 7350 sin->sin_family = AF_INET; 7351 7352 sin->sin_addr.s_addr = tcp->tcp_ipha->ipha_src; 7353 sin->sin_port = *(uint16_t *)tcp->tcp_tcph->th_lport; 7354 7355 ackmp->b_wptr = (uchar_t *)&sin[1]; 7356 7357 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7358 /* 7359 * Fill in Remote address 7360 */ 7361 taa->REMADDR_length = sizeof (sin_t); 7362 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7363 taa->LOCADDR_length); 7364 7365 sin = (sin_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7366 *sin = sin_null; 7367 sin->sin_family = AF_INET; 7368 sin->sin_addr.s_addr = tcp->tcp_remote; 7369 sin->sin_port = tcp->tcp_fport; 7370 7371 ackmp->b_wptr = (uchar_t *)&sin[1]; 7372 } 7373 } 7374 putnext(tcp->tcp_rq, ackmp); 7375 } 7376 7377 /* Assumes that tcp_addr_req gets enough space and alignment */ 7378 static void 7379 tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *ackmp) 7380 { 7381 sin6_t *sin6; 7382 struct T_addr_ack *taa; 7383 7384 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 7385 ASSERT(OK_32PTR(ackmp->b_rptr)); 7386 ASSERT(ackmp->b_wptr - ackmp->b_rptr >= sizeof (struct T_addr_ack) + 7387 2 * sizeof (sin6_t)); 7388 7389 taa = (struct T_addr_ack *)ackmp->b_rptr; 7390 7391 bzero(taa, sizeof (struct T_addr_ack)); 7392 ackmp->b_wptr = (uchar_t *)&taa[1]; 7393 7394 taa->PRIM_type = T_ADDR_ACK; 7395 ackmp->b_datap->db_type = M_PCPROTO; 7396 7397 /* 7398 * Note: Following code assumes 32 bit alignment of basic 7399 * data structures like sin6_t and struct T_addr_ack. 
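 *
 * Illustrative layout of the ack mblk built below (offsets are the
 * ROUNDUP32-aligned values computed in the code):
 *
 *	+-------------------+----------------+-----------------+
 *	| struct T_addr_ack | sin6_t (local) | sin6_t (remote) |
 *	+-------------------+----------------+-----------------+
 *	b_rptr              LOCADDR_offset   REMADDR_offset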
7400 */ 7401 if (tcp->tcp_state >= TCPS_BOUND) { 7402 /* 7403 * Fill in local address 7404 */ 7405 taa->LOCADDR_length = sizeof (sin6_t); 7406 taa->LOCADDR_offset = sizeof (*taa); 7407 7408 sin6 = (sin6_t *)&taa[1]; 7409 *sin6 = sin6_null; 7410 7411 sin6->sin6_family = AF_INET6; 7412 sin6->sin6_addr = tcp->tcp_ip6h->ip6_src; 7413 sin6->sin6_port = tcp->tcp_lport; 7414 7415 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7416 7417 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7418 /* 7419 * Fill in Remote address 7420 */ 7421 taa->REMADDR_length = sizeof (sin6_t); 7422 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7423 taa->LOCADDR_length); 7424 7425 sin6 = (sin6_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7426 *sin6 = sin6_null; 7427 sin6->sin6_family = AF_INET6; 7428 sin6->sin6_flowinfo = 7429 tcp->tcp_ip6h->ip6_vcf & 7430 ~IPV6_VERS_AND_FLOW_MASK; 7431 sin6->sin6_addr = tcp->tcp_remote_v6; 7432 sin6->sin6_port = tcp->tcp_fport; 7433 7434 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7435 } 7436 } 7437 putnext(tcp->tcp_rq, ackmp); 7438 } 7439 7440 /* 7441 * Handle reinitialization of a tcp structure. 7442 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE. 7443 */ 7444 static void 7445 tcp_reinit(tcp_t *tcp) 7446 { 7447 mblk_t *mp; 7448 int err; 7449 7450 TCP_STAT(tcp_reinit_calls); 7451 7452 /* tcp_reinit should never be called for detached tcp_t's */ 7453 ASSERT(tcp->tcp_listener == NULL); 7454 ASSERT((tcp->tcp_family == AF_INET && 7455 tcp->tcp_ipversion == IPV4_VERSION) || 7456 (tcp->tcp_family == AF_INET6 && 7457 (tcp->tcp_ipversion == IPV4_VERSION || 7458 tcp->tcp_ipversion == IPV6_VERSION))); 7459 7460 /* Cancel outstanding timers */ 7461 tcp_timers_stop(tcp); 7462 7463 /* 7464 * Reset everything in the state vector, after updating global 7465 * MIB data from instance counters. 7466 */ 7467 UPDATE_MIB(&tcp_mib, tcpInSegs, tcp->tcp_ibsegs); 7468 tcp->tcp_ibsegs = 0; 7469 UPDATE_MIB(&tcp_mib, tcpOutSegs, tcp->tcp_obsegs); 7470 tcp->tcp_obsegs = 0; 7471 7472 tcp_close_mpp(&tcp->tcp_xmit_head); 7473 if (tcp->tcp_snd_zcopy_aware) 7474 tcp_zcopy_notify(tcp); 7475 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL; 7476 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0; 7477 if (tcp->tcp_flow_stopped && 7478 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 7479 tcp_clrqfull(tcp); 7480 } 7481 tcp_close_mpp(&tcp->tcp_reass_head); 7482 tcp->tcp_reass_tail = NULL; 7483 if (tcp->tcp_rcv_list != NULL) { 7484 /* Free b_next chain */ 7485 tcp_close_mpp(&tcp->tcp_rcv_list); 7486 tcp->tcp_rcv_last_head = NULL; 7487 tcp->tcp_rcv_last_tail = NULL; 7488 tcp->tcp_rcv_cnt = 0; 7489 } 7490 tcp->tcp_rcv_last_tail = NULL; 7491 7492 if ((mp = tcp->tcp_urp_mp) != NULL) { 7493 freemsg(mp); 7494 tcp->tcp_urp_mp = NULL; 7495 } 7496 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 7497 freemsg(mp); 7498 tcp->tcp_urp_mark_mp = NULL; 7499 } 7500 if (tcp->tcp_fused_sigurg_mp != NULL) { 7501 freeb(tcp->tcp_fused_sigurg_mp); 7502 tcp->tcp_fused_sigurg_mp = NULL; 7503 } 7504 7505 /* 7506 * Following is a union with two members which are 7507 * identical types and size so the following cleanup 7508 * is enough. 7509 */ 7510 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 7511 7512 CL_INET_DISCONNECT(tcp); 7513 7514 /* 7515 * The connection can't be on the tcp_time_wait_head list 7516 * since it is not detached. 
7517 */ 7518 ASSERT(tcp->tcp_time_wait_next == NULL); 7519 ASSERT(tcp->tcp_time_wait_prev == NULL); 7520 ASSERT(tcp->tcp_time_wait_expire == 0); 7521 7522 if (tcp->tcp_kssl_pending) { 7523 tcp->tcp_kssl_pending = B_FALSE; 7524 7525 /* Don't reset if the initialized by bind. */ 7526 if (tcp->tcp_kssl_ent != NULL) { 7527 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 7528 KSSL_NO_PROXY); 7529 } 7530 } 7531 if (tcp->tcp_kssl_ctx != NULL) { 7532 kssl_release_ctx(tcp->tcp_kssl_ctx); 7533 tcp->tcp_kssl_ctx = NULL; 7534 } 7535 7536 /* 7537 * Reset/preserve other values 7538 */ 7539 tcp_reinit_values(tcp); 7540 ipcl_hash_remove(tcp->tcp_connp); 7541 conn_delete_ire(tcp->tcp_connp, NULL); 7542 7543 if (tcp->tcp_conn_req_max != 0) { 7544 /* 7545 * This is the case when a TLI program uses the same 7546 * transport end point to accept a connection. This 7547 * makes the TCP both a listener and acceptor. When 7548 * this connection is closed, we need to set the state 7549 * back to TCPS_LISTEN. Make sure that the eager list 7550 * is reinitialized. 7551 * 7552 * Note that this stream is still bound to the four 7553 * tuples of the previous connection in IP. If a new 7554 * SYN with different foreign address comes in, IP will 7555 * not find it and will send it to the global queue. In 7556 * the global queue, TCP will do a tcp_lookup_listener() 7557 * to find this stream. This works because this stream 7558 * is only removed from connected hash. 7559 * 7560 */ 7561 tcp->tcp_state = TCPS_LISTEN; 7562 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 7563 tcp->tcp_connp->conn_recv = tcp_conn_request; 7564 if (tcp->tcp_family == AF_INET6) { 7565 ASSERT(tcp->tcp_connp->conn_af_isv6); 7566 (void) ipcl_bind_insert_v6(tcp->tcp_connp, IPPROTO_TCP, 7567 &tcp->tcp_ip6h->ip6_src, tcp->tcp_lport); 7568 } else { 7569 ASSERT(!tcp->tcp_connp->conn_af_isv6); 7570 (void) ipcl_bind_insert(tcp->tcp_connp, IPPROTO_TCP, 7571 tcp->tcp_ipha->ipha_src, tcp->tcp_lport); 7572 } 7573 } else { 7574 tcp->tcp_state = TCPS_BOUND; 7575 } 7576 7577 /* 7578 * Initialize to default values 7579 * Can't fail since enough header template space already allocated 7580 * at open(). 7581 */ 7582 err = tcp_init_values(tcp); 7583 ASSERT(err == 0); 7584 /* Restore state in tcp_tcph */ 7585 bcopy(&tcp->tcp_lport, tcp->tcp_tcph->th_lport, TCP_PORT_LEN); 7586 if (tcp->tcp_ipversion == IPV4_VERSION) 7587 tcp->tcp_ipha->ipha_src = tcp->tcp_bound_source; 7588 else 7589 tcp->tcp_ip6h->ip6_src = tcp->tcp_bound_source_v6; 7590 /* 7591 * Copy of the src addr. in tcp_t is needed in tcp_t 7592 * since the lookup funcs can only lookup on tcp_t 7593 */ 7594 tcp->tcp_ip_src_v6 = tcp->tcp_bound_source_v6; 7595 7596 ASSERT(tcp->tcp_ptpbhn != NULL); 7597 tcp->tcp_rq->q_hiwat = tcp_recv_hiwat; 7598 tcp->tcp_rwnd = tcp_recv_hiwat; 7599 tcp->tcp_mss = tcp->tcp_ipversion != IPV4_VERSION ? 7600 tcp_mss_def_ipv6 : tcp_mss_def_ipv4; 7601 } 7602 7603 /* 7604 * Force values to zero that need be zero. 7605 * Do not touch values asociated with the BOUND or LISTEN state 7606 * since the connection will end up in that state after the reinit. 7607 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t 7608 * structure! 
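 *
 * Every field is covered by one of the idioms used below, e.g. (picked
 * from the body of this function for illustration):
 *
 *	PRESERVE(tcp->tcp_bind_hash);		kept across the reinit
 *	tcp->tcp_snxt = 0;			explicitly reset
 *	DONTCARE(tcp->tcp_cwnd);		re-derived by tcp_mss_set()
 *	ASSERT(tcp->tcp_xmit_head == NULL);	must already be clean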
7609 */ 7610 static void 7611 tcp_reinit_values(tcp) 7612 tcp_t *tcp; 7613 { 7614 #ifndef lint 7615 #define DONTCARE(x) 7616 #define PRESERVE(x) 7617 #else 7618 #define DONTCARE(x) ((x) = (x)) 7619 #define PRESERVE(x) ((x) = (x)) 7620 #endif /* lint */ 7621 7622 PRESERVE(tcp->tcp_bind_hash); 7623 PRESERVE(tcp->tcp_ptpbhn); 7624 PRESERVE(tcp->tcp_acceptor_hash); 7625 PRESERVE(tcp->tcp_ptpahn); 7626 7627 /* Should be ASSERT NULL on these with new code! */ 7628 ASSERT(tcp->tcp_time_wait_next == NULL); 7629 ASSERT(tcp->tcp_time_wait_prev == NULL); 7630 ASSERT(tcp->tcp_time_wait_expire == 0); 7631 PRESERVE(tcp->tcp_state); 7632 PRESERVE(tcp->tcp_rq); 7633 PRESERVE(tcp->tcp_wq); 7634 7635 ASSERT(tcp->tcp_xmit_head == NULL); 7636 ASSERT(tcp->tcp_xmit_last == NULL); 7637 ASSERT(tcp->tcp_unsent == 0); 7638 ASSERT(tcp->tcp_xmit_tail == NULL); 7639 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 7640 7641 tcp->tcp_snxt = 0; /* Displayed in mib */ 7642 tcp->tcp_suna = 0; /* Displayed in mib */ 7643 tcp->tcp_swnd = 0; 7644 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_mss_set */ 7645 7646 ASSERT(tcp->tcp_ibsegs == 0); 7647 ASSERT(tcp->tcp_obsegs == 0); 7648 7649 if (tcp->tcp_iphc != NULL) { 7650 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 7651 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 7652 } 7653 7654 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */ 7655 DONTCARE(tcp->tcp_hdr_len); /* Init in tcp_init_values */ 7656 DONTCARE(tcp->tcp_ipha); 7657 DONTCARE(tcp->tcp_ip6h); 7658 DONTCARE(tcp->tcp_ip_hdr_len); 7659 DONTCARE(tcp->tcp_tcph); 7660 DONTCARE(tcp->tcp_tcp_hdr_len); /* Init in tcp_init_values */ 7661 tcp->tcp_valid_bits = 0; 7662 7663 DONTCARE(tcp->tcp_xmit_hiwater); /* Init in tcp_init_values */ 7664 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */ 7665 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */ 7666 tcp->tcp_last_rcv_lbolt = 0; 7667 7668 tcp->tcp_init_cwnd = 0; 7669 7670 tcp->tcp_urp_last_valid = 0; 7671 tcp->tcp_hard_binding = 0; 7672 tcp->tcp_hard_bound = 0; 7673 PRESERVE(tcp->tcp_cred); 7674 PRESERVE(tcp->tcp_cpid); 7675 PRESERVE(tcp->tcp_exclbind); 7676 7677 tcp->tcp_fin_acked = 0; 7678 tcp->tcp_fin_rcvd = 0; 7679 tcp->tcp_fin_sent = 0; 7680 tcp->tcp_ordrel_done = 0; 7681 7682 tcp->tcp_debug = 0; 7683 tcp->tcp_dontroute = 0; 7684 tcp->tcp_broadcast = 0; 7685 7686 tcp->tcp_useloopback = 0; 7687 tcp->tcp_reuseaddr = 0; 7688 tcp->tcp_oobinline = 0; 7689 tcp->tcp_dgram_errind = 0; 7690 7691 tcp->tcp_detached = 0; 7692 tcp->tcp_bind_pending = 0; 7693 tcp->tcp_unbind_pending = 0; 7694 tcp->tcp_deferred_clean_death = 0; 7695 7696 tcp->tcp_snd_ws_ok = B_FALSE; 7697 tcp->tcp_snd_ts_ok = B_FALSE; 7698 tcp->tcp_linger = 0; 7699 tcp->tcp_ka_enabled = 0; 7700 tcp->tcp_zero_win_probe = 0; 7701 7702 tcp->tcp_loopback = 0; 7703 tcp->tcp_localnet = 0; 7704 tcp->tcp_syn_defense = 0; 7705 tcp->tcp_set_timer = 0; 7706 7707 tcp->tcp_active_open = 0; 7708 ASSERT(tcp->tcp_timeout == B_FALSE); 7709 tcp->tcp_rexmit = B_FALSE; 7710 tcp->tcp_xmit_zc_clean = B_FALSE; 7711 7712 tcp->tcp_snd_sack_ok = B_FALSE; 7713 PRESERVE(tcp->tcp_recvdstaddr); 7714 tcp->tcp_hwcksum = B_FALSE; 7715 7716 tcp->tcp_ire_ill_check_done = B_FALSE; 7717 DONTCARE(tcp->tcp_maxpsz); /* Init in tcp_init_values */ 7718 7719 tcp->tcp_mdt = B_FALSE; 7720 tcp->tcp_mdt_hdr_head = 0; 7721 tcp->tcp_mdt_hdr_tail = 0; 7722 7723 tcp->tcp_conn_def_q0 = 0; 7724 tcp->tcp_ip_forward_progress = B_FALSE; 7725 tcp->tcp_anon_priv_bind = 0; 7726 tcp->tcp_ecn_ok = B_FALSE; 7727 7728 tcp->tcp_cwr = B_FALSE; 7729 
tcp->tcp_ecn_echo_on = B_FALSE; 7730 7731 if (tcp->tcp_sack_info != NULL) { 7732 if (tcp->tcp_notsack_list != NULL) { 7733 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 7734 } 7735 kmem_cache_free(tcp_sack_info_cache, tcp->tcp_sack_info); 7736 tcp->tcp_sack_info = NULL; 7737 } 7738 7739 tcp->tcp_rcv_ws = 0; 7740 tcp->tcp_snd_ws = 0; 7741 tcp->tcp_ts_recent = 0; 7742 tcp->tcp_rnxt = 0; /* Displayed in mib */ 7743 DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */ 7744 tcp->tcp_if_mtu = 0; 7745 7746 ASSERT(tcp->tcp_reass_head == NULL); 7747 ASSERT(tcp->tcp_reass_tail == NULL); 7748 7749 tcp->tcp_cwnd_cnt = 0; 7750 7751 ASSERT(tcp->tcp_rcv_list == NULL); 7752 ASSERT(tcp->tcp_rcv_last_head == NULL); 7753 ASSERT(tcp->tcp_rcv_last_tail == NULL); 7754 ASSERT(tcp->tcp_rcv_cnt == 0); 7755 7756 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_adapt_ire */ 7757 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */ 7758 tcp->tcp_csuna = 0; 7759 7760 tcp->tcp_rto = 0; /* Displayed in MIB */ 7761 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */ 7762 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */ 7763 tcp->tcp_rtt_update = 0; 7764 7765 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 7766 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 7767 7768 tcp->tcp_rack = 0; /* Displayed in mib */ 7769 tcp->tcp_rack_cnt = 0; 7770 tcp->tcp_rack_cur_max = 0; 7771 tcp->tcp_rack_abs_max = 0; 7772 7773 tcp->tcp_max_swnd = 0; 7774 7775 ASSERT(tcp->tcp_listener == NULL); 7776 7777 DONTCARE(tcp->tcp_xmit_lowater); /* Init in tcp_init_values */ 7778 7779 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */ 7780 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */ 7781 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */ 7782 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */ 7783 7784 ASSERT(tcp->tcp_conn_req_cnt_q == 0); 7785 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0); 7786 PRESERVE(tcp->tcp_conn_req_max); 7787 PRESERVE(tcp->tcp_conn_req_seqnum); 7788 7789 DONTCARE(tcp->tcp_ip_hdr_len); /* Init in tcp_init_values */ 7790 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */ 7791 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */ 7792 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */ 7793 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */ 7794 7795 tcp->tcp_lingertime = 0; 7796 7797 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */ 7798 ASSERT(tcp->tcp_urp_mp == NULL); 7799 ASSERT(tcp->tcp_urp_mark_mp == NULL); 7800 ASSERT(tcp->tcp_fused_sigurg_mp == NULL); 7801 7802 ASSERT(tcp->tcp_eager_next_q == NULL); 7803 ASSERT(tcp->tcp_eager_last_q == NULL); 7804 ASSERT((tcp->tcp_eager_next_q0 == NULL && 7805 tcp->tcp_eager_prev_q0 == NULL) || 7806 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0); 7807 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 7808 7809 tcp->tcp_client_errno = 0; 7810 7811 DONTCARE(tcp->tcp_sum); /* Init in tcp_init_values */ 7812 7813 tcp->tcp_remote_v6 = ipv6_all_zeros; /* Displayed in MIB */ 7814 7815 PRESERVE(tcp->tcp_bound_source_v6); 7816 tcp->tcp_last_sent_len = 0; 7817 tcp->tcp_dupack_cnt = 0; 7818 7819 tcp->tcp_fport = 0; /* Displayed in MIB */ 7820 PRESERVE(tcp->tcp_lport); 7821 7822 PRESERVE(tcp->tcp_acceptor_lockp); 7823 7824 ASSERT(tcp->tcp_ordrelid == 0); 7825 PRESERVE(tcp->tcp_acceptor_id); 7826 DONTCARE(tcp->tcp_ipsec_overhead); 7827 7828 /* 7829 * If tcp_tracing flag is ON (i.e. 
We have a trace buffer 7830 * in tcp structure and now tracing), Re-initialize all 7831 * members of tcp_traceinfo. 7832 */ 7833 if (tcp->tcp_tracebuf != NULL) { 7834 bzero(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 7835 } 7836 7837 PRESERVE(tcp->tcp_family); 7838 if (tcp->tcp_family == AF_INET6) { 7839 tcp->tcp_ipversion = IPV6_VERSION; 7840 tcp->tcp_mss = tcp_mss_def_ipv6; 7841 } else { 7842 tcp->tcp_ipversion = IPV4_VERSION; 7843 tcp->tcp_mss = tcp_mss_def_ipv4; 7844 } 7845 7846 tcp->tcp_bound_if = 0; 7847 tcp->tcp_ipv6_recvancillary = 0; 7848 tcp->tcp_recvifindex = 0; 7849 tcp->tcp_recvhops = 0; 7850 tcp->tcp_closed = 0; 7851 tcp->tcp_cleandeathtag = 0; 7852 if (tcp->tcp_hopopts != NULL) { 7853 mi_free(tcp->tcp_hopopts); 7854 tcp->tcp_hopopts = NULL; 7855 tcp->tcp_hopoptslen = 0; 7856 } 7857 ASSERT(tcp->tcp_hopoptslen == 0); 7858 if (tcp->tcp_dstopts != NULL) { 7859 mi_free(tcp->tcp_dstopts); 7860 tcp->tcp_dstopts = NULL; 7861 tcp->tcp_dstoptslen = 0; 7862 } 7863 ASSERT(tcp->tcp_dstoptslen == 0); 7864 if (tcp->tcp_rtdstopts != NULL) { 7865 mi_free(tcp->tcp_rtdstopts); 7866 tcp->tcp_rtdstopts = NULL; 7867 tcp->tcp_rtdstoptslen = 0; 7868 } 7869 ASSERT(tcp->tcp_rtdstoptslen == 0); 7870 if (tcp->tcp_rthdr != NULL) { 7871 mi_free(tcp->tcp_rthdr); 7872 tcp->tcp_rthdr = NULL; 7873 tcp->tcp_rthdrlen = 0; 7874 } 7875 ASSERT(tcp->tcp_rthdrlen == 0); 7876 PRESERVE(tcp->tcp_drop_opt_ack_cnt); 7877 7878 /* Reset fusion-related fields */ 7879 tcp->tcp_fused = B_FALSE; 7880 tcp->tcp_unfusable = B_FALSE; 7881 tcp->tcp_fused_sigurg = B_FALSE; 7882 tcp->tcp_direct_sockfs = B_FALSE; 7883 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 7884 tcp->tcp_loopback_peer = NULL; 7885 tcp->tcp_fuse_rcv_hiwater = 0; 7886 tcp->tcp_fuse_rcv_unread_hiwater = 0; 7887 tcp->tcp_fuse_rcv_unread_cnt = 0; 7888 7889 tcp->tcp_in_ack_unsent = 0; 7890 tcp->tcp_cork = B_FALSE; 7891 7892 PRESERVE(tcp->tcp_squeue_bytes); 7893 7894 ASSERT(tcp->tcp_kssl_ctx == NULL); 7895 ASSERT(!tcp->tcp_kssl_pending); 7896 PRESERVE(tcp->tcp_kssl_ent); 7897 7898 #undef DONTCARE 7899 #undef PRESERVE 7900 } 7901 7902 /* 7903 * Allocate necessary resources and initialize state vector. 7904 * Guaranteed not to fail so that when an error is returned, 7905 * the caller doesn't need to do any additional cleanup. 7906 */ 7907 int 7908 tcp_init(tcp_t *tcp, queue_t *q) 7909 { 7910 int err; 7911 7912 tcp->tcp_rq = q; 7913 tcp->tcp_wq = WR(q); 7914 tcp->tcp_state = TCPS_IDLE; 7915 if ((err = tcp_init_values(tcp)) != 0) 7916 tcp_timers_stop(tcp); 7917 return (err); 7918 } 7919 7920 static int 7921 tcp_init_values(tcp_t *tcp) 7922 { 7923 int err; 7924 7925 ASSERT((tcp->tcp_family == AF_INET && 7926 tcp->tcp_ipversion == IPV4_VERSION) || 7927 (tcp->tcp_family == AF_INET6 && 7928 (tcp->tcp_ipversion == IPV4_VERSION || 7929 tcp->tcp_ipversion == IPV6_VERSION))); 7930 7931 /* 7932 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO 7933 * will be close to tcp_rexmit_interval_initial. By doing this, we 7934 * allow the algorithm to adjust slowly to large fluctuations of RTT 7935 * during first few transmissions of a connection as seen in slow 7936 * links. 
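 *
 * As a worked example (assuming the customary defaults of
 * tcp_rexmit_interval_initial = 3000 ms and zero for
 * tcp_rexmit_interval_extra and tcp_conn_grace_period):
 *
 *	tcp_rtt_sa = 3000 << 2 = 12000
 *	tcp_rtt_sd = 3000 >> 1 = 1500
 *	tcp_rto    = (12000 >> 3) + 1500 + 0 + (12000 >> 5) + 0
 *	           = 1500 + 1500 + 375 = 3375 ms
 *
 * which is then bounded below by tcp_rexmit_interval_min.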
7937 */ 7938 tcp->tcp_rtt_sa = tcp_rexmit_interval_initial << 2; 7939 tcp->tcp_rtt_sd = tcp_rexmit_interval_initial >> 1; 7940 tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 7941 tcp_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) + 7942 tcp_conn_grace_period; 7943 if (tcp->tcp_rto < tcp_rexmit_interval_min) 7944 tcp->tcp_rto = tcp_rexmit_interval_min; 7945 tcp->tcp_timer_backoff = 0; 7946 tcp->tcp_ms_we_have_waited = 0; 7947 tcp->tcp_last_recv_time = lbolt; 7948 tcp->tcp_cwnd_max = tcp_cwnd_max_; 7949 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 7950 tcp->tcp_snd_burst = TCP_CWND_INFINITE; 7951 7952 tcp->tcp_maxpsz = tcp_maxpsz_multiplier; 7953 7954 tcp->tcp_first_timer_threshold = tcp_ip_notify_interval; 7955 tcp->tcp_first_ctimer_threshold = tcp_ip_notify_cinterval; 7956 tcp->tcp_second_timer_threshold = tcp_ip_abort_interval; 7957 /* 7958 * Fix it to tcp_ip_abort_linterval later if it turns out to be a 7959 * passive open. 7960 */ 7961 tcp->tcp_second_ctimer_threshold = tcp_ip_abort_cinterval; 7962 7963 tcp->tcp_naglim = tcp_naglim_def; 7964 7965 /* NOTE: ISS is now set in tcp_adapt_ire(). */ 7966 7967 tcp->tcp_mdt_hdr_head = 0; 7968 tcp->tcp_mdt_hdr_tail = 0; 7969 7970 /* Reset fusion-related fields */ 7971 tcp->tcp_fused = B_FALSE; 7972 tcp->tcp_unfusable = B_FALSE; 7973 tcp->tcp_fused_sigurg = B_FALSE; 7974 tcp->tcp_direct_sockfs = B_FALSE; 7975 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 7976 tcp->tcp_loopback_peer = NULL; 7977 tcp->tcp_fuse_rcv_hiwater = 0; 7978 tcp->tcp_fuse_rcv_unread_hiwater = 0; 7979 tcp->tcp_fuse_rcv_unread_cnt = 0; 7980 7981 /* Initialize the header template */ 7982 if (tcp->tcp_ipversion == IPV4_VERSION) { 7983 err = tcp_header_init_ipv4(tcp); 7984 } else { 7985 err = tcp_header_init_ipv6(tcp); 7986 } 7987 if (err) 7988 return (err); 7989 7990 /* 7991 * Init the window scale to the max so tcp_rwnd_set() won't pare 7992 * down tcp_rwnd. tcp_adapt_ire() will set the right value later. 7993 */ 7994 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT; 7995 tcp->tcp_xmit_lowater = tcp_xmit_lowat; 7996 tcp->tcp_xmit_hiwater = tcp_xmit_hiwat; 7997 7998 tcp->tcp_cork = B_FALSE; 7999 /* 8000 * Init the tcp_debug option. This value determines whether TCP 8001 * calls strlog() to print out debug messages. Doing this 8002 * initialization here means that this value is not inherited thru 8003 * tcp_reinit(). 8004 */ 8005 tcp->tcp_debug = tcp_dbg; 8006 8007 tcp->tcp_ka_interval = tcp_keepalive_interval; 8008 tcp->tcp_ka_abort_thres = tcp_keepalive_abort_interval; 8009 8010 return (0); 8011 } 8012 8013 /* 8014 * Initialize the IPv4 header. Loses any record of any IP options. 8015 */ 8016 static int 8017 tcp_header_init_ipv4(tcp_t *tcp) 8018 { 8019 tcph_t *tcph; 8020 uint32_t sum; 8021 conn_t *connp; 8022 8023 /* 8024 * This is a simple initialization. If there's 8025 * already a template, it should never be too small, 8026 * so reuse it. Otherwise, allocate space for the new one. 
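 *
 * The resulting template is laid out in tcp_iphc roughly as
 *
 *	| ipha_t (20 bytes) | tcph_t (20 bytes) | room for options ... |
 *
 * with tcp_ipha and tcp_tcph pointing at the two fixed headers and the
 * whole buffer sized at TCP_MAX_COMBINED_HEADER_LENGTH.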
8027 */ 8028 if (tcp->tcp_iphc == NULL) { 8029 ASSERT(tcp->tcp_iphc_len == 0); 8030 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8031 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8032 if (tcp->tcp_iphc == NULL) { 8033 tcp->tcp_iphc_len = 0; 8034 return (ENOMEM); 8035 } 8036 } 8037 8038 /* options are gone; may need a new label */ 8039 connp = tcp->tcp_connp; 8040 connp->conn_mlp_type = mlptSingle; 8041 connp->conn_ulp_labeled = !is_system_labeled(); 8042 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8043 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 8044 tcp->tcp_ip6h = NULL; 8045 tcp->tcp_ipversion = IPV4_VERSION; 8046 tcp->tcp_hdr_len = sizeof (ipha_t) + sizeof (tcph_t); 8047 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8048 tcp->tcp_ip_hdr_len = sizeof (ipha_t); 8049 tcp->tcp_ipha->ipha_length = htons(sizeof (ipha_t) + sizeof (tcph_t)); 8050 tcp->tcp_ipha->ipha_version_and_hdr_length 8051 = (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS; 8052 tcp->tcp_ipha->ipha_ident = 0; 8053 8054 tcp->tcp_ttl = (uchar_t)tcp_ipv4_ttl; 8055 tcp->tcp_tos = 0; 8056 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0; 8057 tcp->tcp_ipha->ipha_ttl = (uchar_t)tcp_ipv4_ttl; 8058 tcp->tcp_ipha->ipha_protocol = IPPROTO_TCP; 8059 8060 tcph = (tcph_t *)(tcp->tcp_iphc + sizeof (ipha_t)); 8061 tcp->tcp_tcph = tcph; 8062 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8063 /* 8064 * IP wants our header length in the checksum field to 8065 * allow it to perform a single pseudo-header+checksum 8066 * calculation on behalf of TCP. 8067 * Include the adjustment for a source route once IP_OPTIONS is set. 8068 */ 8069 sum = sizeof (tcph_t) + tcp->tcp_sum; 8070 sum = (sum >> 16) + (sum & 0xFFFF); 8071 U16_TO_ABE16(sum, tcph->th_sum); 8072 return (0); 8073 } 8074 8075 /* 8076 * Initialize the IPv6 header. Loses any record of any IPv6 extension headers. 8077 */ 8078 static int 8079 tcp_header_init_ipv6(tcp_t *tcp) 8080 { 8081 tcph_t *tcph; 8082 uint32_t sum; 8083 conn_t *connp; 8084 8085 /* 8086 * This is a simple initialization. If there's 8087 * already a template, it should never be too small, 8088 * so reuse it. Otherwise, allocate space for the new one. 8089 * Ensure that there is enough space to "downgrade" the tcp_t 8090 * to an IPv4 tcp_t. This requires having space for a full load 8091 * of IPv4 options, as well as a full load of TCP options 8092 * (TCP_MAX_COMBINED_HEADER_LENGTH, 120 bytes); this is more space 8093 * than a v6 header and a TCP header with a full load of TCP options 8094 * (IPV6_HDR_LEN is 40 bytes; TCP_MAX_HDR_LENGTH is 60 bytes). 8095 * We want to avoid reallocation in the "downgraded" case when 8096 * processing outbound IPv4 options. 
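 *
 * (Illustrative arithmetic: 40 + 60 = 100 bytes for the IPv6 case,
 * which fits comfortably within the 120-byte combined-header template.)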
8097 */ 8098 if (tcp->tcp_iphc == NULL) { 8099 ASSERT(tcp->tcp_iphc_len == 0); 8100 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8101 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8102 if (tcp->tcp_iphc == NULL) { 8103 tcp->tcp_iphc_len = 0; 8104 return (ENOMEM); 8105 } 8106 } 8107 8108 /* options are gone; may need a new label */ 8109 connp = tcp->tcp_connp; 8110 connp->conn_mlp_type = mlptSingle; 8111 connp->conn_ulp_labeled = !is_system_labeled(); 8112 8113 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8114 tcp->tcp_ipversion = IPV6_VERSION; 8115 tcp->tcp_hdr_len = IPV6_HDR_LEN + sizeof (tcph_t); 8116 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8117 tcp->tcp_ip_hdr_len = IPV6_HDR_LEN; 8118 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 8119 tcp->tcp_ipha = NULL; 8120 8121 /* Initialize the header template */ 8122 8123 tcp->tcp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW; 8124 tcp->tcp_ip6h->ip6_plen = ntohs(sizeof (tcph_t)); 8125 tcp->tcp_ip6h->ip6_nxt = IPPROTO_TCP; 8126 tcp->tcp_ip6h->ip6_hops = (uint8_t)tcp_ipv6_hoplimit; 8127 8128 tcph = (tcph_t *)(tcp->tcp_iphc + IPV6_HDR_LEN); 8129 tcp->tcp_tcph = tcph; 8130 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8131 /* 8132 * IP wants our header length in the checksum field to 8133 * allow it to perform a single psuedo-header+checksum 8134 * calculation on behalf of TCP. 8135 * Include the adjustment for a source route when IPV6_RTHDR is set. 8136 */ 8137 sum = sizeof (tcph_t) + tcp->tcp_sum; 8138 sum = (sum >> 16) + (sum & 0xFFFF); 8139 U16_TO_ABE16(sum, tcph->th_sum); 8140 return (0); 8141 } 8142 8143 /* At minimum we need 4 bytes in the TCP header for the lookup */ 8144 #define ICMP_MIN_TCP_HDR 12 8145 8146 /* 8147 * tcp_icmp_error is called by tcp_rput_other to process ICMP error messages 8148 * passed up by IP. The message is always received on the correct tcp_t. 8149 * Assumes that IP has pulled up everything up to and including the ICMP header. 8150 */ 8151 void 8152 tcp_icmp_error(tcp_t *tcp, mblk_t *mp) 8153 { 8154 icmph_t *icmph; 8155 ipha_t *ipha; 8156 int iph_hdr_length; 8157 tcph_t *tcph; 8158 boolean_t ipsec_mctl = B_FALSE; 8159 boolean_t secure; 8160 mblk_t *first_mp = mp; 8161 uint32_t new_mss; 8162 uint32_t ratio; 8163 size_t mp_size = MBLKL(mp); 8164 uint32_t seg_ack; 8165 uint32_t seg_seq; 8166 8167 /* Assume IP provides aligned packets - otherwise toss */ 8168 if (!OK_32PTR(mp->b_rptr)) { 8169 freemsg(mp); 8170 return; 8171 } 8172 8173 /* 8174 * Since ICMP errors are normal data marked with M_CTL when sent 8175 * to TCP or UDP, we have to look for a IPSEC_IN value to identify 8176 * packets starting with an ipsec_info_t, see ipsec_info.h. 8177 */ 8178 if ((mp_size == sizeof (ipsec_info_t)) && 8179 (((ipsec_info_t *)mp->b_rptr)->ipsec_info_type == IPSEC_IN)) { 8180 ASSERT(mp->b_cont != NULL); 8181 mp = mp->b_cont; 8182 /* IP should have done this */ 8183 ASSERT(OK_32PTR(mp->b_rptr)); 8184 mp_size = MBLKL(mp); 8185 ipsec_mctl = B_TRUE; 8186 } 8187 8188 /* 8189 * Verify that we have a complete outer IP header. If not, drop it. 8190 */ 8191 if (mp_size < sizeof (ipha_t)) { 8192 noticmpv4: 8193 freemsg(first_mp); 8194 return; 8195 } 8196 8197 ipha = (ipha_t *)mp->b_rptr; 8198 /* 8199 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent 8200 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6. 
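 *
 * The message parsed below has the general (illustrative) shape
 *
 *	| outer ipha_t | icmph_t | inner ipha_t | tcph_t (>= ICMP_MIN_TCP_HDR bytes) |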
8201 */ 8202 switch (IPH_HDR_VERSION(ipha)) { 8203 case IPV6_VERSION: 8204 tcp_icmp_error_ipv6(tcp, first_mp, ipsec_mctl); 8205 return; 8206 case IPV4_VERSION: 8207 break; 8208 default: 8209 goto noticmpv4; 8210 } 8211 8212 /* Skip past the outer IP and ICMP headers */ 8213 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8214 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 8215 /* 8216 * If we don't have the correct outer IP header length or if the ULP 8217 * is not IPPROTO_ICMP or if we don't have a complete inner IP header 8218 * send it upstream. 8219 */ 8220 if (iph_hdr_length < sizeof (ipha_t) || 8221 ipha->ipha_protocol != IPPROTO_ICMP || 8222 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 8223 goto noticmpv4; 8224 } 8225 ipha = (ipha_t *)&icmph[1]; 8226 8227 /* Skip past the inner IP and find the ULP header */ 8228 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8229 tcph = (tcph_t *)((char *)ipha + iph_hdr_length); 8230 /* 8231 * If we don't have the correct inner IP header length or if the ULP 8232 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 8233 * bytes of TCP header, drop it. 8234 */ 8235 if (iph_hdr_length < sizeof (ipha_t) || 8236 ipha->ipha_protocol != IPPROTO_TCP || 8237 (uchar_t *)tcph + ICMP_MIN_TCP_HDR > mp->b_wptr) { 8238 goto noticmpv4; 8239 } 8240 8241 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 8242 if (ipsec_mctl) { 8243 secure = ipsec_in_is_secure(first_mp); 8244 } else { 8245 secure = B_FALSE; 8246 } 8247 if (secure) { 8248 /* 8249 * If we are willing to accept this in clear 8250 * we don't have to verify policy. 8251 */ 8252 if (!ipsec_inbound_accept_clear(mp, ipha, NULL)) { 8253 if (!tcp_check_policy(tcp, first_mp, 8254 ipha, NULL, secure, ipsec_mctl)) { 8255 /* 8256 * tcp_check_policy called 8257 * ip_drop_packet() on failure. 8258 */ 8259 return; 8260 } 8261 } 8262 } 8263 } else if (ipsec_mctl) { 8264 /* 8265 * This is a hard_bound connection. IP has already 8266 * verified policy. We don't have to do it again. 8267 */ 8268 freeb(first_mp); 8269 first_mp = mp; 8270 ipsec_mctl = B_FALSE; 8271 } 8272 8273 seg_ack = ABE32_TO_U32(tcph->th_ack); 8274 seg_seq = ABE32_TO_U32(tcph->th_seq); 8275 /* 8276 * TCP SHOULD check that the TCP sequence number contained in 8277 * payload of the ICMP error message is within the range 8278 * SND.UNA <= SEG.SEQ < SND.NXT. and also SEG.ACK <= RECV.NXT 8279 */ 8280 if (SEQ_LT(seg_seq, tcp->tcp_suna) || 8281 SEQ_GEQ(seg_seq, tcp->tcp_snxt) || 8282 SEQ_GT(seg_ack, tcp->tcp_rnxt)) { 8283 /* 8284 * If the ICMP message is bogus, should we kill the 8285 * connection, or should we just drop the bogus ICMP 8286 * message? It would probably make more sense to just 8287 * drop the message so that if this one managed to get 8288 * in, the real connection should not suffer. 8289 */ 8290 goto noticmpv4; 8291 } 8292 8293 switch (icmph->icmph_type) { 8294 case ICMP_DEST_UNREACHABLE: 8295 switch (icmph->icmph_code) { 8296 case ICMP_FRAGMENTATION_NEEDED: 8297 /* 8298 * Reduce the MSS based on the new MTU. This will 8299 * eliminate any fragmentation locally. 8300 * N.B. There may well be some funny side-effects on 8301 * the local send policy and the remote receive policy. 8302 * Pending further research, we provide 8303 * tcp_ignore_path_mtu just in case this proves 8304 * disastrous somewhere. 8305 * 8306 * After updating the MSS, retransmit part of the 8307 * dropped segment using the new mss by calling 8308 * tcp_wput_data(). Need to adjust all those 8309 * params to make sure tcp_wput_data() work properly. 
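 *
 * As a rough illustration (example numbers only): if the ICMP
 * "fragmentation needed" message advertises an MTU of 1400 while
 * tcp_hdr_len is 52 (a 20-byte IP header plus a 32-byte TCP header
 * carrying the timestamp option) and there is no IPsec overhead,
 * the new MSS computed below is 1400 - 52 = 1348 bytes.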
8310 */ 8311 if (tcp_ignore_path_mtu) 8312 break; 8313 8314 /* 8315 * Decrease the MSS by time stamp options 8316 * IP options and IPSEC options. tcp_hdr_len 8317 * includes time stamp option and IP option 8318 * length. 8319 */ 8320 8321 new_mss = ntohs(icmph->icmph_du_mtu) - 8322 tcp->tcp_hdr_len - tcp->tcp_ipsec_overhead; 8323 8324 /* 8325 * Only update the MSS if the new one is 8326 * smaller than the previous one. This is 8327 * to avoid problems when getting multiple 8328 * ICMP errors for the same MTU. 8329 */ 8330 if (new_mss >= tcp->tcp_mss) 8331 break; 8332 8333 /* 8334 * Stop doing PMTU if new_mss is less than 68 8335 * or less than tcp_mss_min. 8336 * The value 68 comes from rfc 1191. 8337 */ 8338 if (new_mss < MAX(68, tcp_mss_min)) 8339 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 8340 0; 8341 8342 ratio = tcp->tcp_cwnd / tcp->tcp_mss; 8343 ASSERT(ratio >= 1); 8344 tcp_mss_set(tcp, new_mss); 8345 8346 /* 8347 * Make sure we have something to 8348 * send. 8349 */ 8350 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) && 8351 (tcp->tcp_xmit_head != NULL)) { 8352 /* 8353 * Shrink tcp_cwnd in 8354 * proportion to the old MSS/new MSS. 8355 */ 8356 tcp->tcp_cwnd = ratio * tcp->tcp_mss; 8357 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 8358 (tcp->tcp_unsent == 0)) { 8359 tcp->tcp_rexmit_max = tcp->tcp_fss; 8360 } else { 8361 tcp->tcp_rexmit_max = tcp->tcp_snxt; 8362 } 8363 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 8364 tcp->tcp_rexmit = B_TRUE; 8365 tcp->tcp_dupack_cnt = 0; 8366 tcp->tcp_snd_burst = TCP_CWND_SS; 8367 tcp_ss_rexmit(tcp); 8368 } 8369 break; 8370 case ICMP_PORT_UNREACHABLE: 8371 case ICMP_PROTOCOL_UNREACHABLE: 8372 switch (tcp->tcp_state) { 8373 case TCPS_SYN_SENT: 8374 case TCPS_SYN_RCVD: 8375 /* 8376 * ICMP can snipe away incipient 8377 * TCP connections as long as 8378 * seq number is same as initial 8379 * send seq number. 8380 */ 8381 if (seg_seq == tcp->tcp_iss) { 8382 (void) tcp_clean_death(tcp, 8383 ECONNREFUSED, 6); 8384 } 8385 break; 8386 } 8387 break; 8388 case ICMP_HOST_UNREACHABLE: 8389 case ICMP_NET_UNREACHABLE: 8390 /* Record the error in case we finally time out. */ 8391 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE) 8392 tcp->tcp_client_errno = EHOSTUNREACH; 8393 else 8394 tcp->tcp_client_errno = ENETUNREACH; 8395 if (tcp->tcp_state == TCPS_SYN_RCVD) { 8396 if (tcp->tcp_listener != NULL && 8397 tcp->tcp_listener->tcp_syn_defense) { 8398 /* 8399 * Ditch the half-open connection if we 8400 * suspect a SYN attack is under way. 8401 */ 8402 tcp_ip_ire_mark_advice(tcp); 8403 (void) tcp_clean_death(tcp, 8404 tcp->tcp_client_errno, 7); 8405 } 8406 } 8407 break; 8408 default: 8409 break; 8410 } 8411 break; 8412 case ICMP_SOURCE_QUENCH: { 8413 /* 8414 * use a global boolean to control 8415 * whether TCP should respond to ICMP_SOURCE_QUENCH. 8416 * The default is false. 8417 */ 8418 if (tcp_icmp_source_quench) { 8419 /* 8420 * Reduce the sending rate as if we got a 8421 * retransmit timeout 8422 */ 8423 uint32_t npkt; 8424 8425 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / 8426 tcp->tcp_mss; 8427 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss; 8428 tcp->tcp_cwnd = tcp->tcp_mss; 8429 tcp->tcp_cwnd_cnt = 0; 8430 } 8431 break; 8432 } 8433 } 8434 freemsg(first_mp); 8435 } 8436 8437 /* 8438 * tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6 8439 * error messages passed up by IP. 8440 * Assumes that IP has pulled up all the extension headers as well 8441 * as the ICMPv6 header. 
8442 */ 8443 static void 8444 tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, boolean_t ipsec_mctl) 8445 { 8446 icmp6_t *icmp6; 8447 ip6_t *ip6h; 8448 uint16_t iph_hdr_length; 8449 tcpha_t *tcpha; 8450 uint8_t *nexthdrp; 8451 uint32_t new_mss; 8452 uint32_t ratio; 8453 boolean_t secure; 8454 mblk_t *first_mp = mp; 8455 size_t mp_size; 8456 uint32_t seg_ack; 8457 uint32_t seg_seq; 8458 8459 /* 8460 * The caller has determined if this is an IPSEC_IN packet and 8461 * set ipsec_mctl appropriately (see tcp_icmp_error). 8462 */ 8463 if (ipsec_mctl) 8464 mp = mp->b_cont; 8465 8466 mp_size = MBLKL(mp); 8467 8468 /* 8469 * Verify that we have a complete IP header. If not, send it upstream. 8470 */ 8471 if (mp_size < sizeof (ip6_t)) { 8472 noticmpv6: 8473 freemsg(first_mp); 8474 return; 8475 } 8476 8477 /* 8478 * Verify this is an ICMPV6 packet, else send it upstream. 8479 */ 8480 ip6h = (ip6_t *)mp->b_rptr; 8481 if (ip6h->ip6_nxt == IPPROTO_ICMPV6) { 8482 iph_hdr_length = IPV6_HDR_LEN; 8483 } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, 8484 &nexthdrp) || 8485 *nexthdrp != IPPROTO_ICMPV6) { 8486 goto noticmpv6; 8487 } 8488 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length]; 8489 ip6h = (ip6_t *)&icmp6[1]; 8490 /* 8491 * Verify if we have a complete ICMP and inner IP header. 8492 */ 8493 if ((uchar_t *)&ip6h[1] > mp->b_wptr) 8494 goto noticmpv6; 8495 8496 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) 8497 goto noticmpv6; 8498 tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length); 8499 /* 8500 * Validate inner header. If the ULP is not IPPROTO_TCP or if we don't 8501 * have at least ICMP_MIN_TCP_HDR bytes of TCP header drop the 8502 * packet. 8503 */ 8504 if ((*nexthdrp != IPPROTO_TCP) || 8505 ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) { 8506 goto noticmpv6; 8507 } 8508 8509 /* 8510 * ICMP errors come on the right queue or come on 8511 * listener/global queue for detached connections and 8512 * get switched to the right queue. If it comes on the 8513 * right queue, policy check has already been done by IP 8514 * and thus free the first_mp without verifying the policy. 8515 * If it has come for a non-hard bound connection, we need 8516 * to verify policy as IP may not have done it. 8517 */ 8518 if (!tcp->tcp_hard_bound) { 8519 if (ipsec_mctl) { 8520 secure = ipsec_in_is_secure(first_mp); 8521 } else { 8522 secure = B_FALSE; 8523 } 8524 if (secure) { 8525 /* 8526 * If we are willing to accept this in clear 8527 * we don't have to verify policy. 8528 */ 8529 if (!ipsec_inbound_accept_clear(mp, NULL, ip6h)) { 8530 if (!tcp_check_policy(tcp, first_mp, 8531 NULL, ip6h, secure, ipsec_mctl)) { 8532 /* 8533 * tcp_check_policy called 8534 * ip_drop_packet() on failure. 8535 */ 8536 return; 8537 } 8538 } 8539 } 8540 } else if (ipsec_mctl) { 8541 /* 8542 * This is a hard_bound connection. IP has already 8543 * verified policy. We don't have to do it again. 8544 */ 8545 freeb(first_mp); 8546 first_mp = mp; 8547 ipsec_mctl = B_FALSE; 8548 } 8549 8550 seg_ack = ntohl(tcpha->tha_ack); 8551 seg_seq = ntohl(tcpha->tha_seq); 8552 /* 8553 * TCP SHOULD check that the TCP sequence number contained in 8554 * payload of the ICMP error message is within the range 8555 * SND.UNA <= SEG.SEQ < SND.NXT. and also SEG.ACK <= RECV.NXT 8556 */ 8557 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt) || 8558 SEQ_GT(seg_ack, tcp->tcp_rnxt)) { 8559 /* 8560 * If the ICMP message is bogus, should we kill the 8561 * connection, or should we just drop the bogus ICMP 8562 * message? 
It would probably make more sense to just 8563 * drop the message so that if this one managed to get 8564 * in, the real connection should not suffer. 8565 */ 8566 goto noticmpv6; 8567 } 8568 8569 switch (icmp6->icmp6_type) { 8570 case ICMP6_PACKET_TOO_BIG: 8571 /* 8572 * Reduce the MSS based on the new MTU. This will 8573 * eliminate any fragmentation locally. 8574 * N.B. There may well be some funny side-effects on 8575 * the local send policy and the remote receive policy. 8576 * Pending further research, we provide 8577 * tcp_ignore_path_mtu just in case this proves 8578 * disastrous somewhere. 8579 * 8580 * After updating the MSS, retransmit part of the 8581 * dropped segment using the new mss by calling 8582 * tcp_wput_data(). Need to adjust all those 8583 * params to make sure tcp_wput_data() work properly. 8584 */ 8585 if (tcp_ignore_path_mtu) 8586 break; 8587 8588 /* 8589 * Decrease the MSS by time stamp options 8590 * IP options and IPSEC options. tcp_hdr_len 8591 * includes time stamp option and IP option 8592 * length. 8593 */ 8594 new_mss = ntohs(icmp6->icmp6_mtu) - tcp->tcp_hdr_len - 8595 tcp->tcp_ipsec_overhead; 8596 8597 /* 8598 * Only update the MSS if the new one is 8599 * smaller than the previous one. This is 8600 * to avoid problems when getting multiple 8601 * ICMP errors for the same MTU. 8602 */ 8603 if (new_mss >= tcp->tcp_mss) 8604 break; 8605 8606 ratio = tcp->tcp_cwnd / tcp->tcp_mss; 8607 ASSERT(ratio >= 1); 8608 tcp_mss_set(tcp, new_mss); 8609 8610 /* 8611 * Make sure we have something to 8612 * send. 8613 */ 8614 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) && 8615 (tcp->tcp_xmit_head != NULL)) { 8616 /* 8617 * Shrink tcp_cwnd in 8618 * proportion to the old MSS/new MSS. 8619 */ 8620 tcp->tcp_cwnd = ratio * tcp->tcp_mss; 8621 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 8622 (tcp->tcp_unsent == 0)) { 8623 tcp->tcp_rexmit_max = tcp->tcp_fss; 8624 } else { 8625 tcp->tcp_rexmit_max = tcp->tcp_snxt; 8626 } 8627 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 8628 tcp->tcp_rexmit = B_TRUE; 8629 tcp->tcp_dupack_cnt = 0; 8630 tcp->tcp_snd_burst = TCP_CWND_SS; 8631 tcp_ss_rexmit(tcp); 8632 } 8633 break; 8634 8635 case ICMP6_DST_UNREACH: 8636 switch (icmp6->icmp6_code) { 8637 case ICMP6_DST_UNREACH_NOPORT: 8638 if (((tcp->tcp_state == TCPS_SYN_SENT) || 8639 (tcp->tcp_state == TCPS_SYN_RCVD)) && 8640 (seg_seq == tcp->tcp_iss)) { 8641 (void) tcp_clean_death(tcp, 8642 ECONNREFUSED, 8); 8643 } 8644 break; 8645 8646 case ICMP6_DST_UNREACH_ADMIN: 8647 case ICMP6_DST_UNREACH_NOROUTE: 8648 case ICMP6_DST_UNREACH_BEYONDSCOPE: 8649 case ICMP6_DST_UNREACH_ADDR: 8650 /* Record the error in case we finally time out. */ 8651 tcp->tcp_client_errno = EHOSTUNREACH; 8652 if (((tcp->tcp_state == TCPS_SYN_SENT) || 8653 (tcp->tcp_state == TCPS_SYN_RCVD)) && 8654 (seg_seq == tcp->tcp_iss)) { 8655 if (tcp->tcp_listener != NULL && 8656 tcp->tcp_listener->tcp_syn_defense) { 8657 /* 8658 * Ditch the half-open connection if we 8659 * suspect a SYN attack is under way. 
8660 */ 8661 tcp_ip_ire_mark_advice(tcp); 8662 (void) tcp_clean_death(tcp, 8663 tcp->tcp_client_errno, 9); 8664 } 8665 } 8666 8667 8668 break; 8669 default: 8670 break; 8671 } 8672 break; 8673 8674 case ICMP6_PARAM_PROB: 8675 /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */ 8676 if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER && 8677 (uchar_t *)ip6h + icmp6->icmp6_pptr == 8678 (uchar_t *)nexthdrp) { 8679 if (tcp->tcp_state == TCPS_SYN_SENT || 8680 tcp->tcp_state == TCPS_SYN_RCVD) { 8681 (void) tcp_clean_death(tcp, 8682 ECONNREFUSED, 10); 8683 } 8684 break; 8685 } 8686 break; 8687 8688 case ICMP6_TIME_EXCEEDED: 8689 default: 8690 break; 8691 } 8692 freemsg(first_mp); 8693 } 8694 8695 /* 8696 * IP recognizes seven kinds of bind requests: 8697 * 8698 * - A zero-length address binds only to the protocol number. 8699 * 8700 * - A 4-byte address is treated as a request to 8701 * validate that the address is a valid local IPv4 8702 * address, appropriate for an application to bind to. 8703 * IP does the verification, but does not make any note 8704 * of the address at this time. 8705 * 8706 * - A 16-byte address is treated as a request 8707 * to validate a local IPv6 address, as in the 4-byte 8708 * address case above. 8709 * 8710 * - A 16-byte sockaddr_in to validate the local IPv4 address and also 8711 * use it for the inbound fanout of packets. 8712 * 8713 * - A 24-byte sockaddr_in6 to validate the local IPv6 address and also 8714 * use it for the inbound fanout of packets. 8715 * 8716 * - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout 8717 * information consisting of local and remote addresses 8718 * and ports. In this case, the addresses are both 8719 * validated as appropriate for this operation, and, if 8720 * so, the information is retained for use in the 8721 * inbound fanout. 8722 * 8723 * - A 36-byte address (ipa6_conn_t) containing complete IPv6 8724 * fanout information, like the 12-byte case above. 8725 * 8726 * IP will also fill in the IRE request mblk with information 8727 * regarding our peer. In all cases, we notify IP of our protocol 8728 * type by appending a single protocol byte to the bind request.
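 *
 * For illustration only: a full IPv4 fanout request would be built by
 * something like
 *
 *	mp = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, sizeof (ipa_conn_t));
 *
 * which (see tcp_ip_bind_mp() below) lays out a T_bind_req, fills an
 * ipa_conn_t with the local/remote addresses and ports, chains an
 * IRE_DB_REQ_TYPE mblk so that IP can return the peer information,
 * and appends the trailing IPPROTO_TCP protocol byte.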
8729 */ 8730 static mblk_t * 8731 tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim, t_scalar_t addr_length) 8732 { 8733 char *cp; 8734 mblk_t *mp; 8735 struct T_bind_req *tbr; 8736 ipa_conn_t *ac; 8737 ipa6_conn_t *ac6; 8738 sin_t *sin; 8739 sin6_t *sin6; 8740 8741 ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ); 8742 ASSERT((tcp->tcp_family == AF_INET && 8743 tcp->tcp_ipversion == IPV4_VERSION) || 8744 (tcp->tcp_family == AF_INET6 && 8745 (tcp->tcp_ipversion == IPV4_VERSION || 8746 tcp->tcp_ipversion == IPV6_VERSION))); 8747 8748 mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI); 8749 if (!mp) 8750 return (mp); 8751 mp->b_datap->db_type = M_PROTO; 8752 tbr = (struct T_bind_req *)mp->b_rptr; 8753 tbr->PRIM_type = bind_prim; 8754 tbr->ADDR_offset = sizeof (*tbr); 8755 tbr->CONIND_number = 0; 8756 tbr->ADDR_length = addr_length; 8757 cp = (char *)&tbr[1]; 8758 switch (addr_length) { 8759 case sizeof (ipa_conn_t): 8760 ASSERT(tcp->tcp_family == AF_INET); 8761 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 8762 8763 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 8764 if (mp->b_cont == NULL) { 8765 freemsg(mp); 8766 return (NULL); 8767 } 8768 mp->b_cont->b_wptr += sizeof (ire_t); 8769 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 8770 8771 /* cp known to be 32 bit aligned */ 8772 ac = (ipa_conn_t *)cp; 8773 ac->ac_laddr = tcp->tcp_ipha->ipha_src; 8774 ac->ac_faddr = tcp->tcp_remote; 8775 ac->ac_fport = tcp->tcp_fport; 8776 ac->ac_lport = tcp->tcp_lport; 8777 tcp->tcp_hard_binding = 1; 8778 break; 8779 8780 case sizeof (ipa6_conn_t): 8781 ASSERT(tcp->tcp_family == AF_INET6); 8782 8783 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 8784 if (mp->b_cont == NULL) { 8785 freemsg(mp); 8786 return (NULL); 8787 } 8788 mp->b_cont->b_wptr += sizeof (ire_t); 8789 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 8790 8791 /* cp known to be 32 bit aligned */ 8792 ac6 = (ipa6_conn_t *)cp; 8793 if (tcp->tcp_ipversion == IPV4_VERSION) { 8794 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 8795 &ac6->ac6_laddr); 8796 } else { 8797 ac6->ac6_laddr = tcp->tcp_ip6h->ip6_src; 8798 } 8799 ac6->ac6_faddr = tcp->tcp_remote_v6; 8800 ac6->ac6_fport = tcp->tcp_fport; 8801 ac6->ac6_lport = tcp->tcp_lport; 8802 tcp->tcp_hard_binding = 1; 8803 break; 8804 8805 case sizeof (sin_t): 8806 /* 8807 * NOTE: IPV6_ADDR_LEN also has same size. 8808 * Use family to discriminate. 8809 */ 8810 if (tcp->tcp_family == AF_INET) { 8811 sin = (sin_t *)cp; 8812 8813 *sin = sin_null; 8814 sin->sin_family = AF_INET; 8815 sin->sin_addr.s_addr = tcp->tcp_bound_source; 8816 sin->sin_port = tcp->tcp_lport; 8817 break; 8818 } else { 8819 *(in6_addr_t *)cp = tcp->tcp_bound_source_v6; 8820 } 8821 break; 8822 8823 case sizeof (sin6_t): 8824 ASSERT(tcp->tcp_family == AF_INET6); 8825 sin6 = (sin6_t *)cp; 8826 8827 *sin6 = sin6_null; 8828 sin6->sin6_family = AF_INET6; 8829 sin6->sin6_addr = tcp->tcp_bound_source_v6; 8830 sin6->sin6_port = tcp->tcp_lport; 8831 break; 8832 8833 case IP_ADDR_LEN: 8834 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 8835 *(uint32_t *)cp = tcp->tcp_ipha->ipha_src; 8836 break; 8837 8838 } 8839 /* Add protocol number to end */ 8840 cp[addr_length] = (char)IPPROTO_TCP; 8841 mp->b_wptr = (uchar_t *)&cp[addr_length + 1]; 8842 return (mp); 8843 } 8844 8845 /* 8846 * Notify IP that we are having trouble with this connection. IP should 8847 * blow the IRE away and start over. 
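 *
 * This is only done for IPv4; IPv6 relies on neighbor unreachability
 * detection, so the routine below simply returns in that case.  For
 * IPv4 an IP_IOCTL message carrying an ipid_t with
 * IP_IOC_IRE_DELETE_NO_REPLY is sent down to remove the IRE_CACHE
 * entry for the destination (the first source route hop when source
 * routing is in use).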
8848 */ 8849 static void 8850 tcp_ip_notify(tcp_t *tcp) 8851 { 8852 struct iocblk *iocp; 8853 ipid_t *ipid; 8854 mblk_t *mp; 8855 8856 /* IPv6 has NUD thus notification to delete the IRE is not needed */ 8857 if (tcp->tcp_ipversion == IPV6_VERSION) 8858 return; 8859 8860 mp = mkiocb(IP_IOCTL); 8861 if (mp == NULL) 8862 return; 8863 8864 iocp = (struct iocblk *)mp->b_rptr; 8865 iocp->ioc_count = sizeof (ipid_t) + sizeof (tcp->tcp_ipha->ipha_dst); 8866 8867 mp->b_cont = allocb(iocp->ioc_count, BPRI_HI); 8868 if (!mp->b_cont) { 8869 freeb(mp); 8870 return; 8871 } 8872 8873 ipid = (ipid_t *)mp->b_cont->b_rptr; 8874 mp->b_cont->b_wptr += iocp->ioc_count; 8875 bzero(ipid, sizeof (*ipid)); 8876 ipid->ipid_cmd = IP_IOC_IRE_DELETE_NO_REPLY; 8877 ipid->ipid_ire_type = IRE_CACHE; 8878 ipid->ipid_addr_offset = sizeof (ipid_t); 8879 ipid->ipid_addr_length = sizeof (tcp->tcp_ipha->ipha_dst); 8880 /* 8881 * Note: in the case of source routing we want to blow away the 8882 * route to the first source route hop. 8883 */ 8884 bcopy(&tcp->tcp_ipha->ipha_dst, &ipid[1], 8885 sizeof (tcp->tcp_ipha->ipha_dst)); 8886 8887 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 8888 } 8889 8890 /* Unlink and return any mblk that looks like it contains an ire */ 8891 static mblk_t * 8892 tcp_ire_mp(mblk_t *mp) 8893 { 8894 mblk_t *prev_mp; 8895 8896 for (;;) { 8897 prev_mp = mp; 8898 mp = mp->b_cont; 8899 if (mp == NULL) 8900 break; 8901 switch (DB_TYPE(mp)) { 8902 case IRE_DB_TYPE: 8903 case IRE_DB_REQ_TYPE: 8904 if (prev_mp != NULL) 8905 prev_mp->b_cont = mp->b_cont; 8906 mp->b_cont = NULL; 8907 return (mp); 8908 default: 8909 break; 8910 } 8911 } 8912 return (mp); 8913 } 8914 8915 /* 8916 * Timer callback routine for keepalive probe. We do a fake resend of 8917 * last ACKed byte. Then set a timer using RTO. When the timer expires, 8918 * check to see if we have heard anything from the other end for the last 8919 * RTO period. If we have, set the timer to expire for another 8920 * tcp_keepalive_intrvl and check again. If we have not, set a timer using 8921 * RTO << 1 and check again when it expires. Keep exponentially increasing 8922 * the timeout if we have not heard from the other side. If for more than 8923 * (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything, 8924 * kill the connection unless the keepalive abort threshold is 0. In 8925 * that case, we will probe "forever." 8926 */ 8927 static void 8928 tcp_keepalive_killer(void *arg) 8929 { 8930 mblk_t *mp; 8931 conn_t *connp = (conn_t *)arg; 8932 tcp_t *tcp = connp->conn_tcp; 8933 int32_t firetime; 8934 int32_t idletime; 8935 int32_t ka_intrvl; 8936 8937 tcp->tcp_ka_tid = 0; 8938 8939 if (tcp->tcp_fused) 8940 return; 8941 8942 BUMP_MIB(&tcp_mib, tcpTimKeepalive); 8943 ka_intrvl = tcp->tcp_ka_interval; 8944 8945 /* 8946 * Keepalive probe should only be sent if the application has not 8947 * done a close on the connection. 8948 */ 8949 if (tcp->tcp_state > TCPS_CLOSE_WAIT) { 8950 return; 8951 } 8952 /* Timer fired too early, restart it. */ 8953 if (tcp->tcp_state < TCPS_ESTABLISHED) { 8954 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 8955 MSEC_TO_TICK(ka_intrvl)); 8956 return; 8957 } 8958 8959 idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time); 8960 /* 8961 * If we have not heard from the other side for a long 8962 * time, kill the connection unless the keepalive abort 8963 * threshold is 0. In that case, we will probe "forever." 
8964 */ 8965 if (tcp->tcp_ka_abort_thres != 0 && 8966 idletime > (ka_intrvl + tcp->tcp_ka_abort_thres)) { 8967 BUMP_MIB(&tcp_mib, tcpTimKeepaliveDrop); 8968 (void) tcp_clean_death(tcp, tcp->tcp_client_errno ? 8969 tcp->tcp_client_errno : ETIMEDOUT, 11); 8970 return; 8971 } 8972 8973 if (tcp->tcp_snxt == tcp->tcp_suna && 8974 idletime >= ka_intrvl) { 8975 /* Fake resend of last ACKed byte. */ 8976 mblk_t *mp1 = allocb(1, BPRI_LO); 8977 8978 if (mp1 != NULL) { 8979 *mp1->b_wptr++ = '\0'; 8980 mp = tcp_xmit_mp(tcp, mp1, 1, NULL, NULL, 8981 tcp->tcp_suna - 1, B_FALSE, NULL, B_TRUE); 8982 freeb(mp1); 8983 /* 8984 * if allocation failed, fall through to start the 8985 * timer back. 8986 */ 8987 if (mp != NULL) { 8988 TCP_RECORD_TRACE(tcp, mp, 8989 TCP_TRACE_SEND_PKT); 8990 tcp_send_data(tcp, tcp->tcp_wq, mp); 8991 BUMP_MIB(&tcp_mib, tcpTimKeepaliveProbe); 8992 if (tcp->tcp_ka_last_intrvl != 0) { 8993 /* 8994 * We should probe again at least 8995 * in ka_intrvl, but not more than 8996 * tcp_rexmit_interval_max. 8997 */ 8998 firetime = MIN(ka_intrvl - 1, 8999 tcp->tcp_ka_last_intrvl << 1); 9000 if (firetime > tcp_rexmit_interval_max) 9001 firetime = 9002 tcp_rexmit_interval_max; 9003 } else { 9004 firetime = tcp->tcp_rto; 9005 } 9006 tcp->tcp_ka_tid = TCP_TIMER(tcp, 9007 tcp_keepalive_killer, 9008 MSEC_TO_TICK(firetime)); 9009 tcp->tcp_ka_last_intrvl = firetime; 9010 return; 9011 } 9012 } 9013 } else { 9014 tcp->tcp_ka_last_intrvl = 0; 9015 } 9016 9017 /* firetime can be negative if (mp1 == NULL || mp == NULL) */ 9018 if ((firetime = ka_intrvl - idletime) < 0) { 9019 firetime = ka_intrvl; 9020 } 9021 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 9022 MSEC_TO_TICK(firetime)); 9023 } 9024 9025 int 9026 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk) 9027 { 9028 queue_t *q = tcp->tcp_rq; 9029 int32_t mss = tcp->tcp_mss; 9030 int maxpsz; 9031 9032 if (TCP_IS_DETACHED(tcp)) 9033 return (mss); 9034 9035 if (tcp->tcp_fused) { 9036 maxpsz = tcp_fuse_maxpsz_set(tcp); 9037 mss = INFPSZ; 9038 } else if (tcp->tcp_mdt || tcp->tcp_maxpsz == 0) { 9039 /* 9040 * Set the sd_qn_maxpsz according to the socket send buffer 9041 * size, and sd_maxblk to INFPSZ (-1). This will essentially 9042 * instruct the stream head to copyin user data into contiguous 9043 * kernel-allocated buffers without breaking it up into smaller 9044 * chunks. We round up the buffer size to the nearest SMSS. 9045 */ 9046 maxpsz = MSS_ROUNDUP(tcp->tcp_xmit_hiwater, mss); 9047 if (tcp->tcp_kssl_ctx == NULL) 9048 mss = INFPSZ; 9049 else 9050 mss = SSL3_MAX_RECORD_LEN; 9051 } else { 9052 /* 9053 * Set sd_qn_maxpsz to approx half the (receivers) buffer 9054 * (and a multiple of the mss). This instructs the stream 9055 * head to break down larger than SMSS writes into SMSS- 9056 * size mblks, up to tcp_maxpsz_multiplier mblks at a time. 9057 */ 9058 maxpsz = tcp->tcp_maxpsz * mss; 9059 if (maxpsz > tcp->tcp_xmit_hiwater/2) { 9060 maxpsz = tcp->tcp_xmit_hiwater/2; 9061 /* Round up to nearest mss */ 9062 maxpsz = MSS_ROUNDUP(maxpsz, mss); 9063 } 9064 } 9065 (void) setmaxps(q, maxpsz); 9066 tcp->tcp_wq->q_maxpsz = maxpsz; 9067 9068 if (set_maxblk) 9069 (void) mi_set_sth_maxblk(q, mss); 9070 9071 return (mss); 9072 } 9073 9074 /* 9075 * Extract option values from a tcp header. We put any found values into the 9076 * tcpopt struct and return a bitmask saying which options were found. 
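 *
 * The returned bitmask is built from TCP_OPT_MSS_PRESENT,
 * TCP_OPT_WSCALE_PRESENT, TCP_OPT_SACK_OK_PRESENT, TCP_OPT_SACK_PRESENT
 * and TCP_OPT_TSTAMP_PRESENT.  SACK blocks are only recorded when
 * tcpopt->tcp is non-NULL, i.e. when the caller is interested in them.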
9077 */ 9078 static int 9079 tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt) 9080 { 9081 uchar_t *endp; 9082 int len; 9083 uint32_t mss; 9084 uchar_t *up = (uchar_t *)tcph; 9085 int found = 0; 9086 int32_t sack_len; 9087 tcp_seq sack_begin, sack_end; 9088 tcp_t *tcp; 9089 9090 endp = up + TCP_HDR_LENGTH(tcph); 9091 up += TCP_MIN_HEADER_LENGTH; 9092 while (up < endp) { 9093 len = endp - up; 9094 switch (*up) { 9095 case TCPOPT_EOL: 9096 break; 9097 9098 case TCPOPT_NOP: 9099 up++; 9100 continue; 9101 9102 case TCPOPT_MAXSEG: 9103 if (len < TCPOPT_MAXSEG_LEN || 9104 up[1] != TCPOPT_MAXSEG_LEN) 9105 break; 9106 9107 mss = BE16_TO_U16(up+2); 9108 /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 9109 tcpopt->tcp_opt_mss = mss; 9110 found |= TCP_OPT_MSS_PRESENT; 9111 9112 up += TCPOPT_MAXSEG_LEN; 9113 continue; 9114 9115 case TCPOPT_WSCALE: 9116 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 9117 break; 9118 9119 if (up[2] > TCP_MAX_WINSHIFT) 9120 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 9121 else 9122 tcpopt->tcp_opt_wscale = up[2]; 9123 found |= TCP_OPT_WSCALE_PRESENT; 9124 9125 up += TCPOPT_WS_LEN; 9126 continue; 9127 9128 case TCPOPT_SACK_PERMITTED: 9129 if (len < TCPOPT_SACK_OK_LEN || 9130 up[1] != TCPOPT_SACK_OK_LEN) 9131 break; 9132 found |= TCP_OPT_SACK_OK_PRESENT; 9133 up += TCPOPT_SACK_OK_LEN; 9134 continue; 9135 9136 case TCPOPT_SACK: 9137 if (len <= 2 || up[1] <= 2 || len < up[1]) 9138 break; 9139 9140 /* If TCP is not interested in SACK blks... */ 9141 if ((tcp = tcpopt->tcp) == NULL) { 9142 up += up[1]; 9143 continue; 9144 } 9145 sack_len = up[1] - TCPOPT_HEADER_LEN; 9146 up += TCPOPT_HEADER_LEN; 9147 9148 /* 9149 * If the list is empty, allocate one and assume 9150 * nothing is sack'ed. 9151 */ 9152 ASSERT(tcp->tcp_sack_info != NULL); 9153 if (tcp->tcp_notsack_list == NULL) { 9154 tcp_notsack_update(&(tcp->tcp_notsack_list), 9155 tcp->tcp_suna, tcp->tcp_snxt, 9156 &(tcp->tcp_num_notsack_blk), 9157 &(tcp->tcp_cnt_notsack_list)); 9158 9159 /* 9160 * Make sure tcp_notsack_list is not NULL. 9161 * This happens when kmem_alloc(KM_NOSLEEP) 9162 * returns NULL. 9163 */ 9164 if (tcp->tcp_notsack_list == NULL) { 9165 up += sack_len; 9166 continue; 9167 } 9168 tcp->tcp_fack = tcp->tcp_suna; 9169 } 9170 9171 while (sack_len > 0) { 9172 if (up + 8 > endp) { 9173 up = endp; 9174 break; 9175 } 9176 sack_begin = BE32_TO_U32(up); 9177 up += 4; 9178 sack_end = BE32_TO_U32(up); 9179 up += 4; 9180 sack_len -= 8; 9181 /* 9182 * Bounds checking. Make sure the SACK 9183 * info is within tcp_suna and tcp_snxt. 9184 * If this SACK blk is out of bound, ignore 9185 * it but continue to parse the following 9186 * blks. 
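 * For example, with tcp_suna = 1000 and tcp_snxt = 5000, a
 * block covering [2000, 3000) is recorded in the notsack list
 * (and may advance tcp_fack), while blocks such as [800, 1200)
 * or [4500, 6000) are ignored.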
9187 */ 9188 if (SEQ_LEQ(sack_end, sack_begin) || 9189 SEQ_LT(sack_begin, tcp->tcp_suna) || 9190 SEQ_GT(sack_end, tcp->tcp_snxt)) { 9191 continue; 9192 } 9193 tcp_notsack_insert(&(tcp->tcp_notsack_list), 9194 sack_begin, sack_end, 9195 &(tcp->tcp_num_notsack_blk), 9196 &(tcp->tcp_cnt_notsack_list)); 9197 if (SEQ_GT(sack_end, tcp->tcp_fack)) { 9198 tcp->tcp_fack = sack_end; 9199 } 9200 } 9201 found |= TCP_OPT_SACK_PRESENT; 9202 continue; 9203 9204 case TCPOPT_TSTAMP: 9205 if (len < TCPOPT_TSTAMP_LEN || 9206 up[1] != TCPOPT_TSTAMP_LEN) 9207 break; 9208 9209 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2); 9210 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6); 9211 9212 found |= TCP_OPT_TSTAMP_PRESENT; 9213 9214 up += TCPOPT_TSTAMP_LEN; 9215 continue; 9216 9217 default: 9218 if (len <= 1 || len < (int)up[1] || up[1] == 0) 9219 break; 9220 up += up[1]; 9221 continue; 9222 } 9223 break; 9224 } 9225 return (found); 9226 } 9227 9228 /* 9229 * Set the mss associated with a particular tcp based on its current value, 9230 * and a new one passed in. Observe minimums and maximums, and reset 9231 * other state variables that we want to view as multiples of mss. 9232 * 9233 * This function is called in various places mainly because 9234 * 1) Various stuffs, tcp_mss, tcp_cwnd, ... need to be adjusted when the 9235 * other side's SYN/SYN-ACK packet arrives. 9236 * 2) PMTUd may get us a new MSS. 9237 * 3) If the other side stops sending us timestamp option, we need to 9238 * increase the MSS size to use the extra bytes available. 9239 */ 9240 static void 9241 tcp_mss_set(tcp_t *tcp, uint32_t mss) 9242 { 9243 uint32_t mss_max; 9244 9245 if (tcp->tcp_ipversion == IPV4_VERSION) 9246 mss_max = tcp_mss_max_ipv4; 9247 else 9248 mss_max = tcp_mss_max_ipv6; 9249 9250 if (mss < tcp_mss_min) 9251 mss = tcp_mss_min; 9252 if (mss > mss_max) 9253 mss = mss_max; 9254 /* 9255 * Unless naglim has been set by our client to 9256 * a non-mss value, force naglim to track mss. 9257 * This can help to aggregate small writes. 9258 */ 9259 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim) 9260 tcp->tcp_naglim = mss; 9261 /* 9262 * TCP should be able to buffer at least 4 MSS data for obvious 9263 * performance reason. 9264 */ 9265 if ((mss << 2) > tcp->tcp_xmit_hiwater) 9266 tcp->tcp_xmit_hiwater = mss << 2; 9267 9268 /* 9269 * Check if we need to apply the tcp_init_cwnd here. If 9270 * it is set and the MSS gets bigger (should not happen 9271 * normally), we need to adjust the resulting tcp_cwnd properly. 9272 * The new tcp_cwnd should not get bigger. 9273 */ 9274 if (tcp->tcp_init_cwnd == 0) { 9275 tcp->tcp_cwnd = MIN(tcp_slow_start_initial * mss, 9276 MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss))); 9277 } else { 9278 if (tcp->tcp_mss < mss) { 9279 tcp->tcp_cwnd = MAX(1, 9280 (tcp->tcp_init_cwnd * tcp->tcp_mss / mss)) * mss; 9281 } else { 9282 tcp->tcp_cwnd = tcp->tcp_init_cwnd * mss; 9283 } 9284 } 9285 tcp->tcp_mss = mss; 9286 tcp->tcp_cwnd_cnt = 0; 9287 (void) tcp_maxpsz_set(tcp, B_TRUE); 9288 } 9289 9290 static int 9291 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 9292 { 9293 tcp_t *tcp = NULL; 9294 conn_t *connp; 9295 int err; 9296 dev_t conn_dev; 9297 zoneid_t zoneid = getzoneid(); 9298 9299 /* 9300 * Special case for install: miniroot needs to be able to access files 9301 * via NFS as though it were always in the global zone. 
9302 */ 9303 if (credp == kcred && nfs_global_client_only != 0) 9304 zoneid = GLOBAL_ZONEID; 9305 9306 if (q->q_ptr != NULL) 9307 return (0); 9308 9309 if (sflag == MODOPEN) { 9310 /* 9311 * This is a special case. The purpose of a modopen 9312 * is to allow just the T_SVR4_OPTMGMT_REQ to pass 9313 * through for MIB browsers. Everything else is failed. 9314 */ 9315 connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt)); 9316 9317 if (connp == NULL) 9318 return (ENOMEM); 9319 9320 connp->conn_flags |= IPCL_TCPMOD; 9321 connp->conn_cred = credp; 9322 connp->conn_zoneid = zoneid; 9323 q->q_ptr = WR(q)->q_ptr = connp; 9324 crhold(credp); 9325 q->q_qinfo = &tcp_mod_rinit; 9326 WR(q)->q_qinfo = &tcp_mod_winit; 9327 qprocson(q); 9328 return (0); 9329 } 9330 9331 if ((conn_dev = inet_minor_alloc(ip_minor_arena)) == 0) 9332 return (EBUSY); 9333 9334 *devp = makedevice(getemajor(*devp), (minor_t)conn_dev); 9335 9336 if (flag & SO_ACCEPTOR) { 9337 q->q_qinfo = &tcp_acceptor_rinit; 9338 q->q_ptr = (void *)conn_dev; 9339 WR(q)->q_qinfo = &tcp_acceptor_winit; 9340 WR(q)->q_ptr = (void *)conn_dev; 9341 qprocson(q); 9342 return (0); 9343 } 9344 9345 connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt)); 9346 if (connp == NULL) { 9347 inet_minor_free(ip_minor_arena, conn_dev); 9348 q->q_ptr = NULL; 9349 return (ENOSR); 9350 } 9351 connp->conn_sqp = IP_SQUEUE_GET(lbolt); 9352 tcp = connp->conn_tcp; 9353 9354 q->q_ptr = WR(q)->q_ptr = connp; 9355 if (getmajor(*devp) == TCP6_MAJ) { 9356 connp->conn_flags |= (IPCL_TCP6|IPCL_ISV6); 9357 connp->conn_send = ip_output_v6; 9358 connp->conn_af_isv6 = B_TRUE; 9359 connp->conn_pkt_isv6 = B_TRUE; 9360 connp->conn_src_preferences = IPV6_PREFER_SRC_DEFAULT; 9361 tcp->tcp_ipversion = IPV6_VERSION; 9362 tcp->tcp_family = AF_INET6; 9363 tcp->tcp_mss = tcp_mss_def_ipv6; 9364 } else { 9365 connp->conn_flags |= IPCL_TCP4; 9366 connp->conn_send = ip_output; 9367 connp->conn_af_isv6 = B_FALSE; 9368 connp->conn_pkt_isv6 = B_FALSE; 9369 tcp->tcp_ipversion = IPV4_VERSION; 9370 tcp->tcp_family = AF_INET; 9371 tcp->tcp_mss = tcp_mss_def_ipv4; 9372 } 9373 9374 /* 9375 * TCP keeps a copy of cred for cache locality reasons but 9376 * we put a reference only once. If connp->conn_cred 9377 * becomes invalid, tcp_cred should also be set to NULL. 9378 */ 9379 tcp->tcp_cred = connp->conn_cred = credp; 9380 crhold(connp->conn_cred); 9381 tcp->tcp_cpid = curproc->p_pid; 9382 connp->conn_zoneid = zoneid; 9383 connp->conn_mlp_type = mlptSingle; 9384 connp->conn_ulp_labeled = !is_system_labeled(); 9385 9386 /* 9387 * If the caller has the process-wide flag set, then default to MAC 9388 * exempt mode. This allows read-down to unlabeled hosts. 9389 */ 9390 if (getpflags(NET_MAC_AWARE, credp) != 0) 9391 connp->conn_mac_exempt = B_TRUE; 9392 9393 connp->conn_dev = conn_dev; 9394 9395 ASSERT(q->q_qinfo == &tcp_rinit); 9396 ASSERT(WR(q)->q_qinfo == &tcp_winit); 9397 9398 if (flag & SO_SOCKSTR) { 9399 /* 9400 * No need to insert a socket in tcp acceptor hash. 9401 * If it was a socket acceptor stream, we dealt with 9402 * it above. A socket listener can never accept a 9403 * connection and doesn't need acceptor_id. 
9404 */ 9405 connp->conn_flags |= IPCL_SOCKET; 9406 tcp->tcp_issocket = 1; 9407 WR(q)->q_qinfo = &tcp_sock_winit; 9408 } else { 9409 #ifdef _ILP32 9410 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 9411 #else 9412 tcp->tcp_acceptor_id = conn_dev; 9413 #endif /* _ILP32 */ 9414 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 9415 } 9416 9417 if (tcp_trace) 9418 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_SLEEP); 9419 9420 err = tcp_init(tcp, q); 9421 if (err != 0) { 9422 inet_minor_free(ip_minor_arena, connp->conn_dev); 9423 tcp_acceptor_hash_remove(tcp); 9424 CONN_DEC_REF(connp); 9425 q->q_ptr = WR(q)->q_ptr = NULL; 9426 return (err); 9427 } 9428 9429 RD(q)->q_hiwat = tcp_recv_hiwat; 9430 tcp->tcp_rwnd = tcp_recv_hiwat; 9431 9432 /* Non-zero default values */ 9433 connp->conn_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; 9434 /* 9435 * Put the ref for TCP. Ref for IP was already put 9436 * by ipcl_conn_create. Also Make the conn_t globally 9437 * visible to walkers 9438 */ 9439 mutex_enter(&connp->conn_lock); 9440 CONN_INC_REF_LOCKED(connp); 9441 ASSERT(connp->conn_ref == 2); 9442 connp->conn_state_flags &= ~CONN_INCIPIENT; 9443 mutex_exit(&connp->conn_lock); 9444 9445 qprocson(q); 9446 return (0); 9447 } 9448 9449 /* 9450 * Some TCP options can be "set" by requesting them in the option 9451 * buffer. This is needed for XTI feature test though we do not 9452 * allow it in general. We interpret that this mechanism is more 9453 * applicable to OSI protocols and need not be allowed in general. 9454 * This routine filters out options for which it is not allowed (most) 9455 * and lets through those (few) for which it is. [ The XTI interface 9456 * test suite specifics will imply that any XTI_GENERIC level XTI_* if 9457 * ever implemented will have to be allowed here ]. 9458 */ 9459 static boolean_t 9460 tcp_allow_connopt_set(int level, int name) 9461 { 9462 9463 switch (level) { 9464 case IPPROTO_TCP: 9465 switch (name) { 9466 case TCP_NODELAY: 9467 return (B_TRUE); 9468 default: 9469 return (B_FALSE); 9470 } 9471 /*NOTREACHED*/ 9472 default: 9473 return (B_FALSE); 9474 } 9475 /*NOTREACHED*/ 9476 } 9477 9478 /* 9479 * This routine gets default values of certain options whose default 9480 * values are maintained by protocol specific code 9481 */ 9482 /* ARGSUSED */ 9483 int 9484 tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr) 9485 { 9486 int32_t *i1 = (int32_t *)ptr; 9487 9488 switch (level) { 9489 case IPPROTO_TCP: 9490 switch (name) { 9491 case TCP_NOTIFY_THRESHOLD: 9492 *i1 = tcp_ip_notify_interval; 9493 break; 9494 case TCP_ABORT_THRESHOLD: 9495 *i1 = tcp_ip_abort_interval; 9496 break; 9497 case TCP_CONN_NOTIFY_THRESHOLD: 9498 *i1 = tcp_ip_notify_cinterval; 9499 break; 9500 case TCP_CONN_ABORT_THRESHOLD: 9501 *i1 = tcp_ip_abort_cinterval; 9502 break; 9503 default: 9504 return (-1); 9505 } 9506 break; 9507 case IPPROTO_IP: 9508 switch (name) { 9509 case IP_TTL: 9510 *i1 = tcp_ipv4_ttl; 9511 break; 9512 default: 9513 return (-1); 9514 } 9515 break; 9516 case IPPROTO_IPV6: 9517 switch (name) { 9518 case IPV6_UNICAST_HOPS: 9519 *i1 = tcp_ipv6_hoplimit; 9520 break; 9521 default: 9522 return (-1); 9523 } 9524 break; 9525 default: 9526 return (-1); 9527 } 9528 return (sizeof (int)); 9529 } 9530 9531 9532 /* 9533 * TCP routine to get the values of options. 
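 *
 * The return value conventions used below are: the length in bytes of
 * the value placed at 'ptr' (sizeof (int) for most options), 0 when
 * the option is not currently set, -1 when the option cannot be
 * returned here, and -EINVAL for options handled entirely at the IP
 * level.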
9534 */ 9535 int 9536 tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr) 9537 { 9538 int *i1 = (int *)ptr; 9539 conn_t *connp = Q_TO_CONN(q); 9540 tcp_t *tcp = connp->conn_tcp; 9541 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 9542 9543 switch (level) { 9544 case SOL_SOCKET: 9545 switch (name) { 9546 case SO_LINGER: { 9547 struct linger *lgr = (struct linger *)ptr; 9548 9549 lgr->l_onoff = tcp->tcp_linger ? SO_LINGER : 0; 9550 lgr->l_linger = tcp->tcp_lingertime; 9551 } 9552 return (sizeof (struct linger)); 9553 case SO_DEBUG: 9554 *i1 = tcp->tcp_debug ? SO_DEBUG : 0; 9555 break; 9556 case SO_KEEPALIVE: 9557 *i1 = tcp->tcp_ka_enabled ? SO_KEEPALIVE : 0; 9558 break; 9559 case SO_DONTROUTE: 9560 *i1 = tcp->tcp_dontroute ? SO_DONTROUTE : 0; 9561 break; 9562 case SO_USELOOPBACK: 9563 *i1 = tcp->tcp_useloopback ? SO_USELOOPBACK : 0; 9564 break; 9565 case SO_BROADCAST: 9566 *i1 = tcp->tcp_broadcast ? SO_BROADCAST : 0; 9567 break; 9568 case SO_REUSEADDR: 9569 *i1 = tcp->tcp_reuseaddr ? SO_REUSEADDR : 0; 9570 break; 9571 case SO_OOBINLINE: 9572 *i1 = tcp->tcp_oobinline ? SO_OOBINLINE : 0; 9573 break; 9574 case SO_DGRAM_ERRIND: 9575 *i1 = tcp->tcp_dgram_errind ? SO_DGRAM_ERRIND : 0; 9576 break; 9577 case SO_TYPE: 9578 *i1 = SOCK_STREAM; 9579 break; 9580 case SO_SNDBUF: 9581 *i1 = tcp->tcp_xmit_hiwater; 9582 break; 9583 case SO_RCVBUF: 9584 *i1 = RD(q)->q_hiwat; 9585 break; 9586 case SO_SND_COPYAVOID: 9587 *i1 = tcp->tcp_snd_zcopy_on ? 9588 SO_SND_COPYAVOID : 0; 9589 break; 9590 case SO_ALLZONES: 9591 *i1 = connp->conn_allzones ? 1 : 0; 9592 break; 9593 case SO_ANON_MLP: 9594 *i1 = connp->conn_anon_mlp; 9595 break; 9596 case SO_MAC_EXEMPT: 9597 *i1 = connp->conn_mac_exempt; 9598 break; 9599 default: 9600 return (-1); 9601 } 9602 break; 9603 case IPPROTO_TCP: 9604 switch (name) { 9605 case TCP_NODELAY: 9606 *i1 = (tcp->tcp_naglim == 1) ? TCP_NODELAY : 0; 9607 break; 9608 case TCP_MAXSEG: 9609 *i1 = tcp->tcp_mss; 9610 break; 9611 case TCP_NOTIFY_THRESHOLD: 9612 *i1 = (int)tcp->tcp_first_timer_threshold; 9613 break; 9614 case TCP_ABORT_THRESHOLD: 9615 *i1 = tcp->tcp_second_timer_threshold; 9616 break; 9617 case TCP_CONN_NOTIFY_THRESHOLD: 9618 *i1 = tcp->tcp_first_ctimer_threshold; 9619 break; 9620 case TCP_CONN_ABORT_THRESHOLD: 9621 *i1 = tcp->tcp_second_ctimer_threshold; 9622 break; 9623 case TCP_RECVDSTADDR: 9624 *i1 = tcp->tcp_recvdstaddr; 9625 break; 9626 case TCP_ANONPRIVBIND: 9627 *i1 = tcp->tcp_anon_priv_bind; 9628 break; 9629 case TCP_EXCLBIND: 9630 *i1 = tcp->tcp_exclbind ? TCP_EXCLBIND : 0; 9631 break; 9632 case TCP_INIT_CWND: 9633 *i1 = tcp->tcp_init_cwnd; 9634 break; 9635 case TCP_KEEPALIVE_THRESHOLD: 9636 *i1 = tcp->tcp_ka_interval; 9637 break; 9638 case TCP_KEEPALIVE_ABORT_THRESHOLD: 9639 *i1 = tcp->tcp_ka_abort_thres; 9640 break; 9641 case TCP_CORK: 9642 *i1 = tcp->tcp_cork; 9643 break; 9644 default: 9645 return (-1); 9646 } 9647 break; 9648 case IPPROTO_IP: 9649 if (tcp->tcp_family != AF_INET) 9650 return (-1); 9651 switch (name) { 9652 case IP_OPTIONS: 9653 case T_IP_OPTIONS: { 9654 /* 9655 * This is compatible with BSD in that in only return 9656 * the reverse source route with the final destination 9657 * as the last entry. The first 4 bytes of the option 9658 * will contain the final destination. 
9659 */ 9660 int opt_len; 9661 9662 opt_len = (char *)tcp->tcp_tcph - (char *)tcp->tcp_ipha; 9663 opt_len -= tcp->tcp_label_len + IP_SIMPLE_HDR_LENGTH; 9664 ASSERT(opt_len >= 0); 9665 /* Caller ensures enough space */ 9666 if (opt_len > 0) { 9667 /* 9668 * TODO: Do we have to handle getsockopt on an 9669 * initiator as well? 9670 */ 9671 return (ip_opt_get_user(tcp->tcp_ipha, ptr)); 9672 } 9673 return (0); 9674 } 9675 case IP_TOS: 9676 case T_IP_TOS: 9677 *i1 = (int)tcp->tcp_ipha->ipha_type_of_service; 9678 break; 9679 case IP_TTL: 9680 *i1 = (int)tcp->tcp_ipha->ipha_ttl; 9681 break; 9682 case IP_NEXTHOP: 9683 /* Handled at IP level */ 9684 return (-EINVAL); 9685 default: 9686 return (-1); 9687 } 9688 break; 9689 case IPPROTO_IPV6: 9690 /* 9691 * IPPROTO_IPV6 options are only supported for sockets 9692 * that are using IPv6 on the wire. 9693 */ 9694 if (tcp->tcp_ipversion != IPV6_VERSION) { 9695 return (-1); 9696 } 9697 switch (name) { 9698 case IPV6_UNICAST_HOPS: 9699 *i1 = (unsigned int) tcp->tcp_ip6h->ip6_hops; 9700 break; /* goto sizeof (int) option return */ 9701 case IPV6_BOUND_IF: 9702 /* Zero if not set */ 9703 *i1 = tcp->tcp_bound_if; 9704 break; /* goto sizeof (int) option return */ 9705 case IPV6_RECVPKTINFO: 9706 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) 9707 *i1 = 1; 9708 else 9709 *i1 = 0; 9710 break; /* goto sizeof (int) option return */ 9711 case IPV6_RECVTCLASS: 9712 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS) 9713 *i1 = 1; 9714 else 9715 *i1 = 0; 9716 break; /* goto sizeof (int) option return */ 9717 case IPV6_RECVHOPLIMIT: 9718 if (tcp->tcp_ipv6_recvancillary & 9719 TCP_IPV6_RECVHOPLIMIT) 9720 *i1 = 1; 9721 else 9722 *i1 = 0; 9723 break; /* goto sizeof (int) option return */ 9724 case IPV6_RECVHOPOPTS: 9725 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) 9726 *i1 = 1; 9727 else 9728 *i1 = 0; 9729 break; /* goto sizeof (int) option return */ 9730 case IPV6_RECVDSTOPTS: 9731 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVDSTOPTS) 9732 *i1 = 1; 9733 else 9734 *i1 = 0; 9735 break; /* goto sizeof (int) option return */ 9736 case _OLD_IPV6_RECVDSTOPTS: 9737 if (tcp->tcp_ipv6_recvancillary & 9738 TCP_OLD_IPV6_RECVDSTOPTS) 9739 *i1 = 1; 9740 else 9741 *i1 = 0; 9742 break; /* goto sizeof (int) option return */ 9743 case IPV6_RECVRTHDR: 9744 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) 9745 *i1 = 1; 9746 else 9747 *i1 = 0; 9748 break; /* goto sizeof (int) option return */ 9749 case IPV6_RECVRTHDRDSTOPTS: 9750 if (tcp->tcp_ipv6_recvancillary & 9751 TCP_IPV6_RECVRTDSTOPTS) 9752 *i1 = 1; 9753 else 9754 *i1 = 0; 9755 break; /* goto sizeof (int) option return */ 9756 case IPV6_PKTINFO: { 9757 /* XXX assumes that caller has room for max size! 
*/ 9758 struct in6_pktinfo *pkti; 9759 9760 pkti = (struct in6_pktinfo *)ptr; 9761 if (ipp->ipp_fields & IPPF_IFINDEX) 9762 pkti->ipi6_ifindex = ipp->ipp_ifindex; 9763 else 9764 pkti->ipi6_ifindex = 0; 9765 if (ipp->ipp_fields & IPPF_ADDR) 9766 pkti->ipi6_addr = ipp->ipp_addr; 9767 else 9768 pkti->ipi6_addr = ipv6_all_zeros; 9769 return (sizeof (struct in6_pktinfo)); 9770 } 9771 case IPV6_TCLASS: 9772 if (ipp->ipp_fields & IPPF_TCLASS) 9773 *i1 = ipp->ipp_tclass; 9774 else 9775 *i1 = IPV6_FLOW_TCLASS( 9776 IPV6_DEFAULT_VERS_AND_FLOW); 9777 break; /* goto sizeof (int) option return */ 9778 case IPV6_NEXTHOP: { 9779 sin6_t *sin6 = (sin6_t *)ptr; 9780 9781 if (!(ipp->ipp_fields & IPPF_NEXTHOP)) 9782 return (0); 9783 *sin6 = sin6_null; 9784 sin6->sin6_family = AF_INET6; 9785 sin6->sin6_addr = ipp->ipp_nexthop; 9786 return (sizeof (sin6_t)); 9787 } 9788 case IPV6_HOPOPTS: 9789 if (!(ipp->ipp_fields & IPPF_HOPOPTS)) 9790 return (0); 9791 if (ipp->ipp_hopoptslen <= tcp->tcp_label_len) 9792 return (0); 9793 bcopy((char *)ipp->ipp_hopopts + tcp->tcp_label_len, 9794 ptr, ipp->ipp_hopoptslen - tcp->tcp_label_len); 9795 if (tcp->tcp_label_len > 0) { 9796 ptr[0] = ((char *)ipp->ipp_hopopts)[0]; 9797 ptr[1] = (ipp->ipp_hopoptslen - 9798 tcp->tcp_label_len + 7) / 8 - 1; 9799 } 9800 return (ipp->ipp_hopoptslen - tcp->tcp_label_len); 9801 case IPV6_RTHDRDSTOPTS: 9802 if (!(ipp->ipp_fields & IPPF_RTDSTOPTS)) 9803 return (0); 9804 bcopy(ipp->ipp_rtdstopts, ptr, ipp->ipp_rtdstoptslen); 9805 return (ipp->ipp_rtdstoptslen); 9806 case IPV6_RTHDR: 9807 if (!(ipp->ipp_fields & IPPF_RTHDR)) 9808 return (0); 9809 bcopy(ipp->ipp_rthdr, ptr, ipp->ipp_rthdrlen); 9810 return (ipp->ipp_rthdrlen); 9811 case IPV6_DSTOPTS: 9812 if (!(ipp->ipp_fields & IPPF_DSTOPTS)) 9813 return (0); 9814 bcopy(ipp->ipp_dstopts, ptr, ipp->ipp_dstoptslen); 9815 return (ipp->ipp_dstoptslen); 9816 case IPV6_SRC_PREFERENCES: 9817 return (ip6_get_src_preferences(connp, 9818 (uint32_t *)ptr)); 9819 case IPV6_PATHMTU: { 9820 struct ip6_mtuinfo *mtuinfo = (struct ip6_mtuinfo *)ptr; 9821 9822 if (tcp->tcp_state < TCPS_ESTABLISHED) 9823 return (-1); 9824 9825 return (ip_fill_mtuinfo(&connp->conn_remv6, 9826 connp->conn_fport, mtuinfo)); 9827 } 9828 default: 9829 return (-1); 9830 } 9831 break; 9832 default: 9833 return (-1); 9834 } 9835 return (sizeof (int)); 9836 } 9837 9838 /* 9839 * We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements. 9840 * Parameters are assumed to be verified by the caller. 9841 */ 9842 /* ARGSUSED */ 9843 int 9844 tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name, 9845 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 9846 void *thisdg_attrs, cred_t *cr, mblk_t *mblk) 9847 { 9848 conn_t *connp = Q_TO_CONN(q); 9849 tcp_t *tcp = connp->conn_tcp; 9850 int *i1 = (int *)invalp; 9851 boolean_t onoff = (*i1 == 0) ? 0 : 1; 9852 boolean_t checkonly; 9853 int reterr; 9854 9855 switch (optset_context) { 9856 case SETFN_OPTCOM_CHECKONLY: 9857 checkonly = B_TRUE; 9858 /* 9859 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ 9860 * inlen != 0 implies value supplied and 9861 * we have to "pretend" to set it. 9862 * inlen == 0 implies that there is no 9863 * value part in T_CHECK request and just validation 9864 * done elsewhere should be enough, we just return here. 
9865 */ 9866 if (inlen == 0) { 9867 *outlenp = 0; 9868 return (0); 9869 } 9870 break; 9871 case SETFN_OPTCOM_NEGOTIATE: 9872 checkonly = B_FALSE; 9873 break; 9874 case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */ 9875 case SETFN_CONN_NEGOTIATE: 9876 checkonly = B_FALSE; 9877 /* 9878 * Negotiating local and "association-related" options 9879 * from other (T_CONN_REQ, T_CONN_RES,T_UNITDATA_REQ) 9880 * primitives is allowed by XTI, but we choose 9881 * to not implement this style negotiation for Internet 9882 * protocols (We interpret it is a must for OSI world but 9883 * optional for Internet protocols) for all options. 9884 * [ Will do only for the few options that enable test 9885 * suites that our XTI implementation of this feature 9886 * works for transports that do allow it ] 9887 */ 9888 if (!tcp_allow_connopt_set(level, name)) { 9889 *outlenp = 0; 9890 return (EINVAL); 9891 } 9892 break; 9893 default: 9894 /* 9895 * We should never get here 9896 */ 9897 *outlenp = 0; 9898 return (EINVAL); 9899 } 9900 9901 ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) || 9902 (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0)); 9903 9904 /* 9905 * For TCP, we should have no ancillary data sent down 9906 * (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs 9907 * has to be zero. 9908 */ 9909 ASSERT(thisdg_attrs == NULL); 9910 9911 /* 9912 * For fixed length options, no sanity check 9913 * of passed in length is done. It is assumed *_optcom_req() 9914 * routines do the right thing. 9915 */ 9916 9917 switch (level) { 9918 case SOL_SOCKET: 9919 switch (name) { 9920 case SO_LINGER: { 9921 struct linger *lgr = (struct linger *)invalp; 9922 9923 if (!checkonly) { 9924 if (lgr->l_onoff) { 9925 tcp->tcp_linger = 1; 9926 tcp->tcp_lingertime = lgr->l_linger; 9927 } else { 9928 tcp->tcp_linger = 0; 9929 tcp->tcp_lingertime = 0; 9930 } 9931 /* struct copy */ 9932 *(struct linger *)outvalp = *lgr; 9933 } else { 9934 if (!lgr->l_onoff) { 9935 ((struct linger *)outvalp)->l_onoff = 0; 9936 ((struct linger *)outvalp)->l_linger = 0; 9937 } else { 9938 /* struct copy */ 9939 *(struct linger *)outvalp = *lgr; 9940 } 9941 } 9942 *outlenp = sizeof (struct linger); 9943 return (0); 9944 } 9945 case SO_DEBUG: 9946 if (!checkonly) 9947 tcp->tcp_debug = onoff; 9948 break; 9949 case SO_KEEPALIVE: 9950 if (checkonly) { 9951 /* T_CHECK case */ 9952 break; 9953 } 9954 9955 if (!onoff) { 9956 if (tcp->tcp_ka_enabled) { 9957 if (tcp->tcp_ka_tid != 0) { 9958 (void) TCP_TIMER_CANCEL(tcp, 9959 tcp->tcp_ka_tid); 9960 tcp->tcp_ka_tid = 0; 9961 } 9962 tcp->tcp_ka_enabled = 0; 9963 } 9964 break; 9965 } 9966 if (!tcp->tcp_ka_enabled) { 9967 /* Crank up the keepalive timer */ 9968 tcp->tcp_ka_last_intrvl = 0; 9969 tcp->tcp_ka_tid = TCP_TIMER(tcp, 9970 tcp_keepalive_killer, 9971 MSEC_TO_TICK(tcp->tcp_ka_interval)); 9972 tcp->tcp_ka_enabled = 1; 9973 } 9974 break; 9975 case SO_DONTROUTE: 9976 /* 9977 * SO_DONTROUTE, SO_USELOOPBACK, and SO_BROADCAST are 9978 * only of interest to IP. We track them here only so 9979 * that we can report their current value. 
9980 */ 9981 if (!checkonly) { 9982 tcp->tcp_dontroute = onoff; 9983 tcp->tcp_connp->conn_dontroute = onoff; 9984 } 9985 break; 9986 case SO_USELOOPBACK: 9987 if (!checkonly) { 9988 tcp->tcp_useloopback = onoff; 9989 tcp->tcp_connp->conn_loopback = onoff; 9990 } 9991 break; 9992 case SO_BROADCAST: 9993 if (!checkonly) { 9994 tcp->tcp_broadcast = onoff; 9995 tcp->tcp_connp->conn_broadcast = onoff; 9996 } 9997 break; 9998 case SO_REUSEADDR: 9999 if (!checkonly) { 10000 tcp->tcp_reuseaddr = onoff; 10001 tcp->tcp_connp->conn_reuseaddr = onoff; 10002 } 10003 break; 10004 case SO_OOBINLINE: 10005 if (!checkonly) 10006 tcp->tcp_oobinline = onoff; 10007 break; 10008 case SO_DGRAM_ERRIND: 10009 if (!checkonly) 10010 tcp->tcp_dgram_errind = onoff; 10011 break; 10012 case SO_SNDBUF: { 10013 tcp_t *peer_tcp; 10014 10015 if (*i1 > tcp_max_buf) { 10016 *outlenp = 0; 10017 return (ENOBUFS); 10018 } 10019 if (checkonly) 10020 break; 10021 10022 tcp->tcp_xmit_hiwater = *i1; 10023 if (tcp_snd_lowat_fraction != 0) 10024 tcp->tcp_xmit_lowater = 10025 tcp->tcp_xmit_hiwater / 10026 tcp_snd_lowat_fraction; 10027 (void) tcp_maxpsz_set(tcp, B_TRUE); 10028 /* 10029 * If we are flow-controlled, recheck the condition. 10030 * There are apps that increase SO_SNDBUF size when 10031 * flow-controlled (EWOULDBLOCK), and expect the flow 10032 * control condition to be lifted right away. 10033 * 10034 * For the fused tcp loopback case, in order to avoid 10035 * a race with the peer's tcp_fuse_rrw() we need to 10036 * hold its fuse_lock while accessing tcp_flow_stopped. 10037 */ 10038 peer_tcp = tcp->tcp_loopback_peer; 10039 ASSERT(!tcp->tcp_fused || peer_tcp != NULL); 10040 if (tcp->tcp_fused) 10041 mutex_enter(&peer_tcp->tcp_fuse_lock); 10042 10043 if (tcp->tcp_flow_stopped && 10044 TCP_UNSENT_BYTES(tcp) < tcp->tcp_xmit_hiwater) { 10045 tcp_clrqfull(tcp); 10046 } 10047 if (tcp->tcp_fused) 10048 mutex_exit(&peer_tcp->tcp_fuse_lock); 10049 break; 10050 } 10051 case SO_RCVBUF: 10052 if (*i1 > tcp_max_buf) { 10053 *outlenp = 0; 10054 return (ENOBUFS); 10055 } 10056 /* Silently ignore zero */ 10057 if (!checkonly && *i1 != 0) { 10058 *i1 = MSS_ROUNDUP(*i1, tcp->tcp_mss); 10059 (void) tcp_rwnd_set(tcp, *i1); 10060 } 10061 /* 10062 * XXX should we return the rwnd here 10063 * and tcp_opt_get ? 10064 */ 10065 break; 10066 case SO_SND_COPYAVOID: 10067 if (!checkonly) { 10068 /* we only allow enable at most once for now */ 10069 if (tcp->tcp_loopback || 10070 (!tcp->tcp_snd_zcopy_aware && 10071 (onoff != 1 || !tcp_zcopy_check(tcp)))) { 10072 *outlenp = 0; 10073 return (EOPNOTSUPP); 10074 } 10075 tcp->tcp_snd_zcopy_aware = 1; 10076 } 10077 break; 10078 case SO_ALLZONES: 10079 /* Handled at the IP level */ 10080 return (-EINVAL); 10081 case SO_ANON_MLP: 10082 if (!checkonly) { 10083 mutex_enter(&connp->conn_lock); 10084 connp->conn_anon_mlp = onoff; 10085 mutex_exit(&connp->conn_lock); 10086 } 10087 break; 10088 case SO_MAC_EXEMPT: 10089 if (secpolicy_net_mac_aware(cr) != 0 || 10090 IPCL_IS_BOUND(connp)) 10091 return (EACCES); 10092 if (!checkonly) { 10093 mutex_enter(&connp->conn_lock); 10094 connp->conn_mac_exempt = onoff; 10095 mutex_exit(&connp->conn_lock); 10096 } 10097 break; 10098 default: 10099 *outlenp = 0; 10100 return (EINVAL); 10101 } 10102 break; 10103 case IPPROTO_TCP: 10104 switch (name) { 10105 case TCP_NODELAY: 10106 if (!checkonly) 10107 tcp->tcp_naglim = *i1 ? 
1 : tcp->tcp_mss; 10108 break; 10109 case TCP_NOTIFY_THRESHOLD: 10110 if (!checkonly) 10111 tcp->tcp_first_timer_threshold = *i1; 10112 break; 10113 case TCP_ABORT_THRESHOLD: 10114 if (!checkonly) 10115 tcp->tcp_second_timer_threshold = *i1; 10116 break; 10117 case TCP_CONN_NOTIFY_THRESHOLD: 10118 if (!checkonly) 10119 tcp->tcp_first_ctimer_threshold = *i1; 10120 break; 10121 case TCP_CONN_ABORT_THRESHOLD: 10122 if (!checkonly) 10123 tcp->tcp_second_ctimer_threshold = *i1; 10124 break; 10125 case TCP_RECVDSTADDR: 10126 if (tcp->tcp_state > TCPS_LISTEN) 10127 return (EOPNOTSUPP); 10128 if (!checkonly) 10129 tcp->tcp_recvdstaddr = onoff; 10130 break; 10131 case TCP_ANONPRIVBIND: 10132 if ((reterr = secpolicy_net_privaddr(cr, 0)) != 0) { 10133 *outlenp = 0; 10134 return (reterr); 10135 } 10136 if (!checkonly) { 10137 tcp->tcp_anon_priv_bind = onoff; 10138 } 10139 break; 10140 case TCP_EXCLBIND: 10141 if (!checkonly) 10142 tcp->tcp_exclbind = onoff; 10143 break; /* goto sizeof (int) option return */ 10144 case TCP_INIT_CWND: { 10145 uint32_t init_cwnd = *((uint32_t *)invalp); 10146 10147 if (checkonly) 10148 break; 10149 10150 /* 10151 * Only allow socket with network configuration 10152 * privilege to set the initial cwnd to be larger 10153 * than allowed by RFC 3390. 10154 */ 10155 if (init_cwnd <= MIN(4, MAX(2, 4380 / tcp->tcp_mss))) { 10156 tcp->tcp_init_cwnd = init_cwnd; 10157 break; 10158 } 10159 if ((reterr = secpolicy_net_config(cr, B_TRUE)) != 0) { 10160 *outlenp = 0; 10161 return (reterr); 10162 } 10163 if (init_cwnd > TCP_MAX_INIT_CWND) { 10164 *outlenp = 0; 10165 return (EINVAL); 10166 } 10167 tcp->tcp_init_cwnd = init_cwnd; 10168 break; 10169 } 10170 case TCP_KEEPALIVE_THRESHOLD: 10171 if (checkonly) 10172 break; 10173 10174 if (*i1 < tcp_keepalive_interval_low || 10175 *i1 > tcp_keepalive_interval_high) { 10176 *outlenp = 0; 10177 return (EINVAL); 10178 } 10179 if (*i1 != tcp->tcp_ka_interval) { 10180 tcp->tcp_ka_interval = *i1; 10181 /* 10182 * Check if we need to restart the 10183 * keepalive timer. 10184 */ 10185 if (tcp->tcp_ka_tid != 0) { 10186 ASSERT(tcp->tcp_ka_enabled); 10187 (void) TCP_TIMER_CANCEL(tcp, 10188 tcp->tcp_ka_tid); 10189 tcp->tcp_ka_last_intrvl = 0; 10190 tcp->tcp_ka_tid = TCP_TIMER(tcp, 10191 tcp_keepalive_killer, 10192 MSEC_TO_TICK(tcp->tcp_ka_interval)); 10193 } 10194 } 10195 break; 10196 case TCP_KEEPALIVE_ABORT_THRESHOLD: 10197 if (!checkonly) { 10198 if (*i1 < tcp_keepalive_abort_interval_low || 10199 *i1 > tcp_keepalive_abort_interval_high) { 10200 *outlenp = 0; 10201 return (EINVAL); 10202 } 10203 tcp->tcp_ka_abort_thres = *i1; 10204 } 10205 break; 10206 case TCP_CORK: 10207 if (!checkonly) { 10208 /* 10209 * if tcp->tcp_cork was set and is now 10210 * being unset, we have to make sure that 10211 * the remaining data gets sent out. 
Also 10212 * unset tcp->tcp_cork so that tcp_wput_data() 10213 * can send data even if it is less than mss 10214 */ 10215 if (tcp->tcp_cork && onoff == 0 && 10216 tcp->tcp_unsent > 0) { 10217 tcp->tcp_cork = B_FALSE; 10218 tcp_wput_data(tcp, NULL, B_FALSE); 10219 } 10220 tcp->tcp_cork = onoff; 10221 } 10222 break; 10223 default: 10224 *outlenp = 0; 10225 return (EINVAL); 10226 } 10227 break; 10228 case IPPROTO_IP: 10229 if (tcp->tcp_family != AF_INET) { 10230 *outlenp = 0; 10231 return (ENOPROTOOPT); 10232 } 10233 switch (name) { 10234 case IP_OPTIONS: 10235 case T_IP_OPTIONS: 10236 reterr = tcp_opt_set_header(tcp, checkonly, 10237 invalp, inlen); 10238 if (reterr) { 10239 *outlenp = 0; 10240 return (reterr); 10241 } 10242 /* OK return - copy input buffer into output buffer */ 10243 if (invalp != outvalp) { 10244 /* don't trust bcopy for identical src/dst */ 10245 bcopy(invalp, outvalp, inlen); 10246 } 10247 *outlenp = inlen; 10248 return (0); 10249 case IP_TOS: 10250 case T_IP_TOS: 10251 if (!checkonly) { 10252 tcp->tcp_ipha->ipha_type_of_service = 10253 (uchar_t)*i1; 10254 tcp->tcp_tos = (uchar_t)*i1; 10255 } 10256 break; 10257 case IP_TTL: 10258 if (!checkonly) { 10259 tcp->tcp_ipha->ipha_ttl = (uchar_t)*i1; 10260 tcp->tcp_ttl = (uchar_t)*i1; 10261 } 10262 break; 10263 case IP_BOUND_IF: 10264 case IP_NEXTHOP: 10265 /* Handled at the IP level */ 10266 return (-EINVAL); 10267 case IP_SEC_OPT: 10268 /* 10269 * We should not allow policy setting after 10270 * we start listening for connections. 10271 */ 10272 if (tcp->tcp_state == TCPS_LISTEN) { 10273 return (EINVAL); 10274 } else { 10275 /* Handled at the IP level */ 10276 return (-EINVAL); 10277 } 10278 default: 10279 *outlenp = 0; 10280 return (EINVAL); 10281 } 10282 break; 10283 case IPPROTO_IPV6: { 10284 ip6_pkt_t *ipp; 10285 10286 /* 10287 * IPPROTO_IPV6 options are only supported for sockets 10288 * that are using IPv6 on the wire. 10289 */ 10290 if (tcp->tcp_ipversion != IPV6_VERSION) { 10291 *outlenp = 0; 10292 return (ENOPROTOOPT); 10293 } 10294 /* 10295 * Only sticky options; no ancillary data 10296 */ 10297 ASSERT(thisdg_attrs == NULL); 10298 ipp = &tcp->tcp_sticky_ipp; 10299 10300 switch (name) { 10301 case IPV6_UNICAST_HOPS: 10302 /* -1 means use default */ 10303 if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) { 10304 *outlenp = 0; 10305 return (EINVAL); 10306 } 10307 if (!checkonly) { 10308 if (*i1 == -1) { 10309 tcp->tcp_ip6h->ip6_hops = 10310 ipp->ipp_unicast_hops = 10311 (uint8_t)tcp_ipv6_hoplimit; 10312 ipp->ipp_fields &= ~IPPF_UNICAST_HOPS; 10313 /* Pass modified value to IP. 
*/ 10314 *i1 = tcp->tcp_ip6h->ip6_hops; 10315 } else { 10316 tcp->tcp_ip6h->ip6_hops = 10317 ipp->ipp_unicast_hops = 10318 (uint8_t)*i1; 10319 ipp->ipp_fields |= IPPF_UNICAST_HOPS; 10320 } 10321 reterr = tcp_build_hdrs(q, tcp); 10322 if (reterr != 0) 10323 return (reterr); 10324 } 10325 break; 10326 case IPV6_BOUND_IF: 10327 if (!checkonly) { 10328 int error = 0; 10329 10330 tcp->tcp_bound_if = *i1; 10331 error = ip_opt_set_ill(tcp->tcp_connp, *i1, 10332 B_TRUE, checkonly, level, name, mblk); 10333 if (error != 0) { 10334 *outlenp = 0; 10335 return (error); 10336 } 10337 } 10338 break; 10339 /* 10340 * Set boolean switches for ancillary data delivery 10341 */ 10342 case IPV6_RECVPKTINFO: 10343 if (!checkonly) { 10344 if (onoff) 10345 tcp->tcp_ipv6_recvancillary |= 10346 TCP_IPV6_RECVPKTINFO; 10347 else 10348 tcp->tcp_ipv6_recvancillary &= 10349 ~TCP_IPV6_RECVPKTINFO; 10350 /* Force it to be sent up with the next msg */ 10351 tcp->tcp_recvifindex = 0; 10352 } 10353 break; 10354 case IPV6_RECVTCLASS: 10355 if (!checkonly) { 10356 if (onoff) 10357 tcp->tcp_ipv6_recvancillary |= 10358 TCP_IPV6_RECVTCLASS; 10359 else 10360 tcp->tcp_ipv6_recvancillary &= 10361 ~TCP_IPV6_RECVTCLASS; 10362 } 10363 break; 10364 case IPV6_RECVHOPLIMIT: 10365 if (!checkonly) { 10366 if (onoff) 10367 tcp->tcp_ipv6_recvancillary |= 10368 TCP_IPV6_RECVHOPLIMIT; 10369 else 10370 tcp->tcp_ipv6_recvancillary &= 10371 ~TCP_IPV6_RECVHOPLIMIT; 10372 /* Force it to be sent up with the next msg */ 10373 tcp->tcp_recvhops = 0xffffffffU; 10374 } 10375 break; 10376 case IPV6_RECVHOPOPTS: 10377 if (!checkonly) { 10378 if (onoff) 10379 tcp->tcp_ipv6_recvancillary |= 10380 TCP_IPV6_RECVHOPOPTS; 10381 else 10382 tcp->tcp_ipv6_recvancillary &= 10383 ~TCP_IPV6_RECVHOPOPTS; 10384 } 10385 break; 10386 case IPV6_RECVDSTOPTS: 10387 if (!checkonly) { 10388 if (onoff) 10389 tcp->tcp_ipv6_recvancillary |= 10390 TCP_IPV6_RECVDSTOPTS; 10391 else 10392 tcp->tcp_ipv6_recvancillary &= 10393 ~TCP_IPV6_RECVDSTOPTS; 10394 } 10395 break; 10396 case _OLD_IPV6_RECVDSTOPTS: 10397 if (!checkonly) { 10398 if (onoff) 10399 tcp->tcp_ipv6_recvancillary |= 10400 TCP_OLD_IPV6_RECVDSTOPTS; 10401 else 10402 tcp->tcp_ipv6_recvancillary &= 10403 ~TCP_OLD_IPV6_RECVDSTOPTS; 10404 } 10405 break; 10406 case IPV6_RECVRTHDR: 10407 if (!checkonly) { 10408 if (onoff) 10409 tcp->tcp_ipv6_recvancillary |= 10410 TCP_IPV6_RECVRTHDR; 10411 else 10412 tcp->tcp_ipv6_recvancillary &= 10413 ~TCP_IPV6_RECVRTHDR; 10414 } 10415 break; 10416 case IPV6_RECVRTHDRDSTOPTS: 10417 if (!checkonly) { 10418 if (onoff) 10419 tcp->tcp_ipv6_recvancillary |= 10420 TCP_IPV6_RECVRTDSTOPTS; 10421 else 10422 tcp->tcp_ipv6_recvancillary &= 10423 ~TCP_IPV6_RECVRTDSTOPTS; 10424 } 10425 break; 10426 case IPV6_PKTINFO: 10427 if (inlen != 0 && inlen != sizeof (struct in6_pktinfo)) 10428 return (EINVAL); 10429 if (checkonly) 10430 break; 10431 10432 if (inlen == 0) { 10433 ipp->ipp_fields &= ~(IPPF_IFINDEX|IPPF_ADDR); 10434 } else { 10435 struct in6_pktinfo *pkti; 10436 10437 pkti = (struct in6_pktinfo *)invalp; 10438 /* 10439 * RFC 3542 states that ipi6_addr must be 10440 * the unspecified address when setting the 10441 * IPV6_PKTINFO sticky socket option on a 10442 * TCP socket. 10443 */ 10444 if (!IN6_IS_ADDR_UNSPECIFIED(&pkti->ipi6_addr)) 10445 return (EINVAL); 10446 /* 10447 * ip6_set_pktinfo() validates the source 10448 * address and interface index. 
10449 */ 10450 reterr = ip6_set_pktinfo(cr, tcp->tcp_connp, 10451 pkti, mblk); 10452 if (reterr != 0) 10453 return (reterr); 10454 ipp->ipp_ifindex = pkti->ipi6_ifindex; 10455 ipp->ipp_addr = pkti->ipi6_addr; 10456 if (ipp->ipp_ifindex != 0) 10457 ipp->ipp_fields |= IPPF_IFINDEX; 10458 else 10459 ipp->ipp_fields &= ~IPPF_IFINDEX; 10460 if (!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)) 10461 ipp->ipp_fields |= IPPF_ADDR; 10462 else 10463 ipp->ipp_fields &= ~IPPF_ADDR; 10464 } 10465 reterr = tcp_build_hdrs(q, tcp); 10466 if (reterr != 0) 10467 return (reterr); 10468 break; 10469 case IPV6_TCLASS: 10470 if (inlen != 0 && inlen != sizeof (int)) 10471 return (EINVAL); 10472 if (checkonly) 10473 break; 10474 10475 if (inlen == 0) { 10476 ipp->ipp_fields &= ~IPPF_TCLASS; 10477 } else { 10478 if (*i1 > 255 || *i1 < -1) 10479 return (EINVAL); 10480 if (*i1 == -1) { 10481 ipp->ipp_tclass = 0; 10482 *i1 = 0; 10483 } else { 10484 ipp->ipp_tclass = *i1; 10485 } 10486 ipp->ipp_fields |= IPPF_TCLASS; 10487 } 10488 reterr = tcp_build_hdrs(q, tcp); 10489 if (reterr != 0) 10490 return (reterr); 10491 break; 10492 case IPV6_NEXTHOP: 10493 /* 10494 * IP will verify that the nexthop is reachable 10495 * and fail for sticky options. 10496 */ 10497 if (inlen != 0 && inlen != sizeof (sin6_t)) 10498 return (EINVAL); 10499 if (checkonly) 10500 break; 10501 10502 if (inlen == 0) { 10503 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10504 } else { 10505 sin6_t *sin6 = (sin6_t *)invalp; 10506 10507 if (sin6->sin6_family != AF_INET6) 10508 return (EAFNOSUPPORT); 10509 if (IN6_IS_ADDR_V4MAPPED( 10510 &sin6->sin6_addr)) 10511 return (EADDRNOTAVAIL); 10512 ipp->ipp_nexthop = sin6->sin6_addr; 10513 if (!IN6_IS_ADDR_UNSPECIFIED( 10514 &ipp->ipp_nexthop)) 10515 ipp->ipp_fields |= IPPF_NEXTHOP; 10516 else 10517 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10518 } 10519 reterr = tcp_build_hdrs(q, tcp); 10520 if (reterr != 0) 10521 return (reterr); 10522 break; 10523 case IPV6_HOPOPTS: { 10524 ip6_hbh_t *hopts = (ip6_hbh_t *)invalp; 10525 10526 /* 10527 * Sanity checks - minimum size, size a multiple of 10528 * eight bytes, and matching size passed in. 10529 */ 10530 if (inlen != 0 && 10531 inlen != (8 * (hopts->ip6h_len + 1))) 10532 return (EINVAL); 10533 10534 if (checkonly) 10535 break; 10536 10537 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10538 (uchar_t **)&ipp->ipp_hopopts, 10539 &ipp->ipp_hopoptslen, tcp->tcp_label_len); 10540 if (reterr != 0) 10541 return (reterr); 10542 if (ipp->ipp_hopoptslen == 0) 10543 ipp->ipp_fields &= ~IPPF_HOPOPTS; 10544 else 10545 ipp->ipp_fields |= IPPF_HOPOPTS; 10546 reterr = tcp_build_hdrs(q, tcp); 10547 if (reterr != 0) 10548 return (reterr); 10549 break; 10550 } 10551 case IPV6_RTHDRDSTOPTS: { 10552 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10553 10554 /* 10555 * Sanity checks - minimum size, size a multiple of 10556 * eight bytes, and matching size passed in. 
10557 */ 10558 if (inlen != 0 && 10559 inlen != (8 * (dopts->ip6d_len + 1))) 10560 return (EINVAL); 10561 10562 if (checkonly) 10563 break; 10564 10565 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10566 (uchar_t **)&ipp->ipp_rtdstopts, 10567 &ipp->ipp_rtdstoptslen, 0); 10568 if (reterr != 0) 10569 return (reterr); 10570 if (ipp->ipp_rtdstoptslen == 0) 10571 ipp->ipp_fields &= ~IPPF_RTDSTOPTS; 10572 else 10573 ipp->ipp_fields |= IPPF_RTDSTOPTS; 10574 reterr = tcp_build_hdrs(q, tcp); 10575 if (reterr != 0) 10576 return (reterr); 10577 break; 10578 } 10579 case IPV6_DSTOPTS: { 10580 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10581 10582 /* 10583 * Sanity checks - minimum size, size a multiple of 10584 * eight bytes, and matching size passed in. 10585 */ 10586 if (inlen != 0 && 10587 inlen != (8 * (dopts->ip6d_len + 1))) 10588 return (EINVAL); 10589 10590 if (checkonly) 10591 break; 10592 10593 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10594 (uchar_t **)&ipp->ipp_dstopts, 10595 &ipp->ipp_dstoptslen, 0); 10596 if (reterr != 0) 10597 return (reterr); 10598 if (ipp->ipp_dstoptslen == 0) 10599 ipp->ipp_fields &= ~IPPF_DSTOPTS; 10600 else 10601 ipp->ipp_fields |= IPPF_DSTOPTS; 10602 reterr = tcp_build_hdrs(q, tcp); 10603 if (reterr != 0) 10604 return (reterr); 10605 break; 10606 } 10607 case IPV6_RTHDR: { 10608 ip6_rthdr_t *rt = (ip6_rthdr_t *)invalp; 10609 10610 /* 10611 * Sanity checks - minimum size, size a multiple of 10612 * eight bytes, and matching size passed in. 10613 */ 10614 if (inlen != 0 && 10615 inlen != (8 * (rt->ip6r_len + 1))) 10616 return (EINVAL); 10617 10618 if (checkonly) 10619 break; 10620 10621 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10622 (uchar_t **)&ipp->ipp_rthdr, 10623 &ipp->ipp_rthdrlen, 0); 10624 if (reterr != 0) 10625 return (reterr); 10626 if (ipp->ipp_rthdrlen == 0) 10627 ipp->ipp_fields &= ~IPPF_RTHDR; 10628 else 10629 ipp->ipp_fields |= IPPF_RTHDR; 10630 reterr = tcp_build_hdrs(q, tcp); 10631 if (reterr != 0) 10632 return (reterr); 10633 break; 10634 } 10635 case IPV6_V6ONLY: 10636 if (!checkonly) 10637 tcp->tcp_connp->conn_ipv6_v6only = onoff; 10638 break; 10639 case IPV6_USE_MIN_MTU: 10640 if (inlen != sizeof (int)) 10641 return (EINVAL); 10642 10643 if (*i1 < -1 || *i1 > 1) 10644 return (EINVAL); 10645 10646 if (checkonly) 10647 break; 10648 10649 ipp->ipp_fields |= IPPF_USE_MIN_MTU; 10650 ipp->ipp_use_min_mtu = *i1; 10651 break; 10652 case IPV6_BOUND_PIF: 10653 /* Handled at the IP level */ 10654 return (-EINVAL); 10655 case IPV6_SEC_OPT: 10656 /* 10657 * We should not allow policy setting after 10658 * we start listening for connections. 
10659 */ 10660 if (tcp->tcp_state == TCPS_LISTEN) { 10661 return (EINVAL); 10662 } else { 10663 /* Handled at the IP level */ 10664 return (-EINVAL); 10665 } 10666 case IPV6_SRC_PREFERENCES: 10667 if (inlen != sizeof (uint32_t)) 10668 return (EINVAL); 10669 reterr = ip6_set_src_preferences(tcp->tcp_connp, 10670 *(uint32_t *)invalp); 10671 if (reterr != 0) { 10672 *outlenp = 0; 10673 return (reterr); 10674 } 10675 break; 10676 default: 10677 *outlenp = 0; 10678 return (EINVAL); 10679 } 10680 break; 10681 } /* end IPPROTO_IPV6 */ 10682 default: 10683 *outlenp = 0; 10684 return (EINVAL); 10685 } 10686 /* 10687 * Common case of OK return with outval same as inval 10688 */ 10689 if (invalp != outvalp) { 10690 /* don't trust bcopy for identical src/dst */ 10691 (void) bcopy(invalp, outvalp, inlen); 10692 } 10693 *outlenp = inlen; 10694 return (0); 10695 } 10696 10697 /* 10698 * Update tcp_sticky_hdrs based on tcp_sticky_ipp. 10699 * The headers include ip6i_t (if needed), ip6_t, any sticky extension 10700 * headers, and the maximum size tcp header (to avoid reallocation 10701 * on the fly for additional tcp options). 10702 * Returns failure if can't allocate memory. 10703 */ 10704 static int 10705 tcp_build_hdrs(queue_t *q, tcp_t *tcp) 10706 { 10707 char *hdrs; 10708 uint_t hdrs_len; 10709 ip6i_t *ip6i; 10710 char buf[TCP_MAX_HDR_LENGTH]; 10711 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 10712 in6_addr_t src, dst; 10713 10714 /* 10715 * save the existing tcp header and source/dest IP addresses 10716 */ 10717 bcopy(tcp->tcp_tcph, buf, tcp->tcp_tcp_hdr_len); 10718 src = tcp->tcp_ip6h->ip6_src; 10719 dst = tcp->tcp_ip6h->ip6_dst; 10720 hdrs_len = ip_total_hdrs_len_v6(ipp) + TCP_MAX_HDR_LENGTH; 10721 ASSERT(hdrs_len != 0); 10722 if (hdrs_len > tcp->tcp_iphc_len) { 10723 /* Need to reallocate */ 10724 hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP); 10725 if (hdrs == NULL) 10726 return (ENOMEM); 10727 if (tcp->tcp_iphc != NULL) { 10728 if (tcp->tcp_hdr_grown) { 10729 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 10730 } else { 10731 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 10732 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 10733 } 10734 tcp->tcp_iphc_len = 0; 10735 } 10736 ASSERT(tcp->tcp_iphc_len == 0); 10737 tcp->tcp_iphc = hdrs; 10738 tcp->tcp_iphc_len = hdrs_len; 10739 tcp->tcp_hdr_grown = B_TRUE; 10740 } 10741 ip_build_hdrs_v6((uchar_t *)tcp->tcp_iphc, 10742 hdrs_len - TCP_MAX_HDR_LENGTH, ipp, IPPROTO_TCP); 10743 10744 /* Set header fields not in ipp */ 10745 if (ipp->ipp_fields & IPPF_HAS_IP6I) { 10746 ip6i = (ip6i_t *)tcp->tcp_iphc; 10747 tcp->tcp_ip6h = (ip6_t *)&ip6i[1]; 10748 } else { 10749 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 10750 } 10751 /* 10752 * tcp->tcp_ip_hdr_len will include ip6i_t if there is one. 10753 * 10754 * tcp->tcp_tcp_hdr_len doesn't change here. 10755 */ 10756 tcp->tcp_ip_hdr_len = hdrs_len - TCP_MAX_HDR_LENGTH; 10757 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + tcp->tcp_ip_hdr_len); 10758 tcp->tcp_hdr_len = tcp->tcp_ip_hdr_len + tcp->tcp_tcp_hdr_len; 10759 10760 bcopy(buf, tcp->tcp_tcph, tcp->tcp_tcp_hdr_len); 10761 10762 tcp->tcp_ip6h->ip6_src = src; 10763 tcp->tcp_ip6h->ip6_dst = dst; 10764 10765 /* 10766 * If the hop limit was not set by ip_build_hdrs_v6(), set it to 10767 * the default value for TCP. 
10768 */ 10769 if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS)) 10770 tcp->tcp_ip6h->ip6_hops = tcp_ipv6_hoplimit; 10771 10772 /* 10773 * If we're setting extension headers after a connection 10774 * has been established, and if we have a routing header 10775 * among the extension headers, call ip_massage_options_v6 to 10776 * manipulate the routing header/ip6_dst set the checksum 10777 * difference in the tcp header template. 10778 * (This happens in tcp_connect_ipv6 if the routing header 10779 * is set prior to the connect.) 10780 * Set the tcp_sum to zero first in case we've cleared a 10781 * routing header or don't have one at all. 10782 */ 10783 tcp->tcp_sum = 0; 10784 if ((tcp->tcp_state >= TCPS_SYN_SENT) && 10785 (tcp->tcp_ipp_fields & IPPF_RTHDR)) { 10786 ip6_rthdr_t *rth = ip_find_rthdr_v6(tcp->tcp_ip6h, 10787 (uint8_t *)tcp->tcp_tcph); 10788 if (rth != NULL) { 10789 tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, 10790 rth); 10791 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 10792 (tcp->tcp_sum >> 16)); 10793 } 10794 } 10795 10796 /* Try to get everything in a single mblk */ 10797 (void) mi_set_sth_wroff(RD(q), hdrs_len + tcp_wroff_xtra); 10798 return (0); 10799 } 10800 10801 /* 10802 * Transfer any source route option from ipha to buf/dst in reversed form. 10803 */ 10804 static int 10805 tcp_opt_rev_src_route(ipha_t *ipha, char *buf, uchar_t *dst) 10806 { 10807 ipoptp_t opts; 10808 uchar_t *opt; 10809 uint8_t optval; 10810 uint8_t optlen; 10811 uint32_t len = 0; 10812 10813 for (optval = ipoptp_first(&opts, ipha); 10814 optval != IPOPT_EOL; 10815 optval = ipoptp_next(&opts)) { 10816 opt = opts.ipoptp_cur; 10817 optlen = opts.ipoptp_len; 10818 switch (optval) { 10819 int off1, off2; 10820 case IPOPT_SSRR: 10821 case IPOPT_LSRR: 10822 10823 /* Reverse source route */ 10824 /* 10825 * First entry should be the next to last one in the 10826 * current source route (the last entry is our 10827 * address.) 10828 * The last entry should be the final destination. 10829 */ 10830 buf[IPOPT_OPTVAL] = (uint8_t)optval; 10831 buf[IPOPT_OLEN] = (uint8_t)optlen; 10832 off1 = IPOPT_MINOFF_SR - 1; 10833 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1; 10834 if (off2 < 0) { 10835 /* No entries in source route */ 10836 break; 10837 } 10838 bcopy(opt + off2, dst, IP_ADDR_LEN); 10839 /* 10840 * Note: use src since ipha has not had its src 10841 * and dst reversed (it is in the state it was 10842 * received. 10843 */ 10844 bcopy(&ipha->ipha_src, buf + off2, 10845 IP_ADDR_LEN); 10846 off2 -= IP_ADDR_LEN; 10847 10848 while (off2 > 0) { 10849 bcopy(opt + off2, buf + off1, 10850 IP_ADDR_LEN); 10851 off1 += IP_ADDR_LEN; 10852 off2 -= IP_ADDR_LEN; 10853 } 10854 buf[IPOPT_OFFSET] = IPOPT_MINOFF_SR; 10855 buf += optlen; 10856 len += optlen; 10857 break; 10858 } 10859 } 10860 done: 10861 /* Pad the resulting options */ 10862 while (len & 0x3) { 10863 *buf++ = IPOPT_EOL; 10864 len++; 10865 } 10866 return (len); 10867 } 10868 10869 10870 /* 10871 * Extract and revert a source route from ipha (if any) 10872 * and then update the relevant fields in both tcp_t and the standard header. 
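 *
 * Illustrative example (a reading of the code above, not part of the
 * original comment): if a source-routed SYN from S arrives through
 * gateways G1 and G2, the received LSRR option records {G1, G2} with
 * ipha_src == S.  tcp_opt_rev_src_route() then leaves the reply header
 * with ipha_dst = G2, a reversed route list {G1, S}, and the option
 * pointer reset to IPOPT_MINOFF_SR, so our segments retrace the path
 * G2 -> G1 -> S.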
10873 */ 10874 static void 10875 tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha) 10876 { 10877 char buf[TCP_MAX_HDR_LENGTH]; 10878 uint_t tcph_len; 10879 int len; 10880 10881 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 10882 len = IPH_HDR_LENGTH(ipha); 10883 if (len == IP_SIMPLE_HDR_LENGTH) 10884 /* Nothing to do */ 10885 return; 10886 if (len > IP_SIMPLE_HDR_LENGTH + TCP_MAX_IP_OPTIONS_LENGTH || 10887 (len & 0x3)) 10888 return; 10889 10890 tcph_len = tcp->tcp_tcp_hdr_len; 10891 bcopy(tcp->tcp_tcph, buf, tcph_len); 10892 tcp->tcp_sum = (tcp->tcp_ipha->ipha_dst >> 16) + 10893 (tcp->tcp_ipha->ipha_dst & 0xffff); 10894 len = tcp_opt_rev_src_route(ipha, (char *)tcp->tcp_ipha + 10895 IP_SIMPLE_HDR_LENGTH, (uchar_t *)&tcp->tcp_ipha->ipha_dst); 10896 len += IP_SIMPLE_HDR_LENGTH; 10897 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 10898 (tcp->tcp_ipha->ipha_dst & 0xffff)); 10899 if ((int)tcp->tcp_sum < 0) 10900 tcp->tcp_sum--; 10901 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 10902 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16)); 10903 tcp->tcp_tcph = (tcph_t *)((char *)tcp->tcp_ipha + len); 10904 bcopy(buf, tcp->tcp_tcph, tcph_len); 10905 tcp->tcp_ip_hdr_len = len; 10906 tcp->tcp_ipha->ipha_version_and_hdr_length = 10907 (IP_VERSION << 4) | (len >> 2); 10908 len += tcph_len; 10909 tcp->tcp_hdr_len = len; 10910 } 10911 10912 /* 10913 * Copy the standard header into its new location, 10914 * lay in the new options and then update the relevant 10915 * fields in both tcp_t and the standard header. 10916 */ 10917 static int 10918 tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly, uchar_t *ptr, uint_t len) 10919 { 10920 uint_t tcph_len; 10921 uint8_t *ip_optp; 10922 tcph_t *new_tcph; 10923 10924 if ((len > TCP_MAX_IP_OPTIONS_LENGTH) || (len & 0x3)) 10925 return (EINVAL); 10926 10927 if (len > IP_MAX_OPT_LENGTH - tcp->tcp_label_len) 10928 return (EINVAL); 10929 10930 if (checkonly) { 10931 /* 10932 * do not really set, just pretend to - T_CHECK 10933 */ 10934 return (0); 10935 } 10936 10937 ip_optp = (uint8_t *)tcp->tcp_ipha + IP_SIMPLE_HDR_LENGTH; 10938 if (tcp->tcp_label_len > 0) { 10939 int padlen; 10940 uint8_t opt; 10941 10942 /* convert list termination to no-ops */ 10943 padlen = tcp->tcp_label_len - ip_optp[IPOPT_OLEN]; 10944 ip_optp += ip_optp[IPOPT_OLEN]; 10945 opt = len > 0 ? IPOPT_NOP : IPOPT_EOL; 10946 while (--padlen >= 0) 10947 *ip_optp++ = opt; 10948 } 10949 tcph_len = tcp->tcp_tcp_hdr_len; 10950 new_tcph = (tcph_t *)(ip_optp + len); 10951 ovbcopy(tcp->tcp_tcph, new_tcph, tcph_len); 10952 tcp->tcp_tcph = new_tcph; 10953 bcopy(ptr, ip_optp, len); 10954 10955 len += IP_SIMPLE_HDR_LENGTH + tcp->tcp_label_len; 10956 10957 tcp->tcp_ip_hdr_len = len; 10958 tcp->tcp_ipha->ipha_version_and_hdr_length = 10959 (IP_VERSION << 4) | (len >> 2); 10960 tcp->tcp_hdr_len = len + tcph_len; 10961 if (!TCP_IS_DETACHED(tcp)) { 10962 /* Always allocate room for all options. */ 10963 (void) mi_set_sth_wroff(tcp->tcp_rq, 10964 TCP_MAX_COMBINED_HEADER_LENGTH + tcp_wroff_xtra); 10965 } 10966 return (0); 10967 } 10968 10969 /* Get callback routine passed to nd_load by tcp_param_register */ 10970 /* ARGSUSED */ 10971 static int 10972 tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 10973 { 10974 tcpparam_t *tcppa = (tcpparam_t *)cp; 10975 10976 (void) mi_mpprintf(mp, "%u", tcppa->tcp_param_val); 10977 return (0); 10978 } 10979 10980 /* 10981 * Walk through the param array specified registering each element with the 10982 * named dispatch handler. 
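 *
 * (Hedged usage note, not from the original source: every name loaded
 * into tcp_g_nd here becomes an ndd(1M) variable on /dev/tcp, so an
 * administrator can run e.g.
 *	ndd /dev/tcp tcp_status
 *	ndd -set /dev/tcp tcp_extra_priv_ports_add 2049
 * which end up in the get/set callbacks registered below.)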
10983 */ 10984 static boolean_t 10985 tcp_param_register(tcpparam_t *tcppa, int cnt) 10986 { 10987 for (; cnt-- > 0; tcppa++) { 10988 if (tcppa->tcp_param_name && tcppa->tcp_param_name[0]) { 10989 if (!nd_load(&tcp_g_nd, tcppa->tcp_param_name, 10990 tcp_param_get, tcp_param_set, 10991 (caddr_t)tcppa)) { 10992 nd_free(&tcp_g_nd); 10993 return (B_FALSE); 10994 } 10995 } 10996 } 10997 if (!nd_load(&tcp_g_nd, tcp_wroff_xtra_param.tcp_param_name, 10998 tcp_param_get, tcp_param_set_aligned, 10999 (caddr_t)&tcp_wroff_xtra_param)) { 11000 nd_free(&tcp_g_nd); 11001 return (B_FALSE); 11002 } 11003 if (!nd_load(&tcp_g_nd, tcp_mdt_head_param.tcp_param_name, 11004 tcp_param_get, tcp_param_set_aligned, 11005 (caddr_t)&tcp_mdt_head_param)) { 11006 nd_free(&tcp_g_nd); 11007 return (B_FALSE); 11008 } 11009 if (!nd_load(&tcp_g_nd, tcp_mdt_tail_param.tcp_param_name, 11010 tcp_param_get, tcp_param_set_aligned, 11011 (caddr_t)&tcp_mdt_tail_param)) { 11012 nd_free(&tcp_g_nd); 11013 return (B_FALSE); 11014 } 11015 if (!nd_load(&tcp_g_nd, tcp_mdt_max_pbufs_param.tcp_param_name, 11016 tcp_param_get, tcp_param_set, 11017 (caddr_t)&tcp_mdt_max_pbufs_param)) { 11018 nd_free(&tcp_g_nd); 11019 return (B_FALSE); 11020 } 11021 if (!nd_load(&tcp_g_nd, "tcp_extra_priv_ports", 11022 tcp_extra_priv_ports_get, NULL, NULL)) { 11023 nd_free(&tcp_g_nd); 11024 return (B_FALSE); 11025 } 11026 if (!nd_load(&tcp_g_nd, "tcp_extra_priv_ports_add", 11027 NULL, tcp_extra_priv_ports_add, NULL)) { 11028 nd_free(&tcp_g_nd); 11029 return (B_FALSE); 11030 } 11031 if (!nd_load(&tcp_g_nd, "tcp_extra_priv_ports_del", 11032 NULL, tcp_extra_priv_ports_del, NULL)) { 11033 nd_free(&tcp_g_nd); 11034 return (B_FALSE); 11035 } 11036 if (!nd_load(&tcp_g_nd, "tcp_status", tcp_status_report, NULL, 11037 NULL)) { 11038 nd_free(&tcp_g_nd); 11039 return (B_FALSE); 11040 } 11041 if (!nd_load(&tcp_g_nd, "tcp_bind_hash", tcp_bind_hash_report, 11042 NULL, NULL)) { 11043 nd_free(&tcp_g_nd); 11044 return (B_FALSE); 11045 } 11046 if (!nd_load(&tcp_g_nd, "tcp_listen_hash", tcp_listen_hash_report, 11047 NULL, NULL)) { 11048 nd_free(&tcp_g_nd); 11049 return (B_FALSE); 11050 } 11051 if (!nd_load(&tcp_g_nd, "tcp_conn_hash", tcp_conn_hash_report, 11052 NULL, NULL)) { 11053 nd_free(&tcp_g_nd); 11054 return (B_FALSE); 11055 } 11056 if (!nd_load(&tcp_g_nd, "tcp_acceptor_hash", tcp_acceptor_hash_report, 11057 NULL, NULL)) { 11058 nd_free(&tcp_g_nd); 11059 return (B_FALSE); 11060 } 11061 if (!nd_load(&tcp_g_nd, "tcp_host_param", tcp_host_param_report, 11062 tcp_host_param_set, NULL)) { 11063 nd_free(&tcp_g_nd); 11064 return (B_FALSE); 11065 } 11066 if (!nd_load(&tcp_g_nd, "tcp_host_param_ipv6", tcp_host_param_report, 11067 tcp_host_param_set_ipv6, NULL)) { 11068 nd_free(&tcp_g_nd); 11069 return (B_FALSE); 11070 } 11071 if (!nd_load(&tcp_g_nd, "tcp_1948_phrase", NULL, tcp_1948_phrase_set, 11072 NULL)) { 11073 nd_free(&tcp_g_nd); 11074 return (B_FALSE); 11075 } 11076 if (!nd_load(&tcp_g_nd, "tcp_reserved_port_list", 11077 tcp_reserved_port_list, NULL, NULL)) { 11078 nd_free(&tcp_g_nd); 11079 return (B_FALSE); 11080 } 11081 /* 11082 * Dummy ndd variables - only to convey obsolescence information 11083 * through printing of their name (no get or set routines) 11084 * XXX Remove in future releases ? 
11085 */ 11086 if (!nd_load(&tcp_g_nd, 11087 "tcp_close_wait_interval(obsoleted - " 11088 "use tcp_time_wait_interval)", NULL, NULL, NULL)) { 11089 nd_free(&tcp_g_nd); 11090 return (B_FALSE); 11091 } 11092 return (B_TRUE); 11093 } 11094 11095 /* ndd set routine for tcp_wroff_xtra, tcp_mdt_hdr_{head,tail}_min. */ 11096 /* ARGSUSED */ 11097 static int 11098 tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 11099 cred_t *cr) 11100 { 11101 long new_value; 11102 tcpparam_t *tcppa = (tcpparam_t *)cp; 11103 11104 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11105 new_value < tcppa->tcp_param_min || 11106 new_value > tcppa->tcp_param_max) { 11107 return (EINVAL); 11108 } 11109 /* 11110 * Need to make sure new_value is a multiple of 4. If it is not, 11111 * round it up. For future 64 bit requirement, we actually make it 11112 * a multiple of 8. 11113 */ 11114 if (new_value & 0x7) { 11115 new_value = (new_value & ~0x7) + 0x8; 11116 } 11117 tcppa->tcp_param_val = new_value; 11118 return (0); 11119 } 11120 11121 /* Set callback routine passed to nd_load by tcp_param_register */ 11122 /* ARGSUSED */ 11123 static int 11124 tcp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 11125 { 11126 long new_value; 11127 tcpparam_t *tcppa = (tcpparam_t *)cp; 11128 11129 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11130 new_value < tcppa->tcp_param_min || 11131 new_value > tcppa->tcp_param_max) { 11132 return (EINVAL); 11133 } 11134 tcppa->tcp_param_val = new_value; 11135 return (0); 11136 } 11137 11138 /* 11139 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 11140 * is filled, return as much as we can. The message passed in may be 11141 * multi-part, chained using b_cont. "start" is the starting sequence 11142 * number for this piece. 11143 */ 11144 static mblk_t * 11145 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 11146 { 11147 uint32_t end; 11148 mblk_t *mp1; 11149 mblk_t *mp2; 11150 mblk_t *next_mp; 11151 uint32_t u1; 11152 11153 /* Walk through all the new pieces. */ 11154 do { 11155 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 11156 (uintptr_t)INT_MAX); 11157 end = start + (int)(mp->b_wptr - mp->b_rptr); 11158 next_mp = mp->b_cont; 11159 if (start == end) { 11160 /* Empty. Blast it. */ 11161 freeb(mp); 11162 continue; 11163 } 11164 mp->b_cont = NULL; 11165 TCP_REASS_SET_SEQ(mp, start); 11166 TCP_REASS_SET_END(mp, end); 11167 mp1 = tcp->tcp_reass_tail; 11168 if (!mp1) { 11169 tcp->tcp_reass_tail = mp; 11170 tcp->tcp_reass_head = mp; 11171 BUMP_MIB(&tcp_mib, tcpInDataUnorderSegs); 11172 UPDATE_MIB(&tcp_mib, 11173 tcpInDataUnorderBytes, end - start); 11174 continue; 11175 } 11176 /* New stuff completely beyond tail? */ 11177 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 11178 /* Link it on end. */ 11179 mp1->b_cont = mp; 11180 tcp->tcp_reass_tail = mp; 11181 BUMP_MIB(&tcp_mib, tcpInDataUnorderSegs); 11182 UPDATE_MIB(&tcp_mib, 11183 tcpInDataUnorderBytes, end - start); 11184 continue; 11185 } 11186 mp1 = tcp->tcp_reass_head; 11187 u1 = TCP_REASS_SEQ(mp1); 11188 /* New stuff at the front? */ 11189 if (SEQ_LT(start, u1)) { 11190 /* Yes... Check for overlap. */ 11191 mp->b_cont = mp1; 11192 tcp->tcp_reass_head = mp; 11193 tcp_reass_elim_overlap(tcp, mp); 11194 continue; 11195 } 11196 /* 11197 * The new piece fits somewhere between the head and tail. 11198 * We find our slot, where mp1 precedes us and mp2 trails. 
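 *
 * (Worked illustration, an assumption for clarity only: with
 * tcp_rnxt = 100 and a list already holding [200, 300) and [400, 500),
 * a new piece covering [300, 350) ends the search with mp1 = [200, 300)
 * and mp2 = [400, 500); it is linked between them, and
 * tcp_reass_elim_overlap() below trims any overlap on either side.)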
11199 */ 11200 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 11201 u1 = TCP_REASS_SEQ(mp2); 11202 if (SEQ_LEQ(start, u1)) 11203 break; 11204 } 11205 /* Link ourselves in */ 11206 mp->b_cont = mp2; 11207 mp1->b_cont = mp; 11208 11209 /* Trim overlap with following mblk(s) first */ 11210 tcp_reass_elim_overlap(tcp, mp); 11211 11212 /* Trim overlap with preceding mblk */ 11213 tcp_reass_elim_overlap(tcp, mp1); 11214 11215 } while (start = end, mp = next_mp); 11216 mp1 = tcp->tcp_reass_head; 11217 /* Anything ready to go? */ 11218 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 11219 return (NULL); 11220 /* Eat what we can off the queue */ 11221 for (;;) { 11222 mp = mp1->b_cont; 11223 end = TCP_REASS_END(mp1); 11224 TCP_REASS_SET_SEQ(mp1, 0); 11225 TCP_REASS_SET_END(mp1, 0); 11226 if (!mp) { 11227 tcp->tcp_reass_tail = NULL; 11228 break; 11229 } 11230 if (end != TCP_REASS_SEQ(mp)) { 11231 mp1->b_cont = NULL; 11232 break; 11233 } 11234 mp1 = mp; 11235 } 11236 mp1 = tcp->tcp_reass_head; 11237 tcp->tcp_reass_head = mp; 11238 return (mp1); 11239 } 11240 11241 /* Eliminate any overlap that mp may have over later mblks */ 11242 static void 11243 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 11244 { 11245 uint32_t end; 11246 mblk_t *mp1; 11247 uint32_t u1; 11248 11249 end = TCP_REASS_END(mp); 11250 while ((mp1 = mp->b_cont) != NULL) { 11251 u1 = TCP_REASS_SEQ(mp1); 11252 if (!SEQ_GT(end, u1)) 11253 break; 11254 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 11255 mp->b_wptr -= end - u1; 11256 TCP_REASS_SET_END(mp, u1); 11257 BUMP_MIB(&tcp_mib, tcpInDataPartDupSegs); 11258 UPDATE_MIB(&tcp_mib, tcpInDataPartDupBytes, end - u1); 11259 break; 11260 } 11261 mp->b_cont = mp1->b_cont; 11262 TCP_REASS_SET_SEQ(mp1, 0); 11263 TCP_REASS_SET_END(mp1, 0); 11264 freeb(mp1); 11265 BUMP_MIB(&tcp_mib, tcpInDataDupSegs); 11266 UPDATE_MIB(&tcp_mib, tcpInDataDupBytes, end - u1); 11267 } 11268 if (!mp1) 11269 tcp->tcp_reass_tail = mp; 11270 } 11271 11272 /* 11273 * Send up all messages queued on tcp_rcv_list. 11274 */ 11275 static uint_t 11276 tcp_rcv_drain(queue_t *q, tcp_t *tcp) 11277 { 11278 mblk_t *mp; 11279 uint_t ret = 0; 11280 uint_t thwin; 11281 #ifdef DEBUG 11282 uint_t cnt = 0; 11283 #endif 11284 /* Can't drain on an eager connection */ 11285 if (tcp->tcp_listener != NULL) 11286 return (ret); 11287 11288 /* 11289 * Handle two cases here: we are currently fused or we were 11290 * previously fused and have some urgent data to be delivered 11291 * upstream. The latter happens because we either ran out of 11292 * memory or were detached and therefore sending the SIGURG was 11293 * deferred until this point. In either case we pass control 11294 * over to tcp_fuse_rcv_drain() since it may need to complete 11295 * some work. 11296 */ 11297 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 11298 ASSERT(tcp->tcp_fused_sigurg_mp != NULL); 11299 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 11300 &tcp->tcp_fused_sigurg_mp)) 11301 return (ret); 11302 } 11303 11304 while ((mp = tcp->tcp_rcv_list) != NULL) { 11305 tcp->tcp_rcv_list = mp->b_next; 11306 mp->b_next = NULL; 11307 #ifdef DEBUG 11308 cnt += msgdsize(mp); 11309 #endif 11310 /* Does this need SSL processing first? 
*/ 11311 if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) { 11312 tcp_kssl_input(tcp, mp); 11313 continue; 11314 } 11315 putnext(q, mp); 11316 } 11317 ASSERT(cnt == tcp->tcp_rcv_cnt); 11318 tcp->tcp_rcv_last_head = NULL; 11319 tcp->tcp_rcv_last_tail = NULL; 11320 tcp->tcp_rcv_cnt = 0; 11321 11322 /* Learn the latest rwnd information that we sent to the other side. */ 11323 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 11324 << tcp->tcp_rcv_ws; 11325 /* This is the peer's calculated send window (our receive window). */ 11326 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 11327 /* 11328 * Increase the receive window to max. But we need to do receiver 11329 * SWS avoidance. This means that we need to check that the increase 11330 * of the receive window is at least 1 MSS. 11331 */ 11332 if (canputnext(q) && (q->q_hiwat - thwin >= tcp->tcp_mss)) { 11333 /* 11334 * If the window that the other side knows about is less than the 11335 * maximum number of deferred-ACK segments, send an update immediately. 11336 */ 11337 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 11338 BUMP_MIB(&tcp_mib, tcpOutWinUpdate); 11339 ret = TH_ACK_NEEDED; 11340 } 11341 tcp->tcp_rwnd = q->q_hiwat; 11342 } 11343 /* No need for the push timer now. */ 11344 if (tcp->tcp_push_tid != 0) { 11345 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 11346 tcp->tcp_push_tid = 0; 11347 } 11348 return (ret); 11349 } 11350 11351 /* 11352 * Queue data on tcp_rcv_list, which is a b_next chain. 11353 * tcp_rcv_last_head/tail is the last element of this chain. 11354 * Each element of the chain is a b_cont chain. 11355 * 11356 * M_DATA messages are added to the current element. 11357 * Other messages are added as new (b_next) elements. 11358 */ 11359 void 11360 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len) 11361 { 11362 ASSERT(seg_len == msgdsize(mp)); 11363 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 11364 11365 if (tcp->tcp_rcv_list == NULL) { 11366 ASSERT(tcp->tcp_rcv_last_head == NULL); 11367 tcp->tcp_rcv_list = mp; 11368 tcp->tcp_rcv_last_head = mp; 11369 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 11370 tcp->tcp_rcv_last_tail->b_cont = mp; 11371 } else { 11372 tcp->tcp_rcv_last_head->b_next = mp; 11373 tcp->tcp_rcv_last_head = mp; 11374 } 11375 11376 while (mp->b_cont) 11377 mp = mp->b_cont; 11378 11379 tcp->tcp_rcv_last_tail = mp; 11380 tcp->tcp_rcv_cnt += seg_len; 11381 tcp->tcp_rwnd -= seg_len; 11382 } 11383 11384 /* 11385 * DEFAULT TCP ENTRY POINT via squeue on READ side. 11386 * 11387 * This is the default entry function into TCP on the read side. TCP is 11388 * always entered via the squeue, i.e. using squeues for mutual exclusion. 11389 * When the classifier does a lookup to find the tcp, it also puts a reference 11390 * on the associated conn structure so the tcp is guaranteed to exist 11391 * when we come here. We still need to check the state because it might 11392 * well have been closed. The squeue processing function, i.e. squeue_enter, 11393 * squeue_enter_nodrain, or squeue_drain, is responsible for doing the 11394 * CONN_DEC_REF. 11395 * 11396 * Apart from the default entry point, IP also sends packets directly to 11397 * tcp_rput_data for the AF_INET fast path and to tcp_conn_request for incoming 11398 * connections.
*/ 11400 void 11401 tcp_input(void *arg, mblk_t *mp, void *arg2) 11402 { 11403 conn_t *connp = (conn_t *)arg; 11404 tcp_t *tcp = (tcp_t *)connp->conn_tcp; 11405 11406 /* arg2 is the sqp */ 11407 ASSERT(arg2 != NULL); 11408 ASSERT(mp != NULL); 11409 11410 /* 11411 * Don't accept any input on a closed tcp as this TCP logically does 11412 * not exist on the system. Don't proceed further with this TCP. 11413 * For example, this packet could trigger another close of this tcp 11414 * which would be disastrous for tcp_refcnt. tcp_close_detached / 11415 * tcp_clean_death / tcp_closei_local must be called at most once 11416 * on a TCP. In this case we need to refeed the packet into the 11417 * classifier and figure out where the packet should go. Need to 11418 * preserve the recv_ill somehow. Until we figure that out, for 11419 * now just drop the packet if we can't classify the packet. 11420 */ 11421 if (tcp->tcp_state == TCPS_CLOSED || 11422 tcp->tcp_state == TCPS_BOUND) { 11423 conn_t *new_connp; 11424 11425 new_connp = ipcl_classify(mp, connp->conn_zoneid); 11426 if (new_connp != NULL) { 11427 tcp_reinput(new_connp, mp, arg2); 11428 return; 11429 } 11430 /* We failed to classify. For now just drop the packet */ 11431 freemsg(mp); 11432 return; 11433 } 11434 11435 if (DB_TYPE(mp) == M_DATA) 11436 tcp_rput_data(connp, mp, arg2); 11437 else 11438 tcp_rput_common(tcp, mp); 11439 } 11440 11441 /* 11442 * The read side put procedure. 11443 * The packets passed up by ip are assumed to be aligned according to 11444 * OK_32PTR, with the IP+TCP headers fitting in the first mblk. 11445 */ 11446 static void 11447 tcp_rput_common(tcp_t *tcp, mblk_t *mp) 11448 { 11449 /* 11450 * tcp_rput_data() does not expect M_CTL except for the case 11451 * where tcp_ipv6_recvancillary is set and we get an IN_PKTINFO 11452 * type. Need to make sure that any other M_CTLs don't make 11453 * it to tcp_rput_data since it is not expecting any and doesn't 11454 * check for it. 11455 */ 11456 if (DB_TYPE(mp) == M_CTL) { 11457 switch (*(uint32_t *)(mp->b_rptr)) { 11458 case TCP_IOC_ABORT_CONN: 11459 /* 11460 * Handle connection abort request. 11461 */ 11462 tcp_ioctl_abort_handler(tcp, mp); 11463 return; 11464 case IPSEC_IN: 11465 /* 11466 * Only secure ICMP messages arrive in TCP this way, and they 11467 * don't go through the data path. 11468 */ 11469 tcp_icmp_error(tcp, mp); 11470 return; 11471 case IN_PKTINFO: 11472 /* 11473 * Handle the IPV6_RECVPKTINFO socket option on AF_INET6 11474 * sockets that are receiving IPv4 traffic. 11475 */ 11476 ASSERT(tcp->tcp_family == AF_INET6); 11477 ASSERT(tcp->tcp_ipv6_recvancillary & 11478 TCP_IPV6_RECVPKTINFO); 11479 tcp_rput_data(tcp->tcp_connp, mp, 11480 tcp->tcp_connp->conn_sqp); 11481 return; 11482 case MDT_IOC_INFO_UPDATE: 11483 /* 11484 * Handle Multidata information update; the 11485 * following routine will free the message. 11486 */ 11487 if (tcp->tcp_connp->conn_mdt_ok) { 11488 tcp_mdt_update(tcp, 11489 &((ip_mdt_info_t *)mp->b_rptr)->mdt_capab, 11490 B_FALSE); 11491 } 11492 freemsg(mp); 11493 return; 11494 default: 11495 break; 11496 } 11497 } 11498 11499 /* No point processing the message if tcp is already closed */ 11500 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 11501 freemsg(mp); 11502 return; 11503 } 11504 11505 tcp_rput_other(tcp, mp); 11506 } 11507 11508 11509 /* The minimum value of the smoothed mean deviation used in RTO calculation. */ 11510 #define TCP_SD_MIN 400 11511 11512 /* 11513 * Set RTO for this connection. The formula is from Jacobson and Karels' 11514 * "Congestion Avoidance and Control" in SIGCOMM '88.
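 * (A hedged note on the fixed-point scaling used below: tcp_rtt_sa keeps
 * 8 * srtt and tcp_rtt_sd keeps 4 * mdev, so the final line computing
 * rto = (sa >> 3) + sv + tcp_rexmit_interval_extra + (sa >> 5) works out
 * to roughly srtt + 4 * mdev + srtt/4 plus the configured extra.)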
The variable names 11515 * are the same as those in Appendix A.2 of that paper. 11516 * 11517 * m = new measurement 11518 * sa = smoothed RTT average (8 * average estimates). 11519 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 11520 */ 11521 static void 11522 tcp_set_rto(tcp_t *tcp, clock_t rtt) 11523 { 11524 long m = TICK_TO_MSEC(rtt); 11525 clock_t sa = tcp->tcp_rtt_sa; 11526 clock_t sv = tcp->tcp_rtt_sd; 11527 clock_t rto; 11528 11529 BUMP_MIB(&tcp_mib, tcpRttUpdate); 11530 tcp->tcp_rtt_update++; 11531 11532 /* A non-zero tcp_rtt_sa means there is a prior sample to update. */ 11533 if (sa != 0) { 11534 /* 11535 * Update average estimator: 11536 * new rtt = 7/8 old rtt + 1/8 Error 11537 */ 11538 11539 /* m is now Error in estimate. */ 11540 m -= sa >> 3; 11541 if ((sa += m) <= 0) { 11542 /* 11543 * Don't allow the smoothed average to be negative. 11544 * We use 0 to denote reinitialization of the 11545 * variables. 11546 */ 11547 sa = 1; 11548 } 11549 11550 /* 11551 * Update deviation estimator: 11552 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 11553 */ 11554 if (m < 0) 11555 m = -m; 11556 m -= sv >> 2; 11557 sv += m; 11558 } else { 11559 /* 11560 * This follows BSD's implementation. So the reinitialized 11561 * RTO is 3 * m. We cannot go less than 2 because if the 11562 * link is bandwidth dominated, doubling the window size 11563 * during slow start means doubling the RTT. We want to be 11564 * more conservative when we reinitialize our estimates. 3 11565 * is just a convenient number. 11566 */ 11567 sa = m << 3; 11568 sv = m << 1; 11569 } 11570 if (sv < TCP_SD_MIN) { 11571 /* 11572 * We do not know whether sa captures the delayed ACK 11573 * effect; in a long train of segments, a receiver 11574 * does not delay its ACKs. So set the minimum of sv 11575 * to TCP_SD_MIN, which defaults to 400 ms, twice 11576 * BSD's DATO. That means the minimum mean 11577 * deviation is 100 ms. 11578 * 11579 */ 11580 sv = TCP_SD_MIN; 11581 } 11582 tcp->tcp_rtt_sa = sa; 11583 tcp->tcp_rtt_sd = sv; 11584 /* 11585 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 11586 * 11587 * Add tcp_rexmit_interval_extra in case of an extreme environment 11588 * where the algorithm fails to work. The default value of 11589 * tcp_rexmit_interval_extra should be 0. 11590 * 11591 * As we use a finer-grained clock than BSD and update the 11592 * RTO for every ACK, add in another .25 of RTT to the 11593 * deviation of RTO to accommodate burstiness of 1/4 of 11594 * the window size. 11595 */ 11596 rto = (sa >> 3) + sv + tcp_rexmit_interval_extra + (sa >> 5); 11597 11598 if (rto > tcp_rexmit_interval_max) { 11599 tcp->tcp_rto = tcp_rexmit_interval_max; 11600 } else if (rto < tcp_rexmit_interval_min) { 11601 tcp->tcp_rto = tcp_rexmit_interval_min; 11602 } else { 11603 tcp->tcp_rto = rto; 11604 } 11605 11606 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 11607 tcp->tcp_timer_backoff = 0; 11608 } 11609 11610 /* 11611 * tcp_get_seg_mp() is called to get the pointer to a segment in the 11612 * send queue which starts at the given seq. no. 11613 * 11614 * Parameters: 11615 * tcp_t *tcp: the tcp instance pointer. 11616 * uint32_t seq: the starting seq. no of the requested segment. 11617 * int32_t *off: after the execution, *off will be the offset to 11618 * the returned mblk which points to the requested seq no. 11619 * It is the caller's responsibility to send in a non-null off. 11620 * 11621 * Return: 11622 * An mblk_t pointer pointing to the requested segment in the send queue.
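 *
 * (Illustrative example, not from the original comment: with
 * tcp_suna = 1000, tcp_snxt = 2000 and tcp_xmit_head holding two
 * 500-byte mblks, tcp_get_seg_mp(tcp, 1200, &off) returns the first
 * mblk with *off = 200, while seq 1700 returns the second mblk,
 * also with *off = 200.)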
11623 */ 11624 static mblk_t * 11625 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off) 11626 { 11627 int32_t cnt; 11628 mblk_t *mp; 11629 11630 /* Defensive coding. Make sure we don't send incorrect data. */ 11631 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt)) 11632 return (NULL); 11633 11634 cnt = seq - tcp->tcp_suna; 11635 mp = tcp->tcp_xmit_head; 11636 while (cnt > 0 && mp != NULL) { 11637 cnt -= mp->b_wptr - mp->b_rptr; 11638 if (cnt < 0) { 11639 cnt += mp->b_wptr - mp->b_rptr; 11640 break; 11641 } 11642 mp = mp->b_cont; 11643 } 11644 ASSERT(mp != NULL); 11645 *off = cnt; 11646 return (mp); 11647 } 11648 11649 /* 11650 * This function handles all retransmissions if SACK is enabled for this 11651 * connection. First it calculates how many segments can be retransmitted 11652 * based on tcp_pipe. Then it goes thru the notsack list to find eligible 11653 * segments. A segment is eligible if sack_cnt for that segment is greater 11654 * than or equal tcp_dupack_fast_retransmit. After it has retransmitted 11655 * all eligible segments, it checks to see if TCP can send some new segments 11656 * (fast recovery). If it can, set the appropriate flag for tcp_rput_data(). 11657 * 11658 * Parameters: 11659 * tcp_t *tcp: the tcp structure of the connection. 11660 * uint_t *flags: in return, appropriate value will be set for 11661 * tcp_rput_data(). 11662 */ 11663 static void 11664 tcp_sack_rxmit(tcp_t *tcp, uint_t *flags) 11665 { 11666 notsack_blk_t *notsack_blk; 11667 int32_t usable_swnd; 11668 int32_t mss; 11669 uint32_t seg_len; 11670 mblk_t *xmit_mp; 11671 11672 ASSERT(tcp->tcp_sack_info != NULL); 11673 ASSERT(tcp->tcp_notsack_list != NULL); 11674 ASSERT(tcp->tcp_rexmit == B_FALSE); 11675 11676 /* Defensive coding in case there is a bug... */ 11677 if (tcp->tcp_notsack_list == NULL) { 11678 return; 11679 } 11680 notsack_blk = tcp->tcp_notsack_list; 11681 mss = tcp->tcp_mss; 11682 11683 /* 11684 * Limit the num of outstanding data in the network to be 11685 * tcp_cwnd_ssthresh, which is half of the original congestion wnd. 11686 */ 11687 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 11688 11689 /* At least retransmit 1 MSS of data. */ 11690 if (usable_swnd <= 0) { 11691 usable_swnd = mss; 11692 } 11693 11694 /* Make sure no new RTT samples will be taken. */ 11695 tcp->tcp_csuna = tcp->tcp_snxt; 11696 11697 notsack_blk = tcp->tcp_notsack_list; 11698 while (usable_swnd > 0) { 11699 mblk_t *snxt_mp, *tmp_mp; 11700 tcp_seq begin = tcp->tcp_sack_snxt; 11701 tcp_seq end; 11702 int32_t off; 11703 11704 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) { 11705 if (SEQ_GT(notsack_blk->end, begin) && 11706 (notsack_blk->sack_cnt >= 11707 tcp_dupack_fast_retransmit)) { 11708 end = notsack_blk->end; 11709 if (SEQ_LT(begin, notsack_blk->begin)) { 11710 begin = notsack_blk->begin; 11711 } 11712 break; 11713 } 11714 } 11715 /* 11716 * All holes are filled. Manipulate tcp_cwnd to send more 11717 * if we can. Note that after the SACK recovery, tcp_cwnd is 11718 * set to tcp_cwnd_ssthresh. 
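 *
 * (Hedged numeric illustration: with mss = 1460, tcp_cwnd_ssthresh =
 * 8760 and tcp_pipe = 5840, usable_swnd below is 2920, so tcp_cwnd is
 * grown to (tcp_snxt - tcp_suna) + 2920 and TH_XMIT_NEEDED lets about
 * two new segments out while recovery completes.)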
11719 */ 11720 if (notsack_blk == NULL) { 11721 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 11722 if (usable_swnd <= 0 || tcp->tcp_unsent == 0) { 11723 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna; 11724 ASSERT(tcp->tcp_cwnd > 0); 11725 return; 11726 } else { 11727 usable_swnd = usable_swnd / mss; 11728 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna + 11729 MAX(usable_swnd * mss, mss); 11730 *flags |= TH_XMIT_NEEDED; 11731 return; 11732 } 11733 } 11734 11735 /* 11736 * Note that we may send more than usable_swnd allows here 11737 * because of round off, but no more than 1 MSS of data. 11738 */ 11739 seg_len = end - begin; 11740 if (seg_len > mss) 11741 seg_len = mss; 11742 snxt_mp = tcp_get_seg_mp(tcp, begin, &off); 11743 ASSERT(snxt_mp != NULL); 11744 /* This should not happen. Defensive coding again... */ 11745 if (snxt_mp == NULL) { 11746 return; 11747 } 11748 11749 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off, 11750 &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE); 11751 if (xmit_mp == NULL) 11752 return; 11753 11754 usable_swnd -= seg_len; 11755 tcp->tcp_pipe += seg_len; 11756 tcp->tcp_sack_snxt = begin + seg_len; 11757 TCP_RECORD_TRACE(tcp, xmit_mp, TCP_TRACE_SEND_PKT); 11758 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 11759 11760 /* 11761 * Update the send timestamp to avoid false retransmission. 11762 */ 11763 snxt_mp->b_prev = (mblk_t *)lbolt; 11764 11765 BUMP_MIB(&tcp_mib, tcpRetransSegs); 11766 UPDATE_MIB(&tcp_mib, tcpRetransBytes, seg_len); 11767 BUMP_MIB(&tcp_mib, tcpOutSackRetransSegs); 11768 /* 11769 * Update tcp_rexmit_max to extend this SACK recovery phase. 11770 * This happens when new data sent during fast recovery is 11771 * also lost. If TCP retransmits those new data, it needs 11772 * to extend SACK recover phase to avoid starting another 11773 * fast retransmit/recovery unnecessarily. 11774 */ 11775 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) { 11776 tcp->tcp_rexmit_max = tcp->tcp_sack_snxt; 11777 } 11778 } 11779 } 11780 11781 /* 11782 * This function handles policy checking at TCP level for non-hard_bound/ 11783 * detached connections. 11784 */ 11785 static boolean_t 11786 tcp_check_policy(tcp_t *tcp, mblk_t *first_mp, ipha_t *ipha, ip6_t *ip6h, 11787 boolean_t secure, boolean_t mctl_present) 11788 { 11789 ipsec_latch_t *ipl = NULL; 11790 ipsec_action_t *act = NULL; 11791 mblk_t *data_mp; 11792 ipsec_in_t *ii; 11793 const char *reason; 11794 kstat_named_t *counter; 11795 11796 ASSERT(mctl_present || !secure); 11797 11798 ASSERT((ipha == NULL && ip6h != NULL) || 11799 (ip6h == NULL && ipha != NULL)); 11800 11801 /* 11802 * We don't necessarily have an ipsec_in_act action to verify 11803 * policy because of assymetrical policy where we have only 11804 * outbound policy and no inbound policy (possible with global 11805 * policy). 11806 */ 11807 if (!secure) { 11808 if (act == NULL || act->ipa_act.ipa_type == IPSEC_ACT_BYPASS || 11809 act->ipa_act.ipa_type == IPSEC_ACT_CLEAR) 11810 return (B_TRUE); 11811 ipsec_log_policy_failure(tcp->tcp_wq, IPSEC_POLICY_MISMATCH, 11812 "tcp_check_policy", ipha, ip6h, secure); 11813 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 11814 &ipdrops_tcp_clear, &tcp_dropper); 11815 return (B_FALSE); 11816 } 11817 11818 /* 11819 * We have a secure packet. 
*/ 11821 if (act == NULL) { 11822 ipsec_log_policy_failure(tcp->tcp_wq, 11823 IPSEC_POLICY_NOT_NEEDED, "tcp_check_policy", ipha, ip6h, 11824 secure); 11825 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 11826 &ipdrops_tcp_secure, &tcp_dropper); 11827 return (B_FALSE); 11828 } 11829 11830 /* 11831 * XXX This whole routine is currently incorrect. ipl should 11832 * be set to the latch pointer, but is currently not set, so 11833 * we initialize it to NULL to avoid picking up random garbage. 11834 */ 11835 if (ipl == NULL) 11836 return (B_TRUE); 11837 11838 data_mp = first_mp->b_cont; 11839 11840 ii = (ipsec_in_t *)first_mp->b_rptr; 11841 11842 if (ipsec_check_ipsecin_latch(ii, data_mp, ipl, ipha, ip6h, &reason, 11843 &counter)) { 11844 BUMP_MIB(&ip_mib, ipsecInSucceeded); 11845 return (B_TRUE); 11846 } 11847 (void) strlog(TCP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE, 11848 "tcp inbound policy mismatch: %s, packet dropped\n", 11849 reason); 11850 BUMP_MIB(&ip_mib, ipsecInFailed); 11851 11852 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter, &tcp_dropper); 11853 return (B_FALSE); 11854 } 11855 11856 /* 11857 * tcp_ss_rexmit() is called in tcp_rput_data() to do slow start 11858 * retransmission after a timeout. 11859 * 11860 * To limit the number of duplicate segments, we limit the number of segments 11861 * to be sent at one time to tcp_snd_burst, the burst variable. 11862 */ 11863 static void 11864 tcp_ss_rexmit(tcp_t *tcp) 11865 { 11866 uint32_t snxt; 11867 uint32_t smax; 11868 int32_t win; 11869 int32_t mss; 11870 int32_t off; 11871 int32_t burst = tcp->tcp_snd_burst; 11872 mblk_t *snxt_mp; 11873 11874 /* 11875 * Note that tcp_rexmit can be set even though TCP has retransmitted 11876 * all unack'ed segments. 11877 */ 11878 if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) { 11879 smax = tcp->tcp_rexmit_max; 11880 snxt = tcp->tcp_rexmit_nxt; 11881 if (SEQ_LT(snxt, tcp->tcp_suna)) { 11882 snxt = tcp->tcp_suna; 11883 } 11884 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd); 11885 win -= snxt - tcp->tcp_suna; 11886 mss = tcp->tcp_mss; 11887 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off); 11888 11889 while (SEQ_LT(snxt, smax) && (win > 0) && 11890 (burst > 0) && (snxt_mp != NULL)) { 11891 mblk_t *xmit_mp; 11892 mblk_t *old_snxt_mp = snxt_mp; 11893 uint32_t cnt = mss; 11894 11895 if (win < cnt) { 11896 cnt = win; 11897 } 11898 if (SEQ_GT(snxt + cnt, smax)) { 11899 cnt = smax - snxt; 11900 } 11901 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off, 11902 &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE); 11903 if (xmit_mp == NULL) 11904 return; 11905 11906 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 11907 11908 snxt += cnt; 11909 win -= cnt; 11910 /* 11911 * Update the send timestamp to avoid false 11912 * retransmission. 11913 */ 11914 old_snxt_mp->b_prev = (mblk_t *)lbolt; 11915 BUMP_MIB(&tcp_mib, tcpRetransSegs); 11916 UPDATE_MIB(&tcp_mib, tcpRetransBytes, cnt); 11917 11918 tcp->tcp_rexmit_nxt = snxt; 11919 burst--; 11920 } 11921 /* 11922 * If we have transmitted all we have at the time 11923 * we started the retransmission, we can leave 11924 * the rest of the job to tcp_wput_data(). But we 11925 * need to check the send window first. If the 11926 * win is not 0, go on with tcp_wput_data(). 11927 */ 11928 if (SEQ_LT(snxt, smax) || win == 0) { 11929 return; 11930 } 11931 } 11932 /* Only call tcp_wput_data() if there is data to be sent. */ 11933 if (tcp->tcp_unsent) { 11934 tcp_wput_data(tcp, NULL, B_FALSE); 11935 } 11936 } 11937 11938 /* 11939 * Process all TCP options in the SYN segment.
Note that this function should 11940 * be called after tcp_adapt_ire() is called so that the necessary info 11941 * from IRE is already set in the tcp structure. 11942 * 11943 * This function sets up the correct tcp_mss value according to the 11944 * MSS option value and our header size. It also sets up the window scale 11945 * and timestamp values, and initialize SACK info blocks. But it does not 11946 * change receive window size after setting the tcp_mss value. The caller 11947 * should do the appropriate change. 11948 */ 11949 void 11950 tcp_process_options(tcp_t *tcp, tcph_t *tcph) 11951 { 11952 int options; 11953 tcp_opt_t tcpopt; 11954 uint32_t mss_max; 11955 char *tmp_tcph; 11956 11957 tcpopt.tcp = NULL; 11958 options = tcp_parse_options(tcph, &tcpopt); 11959 11960 /* 11961 * Process MSS option. Note that MSS option value does not account 11962 * for IP or TCP options. This means that it is equal to MTU - minimum 11963 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for 11964 * IPv6. 11965 */ 11966 if (!(options & TCP_OPT_MSS_PRESENT)) { 11967 if (tcp->tcp_ipversion == IPV4_VERSION) 11968 tcpopt.tcp_opt_mss = tcp_mss_def_ipv4; 11969 else 11970 tcpopt.tcp_opt_mss = tcp_mss_def_ipv6; 11971 } else { 11972 if (tcp->tcp_ipversion == IPV4_VERSION) 11973 mss_max = tcp_mss_max_ipv4; 11974 else 11975 mss_max = tcp_mss_max_ipv6; 11976 if (tcpopt.tcp_opt_mss < tcp_mss_min) 11977 tcpopt.tcp_opt_mss = tcp_mss_min; 11978 else if (tcpopt.tcp_opt_mss > mss_max) 11979 tcpopt.tcp_opt_mss = mss_max; 11980 } 11981 11982 /* Process Window Scale option. */ 11983 if (options & TCP_OPT_WSCALE_PRESENT) { 11984 tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale; 11985 tcp->tcp_snd_ws_ok = B_TRUE; 11986 } else { 11987 tcp->tcp_snd_ws = B_FALSE; 11988 tcp->tcp_snd_ws_ok = B_FALSE; 11989 tcp->tcp_rcv_ws = B_FALSE; 11990 } 11991 11992 /* Process Timestamp option. */ 11993 if ((options & TCP_OPT_TSTAMP_PRESENT) && 11994 (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) { 11995 tmp_tcph = (char *)tcp->tcp_tcph; 11996 11997 tcp->tcp_snd_ts_ok = B_TRUE; 11998 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 11999 tcp->tcp_last_rcv_lbolt = lbolt64; 12000 ASSERT(OK_32PTR(tmp_tcph)); 12001 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 12002 12003 /* Fill in our template header with basic timestamp option. */ 12004 tmp_tcph += tcp->tcp_tcp_hdr_len; 12005 tmp_tcph[0] = TCPOPT_NOP; 12006 tmp_tcph[1] = TCPOPT_NOP; 12007 tmp_tcph[2] = TCPOPT_TSTAMP; 12008 tmp_tcph[3] = TCPOPT_TSTAMP_LEN; 12009 tcp->tcp_hdr_len += TCPOPT_REAL_TS_LEN; 12010 tcp->tcp_tcp_hdr_len += TCPOPT_REAL_TS_LEN; 12011 tcp->tcp_tcph->th_offset_and_rsrvd[0] += (3 << 4); 12012 } else { 12013 tcp->tcp_snd_ts_ok = B_FALSE; 12014 } 12015 12016 /* 12017 * Process SACK options. If SACK is enabled for this connection, 12018 * then allocate the SACK info structure. Note the following ways 12019 * when tcp_snd_sack_ok is set to true. 12020 * 12021 * For active connection: in tcp_adapt_ire() called in 12022 * tcp_rput_other(), or in tcp_rput_other() when tcp_sack_permitted 12023 * is checked. 12024 * 12025 * For passive connection: in tcp_adapt_ire() called in 12026 * tcp_accept_comm(). 12027 * 12028 * That's the reason why the extra TCP_IS_DETACHED() check is there. 12029 * That check makes sure that if we did not send a SACK OK option, 12030 * we will not enable SACK for this connection even though the other 12031 * side sends us SACK OK option. For active connection, the SACK 12032 * info structure has already been allocated. 
So we need to free 12033 * it if SACK is disabled. 12034 */ 12035 if ((options & TCP_OPT_SACK_OK_PRESENT) && 12036 (tcp->tcp_snd_sack_ok || 12037 (tcp_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) { 12038 /* This should be true only in the passive case. */ 12039 if (tcp->tcp_sack_info == NULL) { 12040 ASSERT(TCP_IS_DETACHED(tcp)); 12041 tcp->tcp_sack_info = 12042 kmem_cache_alloc(tcp_sack_info_cache, KM_NOSLEEP); 12043 } 12044 if (tcp->tcp_sack_info == NULL) { 12045 tcp->tcp_snd_sack_ok = B_FALSE; 12046 } else { 12047 tcp->tcp_snd_sack_ok = B_TRUE; 12048 if (tcp->tcp_snd_ts_ok) { 12049 tcp->tcp_max_sack_blk = 3; 12050 } else { 12051 tcp->tcp_max_sack_blk = 4; 12052 } 12053 } 12054 } else { 12055 /* 12056 * Resetting tcp_snd_sack_ok to B_FALSE so that 12057 * no SACK info will be used for this 12058 * connection. This assumes that SACK usage 12059 * permission is negotiated. This may need 12060 * to be changed once this is clarified. 12061 */ 12062 if (tcp->tcp_sack_info != NULL) { 12063 ASSERT(tcp->tcp_notsack_list == NULL); 12064 kmem_cache_free(tcp_sack_info_cache, 12065 tcp->tcp_sack_info); 12066 tcp->tcp_sack_info = NULL; 12067 } 12068 tcp->tcp_snd_sack_ok = B_FALSE; 12069 } 12070 12071 /* 12072 * Now we know the exact TCP/IP header length, subtract 12073 * that from tcp_mss to get our side's MSS. 12074 */ 12075 tcp->tcp_mss -= tcp->tcp_hdr_len; 12076 /* 12077 * Here we assume that the other side's header size will be equal to 12078 * our header size. We calculate the real MSS accordingly. Need to 12079 * take into account the additional overhead that IPsec puts in. 12080 * 12081 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 12082 */ 12083 tcpopt.tcp_opt_mss -= tcp->tcp_hdr_len + tcp->tcp_ipsec_overhead - 12084 ((tcp->tcp_ipversion == IPV4_VERSION ? 12085 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH); 12086 12087 /* 12088 * Set MSS to the smaller of the two ends of the connection. 12089 * We should not have called tcp_mss_set() before, but our 12090 * side of the MSS should have been set to a proper value 12091 * by tcp_adapt_ire(). tcp_mss_set() will also set up the 12092 * STREAM head parameters properly. 12093 * 12094 * If we have a larger-than-16-bit window but the other side 12095 * didn't want to do window scale, tcp_rwnd_set() will take 12096 * care of that. 12097 */ 12098 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss)); 12099 } 12100 12101 /* 12102 * Sends the T_CONN_IND to the listener. The caller calls this 12103 * function via squeue to get inside the listener's perimeter; 12104 * once the 3-way handshake is done, a T_CONN_IND needs to be 12105 * sent. As an optimization, the caller can call this directly 12106 * if the listener's perimeter is the same as the eager's. 12107 */ 12108 /* ARGSUSED */ 12109 void 12110 tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2) 12111 { 12112 conn_t *lconnp = (conn_t *)arg; 12113 tcp_t *listener = lconnp->conn_tcp; 12114 tcp_t *tcp; 12115 struct T_conn_ind *conn_ind; 12116 ipaddr_t *addr_cache; 12117 boolean_t need_send_conn_ind = B_FALSE; 12118 12119 /* retrieve the eager */ 12120 conn_ind = (struct T_conn_ind *)mp->b_rptr; 12121 ASSERT(conn_ind->OPT_offset != 0 && 12122 conn_ind->OPT_length == sizeof (intptr_t)); 12123 bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp, 12124 conn_ind->OPT_length); 12125 12126 /* 12127 * TLI/XTI applications will get confused by the eager being 12128 * sent as an option since it violates the option semantics. 12129 * So remove the eager as an option since a TLI/XTI app 12130 * doesn't need it anyway.
12131 */ 12132 if (!TCP_IS_SOCKET(listener)) { 12133 conn_ind->OPT_length = 0; 12134 conn_ind->OPT_offset = 0; 12135 } 12136 if (listener->tcp_state == TCPS_CLOSED || 12137 TCP_IS_DETACHED(listener)) { 12138 /* 12139 * If the listener has closed, it would have caused a 12140 * cleanup/blowoff to happen for the eager. We 12141 * just need to return. 12142 */ 12143 freemsg(mp); 12144 return; 12145 } 12146 12147 12148 /* 12149 * If the conn_req_q is full, defer passing up the 12150 * T_CONN_IND until space is available after t_accept() 12151 * processing. 12152 */ 12153 mutex_enter(&listener->tcp_eager_lock); 12154 if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) { 12155 tcp_t *tail; 12156 12157 /* 12158 * The eager already has an extra ref put in tcp_rput_data 12159 * so that it stays till accept comes back even though it 12160 * might get into TCPS_CLOSED as a result of a TH_RST etc. 12161 */ 12162 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 12163 listener->tcp_conn_req_cnt_q0--; 12164 listener->tcp_conn_req_cnt_q++; 12165 12166 /* Move from SYN_RCVD to ESTABLISHED list */ 12167 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 12168 tcp->tcp_eager_prev_q0; 12169 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 12170 tcp->tcp_eager_next_q0; 12171 tcp->tcp_eager_prev_q0 = NULL; 12172 tcp->tcp_eager_next_q0 = NULL; 12173 12174 /* 12175 * Insert at end of the queue because sockfs 12176 * sends down T_CONN_RES in chronological 12177 * order. Leaving the older conn indications 12178 * at the front of the queue helps reduce search 12179 * time. 12180 */ 12181 tail = listener->tcp_eager_last_q; 12182 if (tail != NULL) 12183 tail->tcp_eager_next_q = tcp; 12184 else 12185 listener->tcp_eager_next_q = tcp; 12186 listener->tcp_eager_last_q = tcp; 12187 tcp->tcp_eager_next_q = NULL; 12188 /* 12189 * Delay sending up the T_conn_ind until we are 12190 * done with the eager. Once we have sent up 12191 * the T_conn_ind, the accept can potentially complete 12192 * any time and release the refhold we have on the eager. 12193 */ 12194 need_send_conn_ind = B_TRUE; 12195 } else { 12196 /* 12197 * Defer connection on q0 and set deferred 12198 * connection bit true 12199 */ 12200 tcp->tcp_conn_def_q0 = B_TRUE; 12201 12202 /* take tcp out of q0 ... */ 12203 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 12204 tcp->tcp_eager_next_q0; 12205 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 12206 tcp->tcp_eager_prev_q0; 12207 12208 /* ... and place it at the end of q0 */ 12209 tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0; 12210 tcp->tcp_eager_next_q0 = listener; 12211 listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp; 12212 listener->tcp_eager_prev_q0 = tcp; 12213 tcp->tcp_conn.tcp_eager_conn_ind = mp; 12214 } 12215 12216 /* we have timed out before */ 12217 if (tcp->tcp_syn_rcvd_timeout != 0) { 12218 tcp->tcp_syn_rcvd_timeout = 0; 12219 listener->tcp_syn_rcvd_timeout--; 12220 if (listener->tcp_syn_defense && 12221 listener->tcp_syn_rcvd_timeout <= 12222 (tcp_conn_req_max_q0 >> 5) && 12223 10*MINUTES < TICK_TO_MSEC(lbolt64 - 12224 listener->tcp_last_rcv_lbolt)) { 12225 /* 12226 * Turn off the defense mode if we 12227 * believe the SYN attack is over.
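 *
 * (Reading of the test above, offered as a hedged note: defense is
 * dropped only once the count of SYN_RCVD timeouts has fallen to
 * 1/32 of tcp_conn_req_max_q0 and more than ten minutes have passed
 * since tcp_last_rcv_lbolt; the cached peer-address table is then
 * discarded as well.)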
12228 */ 12229 listener->tcp_syn_defense = B_FALSE; 12230 if (listener->tcp_ip_addr_cache) { 12231 kmem_free((void *)listener->tcp_ip_addr_cache, 12232 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 12233 listener->tcp_ip_addr_cache = NULL; 12234 } 12235 } 12236 } 12237 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache); 12238 if (addr_cache != NULL) { 12239 /* 12240 * We have finished a 3-way handshake with this 12241 * remote host. This proves the IP addr is good. 12242 * Cache it! 12243 */ 12244 addr_cache[IP_ADDR_CACHE_HASH( 12245 tcp->tcp_remote)] = tcp->tcp_remote; 12246 } 12247 mutex_exit(&listener->tcp_eager_lock); 12248 if (need_send_conn_ind) 12249 putnext(listener->tcp_rq, mp); 12250 } 12251 12252 mblk_t * 12253 tcp_find_pktinfo(tcp_t *tcp, mblk_t *mp, uint_t *ipversp, uint_t *ip_hdr_lenp, 12254 uint_t *ifindexp, ip6_pkt_t *ippp) 12255 { 12256 in_pktinfo_t *pinfo; 12257 ip6_t *ip6h; 12258 uchar_t *rptr; 12259 mblk_t *first_mp = mp; 12260 boolean_t mctl_present = B_FALSE; 12261 uint_t ifindex = 0; 12262 ip6_pkt_t ipp; 12263 uint_t ipvers; 12264 uint_t ip_hdr_len; 12265 12266 rptr = mp->b_rptr; 12267 ASSERT(OK_32PTR(rptr)); 12268 ASSERT(tcp != NULL); 12269 ipp.ipp_fields = 0; 12270 12271 switch DB_TYPE(mp) { 12272 case M_CTL: 12273 mp = mp->b_cont; 12274 if (mp == NULL) { 12275 freemsg(first_mp); 12276 return (NULL); 12277 } 12278 if (DB_TYPE(mp) != M_DATA) { 12279 freemsg(first_mp); 12280 return (NULL); 12281 } 12282 mctl_present = B_TRUE; 12283 break; 12284 case M_DATA: 12285 break; 12286 default: 12287 cmn_err(CE_NOTE, "tcp_find_pktinfo: unknown db_type"); 12288 freemsg(mp); 12289 return (NULL); 12290 } 12291 ipvers = IPH_HDR_VERSION(rptr); 12292 if (ipvers == IPV4_VERSION) { 12293 if (tcp == NULL) { 12294 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12295 goto done; 12296 } 12297 12298 ipp.ipp_fields |= IPPF_HOPLIMIT; 12299 ipp.ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl; 12300 12301 /* 12302 * If we have IN_PKTINFO in an M_CTL and tcp_ipv6_recvancillary 12303 * has TCP_IPV6_RECVPKTINFO set, pass I/F index along in ipp. 
12304 */ 12305 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) && 12306 mctl_present) { 12307 pinfo = (in_pktinfo_t *)first_mp->b_rptr; 12308 if ((MBLKL(first_mp) == sizeof (in_pktinfo_t)) && 12309 (pinfo->in_pkt_ulp_type == IN_PKTINFO) && 12310 (pinfo->in_pkt_flags & IPF_RECVIF)) { 12311 ipp.ipp_fields |= IPPF_IFINDEX; 12312 ipp.ipp_ifindex = pinfo->in_pkt_ifindex; 12313 ifindex = pinfo->in_pkt_ifindex; 12314 } 12315 freeb(first_mp); 12316 mctl_present = B_FALSE; 12317 } 12318 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12319 } else { 12320 ip6h = (ip6_t *)rptr; 12321 12322 ASSERT(ipvers == IPV6_VERSION); 12323 ipp.ipp_fields = IPPF_HOPLIMIT | IPPF_TCLASS; 12324 ipp.ipp_tclass = (ip6h->ip6_flow & 0x0FF00000) >> 20; 12325 ipp.ipp_hoplimit = ip6h->ip6_hops; 12326 12327 if (ip6h->ip6_nxt != IPPROTO_TCP) { 12328 uint8_t nexthdrp; 12329 12330 /* Look for ifindex information */ 12331 if (ip6h->ip6_nxt == IPPROTO_RAW) { 12332 ip6i_t *ip6i = (ip6i_t *)ip6h; 12333 if ((uchar_t *)&ip6i[1] > mp->b_wptr) { 12334 BUMP_MIB(&ip_mib, tcpInErrs); 12335 freemsg(first_mp); 12336 return (NULL); 12337 } 12338 12339 if (ip6i->ip6i_flags & IP6I_IFINDEX) { 12340 ASSERT(ip6i->ip6i_ifindex != 0); 12341 ipp.ipp_fields |= IPPF_IFINDEX; 12342 ipp.ipp_ifindex = ip6i->ip6i_ifindex; 12343 ifindex = ip6i->ip6i_ifindex; 12344 } 12345 rptr = (uchar_t *)&ip6i[1]; 12346 mp->b_rptr = rptr; 12347 if (rptr == mp->b_wptr) { 12348 mblk_t *mp1; 12349 mp1 = mp->b_cont; 12350 freeb(mp); 12351 mp = mp1; 12352 rptr = mp->b_rptr; 12353 } 12354 if (MBLKL(mp) < IPV6_HDR_LEN + 12355 sizeof (tcph_t)) { 12356 BUMP_MIB(&ip_mib, tcpInErrs); 12357 freemsg(first_mp); 12358 return (NULL); 12359 } 12360 ip6h = (ip6_t *)rptr; 12361 } 12362 12363 /* 12364 * Find any potentially interesting extension headers 12365 * as well as the length of the IPv6 + extension 12366 * headers. 12367 */ 12368 ip_hdr_len = ip_find_hdr_v6(mp, ip6h, &ipp, &nexthdrp); 12369 /* Verify if this is a TCP packet */ 12370 if (nexthdrp != IPPROTO_TCP) { 12371 BUMP_MIB(&ip_mib, tcpInErrs); 12372 freemsg(first_mp); 12373 return (NULL); 12374 } 12375 } else { 12376 ip_hdr_len = IPV6_HDR_LEN; 12377 } 12378 } 12379 12380 done: 12381 if (ipversp != NULL) 12382 *ipversp = ipvers; 12383 if (ip_hdr_lenp != NULL) 12384 *ip_hdr_lenp = ip_hdr_len; 12385 if (ippp != NULL) 12386 *ippp = ipp; 12387 if (ifindexp != NULL) 12388 *ifindexp = ifindex; 12389 if (mctl_present) { 12390 freeb(first_mp); 12391 } 12392 return (mp); 12393 } 12394 12395 /* 12396 * Handle M_DATA messages from IP. Its called directly from IP via 12397 * squeue for AF_INET type sockets fast path. No M_CTL are expected 12398 * in this path. 12399 * 12400 * For everything else (including AF_INET6 sockets with 'tcp_ipversion' 12401 * v4 and v6), we are called through tcp_input() and a M_CTL can 12402 * be present for options but tcp_find_pktinfo() deals with it. We 12403 * only expect M_DATA packets after tcp_find_pktinfo() is done. 12404 * 12405 * The first argument is always the connp/tcp to which the mp belongs. 12406 * There are no exceptions to this rule. The caller has already put 12407 * a reference on this connp/tcp and once tcp_rput_data() returns, 12408 * the squeue will do the refrele. 12409 * 12410 * The TH_SYN for the listener directly go to tcp_conn_request via 12411 * squeue. 
12412 * 12413 * sqp: NULL = recursive, sqp != NULL means called from squeue 12414 */ 12415 void 12416 tcp_rput_data(void *arg, mblk_t *mp, void *arg2) 12417 { 12418 int32_t bytes_acked; 12419 int32_t gap; 12420 mblk_t *mp1; 12421 uint_t flags; 12422 uint32_t new_swnd = 0; 12423 uchar_t *iphdr; 12424 uchar_t *rptr; 12425 int32_t rgap; 12426 uint32_t seg_ack; 12427 int seg_len; 12428 uint_t ip_hdr_len; 12429 uint32_t seg_seq; 12430 tcph_t *tcph; 12431 int urp; 12432 tcp_opt_t tcpopt; 12433 uint_t ipvers; 12434 ip6_pkt_t ipp; 12435 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 12436 uint32_t cwnd; 12437 uint32_t add; 12438 int npkt; 12439 int mss; 12440 conn_t *connp = (conn_t *)arg; 12441 squeue_t *sqp = (squeue_t *)arg2; 12442 tcp_t *tcp = connp->conn_tcp; 12443 12444 /* 12445 * RST from fused tcp loopback peer should trigger an unfuse. 12446 */ 12447 if (tcp->tcp_fused) { 12448 TCP_STAT(tcp_fusion_aborted); 12449 tcp_unfuse(tcp); 12450 } 12451 12452 iphdr = mp->b_rptr; 12453 rptr = mp->b_rptr; 12454 ASSERT(OK_32PTR(rptr)); 12455 12456 /* 12457 * An AF_INET socket is not capable of receiving any pktinfo. Do inline 12458 * processing here. For rest call tcp_find_pktinfo to fill up the 12459 * necessary information. 12460 */ 12461 if (IPCL_IS_TCP4(connp)) { 12462 ipvers = IPV4_VERSION; 12463 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12464 } else { 12465 mp = tcp_find_pktinfo(tcp, mp, &ipvers, &ip_hdr_len, 12466 NULL, &ipp); 12467 if (mp == NULL) { 12468 TCP_STAT(tcp_rput_v6_error); 12469 return; 12470 } 12471 iphdr = mp->b_rptr; 12472 rptr = mp->b_rptr; 12473 } 12474 ASSERT(DB_TYPE(mp) == M_DATA); 12475 12476 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12477 seg_seq = ABE32_TO_U32(tcph->th_seq); 12478 seg_ack = ABE32_TO_U32(tcph->th_ack); 12479 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 12480 seg_len = (int)(mp->b_wptr - rptr) - 12481 (ip_hdr_len + TCP_HDR_LENGTH(tcph)); 12482 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 12483 do { 12484 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 12485 (uintptr_t)INT_MAX); 12486 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 12487 } while ((mp1 = mp1->b_cont) != NULL && 12488 mp1->b_datap->db_type == M_DATA); 12489 } 12490 12491 if (tcp->tcp_state == TCPS_TIME_WAIT) { 12492 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 12493 seg_len, tcph); 12494 return; 12495 } 12496 12497 if (sqp != NULL) { 12498 /* 12499 * This is the correct place to update tcp_last_recv_time. Note 12500 * that it is also updated for tcp structure that belongs to 12501 * global and listener queues which do not really need updating. 12502 * But that should not cause any harm. And it is updated for 12503 * all kinds of incoming segments, not only for data segments. 12504 */ 12505 tcp->tcp_last_recv_time = lbolt; 12506 } 12507 12508 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 12509 12510 BUMP_LOCAL(tcp->tcp_ibsegs); 12511 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 12512 12513 if ((flags & TH_URG) && sqp != NULL) { 12514 /* 12515 * TCP can't handle urgent pointers that arrive before 12516 * the connection has been accept()ed since it can't 12517 * buffer OOB data. Discard segment if this happens. 12518 * 12519 * Nor can it reassemble urgent pointers, so discard 12520 * if it's not the next segment expected. 12521 * 12522 * Otherwise, collapse chain into one mblk (discard if 12523 * that fails). This makes sure the headers, retransmitted 12524 * data, and new data all are in the same mblk. 
12525 */ 12526 ASSERT(mp != NULL); 12527 if (tcp->tcp_listener || !pullupmsg(mp, -1)) { 12528 freemsg(mp); 12529 return; 12530 } 12531 /* Update pointers into message */ 12532 iphdr = rptr = mp->b_rptr; 12533 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12534 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 12535 /* 12536 * Since we can't handle any data with this urgent 12537 * pointer that is out of sequence, we expunge 12538 * the data. This allows us to still register 12539 * the urgent mark and generate the M_PCSIG, 12540 * which we can do. 12541 */ 12542 mp->b_wptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 12543 seg_len = 0; 12544 } 12545 } 12546 12547 switch (tcp->tcp_state) { 12548 case TCPS_SYN_SENT: 12549 if (flags & TH_ACK) { 12550 /* 12551 * Note that our stack cannot send data before a 12552 * connection is established, therefore the 12553 * following check is valid. Otherwise, it has 12554 * to be changed. 12555 */ 12556 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 12557 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 12558 freemsg(mp); 12559 if (flags & TH_RST) 12560 return; 12561 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 12562 tcp, seg_ack, 0, TH_RST); 12563 return; 12564 } 12565 ASSERT(tcp->tcp_suna + 1 == seg_ack); 12566 } 12567 if (flags & TH_RST) { 12568 freemsg(mp); 12569 if (flags & TH_ACK) 12570 (void) tcp_clean_death(tcp, 12571 ECONNREFUSED, 13); 12572 return; 12573 } 12574 if (!(flags & TH_SYN)) { 12575 freemsg(mp); 12576 return; 12577 } 12578 12579 /* Process all TCP options. */ 12580 tcp_process_options(tcp, tcph); 12581 /* 12582 * The following changes our rwnd to be a multiple of the 12583 * MIN(peer MSS, our MSS) for performance reason. 12584 */ 12585 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rq->q_hiwat, 12586 tcp->tcp_mss)); 12587 12588 /* Is the other end ECN capable? */ 12589 if (tcp->tcp_ecn_ok) { 12590 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 12591 tcp->tcp_ecn_ok = B_FALSE; 12592 } 12593 } 12594 /* 12595 * Clear ECN flags because it may interfere with later 12596 * processing. 12597 */ 12598 flags &= ~(TH_ECE|TH_CWR); 12599 12600 tcp->tcp_irs = seg_seq; 12601 tcp->tcp_rack = seg_seq; 12602 tcp->tcp_rnxt = seg_seq + 1; 12603 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 12604 if (!TCP_IS_DETACHED(tcp)) { 12605 /* Allocate room for SACK options if needed. */ 12606 if (tcp->tcp_snd_sack_ok) { 12607 (void) mi_set_sth_wroff(tcp->tcp_rq, 12608 tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 12609 (tcp->tcp_loopback ? 0 : tcp_wroff_xtra)); 12610 } else { 12611 (void) mi_set_sth_wroff(tcp->tcp_rq, 12612 tcp->tcp_hdr_len + 12613 (tcp->tcp_loopback ? 0 : tcp_wroff_xtra)); 12614 } 12615 } 12616 if (flags & TH_ACK) { 12617 /* 12618 * If we can't get the confirmation upstream, pretend 12619 * we didn't even see this one. 12620 * 12621 * XXX: how can we pretend we didn't see it if we 12622 * have updated rnxt et. al. 12623 * 12624 * For loopback we defer sending up the T_CONN_CON 12625 * until after some checks below. 12626 */ 12627 mp1 = NULL; 12628 if (!tcp_conn_con(tcp, iphdr, tcph, mp, 12629 tcp->tcp_loopback ? &mp1 : NULL)) { 12630 freemsg(mp); 12631 return; 12632 } 12633 /* SYN was acked - making progress */ 12634 if (tcp->tcp_ipversion == IPV6_VERSION) 12635 tcp->tcp_ip_forward_progress = B_TRUE; 12636 12637 /* One for the SYN */ 12638 tcp->tcp_suna = tcp->tcp_iss + 1; 12639 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 12640 tcp->tcp_state = TCPS_ESTABLISHED; 12641 12642 /* 12643 * If SYN was retransmitted, need to reset all 12644 * retransmission info. 
This is because this 12645 * segment will be treated as a dup ACK. 12646 */ 12647 if (tcp->tcp_rexmit) { 12648 tcp->tcp_rexmit = B_FALSE; 12649 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 12650 tcp->tcp_rexmit_max = tcp->tcp_snxt; 12651 tcp->tcp_snd_burst = tcp->tcp_localnet ? 12652 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 12653 tcp->tcp_ms_we_have_waited = 0; 12654 12655 /* 12656 * Set tcp_cwnd back to 1 MSS, per 12657 * recommendation from 12658 * draft-floyd-incr-init-win-01.txt, 12659 * Increasing TCP's Initial Window. 12660 */ 12661 tcp->tcp_cwnd = tcp->tcp_mss; 12662 } 12663 12664 tcp->tcp_swl1 = seg_seq; 12665 tcp->tcp_swl2 = seg_ack; 12666 12667 new_swnd = BE16_TO_U16(tcph->th_win); 12668 tcp->tcp_swnd = new_swnd; 12669 if (new_swnd > tcp->tcp_max_swnd) 12670 tcp->tcp_max_swnd = new_swnd; 12671 12672 /* 12673 * Always send the three-way handshake ack immediately 12674 * in order to make the connection complete as soon as 12675 * possible on the accepting host. 12676 */ 12677 flags |= TH_ACK_NEEDED; 12678 12679 /* 12680 * Special case for loopback. At this point we have 12681 * received SYN-ACK from the remote endpoint. In 12682 * order to ensure that both endpoints reach the 12683 * fused state prior to any data exchange, the final 12684 * ACK needs to be sent before we indicate T_CONN_CON 12685 * to the module upstream. 12686 */ 12687 if (tcp->tcp_loopback) { 12688 mblk_t *ack_mp; 12689 12690 ASSERT(!tcp->tcp_unfusable); 12691 ASSERT(mp1 != NULL); 12692 /* 12693 * For loopback, we always get a pure SYN-ACK 12694 * and only need to send back the final ACK 12695 * with no data (this is because the other 12696 * tcp is ours and we don't do T/TCP). This 12697 * final ACK triggers the passive side to 12698 * perform fusion in ESTABLISHED state. 12699 */ 12700 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 12701 if (tcp->tcp_ack_tid != 0) { 12702 (void) TCP_TIMER_CANCEL(tcp, 12703 tcp->tcp_ack_tid); 12704 tcp->tcp_ack_tid = 0; 12705 } 12706 TCP_RECORD_TRACE(tcp, ack_mp, 12707 TCP_TRACE_SEND_PKT); 12708 tcp_send_data(tcp, tcp->tcp_wq, ack_mp); 12709 BUMP_LOCAL(tcp->tcp_obsegs); 12710 BUMP_MIB(&tcp_mib, tcpOutAck); 12711 12712 /* Send up T_CONN_CON */ 12713 putnext(tcp->tcp_rq, mp1); 12714 12715 freemsg(mp); 12716 return; 12717 } 12718 /* 12719 * Forget fusion; we need to handle more 12720 * complex cases below. Send the deferred 12721 * T_CONN_CON message upstream and proceed 12722 * as usual. Mark this tcp as not capable 12723 * of fusion. 12724 */ 12725 TCP_STAT(tcp_fusion_unfusable); 12726 tcp->tcp_unfusable = B_TRUE; 12727 putnext(tcp->tcp_rq, mp1); 12728 } 12729 12730 /* 12731 * Check to see if there is data to be sent. If 12732 * yes, set the transmit flag. Then check to see 12733 * if received data processing needs to be done. 12734 * If not, go straight to xmit_check. This short 12735 * cut is OK as we don't support T/TCP. 
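 *
 * For example, a pure SYN-ACK (seg_len == 0 and no TH_URG) is freed
 * here and we jump straight to xmit_check, while a SYN-ACK that
 * carries data falls through to normal receive processing with
 * TH_SYN cleared and seg_seq advanced past the SYN.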
12736 */ 12737 if (tcp->tcp_unsent) 12738 flags |= TH_XMIT_NEEDED; 12739 12740 if (seg_len == 0 && !(flags & TH_URG)) { 12741 freemsg(mp); 12742 goto xmit_check; 12743 } 12744 12745 flags &= ~TH_SYN; 12746 seg_seq++; 12747 break; 12748 } 12749 tcp->tcp_state = TCPS_SYN_RCVD; 12750 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 12751 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 12752 if (mp1) { 12753 DB_CPID(mp1) = tcp->tcp_cpid; 12754 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 12755 tcp_send_data(tcp, tcp->tcp_wq, mp1); 12756 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 12757 } 12758 freemsg(mp); 12759 return; 12760 case TCPS_SYN_RCVD: 12761 if (flags & TH_ACK) { 12762 /* 12763 * In this state, a SYN|ACK packet is either bogus 12764 * because the other side must be ACKing our SYN which 12765 * indicates it has seen the ACK for their SYN and 12766 * shouldn't retransmit it or we're crossing SYNs 12767 * on active open. 12768 */ 12769 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 12770 freemsg(mp); 12771 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 12772 tcp, seg_ack, 0, TH_RST); 12773 return; 12774 } 12775 /* 12776 * NOTE: RFC 793 pg. 72 says this should be 12777 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 12778 * but that would mean we have an ack that ignored 12779 * our SYN. 12780 */ 12781 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 12782 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 12783 freemsg(mp); 12784 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 12785 tcp, seg_ack, 0, TH_RST); 12786 return; 12787 } 12788 } 12789 break; 12790 case TCPS_LISTEN: 12791 /* 12792 * Only a TLI listener can come through this path when a 12793 * acceptor is going back to be a listener and a packet 12794 * for the acceptor hits the classifier. For a socket 12795 * listener, this can never happen because a listener 12796 * can never accept connection on itself and hence a 12797 * socket acceptor can not go back to being a listener. 12798 */ 12799 ASSERT(!TCP_IS_SOCKET(tcp)); 12800 /*FALLTHRU*/ 12801 case TCPS_CLOSED: 12802 case TCPS_BOUND: { 12803 conn_t *new_connp; 12804 12805 new_connp = ipcl_classify(mp, connp->conn_zoneid); 12806 if (new_connp != NULL) { 12807 tcp_reinput(new_connp, mp, connp->conn_sqp); 12808 return; 12809 } 12810 /* We failed to classify. For now just drop the packet */ 12811 freemsg(mp); 12812 return; 12813 } 12814 case TCPS_IDLE: 12815 /* 12816 * Handle the case where the tcp_clean_death() has happened 12817 * on a connection (application hasn't closed yet) but a packet 12818 * was already queued on squeue before tcp_clean_death() 12819 * was processed. Calling tcp_clean_death() twice on same 12820 * connection can result in weird behaviour. 12821 */ 12822 freemsg(mp); 12823 return; 12824 default: 12825 break; 12826 } 12827 12828 /* 12829 * Already on the correct queue/perimeter. 12830 * If this is a detached connection and not an eager 12831 * connection hanging off a listener then new data 12832 * (past the FIN) will cause a reset. 12833 * We do a special check here where it 12834 * is out of the main line, rather than check 12835 * if we are detached every time we see new 12836 * data down below. 12837 */ 12838 if (TCP_IS_DETACHED_NONEAGER(tcp) && 12839 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 12840 BUMP_MIB(&tcp_mib, tcpInClosed); 12841 TCP_RECORD_TRACE(tcp, 12842 mp, TCP_TRACE_RECV_PKT); 12843 12844 freemsg(mp); 12845 /* 12846 * This could be an SSL closure alert. We're detached so just 12847 * acknowledge it this last time. 
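 *
 * (In that case the alert is consumed rather than reset: the kssl
 * context is released, tcp_rnxt is advanced over the alert bytes and
 * a final ACK is scheduled, instead of the RST we send below for
 * ordinary data arriving on a detached connection.)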
12848 */ 12849 if (tcp->tcp_kssl_ctx != NULL) { 12850 kssl_release_ctx(tcp->tcp_kssl_ctx); 12851 tcp->tcp_kssl_ctx = NULL; 12852 12853 tcp->tcp_rnxt += seg_len; 12854 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 12855 flags |= TH_ACK_NEEDED; 12856 goto ack_check; 12857 } 12858 12859 tcp_xmit_ctl("new data when detached", tcp, 12860 tcp->tcp_snxt, 0, TH_RST); 12861 (void) tcp_clean_death(tcp, EPROTO, 12); 12862 return; 12863 } 12864 12865 mp->b_rptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 12866 urp = BE16_TO_U16(tcph->th_urp) - TCP_OLD_URP_INTERPRETATION; 12867 new_swnd = BE16_TO_U16(tcph->th_win) << 12868 ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws); 12869 mss = tcp->tcp_mss; 12870 12871 if (tcp->tcp_snd_ts_ok) { 12872 if (!tcp_paws_check(tcp, tcph, &tcpopt)) { 12873 /* 12874 * This segment is not acceptable. 12875 * Drop it and send back an ACK. 12876 */ 12877 freemsg(mp); 12878 flags |= TH_ACK_NEEDED; 12879 goto ack_check; 12880 } 12881 } else if (tcp->tcp_snd_sack_ok) { 12882 ASSERT(tcp->tcp_sack_info != NULL); 12883 tcpopt.tcp = tcp; 12884 /* 12885 * SACK info in already updated in tcp_parse_options. Ignore 12886 * all other TCP options... 12887 */ 12888 (void) tcp_parse_options(tcph, &tcpopt); 12889 } 12890 try_again:; 12891 gap = seg_seq - tcp->tcp_rnxt; 12892 rgap = tcp->tcp_rwnd - (gap + seg_len); 12893 /* 12894 * gap is the amount of sequence space between what we expect to see 12895 * and what we got for seg_seq. A positive value for gap means 12896 * something got lost. A negative value means we got some old stuff. 12897 */ 12898 if (gap < 0) { 12899 /* Old stuff present. Is the SYN in there? */ 12900 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 12901 (seg_len != 0)) { 12902 flags &= ~TH_SYN; 12903 seg_seq++; 12904 urp--; 12905 /* Recompute the gaps after noting the SYN. */ 12906 goto try_again; 12907 } 12908 BUMP_MIB(&tcp_mib, tcpInDataDupSegs); 12909 UPDATE_MIB(&tcp_mib, tcpInDataDupBytes, 12910 (seg_len > -gap ? -gap : seg_len)); 12911 /* Remove the old stuff from seg_len. */ 12912 seg_len += gap; 12913 /* 12914 * Anything left? 12915 * Make sure to check for unack'd FIN when rest of data 12916 * has been previously ack'd. 12917 */ 12918 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 12919 /* 12920 * Resets are only valid if they lie within our offered 12921 * window. If the RST bit is set, we just ignore this 12922 * segment. 12923 */ 12924 if (flags & TH_RST) { 12925 freemsg(mp); 12926 return; 12927 } 12928 12929 /* 12930 * The arriving of dup data packets indicate that we 12931 * may have postponed an ack for too long, or the other 12932 * side's RTT estimate is out of shape. Start acking 12933 * more often. 12934 */ 12935 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 12936 tcp->tcp_rack_cnt >= 1 && 12937 tcp->tcp_rack_abs_max > 2) { 12938 tcp->tcp_rack_abs_max--; 12939 } 12940 tcp->tcp_rack_cur_max = 1; 12941 12942 /* 12943 * This segment is "unacceptable". None of its 12944 * sequence space lies within our advertized window. 12945 * 12946 * Adjust seg_len to the original value for tracing. 
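 *
 * As an illustration (made-up numbers): with tcp_rnxt = 1000 and an
 * incoming segment of seg_seq = 900, seg_len = 50, gap works out to
 * -100 and seg_len + gap to -50, so not one byte of the segment is
 * new.  We end up here, trace it if tcp_debug is set, ACK it, and
 * keep only the ACK information it carries.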
12947 */ 12948 seg_len -= gap; 12949 if (tcp->tcp_debug) { 12950 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 12951 "tcp_rput: unacceptable, gap %d, rgap %d, " 12952 "flags 0x%x, seg_seq %u, seg_ack %u, " 12953 "seg_len %d, rnxt %u, snxt %u, %s", 12954 gap, rgap, flags, seg_seq, seg_ack, 12955 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 12956 tcp_display(tcp, NULL, 12957 DISP_ADDR_AND_PORT)); 12958 } 12959 12960 /* 12961 * Arrange to send an ACK in response to the 12962 * unacceptable segment per RFC 793 page 69. There 12963 * is only one small difference between ours and the 12964 * acceptability test in the RFC - we accept ACK-only 12965 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 12966 * will be generated. 12967 * 12968 * Note that we have to ACK an ACK-only packet at least 12969 * for stacks that send 0-length keep-alives with 12970 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 12971 * section 4.2.3.6. As long as we don't ever generate 12972 * an unacceptable packet in response to an incoming 12973 * packet that is unacceptable, it should not cause 12974 * "ACK wars". 12975 */ 12976 flags |= TH_ACK_NEEDED; 12977 12978 /* 12979 * Continue processing this segment in order to use the 12980 * ACK information it contains, but skip all other 12981 * sequence-number processing. Processing the ACK 12982 * information is necessary in order to 12983 * re-synchronize connections that may have lost 12984 * synchronization. 12985 * 12986 * We clear seg_len and flag fields related to 12987 * sequence number processing as they are not 12988 * to be trusted for an unacceptable segment. 12989 */ 12990 seg_len = 0; 12991 flags &= ~(TH_SYN | TH_FIN | TH_URG); 12992 goto process_ack; 12993 } 12994 12995 /* Fix seg_seq, and chew the gap off the front. */ 12996 seg_seq = tcp->tcp_rnxt; 12997 urp += gap; 12998 do { 12999 mblk_t *mp2; 13000 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13001 (uintptr_t)UINT_MAX); 13002 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 13003 if (gap > 0) { 13004 mp->b_rptr = mp->b_wptr - gap; 13005 break; 13006 } 13007 mp2 = mp; 13008 mp = mp->b_cont; 13009 freeb(mp2); 13010 } while (gap < 0); 13011 /* 13012 * If the urgent data has already been acknowledged, we 13013 * should ignore TH_URG below 13014 */ 13015 if (urp < 0) 13016 flags &= ~TH_URG; 13017 } 13018 /* 13019 * rgap is the amount of stuff received out of window. A negative 13020 * value is the amount out of window. 13021 */ 13022 if (rgap < 0) { 13023 mblk_t *mp2; 13024 13025 if (tcp->tcp_rwnd == 0) { 13026 BUMP_MIB(&tcp_mib, tcpInWinProbe); 13027 } else { 13028 BUMP_MIB(&tcp_mib, tcpInDataPastWinSegs); 13029 UPDATE_MIB(&tcp_mib, tcpInDataPastWinBytes, -rgap); 13030 } 13031 13032 /* 13033 * seg_len does not include the FIN, so if more than 13034 * just the FIN is out of window, we act like we don't 13035 * see it. (If just the FIN is out of window, rgap 13036 * will be zero and we will go ahead and acknowledge 13037 * the FIN.) 13038 */ 13039 flags &= ~TH_FIN; 13040 13041 /* Fix seg_len and make sure there is something left. */ 13042 seg_len += rgap; 13043 if (seg_len <= 0) { 13044 /* 13045 * Resets are only valid if they lie within our offered 13046 * window. If the RST bit is set, we just ignore this 13047 * segment. 13048 */ 13049 if (flags & TH_RST) { 13050 freemsg(mp); 13051 return; 13052 } 13053 13054 /* Per RFC 793, we need to send back an ACK. */ 13055 flags |= TH_ACK_NEEDED; 13056 13057 /* 13058 * Send SIGURG as soon as possible i.e. 
even 13059 * if the TH_URG was delivered in a window probe 13060 * packet (which will be unacceptable). 13061 * 13062 * We generate a signal if none has been generated 13063 * for this connection or if this is a new urgent 13064 * byte. Also send a zero-length "unmarked" message 13065 * to inform SIOCATMARK that this is not the mark. 13066 * 13067 * tcp_urp_last_valid is cleared when the T_exdata_ind 13068 * is sent up. This plus the check for old data 13069 * (gap >= 0) handles the wraparound of the sequence 13070 * number space without having to always track the 13071 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 13072 * this max in its rcv_up variable). 13073 * 13074 * This prevents duplicate SIGURGS due to a "late" 13075 * zero-window probe when the T_EXDATA_IND has already 13076 * been sent up. 13077 */ 13078 if ((flags & TH_URG) && 13079 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 13080 tcp->tcp_urp_last))) { 13081 mp1 = allocb(0, BPRI_MED); 13082 if (mp1 == NULL) { 13083 freemsg(mp); 13084 return; 13085 } 13086 if (!TCP_IS_DETACHED(tcp) && 13087 !putnextctl1(tcp->tcp_rq, M_PCSIG, 13088 SIGURG)) { 13089 /* Try again on the rexmit. */ 13090 freemsg(mp1); 13091 freemsg(mp); 13092 return; 13093 } 13094 /* 13095 * If the next byte would be the mark 13096 * then mark with MARKNEXT else mark 13097 * with NOTMARKNEXT. 13098 */ 13099 if (gap == 0 && urp == 0) 13100 mp1->b_flag |= MSGMARKNEXT; 13101 else 13102 mp1->b_flag |= MSGNOTMARKNEXT; 13103 freemsg(tcp->tcp_urp_mark_mp); 13104 tcp->tcp_urp_mark_mp = mp1; 13105 flags |= TH_SEND_URP_MARK; 13106 tcp->tcp_urp_last_valid = B_TRUE; 13107 tcp->tcp_urp_last = urp + seg_seq; 13108 } 13109 /* 13110 * If this is a zero window probe, continue to 13111 * process the ACK part. But we need to set seg_len 13112 * to 0 to avoid data processing. Otherwise just 13113 * drop the segment and send back an ACK. 13114 */ 13115 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 13116 flags &= ~(TH_SYN | TH_URG); 13117 seg_len = 0; 13118 goto process_ack; 13119 } else { 13120 freemsg(mp); 13121 goto ack_check; 13122 } 13123 } 13124 /* Pitch out of window stuff off the end. */ 13125 rgap = seg_len; 13126 mp2 = mp; 13127 do { 13128 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 13129 (uintptr_t)INT_MAX); 13130 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 13131 if (rgap < 0) { 13132 mp2->b_wptr += rgap; 13133 if ((mp1 = mp2->b_cont) != NULL) { 13134 mp2->b_cont = NULL; 13135 freemsg(mp1); 13136 } 13137 break; 13138 } 13139 } while ((mp2 = mp2->b_cont) != NULL); 13140 } 13141 ok:; 13142 /* 13143 * TCP should check ECN info for segments inside the window only. 13144 * Therefore the check should be done here. 13145 */ 13146 if (tcp->tcp_ecn_ok) { 13147 if (flags & TH_CWR) { 13148 tcp->tcp_ecn_echo_on = B_FALSE; 13149 } 13150 /* 13151 * Note that both ECN_CE and CWR can be set in the 13152 * same segment. In this case, we once again turn 13153 * on ECN_ECHO. 13154 */ 13155 if (tcp->tcp_ipversion == IPV4_VERSION) { 13156 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 13157 13158 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 13159 tcp->tcp_ecn_echo_on = B_TRUE; 13160 } 13161 } else { 13162 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 13163 13164 if ((vcf & htonl(IPH_ECN_CE << 20)) == 13165 htonl(IPH_ECN_CE << 20)) { 13166 tcp->tcp_ecn_echo_on = B_TRUE; 13167 } 13168 } 13169 } 13170 13171 /* 13172 * Check whether we can update tcp_ts_recent. This test is 13173 * NOT the one in RFC 1323 3.4. 
It is from Braden, 1993, "TCP 13174 * Extensions for High Performance: An Update", Internet Draft. 13175 */ 13176 if (tcp->tcp_snd_ts_ok && 13177 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 13178 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 13179 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 13180 tcp->tcp_last_rcv_lbolt = lbolt64; 13181 } 13182 13183 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 13184 /* 13185 * FIN in an out of order segment. We record this in 13186 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 13187 * Clear the FIN so that any check on FIN flag will fail. 13188 * Remember that FIN also counts in the sequence number 13189 * space. So we need to ack out of order FIN only segments. 13190 */ 13191 if (flags & TH_FIN) { 13192 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 13193 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 13194 flags &= ~TH_FIN; 13195 flags |= TH_ACK_NEEDED; 13196 } 13197 if (seg_len > 0) { 13198 /* Fill in the SACK blk list. */ 13199 if (tcp->tcp_snd_sack_ok) { 13200 ASSERT(tcp->tcp_sack_info != NULL); 13201 tcp_sack_insert(tcp->tcp_sack_list, 13202 seg_seq, seg_seq + seg_len, 13203 &(tcp->tcp_num_sack_blk)); 13204 } 13205 13206 /* 13207 * Attempt reassembly and see if we have something 13208 * ready to go. 13209 */ 13210 mp = tcp_reass(tcp, mp, seg_seq); 13211 /* Always ack out of order packets */ 13212 flags |= TH_ACK_NEEDED | TH_PUSH; 13213 if (mp) { 13214 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13215 (uintptr_t)INT_MAX); 13216 seg_len = mp->b_cont ? msgdsize(mp) : 13217 (int)(mp->b_wptr - mp->b_rptr); 13218 seg_seq = tcp->tcp_rnxt; 13219 /* 13220 * A gap is filled and the seq num and len 13221 * of the gap match that of a previously 13222 * received FIN, put the FIN flag back in. 13223 */ 13224 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13225 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13226 flags |= TH_FIN; 13227 tcp->tcp_valid_bits &= 13228 ~TCP_OFO_FIN_VALID; 13229 } 13230 } else { 13231 /* 13232 * Keep going even with NULL mp. 13233 * There may be a useful ACK or something else 13234 * we don't want to miss. 13235 * 13236 * But TCP should not perform fast retransmit 13237 * because of the ack number. TCP uses 13238 * seg_len == 0 to determine if it is a pure 13239 * ACK. And this is not a pure ACK. 13240 */ 13241 seg_len = 0; 13242 ofo_seg = B_TRUE; 13243 } 13244 } 13245 } else if (seg_len > 0) { 13246 BUMP_MIB(&tcp_mib, tcpInDataInorderSegs); 13247 UPDATE_MIB(&tcp_mib, tcpInDataInorderBytes, seg_len); 13248 /* 13249 * If an out of order FIN was received before, and the seq 13250 * num and len of the new segment match that of the FIN, 13251 * put the FIN flag back in. 
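 *
 * For instance (illustrative numbers): an out of order segment with
 * seg_seq = 5000, seg_len = 100 and TH_FIN set leaves
 * TCP_OFO_FIN_VALID set and tcp_ofo_fin_seq = 5100; when in-order
 * data later arrives with seg_seq + seg_len == 5100, TH_FIN is put
 * back so the FIN is finally processed in sequence.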
13252 */ 13253 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13254 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13255 flags |= TH_FIN; 13256 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 13257 } 13258 } 13259 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 13260 if (flags & TH_RST) { 13261 freemsg(mp); 13262 switch (tcp->tcp_state) { 13263 case TCPS_SYN_RCVD: 13264 (void) tcp_clean_death(tcp, ECONNREFUSED, 14); 13265 break; 13266 case TCPS_ESTABLISHED: 13267 case TCPS_FIN_WAIT_1: 13268 case TCPS_FIN_WAIT_2: 13269 case TCPS_CLOSE_WAIT: 13270 (void) tcp_clean_death(tcp, ECONNRESET, 15); 13271 break; 13272 case TCPS_CLOSING: 13273 case TCPS_LAST_ACK: 13274 (void) tcp_clean_death(tcp, 0, 16); 13275 break; 13276 default: 13277 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13278 (void) tcp_clean_death(tcp, ENXIO, 17); 13279 break; 13280 } 13281 return; 13282 } 13283 if (flags & TH_SYN) { 13284 /* 13285 * See RFC 793, Page 71 13286 * 13287 * The seq number must be in the window as it should 13288 * be "fixed" above. If it is outside window, it should 13289 * be already rejected. Note that we allow seg_seq to be 13290 * rnxt + rwnd because we want to accept 0 window probe. 13291 */ 13292 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 13293 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 13294 freemsg(mp); 13295 /* 13296 * If the ACK flag is not set, just use our snxt as the 13297 * seq number of the RST segment. 13298 */ 13299 if (!(flags & TH_ACK)) { 13300 seg_ack = tcp->tcp_snxt; 13301 } 13302 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 13303 TH_RST|TH_ACK); 13304 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13305 (void) tcp_clean_death(tcp, ECONNRESET, 18); 13306 return; 13307 } 13308 /* 13309 * urp could be -1 when the urp field in the packet is 0 13310 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 13311 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 13312 */ 13313 if (flags & TH_URG && urp >= 0) { 13314 if (!tcp->tcp_urp_last_valid || 13315 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 13316 /* 13317 * If we haven't generated the signal yet for this 13318 * urgent pointer value, do it now. Also, send up a 13319 * zero-length M_DATA indicating whether or not this is 13320 * the mark. The latter is not needed when a 13321 * T_EXDATA_IND is sent up. However, if there are 13322 * allocation failures this code relies on the sender 13323 * retransmitting and the socket code for determining 13324 * the mark should not block waiting for the peer to 13325 * transmit. Thus, for simplicity we always send up the 13326 * mark indication. 13327 */ 13328 mp1 = allocb(0, BPRI_MED); 13329 if (mp1 == NULL) { 13330 freemsg(mp); 13331 return; 13332 } 13333 if (!TCP_IS_DETACHED(tcp) && 13334 !putnextctl1(tcp->tcp_rq, M_PCSIG, SIGURG)) { 13335 /* Try again on the rexmit. */ 13336 freemsg(mp1); 13337 freemsg(mp); 13338 return; 13339 } 13340 /* 13341 * Mark with NOTMARKNEXT for now. 13342 * The code below will change this to MARKNEXT 13343 * if we are at the mark. 13344 * 13345 * If there are allocation failures (e.g. in dupmsg 13346 * below) the next time tcp_rput_data sees the urgent 13347 * segment it will send up the MSG*MARKNEXT message. 
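 *
 * (urp at this point is the offset of the urgent byte within the
 * segment.  For example, with seg_len = 10 and urp = 4 the urgent
 * byte is in this segment, and the code below feeds the segment back
 * through tcp_rput_data() in stages -- the bytes before the mark,
 * then through the mark, then the whole thing -- so that the urgent
 * byte itself is eventually processed as a one byte segment and the
 * T_EXDATA_IND never has to describe more than that single byte.)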
13348 */ 13349 mp1->b_flag |= MSGNOTMARKNEXT; 13350 freemsg(tcp->tcp_urp_mark_mp); 13351 tcp->tcp_urp_mark_mp = mp1; 13352 flags |= TH_SEND_URP_MARK; 13353 #ifdef DEBUG 13354 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13355 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 13356 "last %x, %s", 13357 seg_seq, urp, tcp->tcp_urp_last, 13358 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13359 #endif /* DEBUG */ 13360 tcp->tcp_urp_last_valid = B_TRUE; 13361 tcp->tcp_urp_last = urp + seg_seq; 13362 } else if (tcp->tcp_urp_mark_mp != NULL) { 13363 /* 13364 * An allocation failure prevented the previous 13365 * tcp_rput_data from sending up the allocated 13366 * MSG*MARKNEXT message - send it up this time 13367 * around. 13368 */ 13369 flags |= TH_SEND_URP_MARK; 13370 } 13371 13372 /* 13373 * If the urgent byte is in this segment, make sure that it is 13374 * all by itself. This makes it much easier to deal with the 13375 * possibility of an allocation failure on the T_exdata_ind. 13376 * Note that seg_len is the number of bytes in the segment, and 13377 * urp is the offset into the segment of the urgent byte. 13378 * urp < seg_len means that the urgent byte is in this segment. 13379 */ 13380 if (urp < seg_len) { 13381 if (seg_len != 1) { 13382 uint32_t tmp_rnxt; 13383 /* 13384 * Break it up and feed it back in. 13385 * Re-attach the IP header. 13386 */ 13387 mp->b_rptr = iphdr; 13388 if (urp > 0) { 13389 /* 13390 * There is stuff before the urgent 13391 * byte. 13392 */ 13393 mp1 = dupmsg(mp); 13394 if (!mp1) { 13395 /* 13396 * Trim from urgent byte on. 13397 * The rest will come back. 13398 */ 13399 (void) adjmsg(mp, 13400 urp - seg_len); 13401 tcp_rput_data(connp, 13402 mp, NULL); 13403 return; 13404 } 13405 (void) adjmsg(mp1, urp - seg_len); 13406 /* Feed this piece back in. */ 13407 tmp_rnxt = tcp->tcp_rnxt; 13408 tcp_rput_data(connp, mp1, NULL); 13409 /* 13410 * If the data passed back in was not 13411 * processed (ie: bad ACK) sending 13412 * the remainder back in will cause a 13413 * loop. In this case, drop the 13414 * packet and let the sender try 13415 * sending a good packet. 13416 */ 13417 if (tmp_rnxt == tcp->tcp_rnxt) { 13418 freemsg(mp); 13419 return; 13420 } 13421 } 13422 if (urp != seg_len - 1) { 13423 uint32_t tmp_rnxt; 13424 /* 13425 * There is stuff after the urgent 13426 * byte. 13427 */ 13428 mp1 = dupmsg(mp); 13429 if (!mp1) { 13430 /* 13431 * Trim everything beyond the 13432 * urgent byte. The rest will 13433 * come back. 13434 */ 13435 (void) adjmsg(mp, 13436 urp + 1 - seg_len); 13437 tcp_rput_data(connp, 13438 mp, NULL); 13439 return; 13440 } 13441 (void) adjmsg(mp1, urp + 1 - seg_len); 13442 tmp_rnxt = tcp->tcp_rnxt; 13443 tcp_rput_data(connp, mp1, NULL); 13444 /* 13445 * If the data passed back in was not 13446 * processed (ie: bad ACK) sending 13447 * the remainder back in will cause a 13448 * loop. In this case, drop the 13449 * packet and let the sender try 13450 * sending a good packet. 13451 */ 13452 if (tmp_rnxt == tcp->tcp_rnxt) { 13453 freemsg(mp); 13454 return; 13455 } 13456 } 13457 tcp_rput_data(connp, mp, NULL); 13458 return; 13459 } 13460 /* 13461 * This segment contains only the urgent byte. We 13462 * have to allocate the T_exdata_ind, if we can. 13463 */ 13464 if (!tcp->tcp_urp_mp) { 13465 struct T_exdata_ind *tei; 13466 mp1 = allocb(sizeof (struct T_exdata_ind), 13467 BPRI_MED); 13468 if (!mp1) { 13469 /* 13470 * Sigh... It'll be back. 13471 * Generate any MSG*MARK message now. 
13472 */ 13473 freemsg(mp); 13474 seg_len = 0; 13475 if (flags & TH_SEND_URP_MARK) { 13476 13477 13478 ASSERT(tcp->tcp_urp_mark_mp); 13479 tcp->tcp_urp_mark_mp->b_flag &= 13480 ~MSGNOTMARKNEXT; 13481 tcp->tcp_urp_mark_mp->b_flag |= 13482 MSGMARKNEXT; 13483 } 13484 goto ack_check; 13485 } 13486 mp1->b_datap->db_type = M_PROTO; 13487 tei = (struct T_exdata_ind *)mp1->b_rptr; 13488 tei->PRIM_type = T_EXDATA_IND; 13489 tei->MORE_flag = 0; 13490 mp1->b_wptr = (uchar_t *)&tei[1]; 13491 tcp->tcp_urp_mp = mp1; 13492 #ifdef DEBUG 13493 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13494 "tcp_rput: allocated exdata_ind %s", 13495 tcp_display(tcp, NULL, 13496 DISP_PORT_ONLY)); 13497 #endif /* DEBUG */ 13498 /* 13499 * There is no need to send a separate MSG*MARK 13500 * message since the T_EXDATA_IND will be sent 13501 * now. 13502 */ 13503 flags &= ~TH_SEND_URP_MARK; 13504 freemsg(tcp->tcp_urp_mark_mp); 13505 tcp->tcp_urp_mark_mp = NULL; 13506 } 13507 /* 13508 * Now we are all set. On the next putnext upstream, 13509 * tcp_urp_mp will be non-NULL and will get prepended 13510 * to what has to be this piece containing the urgent 13511 * byte. If for any reason we abort this segment below, 13512 * if it comes back, we will have this ready, or it 13513 * will get blown off in close. 13514 */ 13515 } else if (urp == seg_len) { 13516 /* 13517 * The urgent byte is the next byte after this sequence 13518 * number. If there is data it is marked with 13519 * MSGMARKNEXT and any tcp_urp_mark_mp is discarded 13520 * since it is not needed. Otherwise, if the code 13521 * above just allocated a zero-length tcp_urp_mark_mp 13522 * message, that message is tagged with MSGMARKNEXT. 13523 * Sending up these MSGMARKNEXT messages makes 13524 * SIOCATMARK work correctly even though 13525 * the T_EXDATA_IND will not be sent up until the 13526 * urgent byte arrives. 13527 */ 13528 if (seg_len != 0) { 13529 flags |= TH_MARKNEXT_NEEDED; 13530 freemsg(tcp->tcp_urp_mark_mp); 13531 tcp->tcp_urp_mark_mp = NULL; 13532 flags &= ~TH_SEND_URP_MARK; 13533 } else if (tcp->tcp_urp_mark_mp != NULL) { 13534 flags |= TH_SEND_URP_MARK; 13535 tcp->tcp_urp_mark_mp->b_flag &= 13536 ~MSGNOTMARKNEXT; 13537 tcp->tcp_urp_mark_mp->b_flag |= MSGMARKNEXT; 13538 } 13539 #ifdef DEBUG 13540 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13541 "tcp_rput: AT MARK, len %d, flags 0x%x, %s", 13542 seg_len, flags, 13543 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13544 #endif /* DEBUG */ 13545 } else { 13546 /* Data left until we hit mark */ 13547 #ifdef DEBUG 13548 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13549 "tcp_rput: URP %d bytes left, %s", 13550 urp - seg_len, tcp_display(tcp, NULL, 13551 DISP_PORT_ONLY)); 13552 #endif /* DEBUG */ 13553 } 13554 } 13555 13556 process_ack: 13557 if (!(flags & TH_ACK)) { 13558 freemsg(mp); 13559 goto xmit_check; 13560 } 13561 } 13562 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 13563 13564 if (tcp->tcp_ipversion == IPV6_VERSION && bytes_acked > 0) 13565 tcp->tcp_ip_forward_progress = B_TRUE; 13566 if (tcp->tcp_state == TCPS_SYN_RCVD) { 13567 if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) && 13568 ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) { 13569 /* 3-way handshake complete - pass up the T_CONN_IND */ 13570 tcp_t *listener = tcp->tcp_listener; 13571 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind; 13572 13573 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 13574 /* 13575 * We are here means eager is fine but it can 13576 * get a TH_RST at any point between now and till 13577 * accept completes and disappear. 
We need to 13578 * ensure that reference to eager is valid after 13579 * we get out of eager's perimeter. So we do 13580 * an extra refhold. 13581 */ 13582 CONN_INC_REF(connp); 13583 13584 /* 13585 * The listener also exists because of the refhold 13586 * done in tcp_conn_request. Its possible that it 13587 * might have closed. We will check that once we 13588 * get inside listeners context. 13589 */ 13590 CONN_INC_REF(listener->tcp_connp); 13591 if (listener->tcp_connp->conn_sqp == 13592 connp->conn_sqp) { 13593 tcp_send_conn_ind(listener->tcp_connp, mp, 13594 listener->tcp_connp->conn_sqp); 13595 CONN_DEC_REF(listener->tcp_connp); 13596 } else if (!tcp->tcp_loopback) { 13597 squeue_fill(listener->tcp_connp->conn_sqp, mp, 13598 tcp_send_conn_ind, 13599 listener->tcp_connp, SQTAG_TCP_CONN_IND); 13600 } else { 13601 squeue_enter(listener->tcp_connp->conn_sqp, mp, 13602 tcp_send_conn_ind, listener->tcp_connp, 13603 SQTAG_TCP_CONN_IND); 13604 } 13605 } 13606 13607 if (tcp->tcp_active_open) { 13608 /* 13609 * We are seeing the final ack in the three way 13610 * hand shake of a active open'ed connection 13611 * so we must send up a T_CONN_CON 13612 */ 13613 if (!tcp_conn_con(tcp, iphdr, tcph, mp, NULL)) { 13614 freemsg(mp); 13615 return; 13616 } 13617 /* 13618 * Don't fuse the loopback endpoints for 13619 * simultaneous active opens. 13620 */ 13621 if (tcp->tcp_loopback) { 13622 TCP_STAT(tcp_fusion_unfusable); 13623 tcp->tcp_unfusable = B_TRUE; 13624 } 13625 } 13626 13627 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 13628 bytes_acked--; 13629 /* SYN was acked - making progress */ 13630 if (tcp->tcp_ipversion == IPV6_VERSION) 13631 tcp->tcp_ip_forward_progress = B_TRUE; 13632 13633 /* 13634 * If SYN was retransmitted, need to reset all 13635 * retransmission info as this segment will be 13636 * treated as a dup ACK. 13637 */ 13638 if (tcp->tcp_rexmit) { 13639 tcp->tcp_rexmit = B_FALSE; 13640 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 13641 tcp->tcp_rexmit_max = tcp->tcp_snxt; 13642 tcp->tcp_snd_burst = tcp->tcp_localnet ? 13643 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 13644 tcp->tcp_ms_we_have_waited = 0; 13645 tcp->tcp_cwnd = mss; 13646 } 13647 13648 /* 13649 * We set the send window to zero here. 13650 * This is needed if there is data to be 13651 * processed already on the queue. 13652 * Later (at swnd_update label), the 13653 * "new_swnd > tcp_swnd" condition is satisfied 13654 * the XMIT_NEEDED flag is set in the current 13655 * (SYN_RCVD) state. This ensures tcp_wput_data() is 13656 * called if there is already data on queue in 13657 * this state. 13658 */ 13659 tcp->tcp_swnd = 0; 13660 13661 if (new_swnd > tcp->tcp_max_swnd) 13662 tcp->tcp_max_swnd = new_swnd; 13663 tcp->tcp_swl1 = seg_seq; 13664 tcp->tcp_swl2 = seg_ack; 13665 tcp->tcp_state = TCPS_ESTABLISHED; 13666 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 13667 13668 /* Fuse when both sides are in ESTABLISHED state */ 13669 if (tcp->tcp_loopback && do_tcp_fusion) 13670 tcp_fuse(tcp, iphdr, tcph); 13671 13672 } 13673 /* This code follows 4.4BSD-Lite2 mostly. */ 13674 if (bytes_acked < 0) 13675 goto est; 13676 13677 /* 13678 * If TCP is ECN capable and the congestion experience bit is 13679 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 13680 * done once per window (or more loosely, per RTT). 
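 *
 * As a rough illustration (made-up numbers): with mss = 1460 and
 * tcp_snxt - tcp_suna = 8 * 1460 outstanding, npkt below is
 * (8 * 1460 >> 1) / 1460 = 4, so tcp_cwnd_ssthresh and tcp_cwnd both
 * become 4 * 1460 -- the data in flight is cut in half.  tcp_cwr then
 * stays set until an ACK beyond the tcp_cwr_snd_max recorded below,
 * so another ECE within the same window does not cut it again.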
13681 */ 13682 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 13683 tcp->tcp_cwr = B_FALSE; 13684 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 13685 if (!tcp->tcp_cwr) { 13686 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 13687 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 13688 tcp->tcp_cwnd = npkt * mss; 13689 /* 13690 * If the cwnd is 0, use the timer to clock out 13691 * new segments. This is required by the ECN spec. 13692 */ 13693 if (npkt == 0) { 13694 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 13695 /* 13696 * This makes sure that when the ACK comes 13697 * back, we will increase tcp_cwnd by 1 MSS. 13698 */ 13699 tcp->tcp_cwnd_cnt = 0; 13700 } 13701 tcp->tcp_cwr = B_TRUE; 13702 /* 13703 * This marks the end of the current window of in 13704 * flight data. That is why we don't use 13705 * tcp_suna + tcp_swnd. Only data in flight can 13706 * provide ECN info. 13707 */ 13708 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 13709 tcp->tcp_ecn_cwr_sent = B_FALSE; 13710 } 13711 } 13712 13713 mp1 = tcp->tcp_xmit_head; 13714 if (bytes_acked == 0) { 13715 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 13716 int dupack_cnt; 13717 13718 BUMP_MIB(&tcp_mib, tcpInDupAck); 13719 /* 13720 * Fast retransmit. When we have seen exactly three 13721 * identical ACKs while we have unacked data 13722 * outstanding we take it as a hint that our peer 13723 * dropped something. 13724 * 13725 * If TCP is retransmitting, don't do fast retransmit. 13726 */ 13727 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 13728 ! tcp->tcp_rexmit) { 13729 /* Do Limited Transmit */ 13730 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 13731 tcp_dupack_fast_retransmit) { 13732 /* 13733 * RFC 3042 13734 * 13735 * What we need to do is temporarily 13736 * increase tcp_cwnd so that new 13737 * data can be sent if it is allowed 13738 * by the receive window (tcp_rwnd). 13739 * tcp_wput_data() will take care of 13740 * the rest. 13741 * 13742 * If the connection is SACK capable, 13743 * only do limited xmit when there 13744 * is SACK info. 13745 * 13746 * Note how tcp_cwnd is incremented. 13747 * The first dup ACK will increase 13748 * it by 1 MSS. The second dup ACK 13749 * will increase it by 2 MSS. This 13750 * means that only 1 new segment will 13751 * be sent for each dup ACK. 13752 */ 13753 if (tcp->tcp_unsent > 0 && 13754 (!tcp->tcp_snd_sack_ok || 13755 (tcp->tcp_snd_sack_ok && 13756 tcp->tcp_notsack_list != NULL))) { 13757 tcp->tcp_cwnd += mss << 13758 (tcp->tcp_dupack_cnt - 1); 13759 flags |= TH_LIMIT_XMIT; 13760 } 13761 } else if (dupack_cnt == 13762 tcp_dupack_fast_retransmit) { 13763 13764 /* 13765 * If we have reduced tcp_ssthresh 13766 * because of ECN, do not reduce it again 13767 * unless it is already one window of data 13768 * away. After one window of data, tcp_cwr 13769 * should then be cleared. Note that 13770 * for non ECN capable connection, tcp_cwr 13771 * should always be false. 13772 * 13773 * Adjust cwnd since the duplicate 13774 * ack indicates that a packet was 13775 * dropped (due to congestion.) 13776 */ 13777 if (!tcp->tcp_cwr) { 13778 npkt = ((tcp->tcp_snxt - 13779 tcp->tcp_suna) >> 1) / mss; 13780 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 13781 mss; 13782 tcp->tcp_cwnd = (npkt + 13783 tcp->tcp_dupack_cnt) * mss; 13784 } 13785 if (tcp->tcp_ecn_ok) { 13786 tcp->tcp_cwr = B_TRUE; 13787 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 13788 tcp->tcp_ecn_cwr_sent = B_FALSE; 13789 } 13790 13791 /* 13792 * We do Hoe's algorithm. 
Refer to her 13793 * paper "Improving the Start-up Behavior 13794 * of a Congestion Control Scheme for TCP," 13795 * appeared in SIGCOMM'96. 13796 * 13797 * Save highest seq no we have sent so far. 13798 * Be careful about the invisible FIN byte. 13799 */ 13800 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 13801 (tcp->tcp_unsent == 0)) { 13802 tcp->tcp_rexmit_max = tcp->tcp_fss; 13803 } else { 13804 tcp->tcp_rexmit_max = tcp->tcp_snxt; 13805 } 13806 13807 /* 13808 * Do not allow bursty traffic during. 13809 * fast recovery. Refer to Fall and Floyd's 13810 * paper "Simulation-based Comparisons of 13811 * Tahoe, Reno and SACK TCP" (in CCR?) 13812 * This is a best current practise. 13813 */ 13814 tcp->tcp_snd_burst = TCP_CWND_SS; 13815 13816 /* 13817 * For SACK: 13818 * Calculate tcp_pipe, which is the 13819 * estimated number of bytes in 13820 * network. 13821 * 13822 * tcp_fack is the highest sack'ed seq num 13823 * TCP has received. 13824 * 13825 * tcp_pipe is explained in the above quoted 13826 * Fall and Floyd's paper. tcp_fack is 13827 * explained in Mathis and Mahdavi's 13828 * "Forward Acknowledgment: Refining TCP 13829 * Congestion Control" in SIGCOMM '96. 13830 */ 13831 if (tcp->tcp_snd_sack_ok) { 13832 ASSERT(tcp->tcp_sack_info != NULL); 13833 if (tcp->tcp_notsack_list != NULL) { 13834 tcp->tcp_pipe = tcp->tcp_snxt - 13835 tcp->tcp_fack; 13836 tcp->tcp_sack_snxt = seg_ack; 13837 flags |= TH_NEED_SACK_REXMIT; 13838 } else { 13839 /* 13840 * Always initialize tcp_pipe 13841 * even though we don't have 13842 * any SACK info. If later 13843 * we get SACK info and 13844 * tcp_pipe is not initialized, 13845 * funny things will happen. 13846 */ 13847 tcp->tcp_pipe = 13848 tcp->tcp_cwnd_ssthresh; 13849 } 13850 } else { 13851 flags |= TH_REXMIT_NEEDED; 13852 } /* tcp_snd_sack_ok */ 13853 13854 } else { 13855 /* 13856 * Here we perform congestion 13857 * avoidance, but NOT slow start. 13858 * This is known as the Fast 13859 * Recovery Algorithm. 13860 */ 13861 if (tcp->tcp_snd_sack_ok && 13862 tcp->tcp_notsack_list != NULL) { 13863 flags |= TH_NEED_SACK_REXMIT; 13864 tcp->tcp_pipe -= mss; 13865 if (tcp->tcp_pipe < 0) 13866 tcp->tcp_pipe = 0; 13867 } else { 13868 /* 13869 * We know that one more packet has 13870 * left the pipe thus we can update 13871 * cwnd. 13872 */ 13873 cwnd = tcp->tcp_cwnd + mss; 13874 if (cwnd > tcp->tcp_cwnd_max) 13875 cwnd = tcp->tcp_cwnd_max; 13876 tcp->tcp_cwnd = cwnd; 13877 if (tcp->tcp_unsent > 0) 13878 flags |= TH_XMIT_NEEDED; 13879 } 13880 } 13881 } 13882 } else if (tcp->tcp_zero_win_probe) { 13883 /* 13884 * If the window has opened, need to arrange 13885 * to send additional data. 13886 */ 13887 if (new_swnd != 0) { 13888 /* tcp_suna != tcp_snxt */ 13889 /* Packet contains a window update */ 13890 BUMP_MIB(&tcp_mib, tcpInWinUpdate); 13891 tcp->tcp_zero_win_probe = 0; 13892 tcp->tcp_timer_backoff = 0; 13893 tcp->tcp_ms_we_have_waited = 0; 13894 13895 /* 13896 * Transmit starting with tcp_suna since 13897 * the one byte probe is not ack'ed. 13898 * If TCP has sent more than one identical 13899 * probe, tcp_rexmit will be set. That means 13900 * tcp_ss_rexmit() will send out the one 13901 * byte along with new data. Otherwise, 13902 * fake the retransmission. 
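 *
 * ("Faking" it simply means turning on tcp_rexmit with
 * tcp_rexmit_nxt = tcp_suna and tcp_rexmit_max = tcp_suna + 1, so
 * the normal retransmit path resends just that one unacknowledged
 * probe byte before any new data goes out.)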
13903 */ 13904 flags |= TH_XMIT_NEEDED; 13905 if (!tcp->tcp_rexmit) { 13906 tcp->tcp_rexmit = B_TRUE; 13907 tcp->tcp_dupack_cnt = 0; 13908 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 13909 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 13910 } 13911 } 13912 } 13913 goto swnd_update; 13914 } 13915 13916 /* 13917 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 13918 * If the ACK value acks something that we have not yet sent, it might 13919 * be an old duplicate segment. Send an ACK to re-synchronize the 13920 * other side. 13921 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 13922 * state is handled above, so we can always just drop the segment and 13923 * send an ACK here. 13924 * 13925 * Should we send ACKs in response to ACK only segments? 13926 */ 13927 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 13928 BUMP_MIB(&tcp_mib, tcpInAckUnsent); 13929 /* drop the received segment */ 13930 freemsg(mp); 13931 13932 /* 13933 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 13934 * greater than 0, check if the number of such 13935 * bogus ACks is greater than that count. If yes, 13936 * don't send back any ACK. This prevents TCP from 13937 * getting into an ACK storm if somehow an attacker 13938 * successfully spoofs an acceptable segment to our 13939 * peer. 13940 */ 13941 if (tcp_drop_ack_unsent_cnt > 0 && 13942 ++tcp->tcp_in_ack_unsent > tcp_drop_ack_unsent_cnt) { 13943 TCP_STAT(tcp_in_ack_unsent_drop); 13944 return; 13945 } 13946 mp = tcp_ack_mp(tcp); 13947 if (mp != NULL) { 13948 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 13949 BUMP_LOCAL(tcp->tcp_obsegs); 13950 BUMP_MIB(&tcp_mib, tcpOutAck); 13951 tcp_send_data(tcp, tcp->tcp_wq, mp); 13952 } 13953 return; 13954 } 13955 13956 /* 13957 * TCP gets a new ACK, update the notsack'ed list to delete those 13958 * blocks that are covered by this ACK. 13959 */ 13960 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 13961 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 13962 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 13963 } 13964 13965 /* 13966 * If we got an ACK after fast retransmit, check to see 13967 * if it is a partial ACK. If it is not and the congestion 13968 * window was inflated to account for the other side's 13969 * cached packets, retract it. If it is, do Hoe's algorithm. 13970 */ 13971 if (tcp->tcp_dupack_cnt >= tcp_dupack_fast_retransmit) { 13972 ASSERT(tcp->tcp_rexmit == B_FALSE); 13973 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 13974 tcp->tcp_dupack_cnt = 0; 13975 /* 13976 * Restore the orig tcp_cwnd_ssthresh after 13977 * fast retransmit phase. 13978 */ 13979 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 13980 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 13981 } 13982 tcp->tcp_rexmit_max = seg_ack; 13983 tcp->tcp_cwnd_cnt = 0; 13984 tcp->tcp_snd_burst = tcp->tcp_localnet ? 13985 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 13986 13987 /* 13988 * Remove all notsack info to avoid confusion with 13989 * the next fast retrasnmit/recovery phase. 13990 */ 13991 if (tcp->tcp_snd_sack_ok && 13992 tcp->tcp_notsack_list != NULL) { 13993 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 13994 } 13995 } else { 13996 if (tcp->tcp_snd_sack_ok && 13997 tcp->tcp_notsack_list != NULL) { 13998 flags |= TH_NEED_SACK_REXMIT; 13999 tcp->tcp_pipe -= mss; 14000 if (tcp->tcp_pipe < 0) 14001 tcp->tcp_pipe = 0; 14002 } else { 14003 /* 14004 * Hoe's algorithm: 14005 * 14006 * Retransmit the unack'ed segment and 14007 * restart fast recovery. 
Note that we 14008 * need to scale back tcp_cwnd to the 14009 * original value when we started fast 14010 * recovery. This is to prevent overly 14011 * aggressive behaviour in sending new 14012 * segments. 14013 */ 14014 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 14015 tcp_dupack_fast_retransmit * mss; 14016 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 14017 flags |= TH_REXMIT_NEEDED; 14018 } 14019 } 14020 } else { 14021 tcp->tcp_dupack_cnt = 0; 14022 if (tcp->tcp_rexmit) { 14023 /* 14024 * TCP is retranmitting. If the ACK ack's all 14025 * outstanding data, update tcp_rexmit_max and 14026 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 14027 * to the correct value. 14028 * 14029 * Note that SEQ_LEQ() is used. This is to avoid 14030 * unnecessary fast retransmit caused by dup ACKs 14031 * received when TCP does slow start retransmission 14032 * after a time out. During this phase, TCP may 14033 * send out segments which are already received. 14034 * This causes dup ACKs to be sent back. 14035 */ 14036 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) { 14037 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) { 14038 tcp->tcp_rexmit_nxt = seg_ack; 14039 } 14040 if (seg_ack != tcp->tcp_rexmit_max) { 14041 flags |= TH_XMIT_NEEDED; 14042 } 14043 } else { 14044 tcp->tcp_rexmit = B_FALSE; 14045 tcp->tcp_xmit_zc_clean = B_FALSE; 14046 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 14047 tcp->tcp_snd_burst = tcp->tcp_localnet ? 14048 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 14049 } 14050 tcp->tcp_ms_we_have_waited = 0; 14051 } 14052 } 14053 14054 BUMP_MIB(&tcp_mib, tcpInAckSegs); 14055 UPDATE_MIB(&tcp_mib, tcpInAckBytes, bytes_acked); 14056 tcp->tcp_suna = seg_ack; 14057 if (tcp->tcp_zero_win_probe != 0) { 14058 tcp->tcp_zero_win_probe = 0; 14059 tcp->tcp_timer_backoff = 0; 14060 } 14061 14062 /* 14063 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed. 14064 * Note that it cannot be the SYN being ack'ed. The code flow 14065 * will not reach here. 14066 */ 14067 if (mp1 == NULL) { 14068 goto fin_acked; 14069 } 14070 14071 /* 14072 * Update the congestion window. 14073 * 14074 * If TCP is not ECN capable or TCP is ECN capable but the 14075 * congestion experience bit is not set, increase the tcp_cwnd as 14076 * usual. 14077 */ 14078 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) { 14079 cwnd = tcp->tcp_cwnd; 14080 add = mss; 14081 14082 if (cwnd >= tcp->tcp_cwnd_ssthresh) { 14083 /* 14084 * This is to prevent an increase of less than 1 MSS of 14085 * tcp_cwnd. With partial increase, tcp_wput_data() 14086 * may send out tinygrams in order to preserve mblk 14087 * boundaries. 14088 * 14089 * By initializing tcp_cwnd_cnt to new tcp_cwnd and 14090 * decrementing it by 1 MSS for every ACKs, tcp_cwnd is 14091 * increased by 1 MSS for every RTTs. 14092 */ 14093 if (tcp->tcp_cwnd_cnt <= 0) { 14094 tcp->tcp_cwnd_cnt = cwnd + add; 14095 } else { 14096 tcp->tcp_cwnd_cnt -= add; 14097 add = 0; 14098 } 14099 } 14100 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max); 14101 } 14102 14103 /* See if the latest urgent data has been acknowledged */ 14104 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && 14105 SEQ_GT(seg_ack, tcp->tcp_urg)) 14106 tcp->tcp_valid_bits &= ~TCP_URG_VALID; 14107 14108 /* Can we update the RTT estimates? */ 14109 if (tcp->tcp_snd_ts_ok) { 14110 /* Ignore zero timestamp echo-reply. */ 14111 if (tcpopt.tcp_opt_ts_ecr != 0) { 14112 tcp_set_rto(tcp, (int32_t)lbolt - 14113 (int32_t)tcpopt.tcp_opt_ts_ecr); 14114 } 14115 14116 /* If needed, restart the timer. 
*/ 14117 if (tcp->tcp_set_timer == 1) { 14118 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 14119 tcp->tcp_set_timer = 0; 14120 } 14121 /* 14122 * Update tcp_csuna in case the other side stops sending 14123 * us timestamps. 14124 */ 14125 tcp->tcp_csuna = tcp->tcp_snxt; 14126 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) { 14127 /* 14128 * An ACK sequence we haven't seen before, so get the RTT 14129 * and update the RTO. But first check if the timestamp is 14130 * valid to use. 14131 */ 14132 if ((mp1->b_next != NULL) && 14133 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next))) 14134 tcp_set_rto(tcp, (int32_t)lbolt - 14135 (int32_t)(intptr_t)mp1->b_prev); 14136 else 14137 BUMP_MIB(&tcp_mib, tcpRttNoUpdate); 14138 14139 /* Remeber the last sequence to be ACKed */ 14140 tcp->tcp_csuna = seg_ack; 14141 if (tcp->tcp_set_timer == 1) { 14142 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 14143 tcp->tcp_set_timer = 0; 14144 } 14145 } else { 14146 BUMP_MIB(&tcp_mib, tcpRttNoUpdate); 14147 } 14148 14149 /* Eat acknowledged bytes off the xmit queue. */ 14150 for (;;) { 14151 mblk_t *mp2; 14152 uchar_t *wptr; 14153 14154 wptr = mp1->b_wptr; 14155 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX); 14156 bytes_acked -= (int)(wptr - mp1->b_rptr); 14157 if (bytes_acked < 0) { 14158 mp1->b_rptr = wptr + bytes_acked; 14159 /* 14160 * Set a new timestamp if all the bytes timed by the 14161 * old timestamp have been ack'ed. 14162 */ 14163 if (SEQ_GT(seg_ack, 14164 (uint32_t)(uintptr_t)(mp1->b_next))) { 14165 mp1->b_prev = (mblk_t *)(uintptr_t)lbolt; 14166 mp1->b_next = NULL; 14167 } 14168 break; 14169 } 14170 mp1->b_next = NULL; 14171 mp1->b_prev = NULL; 14172 mp2 = mp1; 14173 mp1 = mp1->b_cont; 14174 14175 /* 14176 * This notification is required for some zero-copy 14177 * clients to maintain a copy semantic. After the data 14178 * is ack'ed, client is safe to modify or reuse the buffer. 14179 */ 14180 if (tcp->tcp_snd_zcopy_aware && 14181 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 14182 tcp_zcopy_notify(tcp); 14183 freeb(mp2); 14184 if (bytes_acked == 0) { 14185 if (mp1 == NULL) { 14186 /* Everything is ack'ed, clear the tail. */ 14187 tcp->tcp_xmit_tail = NULL; 14188 /* 14189 * Cancel the timer unless we are still 14190 * waiting for an ACK for the FIN packet. 14191 */ 14192 if (tcp->tcp_timer_tid != 0 && 14193 tcp->tcp_snxt == tcp->tcp_suna) { 14194 (void) TCP_TIMER_CANCEL(tcp, 14195 tcp->tcp_timer_tid); 14196 tcp->tcp_timer_tid = 0; 14197 } 14198 goto pre_swnd_update; 14199 } 14200 if (mp2 != tcp->tcp_xmit_tail) 14201 break; 14202 tcp->tcp_xmit_tail = mp1; 14203 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 14204 (uintptr_t)INT_MAX); 14205 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr - 14206 mp1->b_rptr); 14207 break; 14208 } 14209 if (mp1 == NULL) { 14210 /* 14211 * More was acked but there is nothing more 14212 * outstanding. This means that the FIN was 14213 * just acked or that we're talking to a clown. 
14214 */ 14215 fin_acked: 14216 ASSERT(tcp->tcp_fin_sent); 14217 tcp->tcp_xmit_tail = NULL; 14218 if (tcp->tcp_fin_sent) { 14219 /* FIN was acked - making progress */ 14220 if (tcp->tcp_ipversion == IPV6_VERSION && 14221 !tcp->tcp_fin_acked) 14222 tcp->tcp_ip_forward_progress = B_TRUE; 14223 tcp->tcp_fin_acked = B_TRUE; 14224 if (tcp->tcp_linger_tid != 0 && 14225 TCP_TIMER_CANCEL(tcp, 14226 tcp->tcp_linger_tid) >= 0) { 14227 tcp_stop_lingering(tcp); 14228 } 14229 } else { 14230 /* 14231 * We should never get here because 14232 * we have already checked that the 14233 * number of bytes ack'ed should be 14234 * smaller than or equal to what we 14235 * have sent so far (it is the 14236 * acceptability check of the ACK). 14237 * We can only get here if the send 14238 * queue is corrupted. 14239 * 14240 * Terminate the connection and 14241 * panic the system. It is better 14242 * for us to panic instead of 14243 * continuing to avoid other disaster. 14244 */ 14245 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 14246 tcp->tcp_rnxt, TH_RST|TH_ACK); 14247 panic("Memory corruption " 14248 "detected for connection %s.", 14249 tcp_display(tcp, NULL, 14250 DISP_ADDR_AND_PORT)); 14251 /*NOTREACHED*/ 14252 } 14253 goto pre_swnd_update; 14254 } 14255 ASSERT(mp2 != tcp->tcp_xmit_tail); 14256 } 14257 if (tcp->tcp_unsent) { 14258 flags |= TH_XMIT_NEEDED; 14259 } 14260 pre_swnd_update: 14261 tcp->tcp_xmit_head = mp1; 14262 swnd_update: 14263 /* 14264 * The following check is different from most other implementations. 14265 * For bi-directional transfer, when segments are dropped, the 14266 * "normal" check will not accept a window update in those 14267 * retransmitted segemnts. Failing to do that, TCP may send out 14268 * segments which are outside receiver's window. As TCP accepts 14269 * the ack in those retransmitted segments, if the window update in 14270 * the same segment is not accepted, TCP will incorrectly calculates 14271 * that it can send more segments. This can create a deadlock 14272 * with the receiver if its window becomes zero. 14273 */ 14274 if (SEQ_LT(tcp->tcp_swl2, seg_ack) || 14275 SEQ_LT(tcp->tcp_swl1, seg_seq) || 14276 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) { 14277 /* 14278 * The criteria for update is: 14279 * 14280 * 1. the segment acknowledges some data. Or 14281 * 2. the segment is new, i.e. it has a higher seq num. Or 14282 * 3. the segment is not old and the advertised window is 14283 * larger than the previous advertised window. 14284 */ 14285 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd) 14286 flags |= TH_XMIT_NEEDED; 14287 tcp->tcp_swnd = new_swnd; 14288 if (new_swnd > tcp->tcp_max_swnd) 14289 tcp->tcp_max_swnd = new_swnd; 14290 tcp->tcp_swl1 = seg_seq; 14291 tcp->tcp_swl2 = seg_ack; 14292 } 14293 est: 14294 if (tcp->tcp_state > TCPS_ESTABLISHED) { 14295 14296 switch (tcp->tcp_state) { 14297 case TCPS_FIN_WAIT_1: 14298 if (tcp->tcp_fin_acked) { 14299 tcp->tcp_state = TCPS_FIN_WAIT_2; 14300 /* 14301 * We implement the non-standard BSD/SunOS 14302 * FIN_WAIT_2 flushing algorithm. 14303 * If there is no user attached to this 14304 * TCP endpoint, then this TCP struct 14305 * could hang around forever in FIN_WAIT_2 14306 * state if the peer forgets to send us 14307 * a FIN. To prevent this, we wait only 14308 * 2*MSL (a convenient time value) for 14309 * the FIN to arrive. If it doesn't show up, 14310 * we flush the TCP endpoint. This algorithm, 14311 * though a violation of RFC-793, has worked 14312 * for over 10 years in BSD systems. 
14313 * Note: SunOS 4.x waits 675 seconds before 14314 * flushing the FIN_WAIT_2 connection. 14315 */ 14316 TCP_TIMER_RESTART(tcp, 14317 tcp_fin_wait_2_flush_interval); 14318 } 14319 break; 14320 case TCPS_FIN_WAIT_2: 14321 break; /* Shutdown hook? */ 14322 case TCPS_LAST_ACK: 14323 freemsg(mp); 14324 if (tcp->tcp_fin_acked) { 14325 (void) tcp_clean_death(tcp, 0, 19); 14326 return; 14327 } 14328 goto xmit_check; 14329 case TCPS_CLOSING: 14330 if (tcp->tcp_fin_acked) { 14331 tcp->tcp_state = TCPS_TIME_WAIT; 14332 if (!TCP_IS_DETACHED(tcp)) { 14333 TCP_TIMER_RESTART(tcp, 14334 tcp_time_wait_interval); 14335 } else { 14336 tcp_time_wait_append(tcp); 14337 TCP_DBGSTAT(tcp_rput_time_wait); 14338 } 14339 } 14340 /*FALLTHRU*/ 14341 case TCPS_CLOSE_WAIT: 14342 freemsg(mp); 14343 goto xmit_check; 14344 default: 14345 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 14346 break; 14347 } 14348 } 14349 if (flags & TH_FIN) { 14350 /* Make sure we ack the fin */ 14351 flags |= TH_ACK_NEEDED; 14352 if (!tcp->tcp_fin_rcvd) { 14353 tcp->tcp_fin_rcvd = B_TRUE; 14354 tcp->tcp_rnxt++; 14355 tcph = tcp->tcp_tcph; 14356 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack); 14357 14358 /* 14359 * Generate the ordrel_ind at the end unless we 14360 * are an eager guy. 14361 * In the eager case tcp_rsrv will do this when run 14362 * after tcp_accept is done. 14363 */ 14364 if (tcp->tcp_listener == NULL && 14365 !TCP_IS_DETACHED(tcp) && (!tcp->tcp_hard_binding)) 14366 flags |= TH_ORDREL_NEEDED; 14367 switch (tcp->tcp_state) { 14368 case TCPS_SYN_RCVD: 14369 case TCPS_ESTABLISHED: 14370 tcp->tcp_state = TCPS_CLOSE_WAIT; 14371 /* Keepalive? */ 14372 break; 14373 case TCPS_FIN_WAIT_1: 14374 if (!tcp->tcp_fin_acked) { 14375 tcp->tcp_state = TCPS_CLOSING; 14376 break; 14377 } 14378 /* FALLTHRU */ 14379 case TCPS_FIN_WAIT_2: 14380 tcp->tcp_state = TCPS_TIME_WAIT; 14381 if (!TCP_IS_DETACHED(tcp)) { 14382 TCP_TIMER_RESTART(tcp, 14383 tcp_time_wait_interval); 14384 } else { 14385 tcp_time_wait_append(tcp); 14386 TCP_DBGSTAT(tcp_rput_time_wait); 14387 } 14388 if (seg_len) { 14389 /* 14390 * implies data piggybacked on FIN. 14391 * break to handle data. 14392 */ 14393 break; 14394 } 14395 freemsg(mp); 14396 goto ack_check; 14397 } 14398 } 14399 } 14400 if (mp == NULL) 14401 goto xmit_check; 14402 if (seg_len == 0) { 14403 freemsg(mp); 14404 goto xmit_check; 14405 } 14406 if (mp->b_rptr == mp->b_wptr) { 14407 /* 14408 * The header has been consumed, so we remove the 14409 * zero-length mblk here. 14410 */ 14411 mp1 = mp; 14412 mp = mp->b_cont; 14413 freeb(mp1); 14414 } 14415 tcph = tcp->tcp_tcph; 14416 tcp->tcp_rack_cnt++; 14417 { 14418 uint32_t cur_max; 14419 14420 cur_max = tcp->tcp_rack_cur_max; 14421 if (tcp->tcp_rack_cnt >= cur_max) { 14422 /* 14423 * We have more unacked data than we should - send 14424 * an ACK now. 14425 */ 14426 flags |= TH_ACK_NEEDED; 14427 cur_max++; 14428 if (cur_max > tcp->tcp_rack_abs_max) 14429 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 14430 else 14431 tcp->tcp_rack_cur_max = cur_max; 14432 } else if (TCP_IS_DETACHED(tcp)) { 14433 /* We don't have an ACK timer for detached TCP. */ 14434 flags |= TH_ACK_NEEDED; 14435 } else if (seg_len < mss) { 14436 /* 14437 * If we get a segment that is less than an mss, and we 14438 * already have unacknowledged data, and the amount 14439 * unacknowledged is not a multiple of mss, then we 14440 * better generate an ACK now. Otherwise, this may be 14441 * the tail piece of a transaction, and we would rather 14442 * wait for the response. 
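 *
 * For example (illustrative numbers): with mss = 1460 and
 * tcp_rnxt - tcp_rack = 2000, udif % mss is non-zero and we set
 * TH_ACK_NEEDED right away; had exactly 2 * 1460 bytes been
 * unacknowledged, the remainder would be zero and we would only arm
 * the delayed ACK timer (TH_ACK_TIMER_NEEDED).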
14443 */ 14444 uint32_t udif; 14445 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <= 14446 (uintptr_t)INT_MAX); 14447 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack); 14448 if (udif && (udif % mss)) 14449 flags |= TH_ACK_NEEDED; 14450 else 14451 flags |= TH_ACK_TIMER_NEEDED; 14452 } else { 14453 /* Start delayed ack timer */ 14454 flags |= TH_ACK_TIMER_NEEDED; 14455 } 14456 } 14457 tcp->tcp_rnxt += seg_len; 14458 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack); 14459 14460 /* Update SACK list */ 14461 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 14462 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt, 14463 &(tcp->tcp_num_sack_blk)); 14464 } 14465 14466 if (tcp->tcp_urp_mp) { 14467 tcp->tcp_urp_mp->b_cont = mp; 14468 mp = tcp->tcp_urp_mp; 14469 tcp->tcp_urp_mp = NULL; 14470 /* Ready for a new signal. */ 14471 tcp->tcp_urp_last_valid = B_FALSE; 14472 #ifdef DEBUG 14473 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 14474 "tcp_rput: sending exdata_ind %s", 14475 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 14476 #endif /* DEBUG */ 14477 } 14478 14479 /* 14480 * Check for ancillary data changes compared to last segment. 14481 */ 14482 if (tcp->tcp_ipv6_recvancillary != 0) { 14483 mp = tcp_rput_add_ancillary(tcp, mp, &ipp); 14484 if (mp == NULL) 14485 return; 14486 } 14487 14488 if (tcp->tcp_listener || tcp->tcp_hard_binding) { 14489 /* 14490 * Side queue inbound data until the accept happens. 14491 * tcp_accept/tcp_rput drains this when the accept happens. 14492 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or 14493 * T_EXDATA_IND) it is queued on b_next. 14494 * XXX Make urgent data use this. Requires: 14495 * Removing tcp_listener check for TH_URG 14496 * Making M_PCPROTO and MARK messages skip the eager case 14497 */ 14498 14499 if (tcp->tcp_kssl_pending) { 14500 tcp_kssl_input(tcp, mp); 14501 } else { 14502 tcp_rcv_enqueue(tcp, mp, seg_len); 14503 } 14504 } else { 14505 if (mp->b_datap->db_type != M_DATA || 14506 (flags & TH_MARKNEXT_NEEDED)) { 14507 if (tcp->tcp_rcv_list != NULL) { 14508 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 14509 } 14510 ASSERT(tcp->tcp_rcv_list == NULL || 14511 tcp->tcp_fused_sigurg); 14512 if (flags & TH_MARKNEXT_NEEDED) { 14513 #ifdef DEBUG 14514 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 14515 "tcp_rput: sending MSGMARKNEXT %s", 14516 tcp_display(tcp, NULL, 14517 DISP_PORT_ONLY)); 14518 #endif /* DEBUG */ 14519 mp->b_flag |= MSGMARKNEXT; 14520 flags &= ~TH_MARKNEXT_NEEDED; 14521 } 14522 14523 /* Does this need SSL processing first? */ 14524 if ((tcp->tcp_kssl_ctx != NULL) && 14525 (DB_TYPE(mp) == M_DATA)) { 14526 tcp_kssl_input(tcp, mp); 14527 } else { 14528 putnext(tcp->tcp_rq, mp); 14529 if (!canputnext(tcp->tcp_rq)) 14530 tcp->tcp_rwnd -= seg_len; 14531 } 14532 } else if ((flags & (TH_PUSH|TH_FIN)) || 14533 tcp->tcp_rcv_cnt + seg_len >= tcp->tcp_rq->q_hiwat >> 3) { 14534 if (tcp->tcp_rcv_list != NULL) { 14535 /* 14536 * Enqueue the new segment first and then 14537 * call tcp_rcv_drain() to send all data 14538 * up. The other way to do this is to 14539 * send all queued data up and then call 14540 * putnext() to send the new segment up. 14541 * This way can remove the else part later 14542 * on. 14543 * 14544 * We don't this to avoid one more call to 14545 * canputnext() as tcp_rcv_drain() needs to 14546 * call canputnext(). 14547 */ 14548 tcp_rcv_enqueue(tcp, mp, seg_len); 14549 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 14550 } else { 14551 /* Does this need SSL processing first? 
*/ 14552 if ((tcp->tcp_kssl_ctx != NULL) && 14553 (DB_TYPE(mp) == M_DATA)) { 14554 tcp_kssl_input(tcp, mp); 14555 } else { 14556 putnext(tcp->tcp_rq, mp); 14557 if (!canputnext(tcp->tcp_rq)) 14558 tcp->tcp_rwnd -= seg_len; 14559 } 14560 } 14561 } else { 14562 /* 14563 * Enqueue all packets when processing an mblk 14564 * from the co queue and also enqueue normal packets. 14565 */ 14566 tcp_rcv_enqueue(tcp, mp, seg_len); 14567 } 14568 /* 14569 * Make sure the timer is running if we have data waiting 14570 * for a push bit. This provides resiliency against 14571 * implementations that do not correctly generate push bits. 14572 */ 14573 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 14574 /* 14575 * The connection may be closed at this point, so don't 14576 * do anything for a detached tcp. 14577 */ 14578 if (!TCP_IS_DETACHED(tcp)) 14579 tcp->tcp_push_tid = TCP_TIMER(tcp, 14580 tcp_push_timer, 14581 MSEC_TO_TICK(tcp_push_timer_interval)); 14582 } 14583 } 14584 xmit_check: 14585 /* Is there anything left to do? */ 14586 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 14587 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 14588 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 14589 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 14590 goto done; 14591 14592 /* Any transmit work to do and a non-zero window? */ 14593 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 14594 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 14595 if (flags & TH_REXMIT_NEEDED) { 14596 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 14597 14598 BUMP_MIB(&tcp_mib, tcpOutFastRetrans); 14599 if (snd_size > mss) 14600 snd_size = mss; 14601 if (snd_size > tcp->tcp_swnd) 14602 snd_size = tcp->tcp_swnd; 14603 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 14604 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 14605 B_TRUE); 14606 14607 if (mp1 != NULL) { 14608 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 14609 tcp->tcp_csuna = tcp->tcp_snxt; 14610 BUMP_MIB(&tcp_mib, tcpRetransSegs); 14611 UPDATE_MIB(&tcp_mib, tcpRetransBytes, snd_size); 14612 TCP_RECORD_TRACE(tcp, mp1, 14613 TCP_TRACE_SEND_PKT); 14614 tcp_send_data(tcp, tcp->tcp_wq, mp1); 14615 } 14616 } 14617 if (flags & TH_NEED_SACK_REXMIT) { 14618 tcp_sack_rxmit(tcp, &flags); 14619 } 14620 /* 14621 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 14622 * out new segment. Note that tcp_rexmit should not be 14623 * set, otherwise TH_LIMIT_XMIT should not be set. 14624 */ 14625 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 14626 if (!tcp->tcp_rexmit) { 14627 tcp_wput_data(tcp, NULL, B_FALSE); 14628 } else { 14629 tcp_ss_rexmit(tcp); 14630 } 14631 } 14632 /* 14633 * Adjust tcp_cwnd back to normal value after sending 14634 * new data segments. 14635 */ 14636 if (flags & TH_LIMIT_XMIT) { 14637 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 14638 /* 14639 * This will restart the timer. Restarting the 14640 * timer is used to avoid a timeout before the 14641 * limited transmitted segment's ACK gets back. 14642 */ 14643 if (tcp->tcp_xmit_head != NULL) 14644 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 14645 } 14646 14647 /* Anything more to do? 
*/ 14648 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 14649 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 14650 goto done; 14651 } 14652 ack_check: 14653 if (flags & TH_SEND_URP_MARK) { 14654 ASSERT(tcp->tcp_urp_mark_mp); 14655 /* 14656 * Send up any queued data and then send the mark message 14657 */ 14658 if (tcp->tcp_rcv_list != NULL) { 14659 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 14660 } 14661 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 14662 14663 mp1 = tcp->tcp_urp_mark_mp; 14664 tcp->tcp_urp_mark_mp = NULL; 14665 #ifdef DEBUG 14666 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 14667 "tcp_rput: sending zero-length %s %s", 14668 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 14669 "MSGNOTMARKNEXT"), 14670 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 14671 #endif /* DEBUG */ 14672 putnext(tcp->tcp_rq, mp1); 14673 flags &= ~TH_SEND_URP_MARK; 14674 } 14675 if (flags & TH_ACK_NEEDED) { 14676 /* 14677 * Time to send an ack for some reason. 14678 */ 14679 mp1 = tcp_ack_mp(tcp); 14680 14681 if (mp1 != NULL) { 14682 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 14683 tcp_send_data(tcp, tcp->tcp_wq, mp1); 14684 BUMP_LOCAL(tcp->tcp_obsegs); 14685 BUMP_MIB(&tcp_mib, tcpOutAck); 14686 } 14687 if (tcp->tcp_ack_tid != 0) { 14688 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 14689 tcp->tcp_ack_tid = 0; 14690 } 14691 } 14692 if (flags & TH_ACK_TIMER_NEEDED) { 14693 /* 14694 * Arrange for deferred ACK or push wait timeout. 14695 * Start timer if it is not already running. 14696 */ 14697 if (tcp->tcp_ack_tid == 0) { 14698 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 14699 MSEC_TO_TICK(tcp->tcp_localnet ? 14700 (clock_t)tcp_local_dack_interval : 14701 (clock_t)tcp_deferred_ack_interval)); 14702 } 14703 } 14704 if (flags & TH_ORDREL_NEEDED) { 14705 /* 14706 * Send up the ordrel_ind unless we are an eager guy. 14707 * In the eager case tcp_rsrv will do this when run 14708 * after tcp_accept is done. 14709 */ 14710 ASSERT(tcp->tcp_listener == NULL); 14711 if (tcp->tcp_rcv_list != NULL) { 14712 /* 14713 * Push any mblk(s) enqueued from co processing. 14714 */ 14715 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 14716 } 14717 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 14718 if ((mp1 = mi_tpi_ordrel_ind()) != NULL) { 14719 tcp->tcp_ordrel_done = B_TRUE; 14720 putnext(tcp->tcp_rq, mp1); 14721 if (tcp->tcp_deferred_clean_death) { 14722 /* 14723 * tcp_clean_death was deferred 14724 * for T_ORDREL_IND - do it now 14725 */ 14726 (void) tcp_clean_death(tcp, 14727 tcp->tcp_client_errno, 20); 14728 tcp->tcp_deferred_clean_death = B_FALSE; 14729 } 14730 } else { 14731 /* 14732 * Run the orderly release in the 14733 * service routine. 14734 */ 14735 qenable(tcp->tcp_rq); 14736 /* 14737 * Caveat(XXX): The machine may be so 14738 * overloaded that tcp_rsrv() is not scheduled 14739 * until after the endpoint has transitioned 14740 * to TCPS_TIME_WAIT 14741 * and tcp_time_wait_interval expires. Then 14742 * tcp_timer() will blow away state in tcp_t 14743 * and T_ORDREL_IND will never be delivered 14744 * upstream. Unlikely but potentially 14745 * a problem. 14746 */ 14747 } 14748 } 14749 done: 14750 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 14751 } 14752 14753 /* 14754 * This function does PAWS protection check. Returns B_TRUE if the 14755 * segment passes the PAWS test, else returns B_FALSE. 
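 *
 * A non-RST segment fails the test when it carries a timestamp value
 * that is older (by TSTMP_LT serial comparison) than tcp_ts_recent, the
 * most recently accepted timestamp on this connection, unless the
 * connection has been idle for longer than PAWS_TIMEOUT, in which case
 * tcp_ts_recent is assumed stale and is simply replaced.  If the peer
 * has stopped sending timestamps altogether, the option is disabled for
 * the connection and the header lengths and MSS are adjusted to match.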
14756 */ 14757 boolean_t 14758 tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp) 14759 { 14760 uint8_t flags; 14761 int options; 14762 uint8_t *up; 14763 14764 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 14765 /* 14766 * If timestamp option is aligned nicely, get values inline, 14767 * otherwise call general routine to parse. Only do that 14768 * if timestamp is the only option. 14769 */ 14770 if (TCP_HDR_LENGTH(tcph) == (uint32_t)TCP_MIN_HEADER_LENGTH + 14771 TCPOPT_REAL_TS_LEN && 14772 OK_32PTR((up = ((uint8_t *)tcph) + 14773 TCP_MIN_HEADER_LENGTH)) && 14774 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) { 14775 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4)); 14776 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8)); 14777 14778 options = TCP_OPT_TSTAMP_PRESENT; 14779 } else { 14780 if (tcp->tcp_snd_sack_ok) { 14781 tcpoptp->tcp = tcp; 14782 } else { 14783 tcpoptp->tcp = NULL; 14784 } 14785 options = tcp_parse_options(tcph, tcpoptp); 14786 } 14787 14788 if (options & TCP_OPT_TSTAMP_PRESENT) { 14789 /* 14790 * Do PAWS per RFC 1323 section 4.2. Accept RST 14791 * regardless of the timestamp, page 18 RFC 1323.bis. 14792 */ 14793 if ((flags & TH_RST) == 0 && 14794 TSTMP_LT(tcpoptp->tcp_opt_ts_val, 14795 tcp->tcp_ts_recent)) { 14796 if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt + 14797 PAWS_TIMEOUT)) { 14798 /* This segment is not acceptable. */ 14799 return (B_FALSE); 14800 } else { 14801 /* 14802 * Connection has been idle for 14803 * too long. Reset the timestamp 14804 * and assume the segment is valid. 14805 */ 14806 tcp->tcp_ts_recent = 14807 tcpoptp->tcp_opt_ts_val; 14808 } 14809 } 14810 } else { 14811 /* 14812 * If we don't get a timestamp on every packet, we 14813 * figure we can't really trust 'em, so we stop sending 14814 * and parsing them. 14815 */ 14816 tcp->tcp_snd_ts_ok = B_FALSE; 14817 14818 tcp->tcp_hdr_len -= TCPOPT_REAL_TS_LEN; 14819 tcp->tcp_tcp_hdr_len -= TCPOPT_REAL_TS_LEN; 14820 tcp->tcp_tcph->th_offset_and_rsrvd[0] -= (3 << 4); 14821 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN); 14822 if (tcp->tcp_snd_sack_ok) { 14823 ASSERT(tcp->tcp_sack_info != NULL); 14824 tcp->tcp_max_sack_blk = 4; 14825 } 14826 } 14827 return (B_TRUE); 14828 } 14829 14830 /* 14831 * Attach ancillary data to a received TCP segments for the 14832 * ancillary pieces requested by the application that are 14833 * different than they were in the previous data segment. 14834 * 14835 * Save the "current" values once memory allocation is ok so that 14836 * when memory allocation fails we can just wait for the next data segment. 14837 */ 14838 static mblk_t * 14839 tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp) 14840 { 14841 struct T_optdata_ind *todi; 14842 int optlen; 14843 uchar_t *optptr; 14844 struct T_opthdr *toh; 14845 uint_t addflag; /* Which pieces to add */ 14846 mblk_t *mp1; 14847 14848 optlen = 0; 14849 addflag = 0; 14850 /* If app asked for pktinfo and the index has changed ... */ 14851 if ((ipp->ipp_fields & IPPF_IFINDEX) && 14852 ipp->ipp_ifindex != tcp->tcp_recvifindex && 14853 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO)) { 14854 optlen += sizeof (struct T_opthdr) + 14855 sizeof (struct in6_pktinfo); 14856 addflag |= TCP_IPV6_RECVPKTINFO; 14857 } 14858 /* If app asked for hoplimit and it has changed ... 
*/ 14859 if ((ipp->ipp_fields & IPPF_HOPLIMIT) && 14860 ipp->ipp_hoplimit != tcp->tcp_recvhops && 14861 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPLIMIT)) { 14862 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 14863 addflag |= TCP_IPV6_RECVHOPLIMIT; 14864 } 14865 /* If app asked for tclass and it has changed ... */ 14866 if ((ipp->ipp_fields & IPPF_TCLASS) && 14867 ipp->ipp_tclass != tcp->tcp_recvtclass && 14868 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS)) { 14869 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 14870 addflag |= TCP_IPV6_RECVTCLASS; 14871 } 14872 /* 14873 * If app asked for hopbyhop headers and it has changed ... 14874 * For security labels, note that (1) security labels can't change on 14875 * a connected socket at all, (2) we're connected to at most one peer, 14876 * (3) if anything changes, then it must be some other extra option. 14877 */ 14878 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) && 14879 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 14880 (ipp->ipp_fields & IPPF_HOPOPTS), 14881 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 14882 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen - 14883 tcp->tcp_label_len; 14884 addflag |= TCP_IPV6_RECVHOPOPTS; 14885 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 14886 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 14887 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 14888 return (mp); 14889 } 14890 /* If app asked for dst headers before routing headers ... */ 14891 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTDSTOPTS) && 14892 ip_cmpbuf(tcp->tcp_rtdstopts, tcp->tcp_rtdstoptslen, 14893 (ipp->ipp_fields & IPPF_RTDSTOPTS), 14894 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) { 14895 optlen += sizeof (struct T_opthdr) + 14896 ipp->ipp_rtdstoptslen; 14897 addflag |= TCP_IPV6_RECVRTDSTOPTS; 14898 if (!ip_allocbuf((void **)&tcp->tcp_rtdstopts, 14899 &tcp->tcp_rtdstoptslen, (ipp->ipp_fields & IPPF_RTDSTOPTS), 14900 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) 14901 return (mp); 14902 } 14903 /* If app asked for routing headers and it has changed ... */ 14904 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) && 14905 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 14906 (ipp->ipp_fields & IPPF_RTHDR), 14907 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 14908 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 14909 addflag |= TCP_IPV6_RECVRTHDR; 14910 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 14911 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 14912 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 14913 return (mp); 14914 } 14915 /* If app asked for dest headers and it has changed ... */ 14916 if ((tcp->tcp_ipv6_recvancillary & 14917 (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) && 14918 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 14919 (ipp->ipp_fields & IPPF_DSTOPTS), 14920 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 14921 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 14922 addflag |= TCP_IPV6_RECVDSTOPTS; 14923 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 14924 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 14925 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 14926 return (mp); 14927 } 14928 14929 if (optlen == 0) { 14930 /* Nothing to add */ 14931 return (mp); 14932 } 14933 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 14934 if (mp1 == NULL) { 14935 /* 14936 * Defer sending ancillary data until the next TCP segment 14937 * arrives. 
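 * The "current" values have not been saved yet, so the same
 * ancillary data will be picked up again when that segment arrives.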
14938 */ 14939 return (mp); 14940 } 14941 mp1->b_cont = mp; 14942 mp = mp1; 14943 mp->b_wptr += sizeof (*todi) + optlen; 14944 mp->b_datap->db_type = M_PROTO; 14945 todi = (struct T_optdata_ind *)mp->b_rptr; 14946 todi->PRIM_type = T_OPTDATA_IND; 14947 todi->DATA_flag = 1; /* MORE data */ 14948 todi->OPT_length = optlen; 14949 todi->OPT_offset = sizeof (*todi); 14950 optptr = (uchar_t *)&todi[1]; 14951 /* 14952 * If app asked for pktinfo and the index has changed ... 14953 * Note that the local address never changes for the connection. 14954 */ 14955 if (addflag & TCP_IPV6_RECVPKTINFO) { 14956 struct in6_pktinfo *pkti; 14957 14958 toh = (struct T_opthdr *)optptr; 14959 toh->level = IPPROTO_IPV6; 14960 toh->name = IPV6_PKTINFO; 14961 toh->len = sizeof (*toh) + sizeof (*pkti); 14962 toh->status = 0; 14963 optptr += sizeof (*toh); 14964 pkti = (struct in6_pktinfo *)optptr; 14965 if (tcp->tcp_ipversion == IPV6_VERSION) 14966 pkti->ipi6_addr = tcp->tcp_ip6h->ip6_src; 14967 else 14968 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 14969 &pkti->ipi6_addr); 14970 pkti->ipi6_ifindex = ipp->ipp_ifindex; 14971 optptr += sizeof (*pkti); 14972 ASSERT(OK_32PTR(optptr)); 14973 /* Save as "last" value */ 14974 tcp->tcp_recvifindex = ipp->ipp_ifindex; 14975 } 14976 /* If app asked for hoplimit and it has changed ... */ 14977 if (addflag & TCP_IPV6_RECVHOPLIMIT) { 14978 toh = (struct T_opthdr *)optptr; 14979 toh->level = IPPROTO_IPV6; 14980 toh->name = IPV6_HOPLIMIT; 14981 toh->len = sizeof (*toh) + sizeof (uint_t); 14982 toh->status = 0; 14983 optptr += sizeof (*toh); 14984 *(uint_t *)optptr = ipp->ipp_hoplimit; 14985 optptr += sizeof (uint_t); 14986 ASSERT(OK_32PTR(optptr)); 14987 /* Save as "last" value */ 14988 tcp->tcp_recvhops = ipp->ipp_hoplimit; 14989 } 14990 /* If app asked for tclass and it has changed ... 
*/ 14991 if (addflag & TCP_IPV6_RECVTCLASS) { 14992 toh = (struct T_opthdr *)optptr; 14993 toh->level = IPPROTO_IPV6; 14994 toh->name = IPV6_TCLASS; 14995 toh->len = sizeof (*toh) + sizeof (uint_t); 14996 toh->status = 0; 14997 optptr += sizeof (*toh); 14998 *(uint_t *)optptr = ipp->ipp_tclass; 14999 optptr += sizeof (uint_t); 15000 ASSERT(OK_32PTR(optptr)); 15001 /* Save as "last" value */ 15002 tcp->tcp_recvtclass = ipp->ipp_tclass; 15003 } 15004 if (addflag & TCP_IPV6_RECVHOPOPTS) { 15005 toh = (struct T_opthdr *)optptr; 15006 toh->level = IPPROTO_IPV6; 15007 toh->name = IPV6_HOPOPTS; 15008 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen - 15009 tcp->tcp_label_len; 15010 toh->status = 0; 15011 optptr += sizeof (*toh); 15012 bcopy((uchar_t *)ipp->ipp_hopopts + tcp->tcp_label_len, optptr, 15013 ipp->ipp_hopoptslen - tcp->tcp_label_len); 15014 optptr += ipp->ipp_hopoptslen - tcp->tcp_label_len; 15015 ASSERT(OK_32PTR(optptr)); 15016 /* Save as last value */ 15017 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 15018 (ipp->ipp_fields & IPPF_HOPOPTS), 15019 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 15020 } 15021 if (addflag & TCP_IPV6_RECVRTDSTOPTS) { 15022 toh = (struct T_opthdr *)optptr; 15023 toh->level = IPPROTO_IPV6; 15024 toh->name = IPV6_RTHDRDSTOPTS; 15025 toh->len = sizeof (*toh) + ipp->ipp_rtdstoptslen; 15026 toh->status = 0; 15027 optptr += sizeof (*toh); 15028 bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen); 15029 optptr += ipp->ipp_rtdstoptslen; 15030 ASSERT(OK_32PTR(optptr)); 15031 /* Save as last value */ 15032 ip_savebuf((void **)&tcp->tcp_rtdstopts, 15033 &tcp->tcp_rtdstoptslen, 15034 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15035 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen); 15036 } 15037 if (addflag & TCP_IPV6_RECVRTHDR) { 15038 toh = (struct T_opthdr *)optptr; 15039 toh->level = IPPROTO_IPV6; 15040 toh->name = IPV6_RTHDR; 15041 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 15042 toh->status = 0; 15043 optptr += sizeof (*toh); 15044 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 15045 optptr += ipp->ipp_rthdrlen; 15046 ASSERT(OK_32PTR(optptr)); 15047 /* Save as last value */ 15048 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 15049 (ipp->ipp_fields & IPPF_RTHDR), 15050 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 15051 } 15052 if (addflag & (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) { 15053 toh = (struct T_opthdr *)optptr; 15054 toh->level = IPPROTO_IPV6; 15055 toh->name = IPV6_DSTOPTS; 15056 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 15057 toh->status = 0; 15058 optptr += sizeof (*toh); 15059 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 15060 optptr += ipp->ipp_dstoptslen; 15061 ASSERT(OK_32PTR(optptr)); 15062 /* Save as last value */ 15063 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 15064 (ipp->ipp_fields & IPPF_DSTOPTS), 15065 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 15066 } 15067 ASSERT(optptr == mp->b_wptr); 15068 return (mp); 15069 } 15070 15071 15072 /* 15073 * Handle a *T_BIND_REQ that has failed either due to a T_ERROR_ACK 15074 * or a "bad" IRE detected by tcp_adapt_ire. 15075 * We can't tell if the failure was due to the laddr or the faddr 15076 * thus we clear out all addresses and ports. 
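 *
 * If the failure arrived as a T_BIND_ACK, the connection is removed from
 * the classifier and the ack is rewritten in place into a T_ERROR_ACK
 * before being sent upstream.  In every case the state drops back to
 * TCPS_IDLE, the saved addresses and ports are zeroed, and any cached
 * IRE is deleted.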
15077 */ 15078 static void 15079 tcp_bind_failed(tcp_t *tcp, mblk_t *mp, int error) 15080 { 15081 queue_t *q = tcp->tcp_rq; 15082 tcph_t *tcph; 15083 struct T_error_ack *tea; 15084 conn_t *connp = tcp->tcp_connp; 15085 15086 15087 ASSERT(mp->b_datap->db_type == M_PCPROTO); 15088 15089 if (mp->b_cont) { 15090 freemsg(mp->b_cont); 15091 mp->b_cont = NULL; 15092 } 15093 tea = (struct T_error_ack *)mp->b_rptr; 15094 switch (tea->PRIM_type) { 15095 case T_BIND_ACK: 15096 /* 15097 * Need to unbind with classifier since we were just told that 15098 * our bind succeeded. 15099 */ 15100 tcp->tcp_hard_bound = B_FALSE; 15101 tcp->tcp_hard_binding = B_FALSE; 15102 15103 ipcl_hash_remove(connp); 15104 /* Reuse the mblk if possible */ 15105 ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >= 15106 sizeof (*tea)); 15107 mp->b_rptr = mp->b_datap->db_base; 15108 mp->b_wptr = mp->b_rptr + sizeof (*tea); 15109 tea = (struct T_error_ack *)mp->b_rptr; 15110 tea->PRIM_type = T_ERROR_ACK; 15111 tea->TLI_error = TSYSERR; 15112 tea->UNIX_error = error; 15113 if (tcp->tcp_state >= TCPS_SYN_SENT) { 15114 tea->ERROR_prim = T_CONN_REQ; 15115 } else { 15116 tea->ERROR_prim = O_T_BIND_REQ; 15117 } 15118 break; 15119 15120 case T_ERROR_ACK: 15121 if (tcp->tcp_state >= TCPS_SYN_SENT) 15122 tea->ERROR_prim = T_CONN_REQ; 15123 break; 15124 default: 15125 panic("tcp_bind_failed: unexpected TPI type"); 15126 /*NOTREACHED*/ 15127 } 15128 15129 tcp->tcp_state = TCPS_IDLE; 15130 if (tcp->tcp_ipversion == IPV4_VERSION) 15131 tcp->tcp_ipha->ipha_src = 0; 15132 else 15133 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 15134 /* 15135 * Copy of the src addr. in tcp_t is needed since 15136 * the lookup funcs. can only look at tcp_t 15137 */ 15138 V6_SET_ZERO(tcp->tcp_ip_src_v6); 15139 15140 tcph = tcp->tcp_tcph; 15141 tcph->th_lport[0] = 0; 15142 tcph->th_lport[1] = 0; 15143 tcp_bind_hash_remove(tcp); 15144 bzero(&connp->u_port, sizeof (connp->u_port)); 15145 /* blow away saved option results if any */ 15146 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 15147 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 15148 15149 conn_delete_ire(tcp->tcp_connp, NULL); 15150 putnext(q, mp); 15151 } 15152 15153 /* 15154 * tcp_rput_other is called by tcp_rput to handle everything other than M_DATA 15155 * messages. 15156 */ 15157 void 15158 tcp_rput_other(tcp_t *tcp, mblk_t *mp) 15159 { 15160 mblk_t *mp1; 15161 uchar_t *rptr = mp->b_rptr; 15162 queue_t *q = tcp->tcp_rq; 15163 struct T_error_ack *tea; 15164 uint32_t mss; 15165 mblk_t *syn_mp; 15166 mblk_t *mdti; 15167 int retval; 15168 mblk_t *ire_mp; 15169 15170 switch (mp->b_datap->db_type) { 15171 case M_PROTO: 15172 case M_PCPROTO: 15173 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 15174 if ((mp->b_wptr - rptr) < sizeof (t_scalar_t)) 15175 break; 15176 tea = (struct T_error_ack *)rptr; 15177 switch (tea->PRIM_type) { 15178 case T_BIND_ACK: 15179 /* 15180 * Adapt Multidata information, if any. The 15181 * following tcp_mdt_update routine will free 15182 * the message. 
15183 */ 15184 if ((mdti = tcp_mdt_info_mp(mp)) != NULL) { 15185 tcp_mdt_update(tcp, &((ip_mdt_info_t *)mdti-> 15186 b_rptr)->mdt_capab, B_TRUE); 15187 freemsg(mdti); 15188 } 15189 15190 /* Get the IRE, if we had requested for it */ 15191 ire_mp = tcp_ire_mp(mp); 15192 15193 if (tcp->tcp_hard_binding) { 15194 tcp->tcp_hard_binding = B_FALSE; 15195 tcp->tcp_hard_bound = B_TRUE; 15196 CL_INET_CONNECT(tcp); 15197 } else { 15198 if (ire_mp != NULL) 15199 freeb(ire_mp); 15200 goto after_syn_sent; 15201 } 15202 15203 retval = tcp_adapt_ire(tcp, ire_mp); 15204 if (ire_mp != NULL) 15205 freeb(ire_mp); 15206 if (retval == 0) { 15207 tcp_bind_failed(tcp, mp, 15208 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15209 ENETUNREACH : EADDRNOTAVAIL)); 15210 return; 15211 } 15212 /* 15213 * Don't let an endpoint connect to itself. 15214 * Also checked in tcp_connect() but that 15215 * check can't handle the case when the 15216 * local IP address is INADDR_ANY. 15217 */ 15218 if (tcp->tcp_ipversion == IPV4_VERSION) { 15219 if ((tcp->tcp_ipha->ipha_dst == 15220 tcp->tcp_ipha->ipha_src) && 15221 (BE16_EQL(tcp->tcp_tcph->th_lport, 15222 tcp->tcp_tcph->th_fport))) { 15223 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15224 return; 15225 } 15226 } else { 15227 if (IN6_ARE_ADDR_EQUAL( 15228 &tcp->tcp_ip6h->ip6_dst, 15229 &tcp->tcp_ip6h->ip6_src) && 15230 (BE16_EQL(tcp->tcp_tcph->th_lport, 15231 tcp->tcp_tcph->th_fport))) { 15232 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15233 return; 15234 } 15235 } 15236 ASSERT(tcp->tcp_state == TCPS_SYN_SENT); 15237 /* 15238 * This should not be possible! Just for 15239 * defensive coding... 15240 */ 15241 if (tcp->tcp_state != TCPS_SYN_SENT) 15242 goto after_syn_sent; 15243 15244 if (is_system_labeled() && 15245 !tcp_update_label(tcp, CONN_CRED(tcp->tcp_connp))) { 15246 tcp_bind_failed(tcp, mp, EHOSTUNREACH); 15247 return; 15248 } 15249 15250 ASSERT(q == tcp->tcp_rq); 15251 /* 15252 * tcp_adapt_ire() does not adjust 15253 * for TCP/IP header length. 15254 */ 15255 mss = tcp->tcp_mss - tcp->tcp_hdr_len; 15256 15257 /* 15258 * Just make sure our rwnd is at 15259 * least tcp_recv_hiwat_mss * MSS 15260 * large, and round up to the nearest 15261 * MSS. 15262 * 15263 * We do the round up here because 15264 * we need to get the interface 15265 * MTU first before we can do the 15266 * round up. 15267 */ 15268 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss), 15269 tcp_recv_hiwat_minmss * mss); 15270 q->q_hiwat = tcp->tcp_rwnd; 15271 tcp_set_ws_value(tcp); 15272 U32_TO_ABE16((tcp->tcp_rwnd >> tcp->tcp_rcv_ws), 15273 tcp->tcp_tcph->th_win); 15274 if (tcp->tcp_rcv_ws > 0 || tcp_wscale_always) 15275 tcp->tcp_snd_ws_ok = B_TRUE; 15276 15277 /* 15278 * Set tcp_snd_ts_ok to true 15279 * so that tcp_xmit_mp will 15280 * include the timestamp 15281 * option in the SYN segment. 15282 */ 15283 if (tcp_tstamp_always || 15284 (tcp->tcp_rcv_ws && tcp_tstamp_if_wscale)) { 15285 tcp->tcp_snd_ts_ok = B_TRUE; 15286 } 15287 15288 /* 15289 * tcp_snd_sack_ok can be set in 15290 * tcp_adapt_ire() if the sack metric 15291 * is set. So check it here also. 15292 */ 15293 if (tcp_sack_permitted == 2 || 15294 tcp->tcp_snd_sack_ok) { 15295 if (tcp->tcp_sack_info == NULL) { 15296 tcp->tcp_sack_info = 15297 kmem_cache_alloc(tcp_sack_info_cache, 15298 KM_SLEEP); 15299 } 15300 tcp->tcp_snd_sack_ok = B_TRUE; 15301 } 15302 15303 /* 15304 * Should we use ECN? Note that the current 15305 * default value (SunOS 5.9) of tcp_ecn_permitted 15306 * is 1. 
The reason for doing this is that there 15307 * are equipments out there that will drop ECN 15308 * enabled IP packets. Setting it to 1 avoids 15309 * compatibility problems. 15310 */ 15311 if (tcp_ecn_permitted == 2) 15312 tcp->tcp_ecn_ok = B_TRUE; 15313 15314 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 15315 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, 15316 tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 15317 if (syn_mp) { 15318 cred_t *cr; 15319 pid_t pid; 15320 15321 /* 15322 * Obtain the credential from the 15323 * thread calling connect(); the credential 15324 * lives on in the second mblk which 15325 * originated from T_CONN_REQ and is echoed 15326 * with the T_BIND_ACK from ip. If none 15327 * can be found, default to the creator 15328 * of the socket. 15329 */ 15330 if (mp->b_cont == NULL || 15331 (cr = DB_CRED(mp->b_cont)) == NULL) { 15332 cr = tcp->tcp_cred; 15333 pid = tcp->tcp_cpid; 15334 } else { 15335 pid = DB_CPID(mp->b_cont); 15336 } 15337 15338 TCP_RECORD_TRACE(tcp, syn_mp, 15339 TCP_TRACE_SEND_PKT); 15340 mblk_setcred(syn_mp, cr); 15341 DB_CPID(syn_mp) = pid; 15342 tcp_send_data(tcp, tcp->tcp_wq, syn_mp); 15343 } 15344 after_syn_sent: 15345 /* 15346 * A trailer mblk indicates a waiting client upstream. 15347 * We complete here the processing begun in 15348 * either tcp_bind() or tcp_connect() by passing 15349 * upstream the reply message they supplied. 15350 */ 15351 mp1 = mp; 15352 mp = mp->b_cont; 15353 freeb(mp1); 15354 if (mp) 15355 break; 15356 return; 15357 case T_ERROR_ACK: 15358 if (tcp->tcp_debug) { 15359 (void) strlog(TCP_MOD_ID, 0, 1, 15360 SL_TRACE|SL_ERROR, 15361 "tcp_rput_other: case T_ERROR_ACK, " 15362 "ERROR_prim == %d", 15363 tea->ERROR_prim); 15364 } 15365 switch (tea->ERROR_prim) { 15366 case O_T_BIND_REQ: 15367 case T_BIND_REQ: 15368 tcp_bind_failed(tcp, mp, 15369 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15370 ENETUNREACH : EADDRNOTAVAIL)); 15371 return; 15372 case T_UNBIND_REQ: 15373 tcp->tcp_hard_binding = B_FALSE; 15374 tcp->tcp_hard_bound = B_FALSE; 15375 if (mp->b_cont) { 15376 freemsg(mp->b_cont); 15377 mp->b_cont = NULL; 15378 } 15379 if (tcp->tcp_unbind_pending) 15380 tcp->tcp_unbind_pending = 0; 15381 else { 15382 /* From tcp_ip_unbind() - free */ 15383 freemsg(mp); 15384 return; 15385 } 15386 break; 15387 case T_SVR4_OPTMGMT_REQ: 15388 if (tcp->tcp_drop_opt_ack_cnt > 0) { 15389 /* T_OPTMGMT_REQ generated by TCP */ 15390 printf("T_SVR4_OPTMGMT_REQ failed " 15391 "%d/%d - dropped (cnt %d)\n", 15392 tea->TLI_error, tea->UNIX_error, 15393 tcp->tcp_drop_opt_ack_cnt); 15394 freemsg(mp); 15395 tcp->tcp_drop_opt_ack_cnt--; 15396 return; 15397 } 15398 break; 15399 } 15400 if (tea->ERROR_prim == T_SVR4_OPTMGMT_REQ && 15401 tcp->tcp_drop_opt_ack_cnt > 0) { 15402 printf("T_SVR4_OPTMGMT_REQ failed %d/%d " 15403 "- dropped (cnt %d)\n", 15404 tea->TLI_error, tea->UNIX_error, 15405 tcp->tcp_drop_opt_ack_cnt); 15406 freemsg(mp); 15407 tcp->tcp_drop_opt_ack_cnt--; 15408 return; 15409 } 15410 break; 15411 case T_OPTMGMT_ACK: 15412 if (tcp->tcp_drop_opt_ack_cnt > 0) { 15413 /* T_OPTMGMT_REQ generated by TCP */ 15414 freemsg(mp); 15415 tcp->tcp_drop_opt_ack_cnt--; 15416 return; 15417 } 15418 break; 15419 default: 15420 break; 15421 } 15422 break; 15423 case M_CTL: 15424 /* 15425 * ICMP messages. 15426 */ 15427 tcp_icmp_error(tcp, mp); 15428 return; 15429 case M_FLUSH: 15430 if (*rptr & FLUSHR) 15431 flushq(q, FLUSHDATA); 15432 break; 15433 default: 15434 break; 15435 } 15436 /* 15437 * Make sure we set this bit before sending the ACK for 15438 * bind. 
Otherwise accept could possibly run and free 15439 * this tcp struct. 15440 */ 15441 putnext(q, mp); 15442 } 15443 15444 /* 15445 * Called as the result of a qbufcall or a qtimeout to remedy a failure 15446 * to allocate a T_ordrel_ind in tcp_rsrv(). qenable(q) will make 15447 * tcp_rsrv() try again. 15448 */ 15449 static void 15450 tcp_ordrel_kick(void *arg) 15451 { 15452 conn_t *connp = (conn_t *)arg; 15453 tcp_t *tcp = connp->conn_tcp; 15454 15455 tcp->tcp_ordrelid = 0; 15456 tcp->tcp_timeout = B_FALSE; 15457 if (!TCP_IS_DETACHED(tcp) && tcp->tcp_rq != NULL && 15458 tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 15459 qenable(tcp->tcp_rq); 15460 } 15461 } 15462 15463 /* ARGSUSED */ 15464 static void 15465 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2) 15466 { 15467 conn_t *connp = (conn_t *)arg; 15468 tcp_t *tcp = connp->conn_tcp; 15469 queue_t *q = tcp->tcp_rq; 15470 uint_t thwin; 15471 15472 freeb(mp); 15473 15474 TCP_STAT(tcp_rsrv_calls); 15475 15476 if (TCP_IS_DETACHED(tcp) || q == NULL) { 15477 return; 15478 } 15479 15480 if (tcp->tcp_fused) { 15481 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 15482 15483 ASSERT(tcp->tcp_fused); 15484 ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused); 15485 ASSERT(peer_tcp->tcp_loopback_peer == tcp); 15486 ASSERT(!TCP_IS_DETACHED(tcp)); 15487 ASSERT(tcp->tcp_connp->conn_sqp == 15488 peer_tcp->tcp_connp->conn_sqp); 15489 15490 /* 15491 * Normally we would not get backenabled in synchronous 15492 * streams mode, but in case this happens, we need to stop 15493 * synchronous streams temporarily to prevent a race with 15494 * tcp_fuse_rrw() or tcp_fuse_rinfop(). It is safe to access 15495 * tcp_rcv_list here because those entry points will return 15496 * right away when synchronous streams is stopped. 15497 */ 15498 TCP_FUSE_SYNCSTR_STOP(tcp); 15499 if (tcp->tcp_rcv_list != NULL) 15500 (void) tcp_rcv_drain(tcp->tcp_rq, tcp); 15501 15502 tcp_clrqfull(peer_tcp); 15503 TCP_FUSE_SYNCSTR_RESUME(tcp); 15504 TCP_STAT(tcp_fusion_backenabled); 15505 return; 15506 } 15507 15508 if (canputnext(q)) { 15509 tcp->tcp_rwnd = q->q_hiwat; 15510 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 15511 << tcp->tcp_rcv_ws; 15512 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 15513 /* 15514 * Send back a window update immediately if TCP is above 15515 * ESTABLISHED state and the increase of the rcv window 15516 * that the other side knows is at least 1 MSS after flow 15517 * control is lifted. 15518 */ 15519 if (tcp->tcp_state >= TCPS_ESTABLISHED && 15520 (q->q_hiwat - thwin >= tcp->tcp_mss)) { 15521 tcp_xmit_ctl(NULL, tcp, 15522 (tcp->tcp_swnd == 0) ? tcp->tcp_suna : 15523 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 15524 BUMP_MIB(&tcp_mib, tcpOutWinUpdate); 15525 } 15526 } 15527 /* Handle a failure to allocate a T_ORDREL_IND here */ 15528 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 15529 ASSERT(tcp->tcp_listener == NULL); 15530 if (tcp->tcp_rcv_list != NULL) { 15531 (void) tcp_rcv_drain(q, tcp); 15532 } 15533 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15534 mp = mi_tpi_ordrel_ind(); 15535 if (mp) { 15536 tcp->tcp_ordrel_done = B_TRUE; 15537 putnext(q, mp); 15538 if (tcp->tcp_deferred_clean_death) { 15539 /* 15540 * tcp_clean_death was deferred for 15541 * T_ORDREL_IND - do it now 15542 */ 15543 tcp->tcp_deferred_clean_death = B_FALSE; 15544 (void) tcp_clean_death(tcp, 15545 tcp->tcp_client_errno, 22); 15546 } 15547 } else if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) { 15548 /* 15549 * If there isn't already a timer running 15550 * start one. 
Use a 4 second
15551 * timer as a fallback since it can't fail.
15552 */
15553 tcp->tcp_timeout = B_TRUE;
15554 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
15555 MSEC_TO_TICK(4000));
15556 }
15557 }
15558 }
15559
15560 /*
15561 * The read side service routine is called mostly when we get back-enabled as a
15562 * result of flow control relief. Since we don't actually queue anything in
15563 * TCP, we have no data to send out of here. What we do is clear the receive
15564 * window, and send out a window update.
15565 * This routine is also called to drive an orderly release message upstream
15566 * if the attempt in tcp_rput failed.
15567 */
15568 static void
15569 tcp_rsrv(queue_t *q)
15570 {
15571 conn_t *connp = Q_TO_CONN(q);
15572 tcp_t *tcp = connp->conn_tcp;
15573 mblk_t *mp;
15574
15575 /* No code does a putq on the read side */
15576 ASSERT(q->q_first == NULL);
15577
15578 /* Nothing to do for the default queue */
15579 if (q == tcp_g_q) {
15580 return;
15581 }
15582
15583 mp = allocb(0, BPRI_HI);
15584 if (mp == NULL) {
15585 /*
15586 * We are under memory pressure. Return for now and
15587 * we will be called again later.
15588 */
15589 if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
15590 /*
15591 * If there isn't already a timer running
15592 * start one. Use a 4 second
15593 * timer as a fallback since it can't fail.
15594 */
15595 tcp->tcp_timeout = B_TRUE;
15596 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
15597 MSEC_TO_TICK(4000));
15598 }
15599 return;
15600 }
15601 CONN_INC_REF(connp);
15602 squeue_enter(connp->conn_sqp, mp, tcp_rsrv_input, connp,
15603 SQTAG_TCP_RSRV);
15604 }
15605
15606 /*
15607 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
15608 * We do not allow the receive window to shrink. After setting rwnd,
15609 * set the flow control hiwat of the stream.
15610 *
15611 * This function is called in 2 cases:
15612 *
15613 * 1) Before data transfer begins, in tcp_accept_comm() for accepting a
15614 * connection (passive open) and in tcp_rput_data() for active connect.
15615 * This is called after tcp_mss_set() when the desired MSS value is known.
15616 * This makes sure that our window size is a multiple of the other side's
15617 * MSS.
15618 * 2) Handling SO_RCVBUF option.
15619 *
15620 * It is ASSUMED that the requested size is a multiple of the current MSS.
15621 *
15622 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
15623 * user requests so.
15624 */
15625 static int
15626 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
15627 {
15628 uint32_t mss = tcp->tcp_mss;
15629 uint32_t old_max_rwnd;
15630 uint32_t max_transmittable_rwnd;
15631 boolean_t tcp_detached = TCP_IS_DETACHED(tcp);
15632
15633 if (tcp->tcp_fused) {
15634 size_t sth_hiwat;
15635 tcp_t *peer_tcp = tcp->tcp_loopback_peer;
15636
15637 ASSERT(peer_tcp != NULL);
15638 /*
15639 * Record the stream head's high water mark for
15640 * this endpoint; this is used for flow-control
15641 * purposes in tcp_fuse_output().
15642 */
15643 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
15644 if (!tcp_detached)
15645 (void) mi_set_sth_hiwat(tcp->tcp_rq, sth_hiwat);
15646
15647 /*
15648 * In the fusion case, the maxpsz stream head value of
15649 * our peer is set according to its send buffer size
15650 * and our receive buffer size; since the latter may
15651 * have changed we need to update the peer's maxpsz.
15652 */ 15653 (void) tcp_maxpsz_set(peer_tcp, B_TRUE); 15654 return (rwnd); 15655 } 15656 15657 if (tcp_detached) 15658 old_max_rwnd = tcp->tcp_rwnd; 15659 else 15660 old_max_rwnd = tcp->tcp_rq->q_hiwat; 15661 15662 /* 15663 * Insist on a receive window that is at least 15664 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid 15665 * funny TCP interactions of Nagle algorithm, SWS avoidance 15666 * and delayed acknowledgement. 15667 */ 15668 rwnd = MAX(rwnd, tcp_recv_hiwat_minmss * mss); 15669 15670 /* 15671 * If window size info has already been exchanged, TCP should not 15672 * shrink the window. Shrinking window is doable if done carefully. 15673 * We may add that support later. But so far there is not a real 15674 * need to do that. 15675 */ 15676 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) { 15677 /* MSS may have changed, do a round up again. */ 15678 rwnd = MSS_ROUNDUP(old_max_rwnd, mss); 15679 } 15680 15681 /* 15682 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check 15683 * can be applied even before the window scale option is decided. 15684 */ 15685 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws; 15686 if (rwnd > max_transmittable_rwnd) { 15687 rwnd = max_transmittable_rwnd - 15688 (max_transmittable_rwnd % mss); 15689 if (rwnd < mss) 15690 rwnd = max_transmittable_rwnd; 15691 /* 15692 * If we're over the limit we may have to back down tcp_rwnd. 15693 * The increment below won't work for us. So we set all three 15694 * here and the increment below will have no effect. 15695 */ 15696 tcp->tcp_rwnd = old_max_rwnd = rwnd; 15697 } 15698 if (tcp->tcp_localnet) { 15699 tcp->tcp_rack_abs_max = 15700 MIN(tcp_local_dacks_max, rwnd / mss / 2); 15701 } else { 15702 /* 15703 * For a remote host on a different subnet (through a router), 15704 * we ack every other packet to be conforming to RFC1122. 15705 * tcp_deferred_acks_max is default to 2. 15706 */ 15707 tcp->tcp_rack_abs_max = 15708 MIN(tcp_deferred_acks_max, rwnd / mss / 2); 15709 } 15710 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max) 15711 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 15712 else 15713 tcp->tcp_rack_cur_max = 0; 15714 /* 15715 * Increment the current rwnd by the amount the maximum grew (we 15716 * can not overwrite it since we might be in the middle of a 15717 * connection.) 15718 */ 15719 tcp->tcp_rwnd += rwnd - old_max_rwnd; 15720 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 15721 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max) 15722 tcp->tcp_cwnd_max = rwnd; 15723 15724 if (tcp_detached) 15725 return (rwnd); 15726 /* 15727 * We set the maximum receive window into rq->q_hiwat. 15728 * This is not actually used for flow control. 15729 */ 15730 tcp->tcp_rq->q_hiwat = rwnd; 15731 /* 15732 * Set the Stream head high water mark. This doesn't have to be 15733 * here, since we are simply using default values, but we would 15734 * prefer to choose these values algorithmically, with a likely 15735 * relationship to rwnd. 15736 */ 15737 (void) mi_set_sth_hiwat(tcp->tcp_rq, MAX(rwnd, tcp_sth_rcv_hiwat)); 15738 return (rwnd); 15739 } 15740 15741 /* 15742 * Return SNMP stuff in buffer in mpdata. 
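 * The fixed-size MIB2_TCP counters are appended to mpdata and replied
 * first; copies of the control message then carry the IPv4 and IPv6
 * connection tables and the transport MLP attribute tables (the latter
 * are freed instead of sent when empty), each passed upstream with
 * qreply().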
15743 */ 15744 int 15745 tcp_snmp_get(queue_t *q, mblk_t *mpctl) 15746 { 15747 mblk_t *mpdata; 15748 mblk_t *mp_conn_ctl = NULL; 15749 mblk_t *mp_conn_tail; 15750 mblk_t *mp_attr_ctl = NULL; 15751 mblk_t *mp_attr_tail; 15752 mblk_t *mp6_conn_ctl = NULL; 15753 mblk_t *mp6_conn_tail; 15754 mblk_t *mp6_attr_ctl = NULL; 15755 mblk_t *mp6_attr_tail; 15756 struct opthdr *optp; 15757 mib2_tcpConnEntry_t tce; 15758 mib2_tcp6ConnEntry_t tce6; 15759 mib2_transportMLPEntry_t mlp; 15760 connf_t *connfp; 15761 conn_t *connp; 15762 int i; 15763 boolean_t ispriv; 15764 zoneid_t zoneid; 15765 int v4_conn_idx; 15766 int v6_conn_idx; 15767 15768 if (mpctl == NULL || 15769 (mpdata = mpctl->b_cont) == NULL || 15770 (mp_conn_ctl = copymsg(mpctl)) == NULL || 15771 (mp_attr_ctl = copymsg(mpctl)) == NULL || 15772 (mp6_conn_ctl = copymsg(mpctl)) == NULL || 15773 (mp6_attr_ctl = copymsg(mpctl)) == NULL) { 15774 freemsg(mp_conn_ctl); 15775 freemsg(mp_attr_ctl); 15776 freemsg(mp6_conn_ctl); 15777 freemsg(mp6_attr_ctl); 15778 return (0); 15779 } 15780 15781 /* build table of connections -- need count in fixed part */ 15782 SET_MIB(tcp_mib.tcpRtoAlgorithm, 4); /* vanj */ 15783 SET_MIB(tcp_mib.tcpRtoMin, tcp_rexmit_interval_min); 15784 SET_MIB(tcp_mib.tcpRtoMax, tcp_rexmit_interval_max); 15785 SET_MIB(tcp_mib.tcpMaxConn, -1); 15786 SET_MIB(tcp_mib.tcpCurrEstab, 0); 15787 15788 ispriv = 15789 secpolicy_net_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0; 15790 zoneid = Q_TO_CONN(q)->conn_zoneid; 15791 15792 v4_conn_idx = v6_conn_idx = 0; 15793 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL; 15794 15795 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 15796 15797 connfp = &ipcl_globalhash_fanout[i]; 15798 15799 connp = NULL; 15800 15801 while ((connp = 15802 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 15803 tcp_t *tcp; 15804 boolean_t needattr; 15805 15806 if (connp->conn_zoneid != zoneid) 15807 continue; /* not in this zone */ 15808 15809 tcp = connp->conn_tcp; 15810 UPDATE_MIB(&tcp_mib, tcpInSegs, tcp->tcp_ibsegs); 15811 tcp->tcp_ibsegs = 0; 15812 UPDATE_MIB(&tcp_mib, tcpOutSegs, tcp->tcp_obsegs); 15813 tcp->tcp_obsegs = 0; 15814 15815 tce6.tcp6ConnState = tce.tcpConnState = 15816 tcp_snmp_state(tcp); 15817 if (tce.tcpConnState == MIB2_TCP_established || 15818 tce.tcpConnState == MIB2_TCP_closeWait) 15819 BUMP_MIB(&tcp_mib, tcpCurrEstab); 15820 15821 needattr = B_FALSE; 15822 bzero(&mlp, sizeof (mlp)); 15823 if (connp->conn_mlp_type != mlptSingle) { 15824 if (connp->conn_mlp_type == mlptShared || 15825 connp->conn_mlp_type == mlptBoth) 15826 mlp.tme_flags |= MIB2_TMEF_SHARED; 15827 if (connp->conn_mlp_type == mlptPrivate || 15828 connp->conn_mlp_type == mlptBoth) 15829 mlp.tme_flags |= MIB2_TMEF_PRIVATE; 15830 needattr = B_TRUE; 15831 } 15832 if (connp->conn_peercred != NULL) { 15833 ts_label_t *tsl; 15834 15835 tsl = crgetlabel(connp->conn_peercred); 15836 mlp.tme_doi = label2doi(tsl); 15837 mlp.tme_label = *label2bslabel(tsl); 15838 needattr = B_TRUE; 15839 } 15840 15841 /* Create a message to report on IPv6 entries */ 15842 if (tcp->tcp_ipversion == IPV6_VERSION) { 15843 tce6.tcp6ConnLocalAddress = tcp->tcp_ip_src_v6; 15844 tce6.tcp6ConnRemAddress = tcp->tcp_remote_v6; 15845 tce6.tcp6ConnLocalPort = ntohs(tcp->tcp_lport); 15846 tce6.tcp6ConnRemPort = ntohs(tcp->tcp_fport); 15847 tce6.tcp6ConnIfIndex = tcp->tcp_bound_if; 15848 /* Don't want just anybody seeing these... 
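 * unprivileged callers instead get queue depths: ce_snxt and ce_rnxt
 * carry snxt - suna and rnxt - rack, with ce_suna and ce_rack zeroed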
*/ 15849 if (ispriv) { 15850 tce6.tcp6ConnEntryInfo.ce_snxt = 15851 tcp->tcp_snxt; 15852 tce6.tcp6ConnEntryInfo.ce_suna = 15853 tcp->tcp_suna; 15854 tce6.tcp6ConnEntryInfo.ce_rnxt = 15855 tcp->tcp_rnxt; 15856 tce6.tcp6ConnEntryInfo.ce_rack = 15857 tcp->tcp_rack; 15858 } else { 15859 /* 15860 * Netstat, unfortunately, uses this to 15861 * get send/receive queue sizes. How to fix? 15862 * Why not compute the difference only? 15863 */ 15864 tce6.tcp6ConnEntryInfo.ce_snxt = 15865 tcp->tcp_snxt - tcp->tcp_suna; 15866 tce6.tcp6ConnEntryInfo.ce_suna = 0; 15867 tce6.tcp6ConnEntryInfo.ce_rnxt = 15868 tcp->tcp_rnxt - tcp->tcp_rack; 15869 tce6.tcp6ConnEntryInfo.ce_rack = 0; 15870 } 15871 15872 tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd; 15873 tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 15874 tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto; 15875 tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss; 15876 tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state; 15877 15878 (void) snmp_append_data2(mp6_conn_ctl->b_cont, 15879 &mp6_conn_tail, (char *)&tce6, sizeof (tce6)); 15880 15881 mlp.tme_connidx = v6_conn_idx++; 15882 if (needattr) 15883 (void) snmp_append_data2(mp6_attr_ctl->b_cont, 15884 &mp6_attr_tail, (char *)&mlp, sizeof (mlp)); 15885 } 15886 /* 15887 * Create an IPv4 table entry for IPv4 entries and also 15888 * for IPv6 entries which are bound to in6addr_any 15889 * but don't have IPV6_V6ONLY set. 15890 * (i.e. anything an IPv4 peer could connect to) 15891 */ 15892 if (tcp->tcp_ipversion == IPV4_VERSION || 15893 (tcp->tcp_state <= TCPS_LISTEN && 15894 !tcp->tcp_connp->conn_ipv6_v6only && 15895 IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip_src_v6))) { 15896 if (tcp->tcp_ipversion == IPV6_VERSION) { 15897 tce.tcpConnRemAddress = INADDR_ANY; 15898 tce.tcpConnLocalAddress = INADDR_ANY; 15899 } else { 15900 tce.tcpConnRemAddress = 15901 tcp->tcp_remote; 15902 tce.tcpConnLocalAddress = 15903 tcp->tcp_ip_src; 15904 } 15905 tce.tcpConnLocalPort = ntohs(tcp->tcp_lport); 15906 tce.tcpConnRemPort = ntohs(tcp->tcp_fport); 15907 /* Don't want just anybody seeing these... */ 15908 if (ispriv) { 15909 tce.tcpConnEntryInfo.ce_snxt = 15910 tcp->tcp_snxt; 15911 tce.tcpConnEntryInfo.ce_suna = 15912 tcp->tcp_suna; 15913 tce.tcpConnEntryInfo.ce_rnxt = 15914 tcp->tcp_rnxt; 15915 tce.tcpConnEntryInfo.ce_rack = 15916 tcp->tcp_rack; 15917 } else { 15918 /* 15919 * Netstat, unfortunately, uses this to 15920 * get send/receive queue sizes. How 15921 * to fix? 15922 * Why not compute the difference only? 
15923 */ 15924 tce.tcpConnEntryInfo.ce_snxt = 15925 tcp->tcp_snxt - tcp->tcp_suna; 15926 tce.tcpConnEntryInfo.ce_suna = 0; 15927 tce.tcpConnEntryInfo.ce_rnxt = 15928 tcp->tcp_rnxt - tcp->tcp_rack; 15929 tce.tcpConnEntryInfo.ce_rack = 0; 15930 } 15931 15932 tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd; 15933 tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 15934 tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto; 15935 tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss; 15936 tce.tcpConnEntryInfo.ce_state = 15937 tcp->tcp_state; 15938 15939 (void) snmp_append_data2(mp_conn_ctl->b_cont, 15940 &mp_conn_tail, (char *)&tce, sizeof (tce)); 15941 15942 mlp.tme_connidx = v4_conn_idx++; 15943 if (needattr) 15944 (void) snmp_append_data2( 15945 mp_attr_ctl->b_cont, 15946 &mp_attr_tail, (char *)&mlp, 15947 sizeof (mlp)); 15948 } 15949 } 15950 } 15951 15952 /* fixed length structure for IPv4 and IPv6 counters */ 15953 SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t)); 15954 SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t)); 15955 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 15956 optp->level = MIB2_TCP; 15957 optp->name = 0; 15958 (void) snmp_append_data(mpdata, (char *)&tcp_mib, sizeof (tcp_mib)); 15959 optp->len = msgdsize(mpdata); 15960 qreply(q, mpctl); 15961 15962 /* table of connections... */ 15963 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[ 15964 sizeof (struct T_optmgmt_ack)]; 15965 optp->level = MIB2_TCP; 15966 optp->name = MIB2_TCP_CONN; 15967 optp->len = msgdsize(mp_conn_ctl->b_cont); 15968 qreply(q, mp_conn_ctl); 15969 15970 /* table of MLP attributes... */ 15971 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[ 15972 sizeof (struct T_optmgmt_ack)]; 15973 optp->level = MIB2_TCP; 15974 optp->name = EXPER_XPORT_MLP; 15975 optp->len = msgdsize(mp_attr_ctl->b_cont); 15976 if (optp->len == 0) 15977 freemsg(mp_attr_ctl); 15978 else 15979 qreply(q, mp_attr_ctl); 15980 15981 /* table of IPv6 connections... */ 15982 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[ 15983 sizeof (struct T_optmgmt_ack)]; 15984 optp->level = MIB2_TCP6; 15985 optp->name = MIB2_TCP6_CONN; 15986 optp->len = msgdsize(mp6_conn_ctl->b_cont); 15987 qreply(q, mp6_conn_ctl); 15988 15989 /* table of IPv6 MLP attributes... */ 15990 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[ 15991 sizeof (struct T_optmgmt_ack)]; 15992 optp->level = MIB2_TCP6; 15993 optp->name = EXPER_XPORT_MLP; 15994 optp->len = msgdsize(mp6_attr_ctl->b_cont); 15995 if (optp->len == 0) 15996 freemsg(mp6_attr_ctl); 15997 else 15998 qreply(q, mp6_attr_ctl); 15999 return (1); 16000 } 16001 16002 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */ 16003 /* ARGSUSED */ 16004 int 16005 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len) 16006 { 16007 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr; 16008 16009 switch (level) { 16010 case MIB2_TCP: 16011 switch (name) { 16012 case 13: 16013 if (tce->tcpConnState != MIB2_TCP_deleteTCB) 16014 return (0); 16015 /* TODO: delete entry defined by tce */ 16016 return (1); 16017 default: 16018 return (0); 16019 } 16020 default: 16021 return (1); 16022 } 16023 } 16024 16025 /* Translate TCP state to MIB2 TCP state. 
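 * TCPS_IDLE and TCPS_BOUND have no RFC 1213 equivalent and are reported
 * as closed; unrecognized states map to 0.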
*/ 16026 static int 16027 tcp_snmp_state(tcp_t *tcp) 16028 { 16029 if (tcp == NULL) 16030 return (0); 16031 16032 switch (tcp->tcp_state) { 16033 case TCPS_CLOSED: 16034 case TCPS_IDLE: /* RFC1213 doesn't have analogue for IDLE & BOUND */ 16035 case TCPS_BOUND: 16036 return (MIB2_TCP_closed); 16037 case TCPS_LISTEN: 16038 return (MIB2_TCP_listen); 16039 case TCPS_SYN_SENT: 16040 return (MIB2_TCP_synSent); 16041 case TCPS_SYN_RCVD: 16042 return (MIB2_TCP_synReceived); 16043 case TCPS_ESTABLISHED: 16044 return (MIB2_TCP_established); 16045 case TCPS_CLOSE_WAIT: 16046 return (MIB2_TCP_closeWait); 16047 case TCPS_FIN_WAIT_1: 16048 return (MIB2_TCP_finWait1); 16049 case TCPS_CLOSING: 16050 return (MIB2_TCP_closing); 16051 case TCPS_LAST_ACK: 16052 return (MIB2_TCP_lastAck); 16053 case TCPS_FIN_WAIT_2: 16054 return (MIB2_TCP_finWait2); 16055 case TCPS_TIME_WAIT: 16056 return (MIB2_TCP_timeWait); 16057 default: 16058 return (0); 16059 } 16060 } 16061 16062 static char tcp_report_header[] = 16063 "TCP " MI_COL_HDRPAD_STR 16064 "zone dest snxt suna " 16065 "swnd rnxt rack rwnd rto mss w sw rw t " 16066 "recent [lport,fport] state"; 16067 16068 /* 16069 * TCP status report triggered via the Named Dispatch mechanism. 16070 */ 16071 /* ARGSUSED */ 16072 static void 16073 tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval, tcp_t *thisstream, 16074 cred_t *cr) 16075 { 16076 char hash[10], addrbuf[INET6_ADDRSTRLEN]; 16077 boolean_t ispriv = secpolicy_net_config(cr, B_TRUE) == 0; 16078 char cflag; 16079 in6_addr_t v6dst; 16080 char buf[80]; 16081 uint_t print_len, buf_len; 16082 16083 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16084 if (buf_len <= 0) 16085 return; 16086 16087 if (hashval >= 0) 16088 (void) sprintf(hash, "%03d ", hashval); 16089 else 16090 hash[0] = '\0'; 16091 16092 /* 16093 * Note that we use the remote address in the tcp_b structure. 16094 * This means that it will print out the real destination address, 16095 * not the next hop's address if source routing is used. This 16096 * avoid the confusion on the output because user may not 16097 * know that source routing is used for a connection. 16098 */ 16099 if (tcp->tcp_ipversion == IPV4_VERSION) { 16100 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &v6dst); 16101 } else { 16102 v6dst = tcp->tcp_remote_v6; 16103 } 16104 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16105 /* 16106 * the ispriv checks are so that normal users cannot determine 16107 * sequence number information using NDD. 16108 */ 16109 16110 if (TCP_IS_DETACHED(tcp)) 16111 cflag = '*'; 16112 else 16113 cflag = ' '; 16114 print_len = snprintf((char *)mp->b_wptr, buf_len, 16115 "%s " MI_COL_PTRFMT_STR "%d %s %08x %08x %010d %08x %08x " 16116 "%010d %05ld %05d %1d %02d %02d %1d %08x %s%c\n", 16117 hash, 16118 (void *)tcp, 16119 tcp->tcp_connp->conn_zoneid, 16120 addrbuf, 16121 (ispriv) ? tcp->tcp_snxt : 0, 16122 (ispriv) ? tcp->tcp_suna : 0, 16123 tcp->tcp_swnd, 16124 (ispriv) ? tcp->tcp_rnxt : 0, 16125 (ispriv) ? tcp->tcp_rack : 0, 16126 tcp->tcp_rwnd, 16127 tcp->tcp_rto, 16128 tcp->tcp_mss, 16129 tcp->tcp_snd_ws_ok, 16130 tcp->tcp_snd_ws, 16131 tcp->tcp_rcv_ws, 16132 tcp->tcp_snd_ts_ok, 16133 tcp->tcp_ts_recent, 16134 tcp_display(tcp, buf, DISP_PORT_ONLY), cflag); 16135 if (print_len < buf_len) { 16136 ((mblk_t *)mp)->b_wptr += print_len; 16137 } else { 16138 ((mblk_t *)mp)->b_wptr += buf_len; 16139 } 16140 } 16141 16142 /* 16143 * TCP status report (for listeners only) triggered via the Named Dispatch 16144 * mechanism. 
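 * Each line shows the hash bucket, tcp_t address, zone, bound address and
 * port, the connection request sequence number, and the q0/q/max backlog
 * counts, with a '*' flagging listeners that have SYN-flood defense
 * enabled.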
16145 */ 16146 /* ARGSUSED */ 16147 static void 16148 tcp_report_listener(mblk_t *mp, tcp_t *tcp, int hashval) 16149 { 16150 char addrbuf[INET6_ADDRSTRLEN]; 16151 in6_addr_t v6dst; 16152 uint_t print_len, buf_len; 16153 16154 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16155 if (buf_len <= 0) 16156 return; 16157 16158 if (tcp->tcp_ipversion == IPV4_VERSION) { 16159 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, &v6dst); 16160 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16161 } else { 16162 (void) inet_ntop(AF_INET6, &tcp->tcp_ip6h->ip6_src, 16163 addrbuf, sizeof (addrbuf)); 16164 } 16165 print_len = snprintf((char *)mp->b_wptr, buf_len, 16166 "%03d " 16167 MI_COL_PTRFMT_STR 16168 "%d %s %05u %08u %d/%d/%d%c\n", 16169 hashval, (void *)tcp, 16170 tcp->tcp_connp->conn_zoneid, 16171 addrbuf, 16172 (uint_t)BE16_TO_U16(tcp->tcp_tcph->th_lport), 16173 tcp->tcp_conn_req_seqnum, 16174 tcp->tcp_conn_req_cnt_q0, tcp->tcp_conn_req_cnt_q, 16175 tcp->tcp_conn_req_max, 16176 tcp->tcp_syn_defense ? '*' : ' '); 16177 if (print_len < buf_len) { 16178 ((mblk_t *)mp)->b_wptr += print_len; 16179 } else { 16180 ((mblk_t *)mp)->b_wptr += buf_len; 16181 } 16182 } 16183 16184 /* TCP status report triggered via the Named Dispatch mechanism. */ 16185 /* ARGSUSED */ 16186 static int 16187 tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16188 { 16189 tcp_t *tcp; 16190 int i; 16191 conn_t *connp; 16192 connf_t *connfp; 16193 zoneid_t zoneid; 16194 16195 /* 16196 * Because of the ndd constraint, at most we can have 64K buffer 16197 * to put in all TCP info. So to be more efficient, just 16198 * allocate a 64K buffer here, assuming we need that large buffer. 16199 * This may be a problem as any user can read tcp_status. Therefore 16200 * we limit the rate of doing this using tcp_ndd_get_info_interval. 16201 * This should be OK as normal users should not do this too often. 16202 */ 16203 if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) { 16204 if (ddi_get_lbolt() - tcp_last_ndd_get_info_time < 16205 drv_usectohz(tcp_ndd_get_info_interval * 1000)) { 16206 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16207 return (0); 16208 } 16209 } 16210 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16211 /* The following may work even if we cannot get a large buf. */ 16212 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16213 return (0); 16214 } 16215 16216 (void) mi_mpprintf(mp, "%s", tcp_report_header); 16217 16218 zoneid = Q_TO_CONN(q)->conn_zoneid; 16219 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16220 16221 connfp = &ipcl_globalhash_fanout[i]; 16222 16223 connp = NULL; 16224 16225 while ((connp = 16226 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16227 tcp = connp->conn_tcp; 16228 if (zoneid != GLOBAL_ZONEID && 16229 zoneid != connp->conn_zoneid) 16230 continue; 16231 tcp_report_item(mp->b_cont, tcp, -1, tcp, 16232 cr); 16233 } 16234 16235 } 16236 16237 tcp_last_ndd_get_info_time = ddi_get_lbolt(); 16238 return (0); 16239 } 16240 16241 /* TCP status report triggered via the Named Dispatch mechanism. */ 16242 /* ARGSUSED */ 16243 static int 16244 tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16245 { 16246 tf_t *tbf; 16247 tcp_t *tcp; 16248 int i; 16249 zoneid_t zoneid; 16250 16251 /* Refer to comments in tcp_status_report(). 
*/ 16252 if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) { 16253 if (ddi_get_lbolt() - tcp_last_ndd_get_info_time < 16254 drv_usectohz(tcp_ndd_get_info_interval * 1000)) { 16255 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16256 return (0); 16257 } 16258 } 16259 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16260 /* The following may work even if we cannot get a large buf. */ 16261 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16262 return (0); 16263 } 16264 16265 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16266 16267 zoneid = Q_TO_CONN(q)->conn_zoneid; 16268 16269 for (i = 0; i < A_CNT(tcp_bind_fanout); i++) { 16270 tbf = &tcp_bind_fanout[i]; 16271 mutex_enter(&tbf->tf_lock); 16272 for (tcp = tbf->tf_tcp; tcp != NULL; 16273 tcp = tcp->tcp_bind_hash) { 16274 if (zoneid != GLOBAL_ZONEID && 16275 zoneid != tcp->tcp_connp->conn_zoneid) 16276 continue; 16277 CONN_INC_REF(tcp->tcp_connp); 16278 tcp_report_item(mp->b_cont, tcp, i, 16279 Q_TO_TCP(q), cr); 16280 CONN_DEC_REF(tcp->tcp_connp); 16281 } 16282 mutex_exit(&tbf->tf_lock); 16283 } 16284 tcp_last_ndd_get_info_time = ddi_get_lbolt(); 16285 return (0); 16286 } 16287 16288 /* TCP status report triggered via the Named Dispatch mechanism. */ 16289 /* ARGSUSED */ 16290 static int 16291 tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16292 { 16293 connf_t *connfp; 16294 conn_t *connp; 16295 tcp_t *tcp; 16296 int i; 16297 zoneid_t zoneid; 16298 16299 /* Refer to comments in tcp_status_report(). */ 16300 if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) { 16301 if (ddi_get_lbolt() - tcp_last_ndd_get_info_time < 16302 drv_usectohz(tcp_ndd_get_info_interval * 1000)) { 16303 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16304 return (0); 16305 } 16306 } 16307 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16308 /* The following may work even if we cannot get a large buf. */ 16309 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16310 return (0); 16311 } 16312 16313 (void) mi_mpprintf(mp, 16314 " TCP " MI_COL_HDRPAD_STR 16315 "zone IP addr port seqnum backlog (q0/q/max)"); 16316 16317 zoneid = Q_TO_CONN(q)->conn_zoneid; 16318 16319 for (i = 0; i < ipcl_bind_fanout_size; i++) { 16320 connfp = &ipcl_bind_fanout[i]; 16321 connp = NULL; 16322 while ((connp = 16323 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16324 tcp = connp->conn_tcp; 16325 if (zoneid != GLOBAL_ZONEID && 16326 zoneid != connp->conn_zoneid) 16327 continue; 16328 tcp_report_listener(mp->b_cont, tcp, i); 16329 } 16330 } 16331 16332 tcp_last_ndd_get_info_time = ddi_get_lbolt(); 16333 return (0); 16334 } 16335 16336 /* TCP status report triggered via the Named Dispatch mechanism. */ 16337 /* ARGSUSED */ 16338 static int 16339 tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16340 { 16341 connf_t *connfp; 16342 conn_t *connp; 16343 tcp_t *tcp; 16344 int i; 16345 zoneid_t zoneid; 16346 16347 /* Refer to comments in tcp_status_report(). */ 16348 if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) { 16349 if (ddi_get_lbolt() - tcp_last_ndd_get_info_time < 16350 drv_usectohz(tcp_ndd_get_info_interval * 1000)) { 16351 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16352 return (0); 16353 } 16354 } 16355 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16356 /* The following may work even if we cannot get a large buf. 
*/ 16357 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16358 return (0); 16359 } 16360 16361 (void) mi_mpprintf(mp, "tcp_conn_hash_size = %d", 16362 ipcl_conn_fanout_size); 16363 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16364 16365 zoneid = Q_TO_CONN(q)->conn_zoneid; 16366 16367 for (i = 0; i < ipcl_conn_fanout_size; i++) { 16368 connfp = &ipcl_conn_fanout[i]; 16369 connp = NULL; 16370 while ((connp = 16371 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16372 tcp = connp->conn_tcp; 16373 if (zoneid != GLOBAL_ZONEID && 16374 zoneid != connp->conn_zoneid) 16375 continue; 16376 tcp_report_item(mp->b_cont, tcp, i, 16377 Q_TO_TCP(q), cr); 16378 } 16379 } 16380 16381 tcp_last_ndd_get_info_time = ddi_get_lbolt(); 16382 return (0); 16383 } 16384 16385 /* TCP status report triggered via the Named Dispatch mechanism. */ 16386 /* ARGSUSED */ 16387 static int 16388 tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16389 { 16390 tf_t *tf; 16391 tcp_t *tcp; 16392 int i; 16393 zoneid_t zoneid; 16394 16395 /* Refer to comments in tcp_status_report(). */ 16396 if (cr == NULL || secpolicy_net_config(cr, B_TRUE) != 0) { 16397 if (ddi_get_lbolt() - tcp_last_ndd_get_info_time < 16398 drv_usectohz(tcp_ndd_get_info_interval * 1000)) { 16399 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16400 return (0); 16401 } 16402 } 16403 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16404 /* The following may work even if we cannot get a large buf. */ 16405 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16406 return (0); 16407 } 16408 16409 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16410 16411 zoneid = Q_TO_CONN(q)->conn_zoneid; 16412 16413 for (i = 0; i < A_CNT(tcp_acceptor_fanout); i++) { 16414 tf = &tcp_acceptor_fanout[i]; 16415 mutex_enter(&tf->tf_lock); 16416 for (tcp = tf->tf_tcp; tcp != NULL; 16417 tcp = tcp->tcp_acceptor_hash) { 16418 if (zoneid != GLOBAL_ZONEID && 16419 zoneid != tcp->tcp_connp->conn_zoneid) 16420 continue; 16421 tcp_report_item(mp->b_cont, tcp, i, 16422 Q_TO_TCP(q), cr); 16423 } 16424 mutex_exit(&tf->tf_lock); 16425 } 16426 tcp_last_ndd_get_info_time = ddi_get_lbolt(); 16427 return (0); 16428 } 16429 16430 /* 16431 * tcp_timer is the timer service routine. It handles the retransmission, 16432 * FIN_WAIT_2 flush, and zero window probe timeout events. It figures out 16433 * from the state of the tcp instance what kind of action needs to be done 16434 * at the time it is called. 
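 * (Editorial summary: the connection-establishment states use the connect-timer thresholds, the data-transfer states either retransmit or probe a zero window, FIN_WAIT_2 flushes a lingering detached connection, and TIME_WAIT reclaims it.)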
16435 */ 16436 static void 16437 tcp_timer(void *arg) 16438 { 16439 mblk_t *mp; 16440 clock_t first_threshold; 16441 clock_t second_threshold; 16442 clock_t ms; 16443 uint32_t mss; 16444 conn_t *connp = (conn_t *)arg; 16445 tcp_t *tcp = connp->conn_tcp; 16446 16447 tcp->tcp_timer_tid = 0; 16448 16449 if (tcp->tcp_fused) 16450 return; 16451 16452 first_threshold = tcp->tcp_first_timer_threshold; 16453 second_threshold = tcp->tcp_second_timer_threshold; 16454 switch (tcp->tcp_state) { 16455 case TCPS_IDLE: 16456 case TCPS_BOUND: 16457 case TCPS_LISTEN: 16458 return; 16459 case TCPS_SYN_RCVD: { 16460 tcp_t *listener = tcp->tcp_listener; 16461 16462 if (tcp->tcp_syn_rcvd_timeout == 0 && (listener != NULL)) { 16463 ASSERT(tcp->tcp_rq == listener->tcp_rq); 16464 /* it's our first timeout */ 16465 tcp->tcp_syn_rcvd_timeout = 1; 16466 mutex_enter(&listener->tcp_eager_lock); 16467 listener->tcp_syn_rcvd_timeout++; 16468 if (!listener->tcp_syn_defense && 16469 (listener->tcp_syn_rcvd_timeout > 16470 (tcp_conn_req_max_q0 >> 2)) && 16471 (tcp_conn_req_max_q0 > 200)) { 16472 /* We may be under attack. Put on a defense. */ 16473 listener->tcp_syn_defense = B_TRUE; 16474 cmn_err(CE_WARN, "High TCP connect timeout " 16475 "rate! System (port %d) may be under a " 16476 "SYN flood attack!", 16477 BE16_TO_U16(listener->tcp_tcph->th_lport)); 16478 16479 listener->tcp_ip_addr_cache = kmem_zalloc( 16480 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t), 16481 KM_NOSLEEP); 16482 } 16483 mutex_exit(&listener->tcp_eager_lock); 16484 } 16485 } 16486 /* FALLTHRU */ 16487 case TCPS_SYN_SENT: 16488 first_threshold = tcp->tcp_first_ctimer_threshold; 16489 second_threshold = tcp->tcp_second_ctimer_threshold; 16490 break; 16491 case TCPS_ESTABLISHED: 16492 case TCPS_FIN_WAIT_1: 16493 case TCPS_CLOSING: 16494 case TCPS_CLOSE_WAIT: 16495 case TCPS_LAST_ACK: 16496 /* If we have data to rexmit */ 16497 if (tcp->tcp_suna != tcp->tcp_snxt) { 16498 clock_t time_to_wait; 16499 16500 BUMP_MIB(&tcp_mib, tcpTimRetrans); 16501 if (!tcp->tcp_xmit_head) 16502 break; 16503 time_to_wait = lbolt - 16504 (clock_t)tcp->tcp_xmit_head->b_prev; 16505 time_to_wait = tcp->tcp_rto - 16506 TICK_TO_MSEC(time_to_wait); 16507 /* 16508 * If the timer fires too early, 1 clock tick earlier, 16509 * restart the timer. 16510 */ 16511 if (time_to_wait > msec_per_tick) { 16512 TCP_STAT(tcp_timer_fire_early); 16513 TCP_TIMER_RESTART(tcp, time_to_wait); 16514 return; 16515 } 16516 /* 16517 * When we probe zero windows, we force the swnd open. 16518 * If our peer acks with a closed window swnd will be 16519 * set to zero by tcp_rput(). As long as we are 16520 * receiving acks tcp_rput will 16521 * reset 'tcp_ms_we_have_waited' so as not to trip the 16522 * first and second interval actions. NOTE: the timer 16523 * interval is allowed to continue its exponential 16524 * backoff. 16525 */ 16526 if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) { 16527 if (tcp->tcp_debug) { 16528 (void) strlog(TCP_MOD_ID, 0, 1, 16529 SL_TRACE, "tcp_timer: zero win"); 16530 } 16531 } else { 16532 /* 16533 * After retransmission, we need to do 16534 * slow start. Set the ssthresh to one 16535 * half of current effective window and 16536 * cwnd to one MSS. Also reset 16537 * tcp_cwnd_cnt. 16538 * 16539 * Note that if tcp_ssthresh is reduced because 16540 * of ECN, do not reduce it again unless it is 16541 * already one window of data away (tcp_cwr 16542 * should then be cleared) or this is a 16543 * timeout for a retransmitted segment. 
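 * As an illustrative, editorial example (numbers not from the original source): on a first timeout with 64K of unacknowledged data and a 1460-byte MSS, npkt below is 22, so tcp_cwnd_ssthresh becomes 22 * 1460 = 32120 bytes, roughly half the data in flight, while tcp_cwnd restarts at a single MSS.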
16544 */ 16545 uint32_t npkt; 16546 16547 if (!tcp->tcp_cwr || tcp->tcp_rexmit) { 16548 npkt = ((tcp->tcp_timer_backoff ? 16549 tcp->tcp_cwnd_ssthresh : 16550 tcp->tcp_snxt - 16551 tcp->tcp_suna) >> 1) / tcp->tcp_mss; 16552 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 16553 tcp->tcp_mss; 16554 } 16555 tcp->tcp_cwnd = tcp->tcp_mss; 16556 tcp->tcp_cwnd_cnt = 0; 16557 if (tcp->tcp_ecn_ok) { 16558 tcp->tcp_cwr = B_TRUE; 16559 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 16560 tcp->tcp_ecn_cwr_sent = B_FALSE; 16561 } 16562 } 16563 break; 16564 } 16565 /* 16566 * We have something to send yet we cannot send. The 16567 * reason can be: 16568 * 16569 * 1. Zero send window: we need to do zero window probe. 16570 * 2. Zero cwnd: because of ECN, we need to "clock out" 16571 * segments. 16572 * 3. SWS avoidance: receiver may have shrunk window, 16573 * reset our knowledge. 16574 * 16575 * Note that condition 2 can happen with either 1 or 16576 * 3. But 1 and 3 are exclusive. 16577 */ 16578 if (tcp->tcp_unsent != 0) { 16579 if (tcp->tcp_cwnd == 0) { 16580 /* 16581 * Set tcp_cwnd to 1 MSS so that a 16582 * new segment can be sent out. We 16583 * are "clocking out" new data when 16584 * the network is really congested. 16585 */ 16586 ASSERT(tcp->tcp_ecn_ok); 16587 tcp->tcp_cwnd = tcp->tcp_mss; 16588 } 16589 if (tcp->tcp_swnd == 0) { 16590 /* Extend window for zero window probe */ 16591 tcp->tcp_swnd++; 16592 tcp->tcp_zero_win_probe = B_TRUE; 16593 BUMP_MIB(&tcp_mib, tcpOutWinProbe); 16594 } else { 16595 /* 16596 * Handle timeout from sender SWS avoidance. 16597 * Reset our knowledge of the max send window 16598 * since the receiver might have reduced its 16599 * receive buffer. Avoid setting tcp_max_swnd 16600 * to one since that will essentially disable 16601 * the SWS checks. 16602 * 16603 * Note that since we don't have a SWS 16604 * state variable, if the timeout is set 16605 * for ECN but not for SWS, this 16606 * code will also be executed. This is 16607 * fine as tcp_max_swnd is updated 16608 * constantly and it will not affect 16609 * anything. 16610 */ 16611 tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2); 16612 } 16613 tcp_wput_data(tcp, NULL, B_FALSE); 16614 return; 16615 } 16616 /* Is there a FIN that needs to be retransmitted? */ 16617 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 16618 !tcp->tcp_fin_acked) 16619 break; 16620 /* Nothing to do, return without restarting timer. */ 16621 TCP_STAT(tcp_timer_fire_miss); 16622 return; 16623 case TCPS_FIN_WAIT_2: 16624 /* 16625 * User closed the TCP endpoint and peer ACK'ed our FIN. 16626 * We waited some time for the peer's FIN, but it hasn't 16627 * arrived. We flush the connection now to avoid 16628 * the case where the peer has rebooted. 16629 */ 16630 if (TCP_IS_DETACHED(tcp)) { 16631 (void) tcp_clean_death(tcp, 0, 23); 16632 } else { 16633 TCP_TIMER_RESTART(tcp, tcp_fin_wait_2_flush_interval); 16634 } 16635 return; 16636 case TCPS_TIME_WAIT: 16637 (void) tcp_clean_death(tcp, 0, 24); 16638 return; 16639 default: 16640 if (tcp->tcp_debug) { 16641 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 16642 "tcp_timer: strange state (%d) %s", 16643 tcp->tcp_state, tcp_display(tcp, NULL, 16644 DISP_PORT_ONLY)); 16645 } 16646 return; 16647 } 16648 if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) { 16649 /* 16650 * For zero window probe, we need to send indefinitely, 16651 * unless we have not heard from the other side for some 16652 * time...
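 * (Concretely, the probe is given up below only once TICK_TO_MSEC(lbolt - tcp_last_recv_time) exceeds second_threshold.)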
16653 */ 16654 if ((tcp->tcp_zero_win_probe == 0) || 16655 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) > 16656 second_threshold)) { 16657 BUMP_MIB(&tcp_mib, tcpTimRetransDrop); 16658 /* 16659 * If TCP is in SYN_RCVD state, send back a 16660 * RST|ACK as BSD does. Note that tcp_zero_win_probe 16661 * should be zero in TCPS_SYN_RCVD state. 16662 */ 16663 if (tcp->tcp_state == TCPS_SYN_RCVD) { 16664 tcp_xmit_ctl("tcp_timer: RST sent on timeout " 16665 "in SYN_RCVD", 16666 tcp, tcp->tcp_snxt, 16667 tcp->tcp_rnxt, TH_RST | TH_ACK); 16668 } 16669 (void) tcp_clean_death(tcp, 16670 tcp->tcp_client_errno ? 16671 tcp->tcp_client_errno : ETIMEDOUT, 25); 16672 return; 16673 } else { 16674 /* 16675 * Set tcp_ms_we_have_waited to second_threshold 16676 * so that in next timeout, we will do the above 16677 * check (lbolt - tcp_last_recv_time). This is 16678 * also to avoid overflow. 16679 * 16680 * We don't need to decrement tcp_timer_backoff 16681 * to avoid overflow because it will be decremented 16682 * later if new timeout value is greater than 16683 * tcp_rexmit_interval_max. In the case when 16684 * tcp_rexmit_interval_max is greater than 16685 * second_threshold, it means that we will wait 16686 * longer than second_threshold to send the next 16687 * window probe. 16688 */ 16689 tcp->tcp_ms_we_have_waited = second_threshold; 16690 } 16691 } else if (ms > first_threshold) { 16692 if (tcp->tcp_snd_zcopy_aware && (!tcp->tcp_xmit_zc_clean) && 16693 tcp->tcp_xmit_head != NULL) { 16694 tcp->tcp_xmit_head = 16695 tcp_zcopy_backoff(tcp, tcp->tcp_xmit_head, 1); 16696 } 16697 /* 16698 * We have been retransmitting for too long... The RTT 16699 * we calculated is probably incorrect. Reinitialize it. 16700 * Need to compensate for 0 tcp_rtt_sa. Reset 16701 * tcp_rtt_update so that we won't accidentally cache a 16702 * bad value. But only do this if this is not a zero 16703 * window probe. 16704 */ 16705 if (tcp->tcp_rtt_sa != 0 && tcp->tcp_zero_win_probe == 0) { 16706 tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) + 16707 (tcp->tcp_rtt_sa >> 5); 16708 tcp->tcp_rtt_sa = 0; 16709 tcp_ip_notify(tcp); 16710 tcp->tcp_rtt_update = 0; 16711 } 16712 } 16713 tcp->tcp_timer_backoff++; 16714 if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 16715 tcp_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) < 16716 tcp_rexmit_interval_min) { 16717 /* 16718 * This means the original RTO is tcp_rexmit_interval_min. 16719 * So we will use tcp_rexmit_interval_min as the RTO value 16720 * and do the backoff. 16721 */ 16722 ms = tcp_rexmit_interval_min << tcp->tcp_timer_backoff; 16723 } else { 16724 ms <<= tcp->tcp_timer_backoff; 16725 } 16726 if (ms > tcp_rexmit_interval_max) { 16727 ms = tcp_rexmit_interval_max; 16728 /* 16729 * ms is at max, decrement tcp_timer_backoff to avoid 16730 * overflow. 16731 */ 16732 tcp->tcp_timer_backoff--; 16733 } 16734 tcp->tcp_ms_we_have_waited += ms; 16735 if (tcp->tcp_zero_win_probe == 0) { 16736 tcp->tcp_rto = ms; 16737 } 16738 TCP_TIMER_RESTART(tcp, ms); 16739 /* 16740 * This is after a timeout and tcp_rto is backed off. Set 16741 * tcp_set_timer to 1 so that next time RTO is updated, we will 16742 * restart the timer with a correct value. 
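 * As a hedged editorial sketch of the computation above: the next interval is roughly ((tcp_rtt_sa >> 3) + tcp_rtt_sd + tcp_rexmit_interval_extra + (tcp_rtt_sa >> 5)) << tcp_timer_backoff, clamped to the [tcp_rexmit_interval_min, tcp_rexmit_interval_max] range.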
16743 */ 16744 tcp->tcp_set_timer = 1; 16745 mss = tcp->tcp_snxt - tcp->tcp_suna; 16746 if (mss > tcp->tcp_mss) 16747 mss = tcp->tcp_mss; 16748 if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0) 16749 mss = tcp->tcp_swnd; 16750 16751 if ((mp = tcp->tcp_xmit_head) != NULL) 16752 mp->b_prev = (mblk_t *)lbolt; 16753 mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss, 16754 B_TRUE); 16755 16756 /* 16757 * When slow start after retransmission begins, start with 16758 * this seq no. tcp_rexmit_max marks the end of special slow 16759 * start phase. tcp_snd_burst controls how many segments 16760 * can be sent because of an ack. 16761 */ 16762 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 16763 tcp->tcp_snd_burst = TCP_CWND_SS; 16764 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 16765 (tcp->tcp_unsent == 0)) { 16766 tcp->tcp_rexmit_max = tcp->tcp_fss; 16767 } else { 16768 tcp->tcp_rexmit_max = tcp->tcp_snxt; 16769 } 16770 tcp->tcp_rexmit = B_TRUE; 16771 tcp->tcp_dupack_cnt = 0; 16772 16773 /* 16774 * Remove all rexmit SACK blk to start from fresh. 16775 */ 16776 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 16777 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 16778 tcp->tcp_num_notsack_blk = 0; 16779 tcp->tcp_cnt_notsack_list = 0; 16780 } 16781 if (mp == NULL) { 16782 return; 16783 } 16784 /* Attach credentials to retransmitted initial SYNs. */ 16785 if (tcp->tcp_state == TCPS_SYN_SENT) { 16786 mblk_setcred(mp, tcp->tcp_cred); 16787 DB_CPID(mp) = tcp->tcp_cpid; 16788 } 16789 16790 tcp->tcp_csuna = tcp->tcp_snxt; 16791 BUMP_MIB(&tcp_mib, tcpRetransSegs); 16792 UPDATE_MIB(&tcp_mib, tcpRetransBytes, mss); 16793 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 16794 tcp_send_data(tcp, tcp->tcp_wq, mp); 16795 16796 } 16797 16798 /* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */ 16799 static void 16800 tcp_unbind(tcp_t *tcp, mblk_t *mp) 16801 { 16802 conn_t *connp; 16803 16804 switch (tcp->tcp_state) { 16805 case TCPS_BOUND: 16806 case TCPS_LISTEN: 16807 break; 16808 default: 16809 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 16810 return; 16811 } 16812 16813 /* 16814 * Need to clean up all the eagers since after the unbind, segments 16815 * will no longer be delivered to this listener stream. 16816 */ 16817 mutex_enter(&tcp->tcp_eager_lock); 16818 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 16819 tcp_eager_cleanup(tcp, 0); 16820 } 16821 mutex_exit(&tcp->tcp_eager_lock); 16822 16823 if (tcp->tcp_ipversion == IPV4_VERSION) { 16824 tcp->tcp_ipha->ipha_src = 0; 16825 } else { 16826 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 16827 } 16828 V6_SET_ZERO(tcp->tcp_ip_src_v6); 16829 bzero(tcp->tcp_tcph->th_lport, sizeof (tcp->tcp_tcph->th_lport)); 16830 tcp_bind_hash_remove(tcp); 16831 tcp->tcp_state = TCPS_IDLE; 16832 tcp->tcp_mdt = B_FALSE; 16833 /* Send M_FLUSH according to TPI */ 16834 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 16835 connp = tcp->tcp_connp; 16836 connp->conn_mdt_ok = B_FALSE; 16837 ipcl_hash_remove(connp); 16838 bzero(&connp->conn_ports, sizeof (connp->conn_ports)); 16839 mp = mi_tpi_ok_ack_alloc(mp); 16840 putnext(tcp->tcp_rq, mp); 16841 } 16842 16843 /* 16844 * Don't let port fall into the privileged range. 16845 * Since the extra privileged ports can be arbitrary we also 16846 * ensure that we exclude those from consideration. 16847 * tcp_g_epriv_ports is not sorted thus we loop over it until 16848 * there are no changes. 
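 * (Each collision with an entry simply bumps the port and jumps back to the retry label, so the scan effectively repeats until a full pass finds no match.)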
16849 * 16850 * Note: No locks are held when inspecting tcp_g_*epriv_ports 16851 * but instead the code relies on: 16852 * - the fact that the address of the array and its size never changes 16853 * - the atomic assignment of the elements of the array 16854 * 16855 * Returns 0 if there are no more ports available. 16856 * 16857 * TS note: skip multilevel ports. 16858 */ 16859 static in_port_t 16860 tcp_update_next_port(in_port_t port, const tcp_t *tcp, boolean_t random) 16861 { 16862 int i; 16863 boolean_t restart = B_FALSE; 16864 16865 if (random && tcp_random_anon_port != 0) { 16866 (void) random_get_pseudo_bytes((uint8_t *)&port, 16867 sizeof (in_port_t)); 16868 /* 16869 * Unless changed by a sys admin, the smallest anon port 16870 * is 32768 and the largest anon port is 65535. It is 16871 * very likely (50%) for the random port to be smaller 16872 * than the smallest anon port. When that happens, 16873 * add port % (anon port range) to the smallest anon 16874 * port to get the random port. It should fall into the 16875 * valid anon port range. 16876 */ 16877 if (port < tcp_smallest_anon_port) { 16878 port = tcp_smallest_anon_port + 16879 port % (tcp_largest_anon_port - 16880 tcp_smallest_anon_port); 16881 } 16882 } 16883 16884 retry: 16885 if (port < tcp_smallest_anon_port) 16886 port = (in_port_t)tcp_smallest_anon_port; 16887 16888 if (port > tcp_largest_anon_port) { 16889 if (restart) 16890 return (0); 16891 restart = B_TRUE; 16892 port = (in_port_t)tcp_smallest_anon_port; 16893 } 16894 16895 if (port < tcp_smallest_nonpriv_port) 16896 port = (in_port_t)tcp_smallest_nonpriv_port; 16897 16898 for (i = 0; i < tcp_g_num_epriv_ports; i++) { 16899 if (port == tcp_g_epriv_ports[i]) { 16900 port++; 16901 /* 16902 * Make sure whether the port is in the 16903 * valid range. 16904 */ 16905 goto retry; 16906 } 16907 } 16908 if (is_system_labeled() && 16909 (i = tsol_next_port(crgetzone(tcp->tcp_cred), port, 16910 IPPROTO_TCP, B_TRUE)) != 0) { 16911 port = i; 16912 goto retry; 16913 } 16914 return (port); 16915 } 16916 16917 /* 16918 * Return the next anonymous port in the privileged port range for 16919 * bind checking. It starts at IPPORT_RESERVED - 1 and goes 16920 * downwards. This is the same behavior as documented in the userland 16921 * library call rresvport(3N). 16922 * 16923 * TS note: skip multilevel ports. 16924 */ 16925 static in_port_t 16926 tcp_get_next_priv_port(const tcp_t *tcp) 16927 { 16928 static in_port_t next_priv_port = IPPORT_RESERVED - 1; 16929 in_port_t nextport; 16930 boolean_t restart = B_FALSE; 16931 16932 retry: 16933 if (next_priv_port < tcp_min_anonpriv_port || 16934 next_priv_port >= IPPORT_RESERVED) { 16935 next_priv_port = IPPORT_RESERVED - 1; 16936 if (restart) 16937 return (0); 16938 restart = B_TRUE; 16939 } 16940 if (is_system_labeled() && 16941 (nextport = tsol_next_port(crgetzone(tcp->tcp_cred), 16942 next_priv_port, IPPROTO_TCP, B_FALSE)) != 0) { 16943 next_priv_port = nextport; 16944 goto retry; 16945 } 16946 return (next_priv_port--); 16947 } 16948 16949 /* The write side r/w procedure. */ 16950 16951 #if CCS_STATS 16952 struct { 16953 struct { 16954 int64_t count, bytes; 16955 } tot, hit; 16956 } wrw_stats; 16957 #endif 16958 16959 /* 16960 * Call by tcp_wput() to handle all non data, except M_PROTO and M_PCPROTO, 16961 * messages. 
16962 */ 16963 /* ARGSUSED */ 16964 static void 16965 tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2) 16966 { 16967 conn_t *connp = (conn_t *)arg; 16968 tcp_t *tcp = connp->conn_tcp; 16969 queue_t *q = tcp->tcp_wq; 16970 16971 ASSERT(DB_TYPE(mp) != M_IOCTL); 16972 /* 16973 * TCP is D_MP and qprocsoff() is done towards the end of the tcp_close. 16974 * Once the close starts, streamhead and sockfs will not let any data 16975 * packets come down (close ensures that there are no threads using the 16976 * queue and no new threads will come down) but since qprocsoff() 16977 * hasn't happened yet, a M_FLUSH or some non data message might 16978 * get reflected back (in response to our own FLUSHRW) and get 16979 * processed after tcp_close() is done. The conn would still be valid 16980 * because a ref would have added but we need to check the state 16981 * before actually processing the packet. 16982 */ 16983 if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) { 16984 freemsg(mp); 16985 return; 16986 } 16987 16988 switch (DB_TYPE(mp)) { 16989 case M_IOCDATA: 16990 tcp_wput_iocdata(tcp, mp); 16991 break; 16992 case M_FLUSH: 16993 tcp_wput_flush(tcp, mp); 16994 break; 16995 default: 16996 CALL_IP_WPUT(connp, q, mp); 16997 break; 16998 } 16999 } 17000 17001 /* 17002 * The TCP fast path write put procedure. 17003 * NOTE: the logic of the fast path is duplicated from tcp_wput_data() 17004 */ 17005 /* ARGSUSED */ 17006 void 17007 tcp_output(void *arg, mblk_t *mp, void *arg2) 17008 { 17009 int len; 17010 int hdrlen; 17011 int plen; 17012 mblk_t *mp1; 17013 uchar_t *rptr; 17014 uint32_t snxt; 17015 tcph_t *tcph; 17016 struct datab *db; 17017 uint32_t suna; 17018 uint32_t mss; 17019 ipaddr_t *dst; 17020 ipaddr_t *src; 17021 uint32_t sum; 17022 int usable; 17023 conn_t *connp = (conn_t *)arg; 17024 tcp_t *tcp = connp->conn_tcp; 17025 uint32_t msize; 17026 17027 /* 17028 * Try and ASSERT the minimum possible references on the 17029 * conn early enough. Since we are executing on write side, 17030 * the connection is obviously not detached and that means 17031 * there is a ref each for TCP and IP. Since we are behind 17032 * the squeue, the minimum references needed are 3. If the 17033 * conn is in classifier hash list, there should be an 17034 * extra ref for that (we check both the possibilities). 17035 */ 17036 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 17037 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 17038 17039 ASSERT(DB_TYPE(mp) == M_DATA); 17040 msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp); 17041 17042 mutex_enter(&connp->conn_lock); 17043 tcp->tcp_squeue_bytes -= msize; 17044 mutex_exit(&connp->conn_lock); 17045 17046 /* Bypass tcp protocol for fused tcp loopback */ 17047 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 17048 return; 17049 17050 mss = tcp->tcp_mss; 17051 if (tcp->tcp_xmit_zc_clean) 17052 mp = tcp_zcopy_backoff(tcp, mp, 0); 17053 17054 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 17055 len = (int)(mp->b_wptr - mp->b_rptr); 17056 17057 /* 17058 * Criteria for fast path: 17059 * 17060 * 1. no unsent data 17061 * 2. single mblk in request 17062 * 3. connection established 17063 * 4. data in mblk 17064 * 5. len <= mss 17065 * 6. 
no tcp_valid bits 17066 */ 17067 if ((tcp->tcp_unsent != 0) || 17068 (tcp->tcp_cork) || 17069 (mp->b_cont != NULL) || 17070 (tcp->tcp_state != TCPS_ESTABLISHED) || 17071 (len == 0) || 17072 (len > mss) || 17073 (tcp->tcp_valid_bits != 0)) { 17074 tcp_wput_data(tcp, mp, B_FALSE); 17075 return; 17076 } 17077 17078 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 17079 ASSERT(tcp->tcp_fin_sent == 0); 17080 17081 /* queue new packet onto retransmission queue */ 17082 if (tcp->tcp_xmit_head == NULL) { 17083 tcp->tcp_xmit_head = mp; 17084 } else { 17085 tcp->tcp_xmit_last->b_cont = mp; 17086 } 17087 tcp->tcp_xmit_last = mp; 17088 tcp->tcp_xmit_tail = mp; 17089 17090 /* find out how much we can send */ 17091 /* BEGIN CSTYLED */ 17092 /* 17093 * un-acked usable 17094 * |--------------|-----------------| 17095 * tcp_suna tcp_snxt tcp_suna+tcp_swnd 17096 */ 17097 /* END CSTYLED */ 17098 17099 /* start sending from tcp_snxt */ 17100 snxt = tcp->tcp_snxt; 17101 17102 /* 17103 * Check to see if this connection has been idled for some 17104 * time and no ACK is expected. If it is, we need to slow 17105 * start again to get back the connection's "self-clock" as 17106 * described in VJ's paper. 17107 * 17108 * Refer to the comment in tcp_mss_set() for the calculation 17109 * of tcp_cwnd after idle. 17110 */ 17111 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 17112 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 17113 SET_TCP_INIT_CWND(tcp, mss, tcp_slow_start_after_idle); 17114 } 17115 17116 usable = tcp->tcp_swnd; /* tcp window size */ 17117 if (usable > tcp->tcp_cwnd) 17118 usable = tcp->tcp_cwnd; /* congestion window smaller */ 17119 usable -= snxt; /* subtract stuff already sent */ 17120 suna = tcp->tcp_suna; 17121 usable += suna; 17122 /* usable can be < 0 if the congestion window is smaller */ 17123 if (len > usable) { 17124 /* Can't send complete M_DATA in one shot */ 17125 goto slow; 17126 } 17127 17128 if (tcp->tcp_flow_stopped && 17129 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 17130 tcp_clrqfull(tcp); 17131 } 17132 17133 /* 17134 * determine if anything to send (Nagle). 17135 * 17136 * 1. len < tcp_mss (i.e. small) 17137 * 2. unacknowledged data present 17138 * 3. len < nagle limit 17139 * 4. last packet sent < nagle limit (previous packet sent) 17140 */ 17141 if ((len < mss) && (snxt != suna) && 17142 (len < (int)tcp->tcp_naglim) && 17143 (tcp->tcp_last_sent_len < tcp->tcp_naglim)) { 17144 /* 17145 * This was the first unsent packet and normally 17146 * mss < xmit_hiwater so there is no need to worry 17147 * about flow control. The next packet will go 17148 * through the flow control check in tcp_wput_data(). 
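 * (Editorial note: this is the classic Nagle deferral, per RFC 896: a sub-MSS segment is held back only while earlier data is still unacknowledged, so at most one small segment is outstanding at a time.)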
17149 */ 17150 /* leftover work from above */ 17151 tcp->tcp_unsent = len; 17152 tcp->tcp_xmit_tail_unsent = len; 17153 17154 return; 17155 } 17156 17157 /* len <= tcp->tcp_mss && len == unsent so no silly window */ 17158 17159 if (snxt == suna) { 17160 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 17161 } 17162 17163 /* we have always sent something */ 17164 tcp->tcp_rack_cnt = 0; 17165 17166 tcp->tcp_snxt = snxt + len; 17167 tcp->tcp_rack = tcp->tcp_rnxt; 17168 17169 if ((mp1 = dupb(mp)) == 0) 17170 goto no_memory; 17171 mp->b_prev = (mblk_t *)(uintptr_t)lbolt; 17172 mp->b_next = (mblk_t *)(uintptr_t)snxt; 17173 17174 /* adjust tcp header information */ 17175 tcph = tcp->tcp_tcph; 17176 tcph->th_flags[0] = (TH_ACK|TH_PUSH); 17177 17178 sum = len + tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 17179 sum = (sum >> 16) + (sum & 0xFFFF); 17180 U16_TO_ABE16(sum, tcph->th_sum); 17181 17182 U32_TO_ABE32(snxt, tcph->th_seq); 17183 17184 BUMP_MIB(&tcp_mib, tcpOutDataSegs); 17185 UPDATE_MIB(&tcp_mib, tcpOutDataBytes, len); 17186 BUMP_LOCAL(tcp->tcp_obsegs); 17187 17188 /* Update the latest receive window size in TCP header. */ 17189 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 17190 tcph->th_win); 17191 17192 tcp->tcp_last_sent_len = (ushort_t)len; 17193 17194 plen = len + tcp->tcp_hdr_len; 17195 17196 if (tcp->tcp_ipversion == IPV4_VERSION) { 17197 tcp->tcp_ipha->ipha_length = htons(plen); 17198 } else { 17199 tcp->tcp_ip6h->ip6_plen = htons(plen - 17200 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 17201 } 17202 17203 /* see if we need to allocate a mblk for the headers */ 17204 hdrlen = tcp->tcp_hdr_len; 17205 rptr = mp1->b_rptr - hdrlen; 17206 db = mp1->b_datap; 17207 if ((db->db_ref != 2) || rptr < db->db_base || 17208 (!OK_32PTR(rptr))) { 17209 /* NOTE: we assume allocb returns an OK_32PTR */ 17210 mp = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 17211 tcp_wroff_xtra, BPRI_MED); 17212 if (!mp) { 17213 freemsg(mp1); 17214 goto no_memory; 17215 } 17216 mp->b_cont = mp1; 17217 mp1 = mp; 17218 /* Leave room for Link Level header */ 17219 /* hdrlen = tcp->tcp_hdr_len; */ 17220 rptr = &mp1->b_rptr[tcp_wroff_xtra]; 17221 mp1->b_wptr = &rptr[hdrlen]; 17222 } 17223 mp1->b_rptr = rptr; 17224 17225 /* Fill in the timestamp option. */ 17226 if (tcp->tcp_snd_ts_ok) { 17227 U32_TO_BE32((uint32_t)lbolt, 17228 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 17229 U32_TO_BE32(tcp->tcp_ts_recent, 17230 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 17231 } else { 17232 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 17233 } 17234 17235 /* copy header into outgoing packet */ 17236 dst = (ipaddr_t *)rptr; 17237 src = (ipaddr_t *)tcp->tcp_iphc; 17238 dst[0] = src[0]; 17239 dst[1] = src[1]; 17240 dst[2] = src[2]; 17241 dst[3] = src[3]; 17242 dst[4] = src[4]; 17243 dst[5] = src[5]; 17244 dst[6] = src[6]; 17245 dst[7] = src[7]; 17246 dst[8] = src[8]; 17247 dst[9] = src[9]; 17248 if (hdrlen -= 40) { 17249 hdrlen >>= 2; 17250 dst += 10; 17251 src += 10; 17252 do { 17253 *dst++ = *src++; 17254 } while (--hdrlen); 17255 } 17256 17257 /* 17258 * Set the ECN info in the TCP header. Note that this 17259 * is not the template header. 
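 * (ECT is marked on the outgoing IP header via SET_ECT(), while ECE and CWR below are TCP flags; CWR is sent once per window reduction, gated by tcp_ecn_cwr_sent.)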
17260 */ 17261 if (tcp->tcp_ecn_ok) { 17262 SET_ECT(tcp, rptr); 17263 17264 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 17265 if (tcp->tcp_ecn_echo_on) 17266 tcph->th_flags[0] |= TH_ECE; 17267 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 17268 tcph->th_flags[0] |= TH_CWR; 17269 tcp->tcp_ecn_cwr_sent = B_TRUE; 17270 } 17271 } 17272 17273 if (tcp->tcp_ip_forward_progress) { 17274 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 17275 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 17276 tcp->tcp_ip_forward_progress = B_FALSE; 17277 } 17278 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 17279 tcp_send_data(tcp, tcp->tcp_wq, mp1); 17280 return; 17281 17282 /* 17283 * If we ran out of memory, we pretend to have sent the packet 17284 * and that it was lost on the wire. 17285 */ 17286 no_memory: 17287 return; 17288 17289 slow: 17290 /* leftover work from above */ 17291 tcp->tcp_unsent = len; 17292 tcp->tcp_xmit_tail_unsent = len; 17293 tcp_wput_data(tcp, NULL, B_FALSE); 17294 } 17295 17296 /* 17297 * The function called through squeue to get behind eager's perimeter to 17298 * finish the accept processing. 17299 */ 17300 /* ARGSUSED */ 17301 void 17302 tcp_accept_finish(void *arg, mblk_t *mp, void *arg2) 17303 { 17304 conn_t *connp = (conn_t *)arg; 17305 tcp_t *tcp = connp->conn_tcp; 17306 queue_t *q = tcp->tcp_rq; 17307 mblk_t *mp1; 17308 mblk_t *stropt_mp = mp; 17309 struct stroptions *stropt; 17310 uint_t thwin; 17311 17312 /* 17313 * Drop the eager's ref on the listener, that was placed when 17314 * this eager began life in tcp_conn_request. 17315 */ 17316 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp); 17317 17318 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) { 17319 /* 17320 * Someone blewoff the eager before we could finish 17321 * the accept. 17322 * 17323 * The only reason eager exists it because we put in 17324 * a ref on it when conn ind went up. We need to send 17325 * a disconnect indication up while the last reference 17326 * on the eager will be dropped by the squeue when we 17327 * return. 17328 */ 17329 ASSERT(tcp->tcp_listener == NULL); 17330 if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) { 17331 struct T_discon_ind *tdi; 17332 17333 (void) putnextctl1(q, M_FLUSH, FLUSHRW); 17334 /* 17335 * Let us reuse the incoming mblk to avoid memory 17336 * allocation failure problems. We know that the 17337 * size of the incoming mblk i.e. stroptions is greater 17338 * than sizeof T_discon_ind. So the reallocb below 17339 * can't fail. 
17340 */ 17341 freemsg(mp->b_cont); 17342 mp->b_cont = NULL; 17343 ASSERT(DB_REF(mp) == 1); 17344 mp = reallocb(mp, sizeof (struct T_discon_ind), 17345 B_FALSE); 17346 ASSERT(mp != NULL); 17347 DB_TYPE(mp) = M_PROTO; 17348 ((union T_primitives *)mp->b_rptr)->type = T_DISCON_IND; 17349 tdi = (struct T_discon_ind *)mp->b_rptr; 17350 if (tcp->tcp_issocket) { 17351 tdi->DISCON_reason = ECONNREFUSED; 17352 tdi->SEQ_number = 0; 17353 } else { 17354 tdi->DISCON_reason = ENOPROTOOPT; 17355 tdi->SEQ_number = 17356 tcp->tcp_conn_req_seqnum; 17357 } 17358 mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind); 17359 putnext(q, mp); 17360 } else { 17361 freemsg(mp); 17362 } 17363 if (tcp->tcp_hard_binding) { 17364 tcp->tcp_hard_binding = B_FALSE; 17365 tcp->tcp_hard_bound = B_TRUE; 17366 } 17367 tcp->tcp_detached = B_FALSE; 17368 return; 17369 } 17370 17371 mp1 = stropt_mp->b_cont; 17372 stropt_mp->b_cont = NULL; 17373 ASSERT(DB_TYPE(stropt_mp) == M_SETOPTS); 17374 stropt = (struct stroptions *)stropt_mp->b_rptr; 17375 17376 while (mp1 != NULL) { 17377 mp = mp1; 17378 mp1 = mp1->b_cont; 17379 mp->b_cont = NULL; 17380 tcp->tcp_drop_opt_ack_cnt++; 17381 CALL_IP_WPUT(connp, tcp->tcp_wq, mp); 17382 } 17383 mp = NULL; 17384 17385 /* 17386 * For a loopback connection with tcp_direct_sockfs on, note that 17387 * we don't have to protect tcp_rcv_list yet because synchronous 17388 * streams has not yet been enabled and tcp_fuse_rrw() cannot 17389 * possibly race with us. 17390 */ 17391 17392 /* 17393 * Set the max window size (tcp_rq->q_hiwat) of the acceptor 17394 * properly. This is the first time we know of the acceptor' 17395 * queue. So we do it here. 17396 */ 17397 if (tcp->tcp_rcv_list == NULL) { 17398 /* 17399 * Recv queue is empty, tcp_rwnd should not have changed. 17400 * That means it should be equal to the listener's tcp_rwnd. 17401 */ 17402 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd; 17403 } else { 17404 #ifdef DEBUG 17405 uint_t cnt = 0; 17406 17407 mp1 = tcp->tcp_rcv_list; 17408 while ((mp = mp1) != NULL) { 17409 mp1 = mp->b_next; 17410 cnt += msgdsize(mp); 17411 } 17412 ASSERT(cnt != 0 && tcp->tcp_rcv_cnt == cnt); 17413 #endif 17414 /* There is some data, add them back to get the max. */ 17415 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd + tcp->tcp_rcv_cnt; 17416 } 17417 17418 stropt->so_flags = SO_HIWAT; 17419 stropt->so_hiwat = MAX(q->q_hiwat, tcp_sth_rcv_hiwat); 17420 17421 stropt->so_flags |= SO_MAXBLK; 17422 stropt->so_maxblk = tcp_maxpsz_set(tcp, B_FALSE); 17423 17424 /* 17425 * This is the first time we run on the correct 17426 * queue after tcp_accept. So fix all the q parameters 17427 * here. 17428 */ 17429 /* Allocate room for SACK options if needed. */ 17430 stropt->so_flags |= SO_WROFF; 17431 if (tcp->tcp_fused) { 17432 ASSERT(tcp->tcp_loopback); 17433 ASSERT(tcp->tcp_loopback_peer != NULL); 17434 /* 17435 * For fused tcp loopback, set the stream head's write 17436 * offset value to zero since we won't be needing any room 17437 * for TCP/IP headers. This would also improve performance 17438 * since it would reduce the amount of work done by kmem. 17439 * Non-fused tcp loopback case is handled separately below. 17440 */ 17441 stropt->so_wroff = 0; 17442 /* 17443 * Record the stream head's high water mark for this endpoint; 17444 * this is used for flow-control purposes in tcp_fuse_output(). 17445 */ 17446 stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(tcp, q->q_hiwat); 17447 /* 17448 * Update the peer's transmit parameters according to 17449 * our recently calculated high water mark value. 
17450 */ 17451 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE); 17452 } else if (tcp->tcp_snd_sack_ok) { 17453 stropt->so_wroff = tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 17454 (tcp->tcp_loopback ? 0 : tcp_wroff_xtra); 17455 } else { 17456 stropt->so_wroff = tcp->tcp_hdr_len + (tcp->tcp_loopback ? 0 : 17457 tcp_wroff_xtra); 17458 } 17459 17460 /* 17461 * If this is endpoint is handling SSL, then reserve extra 17462 * offset and space at the end. 17463 * Also have the stream head allocate SSL3_MAX_RECORD_LEN packets, 17464 * overriding the previous setting. The extra cost of signing and 17465 * encrypting multiple MSS-size records (12 of them with Ethernet), 17466 * instead of a single contiguous one by the stream head 17467 * largely outweighs the statistical reduction of ACKs, when 17468 * applicable. The peer will also save on decyption and verification 17469 * costs. 17470 */ 17471 if (tcp->tcp_kssl_ctx != NULL) { 17472 stropt->so_wroff += SSL3_WROFFSET; 17473 17474 stropt->so_flags |= SO_TAIL; 17475 stropt->so_tail = SSL3_MAX_TAIL_LEN; 17476 17477 stropt->so_maxblk = SSL3_MAX_RECORD_LEN; 17478 } 17479 17480 /* Send the options up */ 17481 putnext(q, stropt_mp); 17482 17483 /* 17484 * Pass up any data and/or a fin that has been received. 17485 * 17486 * Adjust receive window in case it had decreased 17487 * (because there is data <=> tcp_rcv_list != NULL) 17488 * while the connection was detached. Note that 17489 * in case the eager was flow-controlled, w/o this 17490 * code, the rwnd may never open up again! 17491 */ 17492 if (tcp->tcp_rcv_list != NULL) { 17493 /* We drain directly in case of fused tcp loopback */ 17494 if (!tcp->tcp_fused && canputnext(q)) { 17495 tcp->tcp_rwnd = q->q_hiwat; 17496 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 17497 << tcp->tcp_rcv_ws; 17498 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 17499 if (tcp->tcp_state >= TCPS_ESTABLISHED && 17500 (q->q_hiwat - thwin >= tcp->tcp_mss)) { 17501 tcp_xmit_ctl(NULL, 17502 tcp, (tcp->tcp_swnd == 0) ? 17503 tcp->tcp_suna : tcp->tcp_snxt, 17504 tcp->tcp_rnxt, TH_ACK); 17505 BUMP_MIB(&tcp_mib, tcpOutWinUpdate); 17506 } 17507 17508 } 17509 (void) tcp_rcv_drain(q, tcp); 17510 17511 /* 17512 * For fused tcp loopback, back-enable peer endpoint 17513 * if it's currently flow-controlled. 17514 */ 17515 if (tcp->tcp_fused && 17516 tcp->tcp_loopback_peer->tcp_flow_stopped) { 17517 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 17518 17519 ASSERT(peer_tcp != NULL); 17520 ASSERT(peer_tcp->tcp_fused); 17521 17522 tcp_clrqfull(peer_tcp); 17523 TCP_STAT(tcp_fusion_backenabled); 17524 } 17525 } 17526 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 17527 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 17528 mp = mi_tpi_ordrel_ind(); 17529 if (mp) { 17530 tcp->tcp_ordrel_done = B_TRUE; 17531 putnext(q, mp); 17532 if (tcp->tcp_deferred_clean_death) { 17533 /* 17534 * tcp_clean_death was deferred 17535 * for T_ORDREL_IND - do it now 17536 */ 17537 (void) tcp_clean_death(tcp, 17538 tcp->tcp_client_errno, 21); 17539 tcp->tcp_deferred_clean_death = B_FALSE; 17540 } 17541 } else { 17542 /* 17543 * Run the orderly release in the 17544 * service routine. 
17545 */ 17546 qenable(q); 17547 } 17548 } 17549 if (tcp->tcp_hard_binding) { 17550 tcp->tcp_hard_binding = B_FALSE; 17551 tcp->tcp_hard_bound = B_TRUE; 17552 } 17553 17554 tcp->tcp_detached = B_FALSE; 17555 17556 /* We can enable synchronous streams now */ 17557 if (tcp->tcp_fused) { 17558 tcp_fuse_syncstr_enable_pair(tcp); 17559 } 17560 17561 if (tcp->tcp_ka_enabled) { 17562 tcp->tcp_ka_last_intrvl = 0; 17563 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 17564 MSEC_TO_TICK(tcp->tcp_ka_interval)); 17565 } 17566 17567 /* 17568 * At this point, eager is fully established and will 17569 * have the following references - 17570 * 17571 * 2 references for connection to exist (1 for TCP and 1 for IP). 17572 * 1 reference for the squeue which will be dropped by the squeue as 17573 * soon as this function returns. 17574 * There will be 1 additional reference for being in classifier 17575 * hash list provided something bad hasn't happened. 17576 */ 17577 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 17578 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 17579 } 17580 17581 /* 17582 * The function called through squeue to get behind listener's perimeter to 17583 * send a deferred conn_ind. 17584 */ 17585 /* ARGSUSED */ 17586 void 17587 tcp_send_pending(void *arg, mblk_t *mp, void *arg2) 17588 { 17589 conn_t *connp = (conn_t *)arg; 17590 tcp_t *listener = connp->conn_tcp; 17591 17592 if (listener->tcp_state == TCPS_CLOSED || 17593 TCP_IS_DETACHED(listener)) { 17594 /* 17595 * If listener has closed, it would have caused a 17596 * cleanup/blowoff to happen for the eager. 17597 */ 17598 tcp_t *tcp; 17599 struct T_conn_ind *conn_ind; 17600 17601 conn_ind = (struct T_conn_ind *)mp->b_rptr; 17602 bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp, 17603 conn_ind->OPT_length); 17604 /* 17605 * We need to drop the ref on eager that was put in 17606 * tcp_rput_data() before trying to send the conn_ind 17607 * to listener. The conn_ind was deferred in tcp_send_conn_ind 17608 * and tcp_wput_accept() is sending this deferred conn_ind but 17609 * listener is closed so we drop the ref. 17610 */ 17611 CONN_DEC_REF(tcp->tcp_connp); 17612 freemsg(mp); 17613 return; 17614 } 17615 putnext(listener->tcp_rq, mp); 17616 } 17617 17618 17619 /* 17620 * This is the STREAMS entry point for T_CONN_RES coming down on 17621 * Acceptor STREAM when sockfs listener does accept processing. 17622 * Read the block comment on top of tcp_conn_request(). 17623 */ 17624 void 17625 tcp_wput_accept(queue_t *q, mblk_t *mp) 17626 { 17627 queue_t *rq = RD(q); 17628 struct T_conn_res *conn_res; 17629 tcp_t *eager; 17630 tcp_t *listener; 17631 struct T_ok_ack *ok; 17632 t_scalar_t PRIM_type; 17633 mblk_t *opt_mp; 17634 conn_t *econnp; 17635 17636 ASSERT(DB_TYPE(mp) == M_PROTO); 17637 17638 conn_res = (struct T_conn_res *)mp->b_rptr; 17639 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 17640 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) { 17641 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 17642 if (mp != NULL) 17643 putnext(rq, mp); 17644 return; 17645 } 17646 switch (conn_res->PRIM_type) { 17647 case O_T_CONN_RES: 17648 case T_CONN_RES: 17649 /* 17650 * We pass up an err ack if allocb fails. This will 17651 * cause sockfs to issue a T_DISCON_REQ which will cause 17652 * tcp_eager_blowoff to be called. sockfs will then call 17653 * rq->q_qinfo->qi_qclose to cleanup the acceptor stream.
17654 * we need to do the allocb up here because we have to 17655 * make sure rq->q_qinfo->qi_qclose still points to the 17656 * correct function (tcpclose_accept) in case allocb 17657 * fails. 17658 */ 17659 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 17660 if (opt_mp == NULL) { 17661 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 17662 if (mp != NULL) 17663 putnext(rq, mp); 17664 return; 17665 } 17666 17667 bcopy(mp->b_rptr + conn_res->OPT_offset, 17668 &eager, conn_res->OPT_length); 17669 PRIM_type = conn_res->PRIM_type; 17670 mp->b_datap->db_type = M_PCPROTO; 17671 mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack); 17672 ok = (struct T_ok_ack *)mp->b_rptr; 17673 ok->PRIM_type = T_OK_ACK; 17674 ok->CORRECT_prim = PRIM_type; 17675 econnp = eager->tcp_connp; 17676 econnp->conn_dev = (dev_t)q->q_ptr; 17677 eager->tcp_rq = rq; 17678 eager->tcp_wq = q; 17679 rq->q_ptr = econnp; 17680 rq->q_qinfo = &tcp_rinit; 17681 q->q_ptr = econnp; 17682 q->q_qinfo = &tcp_winit; 17683 listener = eager->tcp_listener; 17684 eager->tcp_issocket = B_TRUE; 17685 econnp->conn_zoneid = listener->tcp_connp->conn_zoneid; 17686 17687 /* Put the ref for IP */ 17688 CONN_INC_REF(econnp); 17689 17690 /* 17691 * We should have minimum of 3 references on the conn 17692 * at this point. One each for TCP and IP and one for 17693 * the T_conn_ind that was sent up when the 3-way handshake 17694 * completed. In the normal case we would also have another 17695 * reference (making a total of 4) for the conn being in the 17696 * classifier hash list. However the eager could have received 17697 * an RST subsequently and tcp_closei_local could have removed 17698 * the eager from the classifier hash list, hence we can't 17699 * assert that reference. 17700 */ 17701 ASSERT(econnp->conn_ref >= 3); 17702 17703 /* 17704 * Send the new local address also up to sockfs. There 17705 * should already be enough space in the mp that came 17706 * down from soaccept(). 17707 */ 17708 if (eager->tcp_family == AF_INET) { 17709 sin_t *sin; 17710 17711 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 17712 (sizeof (struct T_ok_ack) + sizeof (sin_t))); 17713 sin = (sin_t *)mp->b_wptr; 17714 mp->b_wptr += sizeof (sin_t); 17715 sin->sin_family = AF_INET; 17716 sin->sin_port = eager->tcp_lport; 17717 sin->sin_addr.s_addr = eager->tcp_ipha->ipha_src; 17718 } else { 17719 sin6_t *sin6; 17720 17721 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 17722 sizeof (struct T_ok_ack) + sizeof (sin6_t)); 17723 sin6 = (sin6_t *)mp->b_wptr; 17724 mp->b_wptr += sizeof (sin6_t); 17725 sin6->sin6_family = AF_INET6; 17726 sin6->sin6_port = eager->tcp_lport; 17727 if (eager->tcp_ipversion == IPV4_VERSION) { 17728 sin6->sin6_flowinfo = 0; 17729 IN6_IPADDR_TO_V4MAPPED( 17730 eager->tcp_ipha->ipha_src, 17731 &sin6->sin6_addr); 17732 } else { 17733 ASSERT(eager->tcp_ip6h != NULL); 17734 sin6->sin6_flowinfo = 17735 eager->tcp_ip6h->ip6_vcf & 17736 ~IPV6_VERS_AND_FLOW_MASK; 17737 sin6->sin6_addr = eager->tcp_ip6h->ip6_src; 17738 } 17739 sin6->sin6_scope_id = 0; 17740 sin6->__sin6_src_id = 0; 17741 } 17742 17743 putnext(rq, mp); 17744 17745 opt_mp->b_datap->db_type = M_SETOPTS; 17746 opt_mp->b_wptr += sizeof (struct stroptions); 17747 17748 /* 17749 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 17750 * from listener to acceptor. The message is chained on the 17751 * bind_mp which tcp_rput_other will send down to IP. 
17752 */ 17753 if (listener->tcp_bound_if != 0) { 17754 /* allocate optmgmt req */ 17755 mp = tcp_setsockopt_mp(IPPROTO_IPV6, 17756 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 17757 sizeof (int)); 17758 if (mp != NULL) 17759 linkb(opt_mp, mp); 17760 } 17761 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 17762 uint_t on = 1; 17763 17764 /* allocate optmgmt req */ 17765 mp = tcp_setsockopt_mp(IPPROTO_IPV6, 17766 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 17767 if (mp != NULL) 17768 linkb(opt_mp, mp); 17769 } 17770 17771 17772 mutex_enter(&listener->tcp_eager_lock); 17773 17774 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 17775 17776 tcp_t *tail; 17777 tcp_t *tcp; 17778 mblk_t *mp1; 17779 17780 tcp = listener->tcp_eager_prev_q0; 17781 /* 17782 * listener->tcp_eager_prev_q0 points to the TAIL of the 17783 * deferred T_conn_ind queue. We need to get to the head 17784 * of the queue in order to send up T_conn_ind the same 17785 * order as how the 3WHS is completed. 17786 */ 17787 while (tcp != listener) { 17788 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0 && 17789 !tcp->tcp_kssl_pending) 17790 break; 17791 else 17792 tcp = tcp->tcp_eager_prev_q0; 17793 } 17794 /* None of the pending eagers can be sent up now */ 17795 if (tcp == listener) 17796 goto no_more_eagers; 17797 17798 mp1 = tcp->tcp_conn.tcp_eager_conn_ind; 17799 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 17800 /* Move from q0 to q */ 17801 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 17802 listener->tcp_conn_req_cnt_q0--; 17803 listener->tcp_conn_req_cnt_q++; 17804 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 17805 tcp->tcp_eager_prev_q0; 17806 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 17807 tcp->tcp_eager_next_q0; 17808 tcp->tcp_eager_prev_q0 = NULL; 17809 tcp->tcp_eager_next_q0 = NULL; 17810 tcp->tcp_conn_def_q0 = B_FALSE; 17811 17812 /* 17813 * Insert at end of the queue because sockfs sends 17814 * down T_CONN_RES in chronological order. Leaving 17815 * the older conn indications at front of the queue 17816 * helps reducing search time. 17817 */ 17818 tail = listener->tcp_eager_last_q; 17819 if (tail != NULL) { 17820 tail->tcp_eager_next_q = tcp; 17821 } else { 17822 listener->tcp_eager_next_q = tcp; 17823 } 17824 listener->tcp_eager_last_q = tcp; 17825 tcp->tcp_eager_next_q = NULL; 17826 17827 /* Need to get inside the listener perimeter */ 17828 CONN_INC_REF(listener->tcp_connp); 17829 squeue_fill(listener->tcp_connp->conn_sqp, mp1, 17830 tcp_send_pending, listener->tcp_connp, 17831 SQTAG_TCP_SEND_PENDING); 17832 } 17833 no_more_eagers: 17834 tcp_eager_unlink(eager); 17835 mutex_exit(&listener->tcp_eager_lock); 17836 17837 /* 17838 * At this point, the eager is detached from the listener 17839 * but we still have an extra refs on eager (apart from the 17840 * usual tcp references). The ref was placed in tcp_rput_data 17841 * before sending the conn_ind in tcp_send_conn_ind. 17842 * The ref will be dropped in tcp_accept_finish(). 
17843 */ 17844 squeue_enter_nodrain(econnp->conn_sqp, opt_mp, 17845 tcp_accept_finish, econnp, SQTAG_TCP_ACCEPT_FINISH_Q0); 17846 return; 17847 default: 17848 mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0); 17849 if (mp != NULL) 17850 putnext(rq, mp); 17851 return; 17852 } 17853 } 17854 17855 void 17856 tcp_wput(queue_t *q, mblk_t *mp) 17857 { 17858 conn_t *connp = Q_TO_CONN(q); 17859 tcp_t *tcp; 17860 void (*output_proc)(); 17861 t_scalar_t type; 17862 uchar_t *rptr; 17863 struct iocblk *iocp; 17864 uint32_t msize; 17865 17866 ASSERT(connp->conn_ref >= 2); 17867 17868 switch (DB_TYPE(mp)) { 17869 case M_DATA: 17870 tcp = connp->conn_tcp; 17871 ASSERT(tcp != NULL); 17872 17873 msize = msgdsize(mp); 17874 17875 mutex_enter(&connp->conn_lock); 17876 CONN_INC_REF_LOCKED(connp); 17877 17878 tcp->tcp_squeue_bytes += msize; 17879 if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater) { 17880 mutex_exit(&connp->conn_lock); 17881 tcp_setqfull(tcp); 17882 } else 17883 mutex_exit(&connp->conn_lock); 17884 17885 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 17886 tcp_output, connp, SQTAG_TCP_OUTPUT); 17887 return; 17888 case M_PROTO: 17889 case M_PCPROTO: 17890 /* 17891 * if it is a snmp message, don't get behind the squeue 17892 */ 17893 tcp = connp->conn_tcp; 17894 rptr = mp->b_rptr; 17895 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 17896 type = ((union T_primitives *)rptr)->type; 17897 } else { 17898 if (tcp->tcp_debug) { 17899 (void) strlog(TCP_MOD_ID, 0, 1, 17900 SL_ERROR|SL_TRACE, 17901 "tcp_wput_proto, dropping one..."); 17902 } 17903 freemsg(mp); 17904 return; 17905 } 17906 if (type == T_SVR4_OPTMGMT_REQ) { 17907 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 17908 if (snmpcom_req(q, mp, tcp_snmp_set, tcp_snmp_get, 17909 cr)) { 17910 /* 17911 * This was a SNMP request 17912 */ 17913 return; 17914 } else { 17915 output_proc = tcp_wput_proto; 17916 } 17917 } else { 17918 output_proc = tcp_wput_proto; 17919 } 17920 break; 17921 case M_IOCTL: 17922 /* 17923 * Most ioctls can be processed right away without going via 17924 * squeues - process them right here. Those that do require 17925 * squeue (currently TCP_IOC_DEFAULT_Q and _SIOCSOCKFALLBACK) 17926 * are processed by tcp_wput_ioctl(). 17927 */ 17928 iocp = (struct iocblk *)mp->b_rptr; 17929 tcp = connp->conn_tcp; 17930 17931 switch (iocp->ioc_cmd) { 17932 case TCP_IOC_ABORT_CONN: 17933 tcp_ioctl_abort_conn(q, mp); 17934 return; 17935 case TI_GETPEERNAME: 17936 if (tcp->tcp_state < TCPS_SYN_RCVD) { 17937 iocp->ioc_error = ENOTCONN; 17938 iocp->ioc_count = 0; 17939 mp->b_datap->db_type = M_IOCACK; 17940 qreply(q, mp); 17941 return; 17942 } 17943 /* FALLTHRU */ 17944 case TI_GETMYNAME: 17945 mi_copyin(q, mp, NULL, 17946 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 17947 return; 17948 case ND_SET: 17949 /* nd_getset does the necessary checks */ 17950 case ND_GET: 17951 if (!nd_getset(q, tcp_g_nd, mp)) { 17952 CALL_IP_WPUT(connp, q, mp); 17953 return; 17954 } 17955 qreply(q, mp); 17956 return; 17957 case TCP_IOC_DEFAULT_Q: 17958 /* 17959 * Wants to be the default wq. Check the credentials 17960 * first, the rest is executed via squeue. 
17961 */ 17962 if (secpolicy_net_config(iocp->ioc_cr, B_FALSE) != 0) { 17963 iocp->ioc_error = EPERM; 17964 iocp->ioc_count = 0; 17965 mp->b_datap->db_type = M_IOCACK; 17966 qreply(q, mp); 17967 return; 17968 } 17969 output_proc = tcp_wput_ioctl; 17970 break; 17971 default: 17972 output_proc = tcp_wput_ioctl; 17973 break; 17974 } 17975 break; 17976 default: 17977 output_proc = tcp_wput_nondata; 17978 break; 17979 } 17980 17981 CONN_INC_REF(connp); 17982 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 17983 output_proc, connp, SQTAG_TCP_WPUT_OTHER); 17984 } 17985 17986 /* 17987 * Initial STREAMS write side put() procedure for sockets. It tries to 17988 * handle the T_CAPABILITY_REQ which sockfs sends down while setting 17989 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages 17990 * are handled by tcp_wput() as usual. 17991 * 17992 * All further messages will also be handled by tcp_wput() because we cannot 17993 * be sure that the above short cut is safe later. 17994 */ 17995 static void 17996 tcp_wput_sock(queue_t *wq, mblk_t *mp) 17997 { 17998 conn_t *connp = Q_TO_CONN(wq); 17999 tcp_t *tcp = connp->conn_tcp; 18000 struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr; 18001 18002 ASSERT(wq->q_qinfo == &tcp_sock_winit); 18003 wq->q_qinfo = &tcp_winit; 18004 18005 ASSERT(IPCL_IS_TCP(connp)); 18006 ASSERT(TCP_IS_SOCKET(tcp)); 18007 18008 if (DB_TYPE(mp) == M_PCPROTO && 18009 MBLKL(mp) == sizeof (struct T_capability_req) && 18010 car->PRIM_type == T_CAPABILITY_REQ) { 18011 tcp_capability_req(tcp, mp); 18012 return; 18013 } 18014 18015 tcp_wput(wq, mp); 18016 } 18017 18018 static boolean_t 18019 tcp_zcopy_check(tcp_t *tcp) 18020 { 18021 conn_t *connp = tcp->tcp_connp; 18022 ire_t *ire; 18023 boolean_t zc_enabled = B_FALSE; 18024 18025 if (do_tcpzcopy == 2) 18026 zc_enabled = B_TRUE; 18027 else if (tcp->tcp_ipversion == IPV4_VERSION && 18028 IPCL_IS_CONNECTED(connp) && 18029 (connp->conn_flags & IPCL_CHECK_POLICY) == 0 && 18030 connp->conn_dontroute == 0 && 18031 !connp->conn_nexthop_set && 18032 connp->conn_xmit_if_ill == NULL && 18033 connp->conn_nofailover_ill == NULL && 18034 do_tcpzcopy == 1) { 18035 /* 18036 * the checks above closely resemble the fast path checks 18037 * in tcp_send_data(). 
18038 */ 18039 mutex_enter(&connp->conn_lock); 18040 ire = connp->conn_ire_cache; 18041 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18042 if (ire != NULL && !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18043 IRE_REFHOLD(ire); 18044 if (ire->ire_stq != NULL) { 18045 ill_t *ill = (ill_t *)ire->ire_stq->q_ptr; 18046 18047 zc_enabled = ill && (ill->ill_capabilities & 18048 ILL_CAPAB_ZEROCOPY) && 18049 (ill->ill_zerocopy_capab-> 18050 ill_zerocopy_flags != 0); 18051 } 18052 IRE_REFRELE(ire); 18053 } 18054 mutex_exit(&connp->conn_lock); 18055 } 18056 tcp->tcp_snd_zcopy_on = zc_enabled; 18057 if (!TCP_IS_DETACHED(tcp)) { 18058 if (zc_enabled) { 18059 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMSAFE); 18060 TCP_STAT(tcp_zcopy_on); 18061 } else { 18062 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18063 TCP_STAT(tcp_zcopy_off); 18064 } 18065 } 18066 return (zc_enabled); 18067 } 18068 18069 static mblk_t * 18070 tcp_zcopy_disable(tcp_t *tcp, mblk_t *bp) 18071 { 18072 if (do_tcpzcopy == 2) 18073 return (bp); 18074 else if (tcp->tcp_snd_zcopy_on) { 18075 tcp->tcp_snd_zcopy_on = B_FALSE; 18076 if (!TCP_IS_DETACHED(tcp)) { 18077 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18078 TCP_STAT(tcp_zcopy_disable); 18079 } 18080 } 18081 return (tcp_zcopy_backoff(tcp, bp, 0)); 18082 } 18083 18084 /* 18085 * Backoff from a zero-copy mblk by copying data to a new mblk and freeing 18086 * the original desballoca'ed segmapped mblk. 18087 */ 18088 static mblk_t * 18089 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, int fix_xmitlist) 18090 { 18091 mblk_t *head, *tail, *nbp; 18092 if (IS_VMLOANED_MBLK(bp)) { 18093 TCP_STAT(tcp_zcopy_backoff); 18094 if ((head = copyb(bp)) == NULL) { 18095 /* fail to backoff; leave it for the next backoff */ 18096 tcp->tcp_xmit_zc_clean = B_FALSE; 18097 return (bp); 18098 } 18099 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18100 if (fix_xmitlist) 18101 tcp_zcopy_notify(tcp); 18102 else 18103 head->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 18104 } 18105 nbp = bp->b_cont; 18106 if (fix_xmitlist) { 18107 head->b_prev = bp->b_prev; 18108 head->b_next = bp->b_next; 18109 if (tcp->tcp_xmit_tail == bp) 18110 tcp->tcp_xmit_tail = head; 18111 } 18112 bp->b_next = NULL; 18113 bp->b_prev = NULL; 18114 freeb(bp); 18115 } else { 18116 head = bp; 18117 nbp = bp->b_cont; 18118 } 18119 tail = head; 18120 while (nbp) { 18121 if (IS_VMLOANED_MBLK(nbp)) { 18122 TCP_STAT(tcp_zcopy_backoff); 18123 if ((tail->b_cont = copyb(nbp)) == NULL) { 18124 tcp->tcp_xmit_zc_clean = B_FALSE; 18125 tail->b_cont = nbp; 18126 return (head); 18127 } 18128 tail = tail->b_cont; 18129 if (nbp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18130 if (fix_xmitlist) 18131 tcp_zcopy_notify(tcp); 18132 else 18133 tail->b_datap->db_struioflag |= 18134 STRUIO_ZCNOTIFY; 18135 } 18136 bp = nbp; 18137 nbp = nbp->b_cont; 18138 if (fix_xmitlist) { 18139 tail->b_prev = bp->b_prev; 18140 tail->b_next = bp->b_next; 18141 if (tcp->tcp_xmit_tail == bp) 18142 tcp->tcp_xmit_tail = tail; 18143 } 18144 bp->b_next = NULL; 18145 bp->b_prev = NULL; 18146 freeb(bp); 18147 } else { 18148 tail->b_cont = nbp; 18149 tail = nbp; 18150 nbp = nbp->b_cont; 18151 } 18152 } 18153 if (fix_xmitlist) { 18154 tcp->tcp_xmit_last = tail; 18155 tcp->tcp_xmit_zc_clean = B_TRUE; 18156 } 18157 return (head); 18158 } 18159 18160 static void 18161 tcp_zcopy_notify(tcp_t *tcp) 18162 { 18163 struct stdata *stp; 18164 18165 if (tcp->tcp_detached) 18166 return; 18167 stp = STREAM(tcp->tcp_rq); 18168 mutex_enter(&stp->sd_lock); 18169 stp->sd_flag |= 
STZCNOTIFY; 18170 cv_broadcast(&stp->sd_zcopy_wait); 18171 mutex_exit(&stp->sd_lock); 18172 } 18173 18174 static void 18175 tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp) 18176 { 18177 ipha_t *ipha; 18178 ipaddr_t src; 18179 ipaddr_t dst; 18180 uint32_t cksum; 18181 ire_t *ire; 18182 uint16_t *up; 18183 ill_t *ill; 18184 conn_t *connp = tcp->tcp_connp; 18185 uint32_t hcksum_txflags = 0; 18186 mblk_t *ire_fp_mp; 18187 uint_t ire_fp_mp_len; 18188 18189 ASSERT(DB_TYPE(mp) == M_DATA); 18190 18191 if (DB_CRED(mp) == NULL) 18192 mblk_setcred(mp, CONN_CRED(connp)); 18193 18194 ipha = (ipha_t *)mp->b_rptr; 18195 src = ipha->ipha_src; 18196 dst = ipha->ipha_dst; 18197 18198 /* 18199 * Drop off fast path for IPv6 and also if options are present or 18200 * we need to resolve a TS label. 18201 */ 18202 if (tcp->tcp_ipversion != IPV4_VERSION || 18203 !IPCL_IS_CONNECTED(connp) || 18204 (connp->conn_flags & IPCL_CHECK_POLICY) != 0 || 18205 connp->conn_dontroute || 18206 connp->conn_nexthop_set || 18207 connp->conn_xmit_if_ill != NULL || 18208 connp->conn_nofailover_ill != NULL || 18209 !connp->conn_ulp_labeled || 18210 ipha->ipha_ident == IP_HDR_INCLUDED || 18211 ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION || 18212 IPP_ENABLED(IPP_LOCAL_OUT)) { 18213 if (tcp->tcp_snd_zcopy_aware) 18214 mp = tcp_zcopy_disable(tcp, mp); 18215 TCP_STAT(tcp_ip_send); 18216 CALL_IP_WPUT(connp, q, mp); 18217 return; 18218 } 18219 18220 mutex_enter(&connp->conn_lock); 18221 ire = connp->conn_ire_cache; 18222 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18223 if (ire != NULL && ire->ire_addr == dst && 18224 !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18225 IRE_REFHOLD(ire); 18226 mutex_exit(&connp->conn_lock); 18227 } else { 18228 boolean_t cached = B_FALSE; 18229 18230 /* force a recheck later on */ 18231 tcp->tcp_ire_ill_check_done = B_FALSE; 18232 18233 TCP_DBGSTAT(tcp_ire_null1); 18234 connp->conn_ire_cache = NULL; 18235 mutex_exit(&connp->conn_lock); 18236 if (ire != NULL) 18237 IRE_REFRELE_NOTR(ire); 18238 ire = ire_cache_lookup(dst, connp->conn_zoneid, 18239 MBLK_GETLABEL(mp)); 18240 if (ire == NULL) { 18241 if (tcp->tcp_snd_zcopy_aware) 18242 mp = tcp_zcopy_backoff(tcp, mp, 0); 18243 TCP_STAT(tcp_ire_null); 18244 CALL_IP_WPUT(connp, q, mp); 18245 return; 18246 } 18247 IRE_REFHOLD_NOTR(ire); 18248 /* 18249 * Since we are inside the squeue, there cannot be another 18250 * thread in TCP trying to set the conn_ire_cache now. The 18251 * check for IRE_MARK_CONDEMNED ensures that an interface 18252 * unplumb thread has not yet started cleaning up the conns. 18253 * Hence we don't need to grab the conn lock. 18254 */ 18255 if (!(connp->conn_state_flags & CONN_CLOSING)) { 18256 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 18257 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18258 connp->conn_ire_cache = ire; 18259 cached = B_TRUE; 18260 } 18261 rw_exit(&ire->ire_bucket->irb_lock); 18262 } 18263 18264 /* 18265 * We can continue to use the ire but since it was 18266 * not cached, we should drop the extra reference. 18267 */ 18268 if (!cached) 18269 IRE_REFRELE_NOTR(ire); 18270 18271 /* 18272 * Rampart note: no need to select a new label here, since 18273 * labels are not allowed to change during the life of a TCP 18274 * connection. 
18275 */ 18276 } 18277 18278 if (ire->ire_flags & RTF_MULTIRT || 18279 ire->ire_stq == NULL || 18280 ire->ire_max_frag < ntohs(ipha->ipha_length) || 18281 (ire_fp_mp = ire->ire_fp_mp) == NULL || 18282 (ire_fp_mp_len = MBLKL(ire_fp_mp)) > MBLKHEAD(mp)) { 18283 if (tcp->tcp_snd_zcopy_aware) 18284 mp = tcp_zcopy_disable(tcp, mp); 18285 TCP_STAT(tcp_ip_ire_send); 18286 IRE_REFRELE(ire); 18287 CALL_IP_WPUT(connp, q, mp); 18288 return; 18289 } 18290 18291 ill = ire_to_ill(ire); 18292 if (connp->conn_outgoing_ill != NULL) { 18293 ill_t *conn_outgoing_ill = NULL; 18294 /* 18295 * Choose a good ill in the group to send the packets on. 18296 */ 18297 ire = conn_set_outgoing_ill(connp, ire, &conn_outgoing_ill); 18298 ill = ire_to_ill(ire); 18299 } 18300 ASSERT(ill != NULL); 18301 18302 if (!tcp->tcp_ire_ill_check_done) { 18303 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 18304 tcp->tcp_ire_ill_check_done = B_TRUE; 18305 } 18306 18307 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED); 18308 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1); 18309 #ifndef _BIG_ENDIAN 18310 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8); 18311 #endif 18312 18313 /* 18314 * Check to see if we need to re-enable MDT for this connection 18315 * because it was previously disabled due to changes in the ill; 18316 * note that by doing it here, this re-enabling only applies when 18317 * the packet is not dispatched through CALL_IP_WPUT(). 18318 * 18319 * That means for IPv4, it is worth re-enabling MDT for the fastpath 18320 * case, since that's how we ended up here. For IPv6, we do the 18321 * re-enabling work in ip_xmit_v6(), albeit indirectly via squeue. 18322 */ 18323 if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) { 18324 /* 18325 * Restore MDT for this connection, so that next time around 18326 * it is eligible to go through tcp_multisend() path again. 18327 */ 18328 TCP_STAT(tcp_mdt_conn_resumed1); 18329 tcp->tcp_mdt = B_TRUE; 18330 ip1dbg(("tcp_send_data: reenabling MDT for connp %p on " 18331 "interface %s\n", (void *)connp, ill->ill_name)); 18332 } 18333 18334 if (tcp->tcp_snd_zcopy_aware) { 18335 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 || 18336 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0)) 18337 mp = tcp_zcopy_disable(tcp, mp); 18338 /* 18339 * we shouldn't need to reset ipha as the mp containing 18340 * ipha should never be a zero-copy mp. 18341 */ 18342 } 18343 18344 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) { 18345 ASSERT(ill->ill_hcksum_capab != NULL); 18346 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags; 18347 } 18348 18349 /* pseudo-header checksum (do it in parts for IP header checksum) */ 18350 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF); 18351 18352 ASSERT(ipha->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION); 18353 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 18354 18355 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up, 18356 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum); 18357 18358 /* Software checksum? 
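 * If no hardware checksum flags were set on the mblk by the
 * IP_CKSUM_XMIT_FAST() call above, the TCP checksum was computed in
 * software, so account for it in the statistics.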
*/ 18359 if (DB_CKSUMFLAGS(mp) == 0) { 18360 TCP_STAT(tcp_out_sw_cksum); 18361 TCP_STAT_UPDATE(tcp_out_sw_cksum_bytes, 18362 ntohs(ipha->ipha_length) - IP_SIMPLE_HDR_LENGTH); 18363 } 18364 18365 ipha->ipha_fragment_offset_and_flags |= 18366 (uint32_t)htons(ire->ire_frag_flag); 18367 18368 /* Calculate IP header checksum if hardware isn't capable */ 18369 if (!(DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM)) { 18370 IP_HDR_CKSUM(ipha, cksum, ((uint32_t *)ipha)[0], 18371 ((uint16_t *)ipha)[4]); 18372 } 18373 18374 ASSERT(DB_TYPE(ire_fp_mp) == M_DATA); 18375 mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len; 18376 bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len); 18377 18378 UPDATE_OB_PKT_COUNT(ire); 18379 ire->ire_last_used_time = lbolt; 18380 BUMP_MIB(&ip_mib, ipOutRequests); 18381 18382 if (ILL_DLS_CAPABLE(ill)) { 18383 /* 18384 * Send the packet directly to DLD, where it may be queued 18385 * depending on the availability of transmit resources at 18386 * the media layer. 18387 */ 18388 IP_DLS_ILL_TX(ill, mp); 18389 } else { 18390 putnext(ire->ire_stq, mp); 18391 } 18392 IRE_REFRELE(ire); 18393 } 18394 18395 /* 18396 * This handles the case when the receiver has shrunk its win. Per RFC 1122 18397 * if the receiver shrinks the window, i.e. moves the right window to the 18398 * left, the we should not send new data, but should retransmit normally the 18399 * old unacked data between suna and suna + swnd. We might has sent data 18400 * that is now outside the new window, pretend that we didn't send it. 18401 */ 18402 static void 18403 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count) 18404 { 18405 uint32_t snxt = tcp->tcp_snxt; 18406 mblk_t *xmit_tail; 18407 int32_t offset; 18408 18409 ASSERT(shrunk_count > 0); 18410 18411 /* Pretend we didn't send the data outside the window */ 18412 snxt -= shrunk_count; 18413 18414 /* Get the mblk and the offset in it per the shrunk window */ 18415 xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset); 18416 18417 ASSERT(xmit_tail != NULL); 18418 18419 /* Reset all the values per the now shrunk window */ 18420 tcp->tcp_snxt = snxt; 18421 tcp->tcp_xmit_tail = xmit_tail; 18422 tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr - xmit_tail->b_rptr - 18423 offset; 18424 tcp->tcp_unsent += shrunk_count; 18425 18426 if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0) 18427 /* 18428 * Make sure the timer is running so that we will probe a zero 18429 * window. 18430 */ 18431 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 18432 } 18433 18434 18435 /* 18436 * The TCP normal data output path. 18437 * NOTE: the logic of the fast path is duplicated from this function. 18438 */ 18439 static void 18440 tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent) 18441 { 18442 int len; 18443 mblk_t *local_time; 18444 mblk_t *mp1; 18445 uint32_t snxt; 18446 int tail_unsent; 18447 int tcpstate; 18448 int usable = 0; 18449 mblk_t *xmit_tail; 18450 queue_t *q = tcp->tcp_wq; 18451 int32_t mss; 18452 int32_t num_sack_blk = 0; 18453 int32_t tcp_hdr_len; 18454 int32_t tcp_tcp_hdr_len; 18455 int mdt_thres; 18456 int rc; 18457 18458 tcpstate = tcp->tcp_state; 18459 if (mp == NULL) { 18460 /* 18461 * tcp_wput_data() with NULL mp should only be called when 18462 * there is unsent data. 18463 */ 18464 ASSERT(tcp->tcp_unsent > 0); 18465 /* Really tacky... but we need this for detached closes. 
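 * With a NULL mp we simply pick up the number of queued but unsent
 * bytes and jump to data_null below to (re)drive transmission.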
*/ 18466 len = tcp->tcp_unsent; 18467 goto data_null; 18468 } 18469 18470 #if CCS_STATS 18471 wrw_stats.tot.count++; 18472 wrw_stats.tot.bytes += msgdsize(mp); 18473 #endif 18474 ASSERT(mp->b_datap->db_type == M_DATA); 18475 /* 18476 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ, 18477 * or before a connection attempt has begun. 18478 */ 18479 if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT || 18480 (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 18481 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 18482 #ifdef DEBUG 18483 cmn_err(CE_WARN, 18484 "tcp_wput_data: data after ordrel, %s", 18485 tcp_display(tcp, NULL, 18486 DISP_ADDR_AND_PORT)); 18487 #else 18488 if (tcp->tcp_debug) { 18489 (void) strlog(TCP_MOD_ID, 0, 1, 18490 SL_TRACE|SL_ERROR, 18491 "tcp_wput_data: data after ordrel, %s\n", 18492 tcp_display(tcp, NULL, 18493 DISP_ADDR_AND_PORT)); 18494 } 18495 #endif /* DEBUG */ 18496 } 18497 if (tcp->tcp_snd_zcopy_aware && 18498 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) != 0) 18499 tcp_zcopy_notify(tcp); 18500 freemsg(mp); 18501 if (tcp->tcp_flow_stopped && 18502 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 18503 tcp_clrqfull(tcp); 18504 } 18505 return; 18506 } 18507 18508 /* Strip empties */ 18509 for (;;) { 18510 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 18511 (uintptr_t)INT_MAX); 18512 len = (int)(mp->b_wptr - mp->b_rptr); 18513 if (len > 0) 18514 break; 18515 mp1 = mp; 18516 mp = mp->b_cont; 18517 freeb(mp1); 18518 if (!mp) { 18519 return; 18520 } 18521 } 18522 18523 /* If we are the first on the list ... */ 18524 if (tcp->tcp_xmit_head == NULL) { 18525 tcp->tcp_xmit_head = mp; 18526 tcp->tcp_xmit_tail = mp; 18527 tcp->tcp_xmit_tail_unsent = len; 18528 } else { 18529 /* If tiny tx and room in txq tail, pullup to save mblks. */ 18530 struct datab *dp; 18531 18532 mp1 = tcp->tcp_xmit_last; 18533 if (len < tcp_tx_pull_len && 18534 (dp = mp1->b_datap)->db_ref == 1 && 18535 dp->db_lim - mp1->b_wptr >= len) { 18536 ASSERT(len > 0); 18537 ASSERT(!mp1->b_cont); 18538 if (len == 1) { 18539 *mp1->b_wptr++ = *mp->b_rptr; 18540 } else { 18541 bcopy(mp->b_rptr, mp1->b_wptr, len); 18542 mp1->b_wptr += len; 18543 } 18544 if (mp1 == tcp->tcp_xmit_tail) 18545 tcp->tcp_xmit_tail_unsent += len; 18546 mp1->b_cont = mp->b_cont; 18547 if (tcp->tcp_snd_zcopy_aware && 18548 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 18549 mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 18550 freeb(mp); 18551 mp = mp1; 18552 } else { 18553 tcp->tcp_xmit_last->b_cont = mp; 18554 } 18555 len += tcp->tcp_unsent; 18556 } 18557 18558 /* Tack on however many more positive length mblks we have */ 18559 if ((mp1 = mp->b_cont) != NULL) { 18560 do { 18561 int tlen; 18562 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 18563 (uintptr_t)INT_MAX); 18564 tlen = (int)(mp1->b_wptr - mp1->b_rptr); 18565 if (tlen <= 0) { 18566 mp->b_cont = mp1->b_cont; 18567 freeb(mp1); 18568 } else { 18569 len += tlen; 18570 mp = mp1; 18571 } 18572 } while ((mp1 = mp->b_cont) != NULL); 18573 } 18574 tcp->tcp_xmit_last = mp; 18575 tcp->tcp_unsent = len; 18576 18577 if (urgent) 18578 usable = 1; 18579 18580 data_null: 18581 snxt = tcp->tcp_snxt; 18582 xmit_tail = tcp->tcp_xmit_tail; 18583 tail_unsent = tcp->tcp_xmit_tail_unsent; 18584 18585 /* 18586 * Note that tcp_mss has been adjusted to take into account the 18587 * timestamp option if applicable. Because SACK options do not 18588 * appear in every TCP segments and they are of variable lengths, 18589 * they cannot be included in tcp_mss. 
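 * (Each SACK block is a begin/end sequence number pair, i.e. 8 bytes,
 * and the option is emitted behind two NOP bytes plus the kind/length
 * header.)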
Thus we need to calculate 18590 * the actual segment length when we need to send a segment which 18591 * includes SACK options. 18592 */ 18593 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 18594 int32_t opt_len; 18595 18596 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 18597 tcp->tcp_num_sack_blk); 18598 opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN * 18599 2 + TCPOPT_HEADER_LEN; 18600 mss = tcp->tcp_mss - opt_len; 18601 tcp_hdr_len = tcp->tcp_hdr_len + opt_len; 18602 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + opt_len; 18603 } else { 18604 mss = tcp->tcp_mss; 18605 tcp_hdr_len = tcp->tcp_hdr_len; 18606 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 18607 } 18608 18609 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 18610 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 18611 SET_TCP_INIT_CWND(tcp, mss, tcp_slow_start_after_idle); 18612 } 18613 if (tcpstate == TCPS_SYN_RCVD) { 18614 /* 18615 * The three-way connection establishment handshake is not 18616 * complete yet. We want to queue the data for transmission 18617 * after entering ESTABLISHED state (RFC793). A jump to 18618 * "done" label effectively leaves data on the queue. 18619 */ 18620 goto done; 18621 } else { 18622 int usable_r; 18623 18624 /* 18625 * In the special case when cwnd is zero, which can only 18626 * happen if the connection is ECN capable, return now. 18627 * New segments is sent using tcp_timer(). The timer 18628 * is set in tcp_rput_data(). 18629 */ 18630 if (tcp->tcp_cwnd == 0) { 18631 /* 18632 * Note that tcp_cwnd is 0 before 3-way handshake is 18633 * finished. 18634 */ 18635 ASSERT(tcp->tcp_ecn_ok || 18636 tcp->tcp_state < TCPS_ESTABLISHED); 18637 return; 18638 } 18639 18640 /* NOTE: trouble if xmitting while SYN not acked? */ 18641 usable_r = snxt - tcp->tcp_suna; 18642 usable_r = tcp->tcp_swnd - usable_r; 18643 18644 /* 18645 * Check if the receiver has shrunk the window. If 18646 * tcp_wput_data() with NULL mp is called, tcp_fin_sent 18647 * cannot be set as there is unsent data, so FIN cannot 18648 * be sent out. Otherwise, we need to take into account 18649 * of FIN as it consumes an "invisible" sequence number. 18650 */ 18651 ASSERT(tcp->tcp_fin_sent == 0); 18652 if (usable_r < 0) { 18653 /* 18654 * The receiver has shrunk the window and we have sent 18655 * -usable_r date beyond the window, re-adjust. 18656 * 18657 * If TCP window scaling is enabled, there can be 18658 * round down error as the advertised receive window 18659 * is actually right shifted n bits. This means that 18660 * the lower n bits info is wiped out. It will look 18661 * like the window is shrunk. Do a check here to 18662 * see if the shrunk amount is actually within the 18663 * error in window calculation. If it is, just 18664 * return. Note that this check is inside the 18665 * shrunk window check. This makes sure that even 18666 * though tcp_process_shrunk_swnd() is not called, 18667 * we will stop further processing. 18668 */ 18669 if ((-usable_r >> tcp->tcp_snd_ws) > 0) { 18670 tcp_process_shrunk_swnd(tcp, -usable_r); 18671 } 18672 return; 18673 } 18674 18675 /* usable = MIN(swnd, cwnd) - unacked_bytes */ 18676 if (tcp->tcp_swnd > tcp->tcp_cwnd) 18677 usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd; 18678 18679 /* usable = MIN(usable, unsent) */ 18680 if (usable_r > len) 18681 usable_r = len; 18682 18683 /* usable = MAX(usable, {1 for urgent, 0 for data}) */ 18684 if (usable_r > 0) { 18685 usable = usable_r; 18686 } else { 18687 /* Bypass all other unnecessary processing. 
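 * The done label below only records xmit_tail/tail_unsent and, when
 * nothing was sent against a zero send window, restarts the timer so
 * that a zero window probe will go out.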
*/ 18688 goto done; 18689 } 18690 } 18691 18692 local_time = (mblk_t *)lbolt; 18693 18694 /* 18695 * "Our" Nagle Algorithm. This is not the same as in the old 18696 * BSD. This is more in line with the true intent of Nagle. 18697 * 18698 * The conditions are: 18699 * 1. The amount of unsent data (or amount of data which can be 18700 * sent, whichever is smaller) is less than Nagle limit. 18701 * 2. The last sent size is also less than Nagle limit. 18702 * 3. There is unack'ed data. 18703 * 4. Urgent pointer is not set. Send urgent data ignoring the 18704 * Nagle algorithm. This reduces the probability that urgent 18705 * bytes get "merged" together. 18706 * 5. The app has not closed the connection. This eliminates the 18707 * wait time of the receiving side waiting for the last piece of 18708 * (small) data. 18709 * 18710 * If all are satisified, exit without sending anything. Note 18711 * that Nagle limit can be smaller than 1 MSS. Nagle limit is 18712 * the smaller of 1 MSS and global tcp_naglim_def (default to be 18713 * 4095). 18714 */ 18715 if (usable < (int)tcp->tcp_naglim && 18716 tcp->tcp_naglim > tcp->tcp_last_sent_len && 18717 snxt != tcp->tcp_suna && 18718 !(tcp->tcp_valid_bits & TCP_URG_VALID) && 18719 !(tcp->tcp_valid_bits & TCP_FSS_VALID)) { 18720 goto done; 18721 } 18722 18723 if (tcp->tcp_cork) { 18724 /* 18725 * if the tcp->tcp_cork option is set, then we have to force 18726 * TCP not to send partial segment (smaller than MSS bytes). 18727 * We are calculating the usable now based on full mss and 18728 * will save the rest of remaining data for later. 18729 */ 18730 if (usable < mss) 18731 goto done; 18732 usable = (usable / mss) * mss; 18733 } 18734 18735 /* Update the latest receive window size in TCP header. */ 18736 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 18737 tcp->tcp_tcph->th_win); 18738 18739 /* 18740 * Determine if it's worthwhile to attempt MDT, based on: 18741 * 18742 * 1. Simple TCP/IP{v4,v6} (no options). 18743 * 2. IPSEC/IPQoS processing is not needed for the TCP connection. 18744 * 3. If the TCP connection is in ESTABLISHED state. 18745 * 4. The TCP is not detached. 18746 * 18747 * If any of the above conditions have changed during the 18748 * connection, stop using MDT and restore the stream head 18749 * parameters accordingly. 
18750 */ 18751 if (tcp->tcp_mdt && 18752 ((tcp->tcp_ipversion == IPV4_VERSION && 18753 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 18754 (tcp->tcp_ipversion == IPV6_VERSION && 18755 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN) || 18756 tcp->tcp_state != TCPS_ESTABLISHED || 18757 TCP_IS_DETACHED(tcp) || !CONN_IS_MD_FASTPATH(tcp->tcp_connp) || 18758 CONN_IPSEC_OUT_ENCAPSULATED(tcp->tcp_connp) || 18759 IPP_ENABLED(IPP_LOCAL_OUT))) { 18760 tcp->tcp_connp->conn_mdt_ok = B_FALSE; 18761 tcp->tcp_mdt = B_FALSE; 18762 18763 /* Anything other than detached is considered pathological */ 18764 if (!TCP_IS_DETACHED(tcp)) { 18765 TCP_STAT(tcp_mdt_conn_halted1); 18766 (void) tcp_maxpsz_set(tcp, B_TRUE); 18767 } 18768 } 18769 18770 /* Use MDT if sendable amount is greater than the threshold */ 18771 if (tcp->tcp_mdt && 18772 (mdt_thres = mss << tcp_mdt_smss_threshold, usable > mdt_thres) && 18773 (tail_unsent > mdt_thres || (xmit_tail->b_cont != NULL && 18774 MBLKL(xmit_tail->b_cont) > mdt_thres)) && 18775 (tcp->tcp_valid_bits == 0 || 18776 tcp->tcp_valid_bits == TCP_FSS_VALID)) { 18777 ASSERT(tcp->tcp_connp->conn_mdt_ok); 18778 rc = tcp_multisend(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 18779 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 18780 local_time, mdt_thres); 18781 } else { 18782 rc = tcp_send(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 18783 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 18784 local_time, INT_MAX); 18785 } 18786 18787 /* Pretend that all we were trying to send really got sent */ 18788 if (rc < 0 && tail_unsent < 0) { 18789 do { 18790 xmit_tail = xmit_tail->b_cont; 18791 xmit_tail->b_prev = local_time; 18792 ASSERT((uintptr_t)(xmit_tail->b_wptr - 18793 xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 18794 tail_unsent += (int)(xmit_tail->b_wptr - 18795 xmit_tail->b_rptr); 18796 } while (tail_unsent < 0); 18797 } 18798 done:; 18799 tcp->tcp_xmit_tail = xmit_tail; 18800 tcp->tcp_xmit_tail_unsent = tail_unsent; 18801 len = tcp->tcp_snxt - snxt; 18802 if (len) { 18803 /* 18804 * If new data was sent, need to update the notsack 18805 * list, which is, afterall, data blocks that have 18806 * not been sack'ed by the receiver. New data is 18807 * not sack'ed. 18808 */ 18809 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 18810 /* len is a negative value. */ 18811 tcp->tcp_pipe -= len; 18812 tcp_notsack_update(&(tcp->tcp_notsack_list), 18813 tcp->tcp_snxt, snxt, 18814 &(tcp->tcp_num_notsack_blk), 18815 &(tcp->tcp_cnt_notsack_list)); 18816 } 18817 tcp->tcp_snxt = snxt + tcp->tcp_fin_sent; 18818 tcp->tcp_rack = tcp->tcp_rnxt; 18819 tcp->tcp_rack_cnt = 0; 18820 if ((snxt + len) == tcp->tcp_suna) { 18821 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 18822 } 18823 } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) { 18824 /* 18825 * Didn't send anything. Make sure the timer is running 18826 * so that we will probe a zero window. 18827 */ 18828 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 18829 } 18830 /* Note that len is the amount we just sent but with a negative sign */ 18831 tcp->tcp_unsent += len; 18832 if (tcp->tcp_flow_stopped) { 18833 if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 18834 tcp_clrqfull(tcp); 18835 } 18836 } else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) { 18837 tcp_setqfull(tcp); 18838 } 18839 } 18840 18841 /* 18842 * tcp_fill_header is called by tcp_send() and tcp_multisend() to fill the 18843 * outgoing TCP header with the template header, as well as other 18844 * options such as time-stamp, ECN and/or SACK. 
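 * rptr must be 32-bit aligned; the template header in tcp_iphc is
 * copied into place a word at a time, and the caller is expected to
 * have reserved tcp_hdr_len bytes (plus room for any SACK blocks)
 * starting at rptr.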
18845 */ 18846 static void 18847 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk) 18848 { 18849 tcph_t *tcp_tmpl, *tcp_h; 18850 uint32_t *dst, *src; 18851 int hdrlen; 18852 18853 ASSERT(OK_32PTR(rptr)); 18854 18855 /* Template header */ 18856 tcp_tmpl = tcp->tcp_tcph; 18857 18858 /* Header of outgoing packet */ 18859 tcp_h = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 18860 18861 /* dst and src are opaque 32-bit fields, used for copying */ 18862 dst = (uint32_t *)rptr; 18863 src = (uint32_t *)tcp->tcp_iphc; 18864 hdrlen = tcp->tcp_hdr_len; 18865 18866 /* Fill time-stamp option if needed */ 18867 if (tcp->tcp_snd_ts_ok) { 18868 U32_TO_BE32((uint32_t)now, 18869 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4); 18870 U32_TO_BE32(tcp->tcp_ts_recent, 18871 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8); 18872 } else { 18873 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 18874 } 18875 18876 /* 18877 * Copy the template header; is this really more efficient than 18878 * calling bcopy()? For simple IPv4/TCP, it may be the case, 18879 * but perhaps not for other scenarios. 18880 */ 18881 dst[0] = src[0]; 18882 dst[1] = src[1]; 18883 dst[2] = src[2]; 18884 dst[3] = src[3]; 18885 dst[4] = src[4]; 18886 dst[5] = src[5]; 18887 dst[6] = src[6]; 18888 dst[7] = src[7]; 18889 dst[8] = src[8]; 18890 dst[9] = src[9]; 18891 if (hdrlen -= 40) { 18892 hdrlen >>= 2; 18893 dst += 10; 18894 src += 10; 18895 do { 18896 *dst++ = *src++; 18897 } while (--hdrlen); 18898 } 18899 18900 /* 18901 * Set the ECN info in the TCP header if it is not a zero 18902 * window probe. Zero window probe is only sent in 18903 * tcp_wput_data() and tcp_timer(). 18904 */ 18905 if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) { 18906 SET_ECT(tcp, rptr); 18907 18908 if (tcp->tcp_ecn_echo_on) 18909 tcp_h->th_flags[0] |= TH_ECE; 18910 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 18911 tcp_h->th_flags[0] |= TH_CWR; 18912 tcp->tcp_ecn_cwr_sent = B_TRUE; 18913 } 18914 } 18915 18916 /* Fill in SACK options */ 18917 if (num_sack_blk > 0) { 18918 uchar_t *wptr = rptr + tcp->tcp_hdr_len; 18919 sack_blk_t *tmp; 18920 int32_t i; 18921 18922 wptr[0] = TCPOPT_NOP; 18923 wptr[1] = TCPOPT_NOP; 18924 wptr[2] = TCPOPT_SACK; 18925 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 18926 sizeof (sack_blk_t); 18927 wptr += TCPOPT_REAL_SACK_LEN; 18928 18929 tmp = tcp->tcp_sack_list; 18930 for (i = 0; i < num_sack_blk; i++) { 18931 U32_TO_BE32(tmp[i].begin, wptr); 18932 wptr += sizeof (tcp_seq); 18933 U32_TO_BE32(tmp[i].end, wptr); 18934 wptr += sizeof (tcp_seq); 18935 } 18936 tcp_h->th_offset_and_rsrvd[0] += 18937 ((num_sack_blk * 2 + 1) << 4); 18938 } 18939 } 18940 18941 /* 18942 * tcp_mdt_add_attrs() is called by tcp_multisend() in order to attach 18943 * the destination address and SAP attribute, and if necessary, the 18944 * hardware checksum offload attribute to a Multidata message. 
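 * Returns 0 on success and -1 if an attribute could not be added, in
 * which case the caller abandons this Multidata and falls back to the
 * legacy send path.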
18945 */ 18946 static int 18947 tcp_mdt_add_attrs(multidata_t *mmd, const mblk_t *dlmp, const boolean_t hwcksum, 18948 const uint32_t start, const uint32_t stuff, const uint32_t end, 18949 const uint32_t flags) 18950 { 18951 /* Add global destination address & SAP attribute */ 18952 if (dlmp == NULL || !ip_md_addr_attr(mmd, NULL, dlmp)) { 18953 ip1dbg(("tcp_mdt_add_attrs: can't add global physical " 18954 "destination address+SAP\n")); 18955 18956 if (dlmp != NULL) 18957 TCP_STAT(tcp_mdt_allocfail); 18958 return (-1); 18959 } 18960 18961 /* Add global hwcksum attribute */ 18962 if (hwcksum && 18963 !ip_md_hcksum_attr(mmd, NULL, start, stuff, end, flags)) { 18964 ip1dbg(("tcp_mdt_add_attrs: can't add global hardware " 18965 "checksum attribute\n")); 18966 18967 TCP_STAT(tcp_mdt_allocfail); 18968 return (-1); 18969 } 18970 18971 return (0); 18972 } 18973 18974 /* 18975 * Smaller and private version of pdescinfo_t used specifically for TCP, 18976 * which allows for only two payload spans per packet. 18977 */ 18978 typedef struct tcp_pdescinfo_s PDESCINFO_STRUCT(2) tcp_pdescinfo_t; 18979 18980 /* 18981 * tcp_multisend() is called by tcp_wput_data() for Multidata Transmit 18982 * scheme, and returns one the following: 18983 * 18984 * -1 = failed allocation. 18985 * 0 = success; burst count reached, or usable send window is too small, 18986 * and that we'd rather wait until later before sending again. 18987 */ 18988 static int 18989 tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len, 18990 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable, 18991 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time, 18992 const int mdt_thres) 18993 { 18994 mblk_t *md_mp_head, *md_mp, *md_pbuf, *md_pbuf_nxt, *md_hbuf; 18995 multidata_t *mmd; 18996 uint_t obsegs, obbytes, hdr_frag_sz; 18997 uint_t cur_hdr_off, cur_pld_off, base_pld_off, first_snxt; 18998 int num_burst_seg, max_pld; 18999 pdesc_t *pkt; 19000 tcp_pdescinfo_t tcp_pkt_info; 19001 pdescinfo_t *pkt_info; 19002 int pbuf_idx, pbuf_idx_nxt; 19003 int seg_len, len, spill, af; 19004 boolean_t add_buffer, zcopy, clusterwide; 19005 boolean_t rconfirm = B_FALSE; 19006 boolean_t done = B_FALSE; 19007 uint32_t cksum; 19008 uint32_t hwcksum_flags; 19009 ire_t *ire; 19010 ill_t *ill; 19011 ipha_t *ipha; 19012 ip6_t *ip6h; 19013 ipaddr_t src, dst; 19014 ill_zerocopy_capab_t *zc_cap = NULL; 19015 uint16_t *up; 19016 int err; 19017 conn_t *connp; 19018 19019 #ifdef _BIG_ENDIAN 19020 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 28) & 0x7) 19021 #else 19022 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 4) & 0x7) 19023 #endif 19024 19025 #define PREP_NEW_MULTIDATA() { \ 19026 mmd = NULL; \ 19027 md_mp = md_hbuf = NULL; \ 19028 cur_hdr_off = 0; \ 19029 max_pld = tcp->tcp_mdt_max_pld; \ 19030 pbuf_idx = pbuf_idx_nxt = -1; \ 19031 add_buffer = B_TRUE; \ 19032 zcopy = B_FALSE; \ 19033 } 19034 19035 #define PREP_NEW_PBUF() { \ 19036 md_pbuf = md_pbuf_nxt = NULL; \ 19037 pbuf_idx = pbuf_idx_nxt = -1; \ 19038 cur_pld_off = 0; \ 19039 first_snxt = *snxt; \ 19040 ASSERT(*tail_unsent > 0); \ 19041 base_pld_off = MBLKL(*xmit_tail) - *tail_unsent; \ 19042 } 19043 19044 ASSERT(mdt_thres >= mss); 19045 ASSERT(*usable > 0 && *usable > mdt_thres); 19046 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED); 19047 ASSERT(!TCP_IS_DETACHED(tcp)); 19048 ASSERT(tcp->tcp_valid_bits == 0 || 19049 tcp->tcp_valid_bits == TCP_FSS_VALID); 19050 ASSERT((tcp->tcp_ipversion == IPV4_VERSION && 19051 tcp->tcp_ip_hdr_len == IP_SIMPLE_HDR_LENGTH) || 19052 
(tcp->tcp_ipversion == IPV6_VERSION && 19053 tcp->tcp_ip_hdr_len == IPV6_HDR_LEN)); 19054 19055 connp = tcp->tcp_connp; 19056 ASSERT(connp != NULL); 19057 ASSERT(CONN_IS_MD_FASTPATH(connp)); 19058 ASSERT(!CONN_IPSEC_OUT_ENCAPSULATED(connp)); 19059 19060 /* 19061 * Note that tcp will only declare at most 2 payload spans per 19062 * packet, which is much lower than the maximum allowable number 19063 * of packet spans per Multidata. For this reason, we use the 19064 * privately declared and smaller descriptor info structure, in 19065 * order to save some stack space. 19066 */ 19067 pkt_info = (pdescinfo_t *)&tcp_pkt_info; 19068 19069 af = (tcp->tcp_ipversion == IPV4_VERSION) ? AF_INET : AF_INET6; 19070 if (af == AF_INET) { 19071 dst = tcp->tcp_ipha->ipha_dst; 19072 src = tcp->tcp_ipha->ipha_src; 19073 ASSERT(!CLASSD(dst)); 19074 } 19075 ASSERT(af == AF_INET || 19076 !IN6_IS_ADDR_MULTICAST(&tcp->tcp_ip6h->ip6_dst)); 19077 19078 obsegs = obbytes = 0; 19079 num_burst_seg = tcp->tcp_snd_burst; 19080 md_mp_head = NULL; 19081 PREP_NEW_MULTIDATA(); 19082 19083 /* 19084 * Before we go on further, make sure there is an IRE that we can 19085 * use, and that the ILL supports MDT. Otherwise, there's no point 19086 * in proceeding any further, and we should just hand everything 19087 * off to the legacy path. 19088 */ 19089 mutex_enter(&connp->conn_lock); 19090 ire = connp->conn_ire_cache; 19091 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 19092 if (ire != NULL && ((af == AF_INET && ire->ire_addr == dst) || 19093 (af == AF_INET6 && IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, 19094 &tcp->tcp_ip6h->ip6_dst))) && 19095 !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 19096 IRE_REFHOLD(ire); 19097 mutex_exit(&connp->conn_lock); 19098 } else { 19099 boolean_t cached = B_FALSE; 19100 ts_label_t *tsl; 19101 19102 /* force a recheck later on */ 19103 tcp->tcp_ire_ill_check_done = B_FALSE; 19104 19105 TCP_DBGSTAT(tcp_ire_null1); 19106 connp->conn_ire_cache = NULL; 19107 mutex_exit(&connp->conn_lock); 19108 19109 /* Release the old ire */ 19110 if (ire != NULL) 19111 IRE_REFRELE_NOTR(ire); 19112 19113 tsl = crgetlabel(CONN_CRED(connp)); 19114 ire = (af == AF_INET) ? 19115 ire_cache_lookup(dst, connp->conn_zoneid, tsl) : 19116 ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst, 19117 connp->conn_zoneid, tsl); 19118 19119 if (ire == NULL) { 19120 TCP_STAT(tcp_ire_null); 19121 goto legacy_send_no_md; 19122 } 19123 19124 IRE_REFHOLD_NOTR(ire); 19125 /* 19126 * Since we are inside the squeue, there cannot be another 19127 * thread in TCP trying to set the conn_ire_cache now. The 19128 * check for IRE_MARK_CONDEMNED ensures that an interface 19129 * unplumb thread has not yet started cleaning up the conns. 19130 * Hence we don't need to grab the conn lock. 19131 */ 19132 if (!(connp->conn_state_flags & CONN_CLOSING)) { 19133 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 19134 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 19135 connp->conn_ire_cache = ire; 19136 cached = B_TRUE; 19137 } 19138 rw_exit(&ire->ire_bucket->irb_lock); 19139 } 19140 19141 /* 19142 * We can continue to use the ire but since it was not 19143 * cached, we should drop the extra reference. 
19144 */ 19145 if (!cached) 19146 IRE_REFRELE_NOTR(ire); 19147 } 19148 19149 ASSERT(ire != NULL); 19150 ASSERT(af != AF_INET || ire->ire_ipversion == IPV4_VERSION); 19151 ASSERT(af == AF_INET || !IN6_IS_ADDR_V4MAPPED(&(ire->ire_addr_v6))); 19152 ASSERT(af == AF_INET || ire->ire_nce != NULL); 19153 ASSERT(!(ire->ire_type & IRE_BROADCAST)); 19154 /* 19155 * If we do support loopback for MDT (which requires modifications 19156 * to the receiving paths), the following assertions should go away, 19157 * and we would be sending the Multidata to loopback conn later on. 19158 */ 19159 ASSERT(!IRE_IS_LOCAL(ire)); 19160 ASSERT(ire->ire_stq != NULL); 19161 19162 ill = ire_to_ill(ire); 19163 ASSERT(ill != NULL); 19164 ASSERT(!ILL_MDT_CAPABLE(ill) || ill->ill_mdt_capab != NULL); 19165 19166 if (!tcp->tcp_ire_ill_check_done) { 19167 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 19168 tcp->tcp_ire_ill_check_done = B_TRUE; 19169 } 19170 19171 /* 19172 * If the underlying interface conditions have changed, or if the 19173 * new interface does not support MDT, go back to legacy path. 19174 */ 19175 if (!ILL_MDT_USABLE(ill) || (ire->ire_flags & RTF_MULTIRT) != 0) { 19176 /* don't go through this path anymore for this connection */ 19177 TCP_STAT(tcp_mdt_conn_halted2); 19178 tcp->tcp_mdt = B_FALSE; 19179 ip1dbg(("tcp_multisend: disabling MDT for connp %p on " 19180 "interface %s\n", (void *)connp, ill->ill_name)); 19181 /* IRE will be released prior to returning */ 19182 goto legacy_send_no_md; 19183 } 19184 19185 if (ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) 19186 zc_cap = ill->ill_zerocopy_capab; 19187 19188 /* go to legacy path if interface doesn't support zerocopy */ 19189 if (tcp->tcp_snd_zcopy_aware && do_tcpzcopy != 2 && 19190 (zc_cap == NULL || zc_cap->ill_zerocopy_flags == 0)) { 19191 /* IRE will be released prior to returning */ 19192 goto legacy_send_no_md; 19193 } 19194 19195 /* does the interface support hardware checksum offload? */ 19196 hwcksum_flags = 0; 19197 if (ILL_HCKSUM_CAPABLE(ill) && 19198 (ill->ill_hcksum_capab->ill_hcksum_txflags & 19199 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 | HCKSUM_INET_PARTIAL | 19200 HCKSUM_IPHDRCKSUM)) && dohwcksum) { 19201 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19202 HCKSUM_IPHDRCKSUM) 19203 hwcksum_flags = HCK_IPV4_HDRCKSUM; 19204 19205 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19206 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6)) 19207 hwcksum_flags |= HCK_FULLCKSUM; 19208 else if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19209 HCKSUM_INET_PARTIAL) 19210 hwcksum_flags |= HCK_PARTIALCKSUM; 19211 } 19212 19213 /* 19214 * Each header fragment consists of the leading extra space, 19215 * followed by the TCP/IP header, and the trailing extra space. 19216 * We make sure that each header fragment begins on a 32-bit 19217 * aligned memory address (tcp_mdt_hdr_head is already 32-bit 19218 * aligned in tcp_mdt_update). 19219 */ 19220 hdr_frag_sz = roundup((tcp->tcp_mdt_hdr_head + tcp_hdr_len + 19221 tcp->tcp_mdt_hdr_tail), 4); 19222 19223 /* are we starting from the beginning of data block? */ 19224 if (*tail_unsent == 0) { 19225 *xmit_tail = (*xmit_tail)->b_cont; 19226 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= (uintptr_t)INT_MAX); 19227 *tail_unsent = (int)MBLKL(*xmit_tail); 19228 } 19229 19230 /* 19231 * Here we create one or more Multidata messages, each made up of 19232 * one header buffer and up to N payload buffers. 
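 * (N is capped by max_pld, which starts out as tcp_mdt_max_pld.)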
This entire 19233 * operation is done within two loops: 19234 * 19235 * The outer loop mostly deals with creating the Multidata message, 19236 * as well as the header buffer that gets added to it. It also 19237 * links the Multidata messages together such that all of them can 19238 * be sent down to the lower layer in a single putnext call; this 19239 * linking behavior depends on the tcp_mdt_chain tunable. 19240 * 19241 * The inner loop takes an existing Multidata message, and adds 19242 * one or more (up to tcp_mdt_max_pld) payload buffers to it. It 19243 * packetizes those buffers by filling up the corresponding header 19244 * buffer fragments with the proper IP and TCP headers, and by 19245 * describing the layout of each packet in the packet descriptors 19246 * that get added to the Multidata. 19247 */ 19248 do { 19249 /* 19250 * If usable send window is too small, or data blocks in 19251 * transmit list are smaller than our threshold (i.e. app 19252 * performs large writes followed by small ones), we hand 19253 * off the control over to the legacy path. Note that we'll 19254 * get back the control once it encounters a large block. 19255 */ 19256 if (*usable < mss || (*tail_unsent <= mdt_thres && 19257 (*xmit_tail)->b_cont != NULL && 19258 MBLKL((*xmit_tail)->b_cont) <= mdt_thres)) { 19259 /* send down what we've got so far */ 19260 if (md_mp_head != NULL) { 19261 tcp_multisend_data(tcp, ire, ill, md_mp_head, 19262 obsegs, obbytes, &rconfirm); 19263 } 19264 /* 19265 * Pass control over to tcp_send(), but tell it to 19266 * return to us once a large-size transmission is 19267 * possible. 19268 */ 19269 TCP_STAT(tcp_mdt_legacy_small); 19270 if ((err = tcp_send(q, tcp, mss, tcp_hdr_len, 19271 tcp_tcp_hdr_len, num_sack_blk, usable, snxt, 19272 tail_unsent, xmit_tail, local_time, 19273 mdt_thres)) <= 0) { 19274 /* burst count reached, or alloc failed */ 19275 IRE_REFRELE(ire); 19276 return (err); 19277 } 19278 19279 /* tcp_send() may have sent everything, so check */ 19280 if (*usable <= 0) { 19281 IRE_REFRELE(ire); 19282 return (0); 19283 } 19284 19285 TCP_STAT(tcp_mdt_legacy_ret); 19286 /* 19287 * We may have delivered the Multidata, so make sure 19288 * to re-initialize before the next round. 19289 */ 19290 md_mp_head = NULL; 19291 obsegs = obbytes = 0; 19292 num_burst_seg = tcp->tcp_snd_burst; 19293 PREP_NEW_MULTIDATA(); 19294 19295 /* are we starting from the beginning of data block? */ 19296 if (*tail_unsent == 0) { 19297 *xmit_tail = (*xmit_tail)->b_cont; 19298 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 19299 (uintptr_t)INT_MAX); 19300 *tail_unsent = (int)MBLKL(*xmit_tail); 19301 } 19302 } 19303 19304 /* 19305 * max_pld limits the number of mblks in tcp's transmit 19306 * queue that can be added to a Multidata message. Once 19307 * this counter reaches zero, no more additional mblks 19308 * can be added to it. What happens afterwards depends 19309 * on whether or not we are set to chain the Multidata 19310 * messages. If we are to link them together, reset 19311 * max_pld to its original value (tcp_mdt_max_pld) and 19312 * prepare to create a new Multidata message which will 19313 * get linked to md_mp_head. Else, leave it alone and 19314 * let the inner loop break on its own. 
19315 */ 19316 if (tcp_mdt_chain && max_pld == 0) 19317 PREP_NEW_MULTIDATA(); 19318 19319 /* adding a payload buffer; re-initialize values */ 19320 if (add_buffer) 19321 PREP_NEW_PBUF(); 19322 19323 /* 19324 * If we don't have a Multidata, either because we just 19325 * (re)entered this outer loop, or after we branched off 19326 * to tcp_send above, setup the Multidata and header 19327 * buffer to be used. 19328 */ 19329 if (md_mp == NULL) { 19330 int md_hbuflen; 19331 uint32_t start, stuff; 19332 19333 /* 19334 * Calculate Multidata header buffer size large enough 19335 * to hold all of the headers that can possibly be 19336 * sent at this moment. We'd rather over-estimate 19337 * the size than running out of space; this is okay 19338 * since this buffer is small anyway. 19339 */ 19340 md_hbuflen = (howmany(*usable, mss) + 1) * hdr_frag_sz; 19341 19342 /* 19343 * Start and stuff offset for partial hardware 19344 * checksum offload; these are currently for IPv4. 19345 * For full checksum offload, they are set to zero. 19346 */ 19347 if ((hwcksum_flags & HCK_PARTIALCKSUM)) { 19348 if (af == AF_INET) { 19349 start = IP_SIMPLE_HDR_LENGTH; 19350 stuff = IP_SIMPLE_HDR_LENGTH + 19351 TCP_CHECKSUM_OFFSET; 19352 } else { 19353 start = IPV6_HDR_LEN; 19354 stuff = IPV6_HDR_LEN + 19355 TCP_CHECKSUM_OFFSET; 19356 } 19357 } else { 19358 start = stuff = 0; 19359 } 19360 19361 /* 19362 * Create the header buffer, Multidata, as well as 19363 * any necessary attributes (destination address, 19364 * SAP and hardware checksum offload) that should 19365 * be associated with the Multidata message. 19366 */ 19367 ASSERT(cur_hdr_off == 0); 19368 if ((md_hbuf = allocb(md_hbuflen, BPRI_HI)) == NULL || 19369 ((md_hbuf->b_wptr += md_hbuflen), 19370 (mmd = mmd_alloc(md_hbuf, &md_mp, 19371 KM_NOSLEEP)) == NULL) || (tcp_mdt_add_attrs(mmd, 19372 /* fastpath mblk */ 19373 (af == AF_INET) ? ire->ire_dlureq_mp : 19374 ire->ire_nce->nce_res_mp, 19375 /* hardware checksum enabled */ 19376 (hwcksum_flags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)), 19377 /* hardware checksum offsets */ 19378 start, stuff, 0, 19379 /* hardware checksum flag */ 19380 hwcksum_flags) != 0)) { 19381 legacy_send: 19382 if (md_mp != NULL) { 19383 /* Unlink message from the chain */ 19384 if (md_mp_head != NULL) { 19385 err = (intptr_t)rmvb(md_mp_head, 19386 md_mp); 19387 /* 19388 * We can't assert that rmvb 19389 * did not return -1, since we 19390 * may get here before linkb 19391 * happens. We do, however, 19392 * check if we just removed the 19393 * only element in the list. 19394 */ 19395 if (err == 0) 19396 md_mp_head = NULL; 19397 } 19398 /* md_hbuf gets freed automatically */ 19399 TCP_STAT(tcp_mdt_discarded); 19400 freeb(md_mp); 19401 } else { 19402 /* Either allocb or mmd_alloc failed */ 19403 TCP_STAT(tcp_mdt_allocfail); 19404 if (md_hbuf != NULL) 19405 freeb(md_hbuf); 19406 } 19407 19408 /* send down what we've got so far */ 19409 if (md_mp_head != NULL) { 19410 tcp_multisend_data(tcp, ire, ill, 19411 md_mp_head, obsegs, obbytes, 19412 &rconfirm); 19413 } 19414 legacy_send_no_md: 19415 if (ire != NULL) 19416 IRE_REFRELE(ire); 19417 /* 19418 * Too bad; let the legacy path handle this. 19419 * We specify INT_MAX for the threshold, since 19420 * we gave up with the Multidata processings 19421 * and let the old path have it all. 
19422 */ 19423 TCP_STAT(tcp_mdt_legacy_all); 19424 return (tcp_send(q, tcp, mss, tcp_hdr_len, 19425 tcp_tcp_hdr_len, num_sack_blk, usable, 19426 snxt, tail_unsent, xmit_tail, local_time, 19427 INT_MAX)); 19428 } 19429 19430 /* link to any existing ones, if applicable */ 19431 TCP_STAT(tcp_mdt_allocd); 19432 if (md_mp_head == NULL) { 19433 md_mp_head = md_mp; 19434 } else if (tcp_mdt_chain) { 19435 TCP_STAT(tcp_mdt_linked); 19436 linkb(md_mp_head, md_mp); 19437 } 19438 } 19439 19440 ASSERT(md_mp_head != NULL); 19441 ASSERT(tcp_mdt_chain || md_mp_head->b_cont == NULL); 19442 ASSERT(md_mp != NULL && mmd != NULL); 19443 ASSERT(md_hbuf != NULL); 19444 19445 /* 19446 * Packetize the transmittable portion of the data block; 19447 * each data block is essentially added to the Multidata 19448 * as a payload buffer. We also deal with adding more 19449 * than one payload buffers, which happens when the remaining 19450 * packetized portion of the current payload buffer is less 19451 * than MSS, while the next data block in transmit queue 19452 * has enough data to make up for one. This "spillover" 19453 * case essentially creates a split-packet, where portions 19454 * of the packet's payload fragments may span across two 19455 * virtually discontiguous address blocks. 19456 */ 19457 seg_len = mss; 19458 do { 19459 len = seg_len; 19460 19461 ASSERT(len > 0); 19462 ASSERT(max_pld >= 0); 19463 ASSERT(!add_buffer || cur_pld_off == 0); 19464 19465 /* 19466 * First time around for this payload buffer; note 19467 * in the case of a spillover, the following has 19468 * been done prior to adding the split-packet 19469 * descriptor to Multidata, and we don't want to 19470 * repeat the process. 19471 */ 19472 if (add_buffer) { 19473 ASSERT(mmd != NULL); 19474 ASSERT(md_pbuf == NULL); 19475 ASSERT(md_pbuf_nxt == NULL); 19476 ASSERT(pbuf_idx == -1 && pbuf_idx_nxt == -1); 19477 19478 /* 19479 * Have we reached the limit? We'd get to 19480 * this case when we're not chaining the 19481 * Multidata messages together, and since 19482 * we're done, terminate this loop. 19483 */ 19484 if (max_pld == 0) 19485 break; /* done */ 19486 19487 if ((md_pbuf = dupb(*xmit_tail)) == NULL) { 19488 TCP_STAT(tcp_mdt_allocfail); 19489 goto legacy_send; /* out_of_mem */ 19490 } 19491 19492 if (IS_VMLOANED_MBLK(md_pbuf) && !zcopy && 19493 zc_cap != NULL) { 19494 if (!ip_md_zcopy_attr(mmd, NULL, 19495 zc_cap->ill_zerocopy_flags)) { 19496 freeb(md_pbuf); 19497 TCP_STAT(tcp_mdt_allocfail); 19498 /* out_of_mem */ 19499 goto legacy_send; 19500 } 19501 zcopy = B_TRUE; 19502 } 19503 19504 md_pbuf->b_rptr += base_pld_off; 19505 19506 /* 19507 * Add a payload buffer to the Multidata; this 19508 * operation must not fail, or otherwise our 19509 * logic in this routine is broken. There 19510 * is no memory allocation done by the 19511 * routine, so any returned failure simply 19512 * tells us that we've done something wrong. 19513 * 19514 * A failure tells us that either we're adding 19515 * the same payload buffer more than once, or 19516 * we're trying to add more buffers than 19517 * allowed (max_pld calculation is wrong). 19518 * None of the above cases should happen, and 19519 * we panic because either there's horrible 19520 * heap corruption, and/or programming mistake. 
19521 */ 19522 pbuf_idx = mmd_addpldbuf(mmd, md_pbuf); 19523 if (pbuf_idx < 0) { 19524 cmn_err(CE_PANIC, "tcp_multisend: " 19525 "payload buffer logic error " 19526 "detected for tcp %p mmd %p " 19527 "pbuf %p (%d)\n", 19528 (void *)tcp, (void *)mmd, 19529 (void *)md_pbuf, pbuf_idx); 19530 } 19531 19532 ASSERT(max_pld > 0); 19533 --max_pld; 19534 add_buffer = B_FALSE; 19535 } 19536 19537 ASSERT(md_mp_head != NULL); 19538 ASSERT(md_pbuf != NULL); 19539 ASSERT(md_pbuf_nxt == NULL); 19540 ASSERT(pbuf_idx != -1); 19541 ASSERT(pbuf_idx_nxt == -1); 19542 ASSERT(*usable > 0); 19543 19544 /* 19545 * We spillover to the next payload buffer only 19546 * if all of the following is true: 19547 * 19548 * 1. There is not enough data on the current 19549 * payload buffer to make up `len', 19550 * 2. We are allowed to send `len', 19551 * 3. The next payload buffer length is large 19552 * enough to accomodate `spill'. 19553 */ 19554 if ((spill = len - *tail_unsent) > 0 && 19555 *usable >= len && 19556 MBLKL((*xmit_tail)->b_cont) >= spill && 19557 max_pld > 0) { 19558 md_pbuf_nxt = dupb((*xmit_tail)->b_cont); 19559 if (md_pbuf_nxt == NULL) { 19560 TCP_STAT(tcp_mdt_allocfail); 19561 goto legacy_send; /* out_of_mem */ 19562 } 19563 19564 if (IS_VMLOANED_MBLK(md_pbuf_nxt) && !zcopy && 19565 zc_cap != NULL) { 19566 if (!ip_md_zcopy_attr(mmd, NULL, 19567 zc_cap->ill_zerocopy_flags)) { 19568 freeb(md_pbuf_nxt); 19569 TCP_STAT(tcp_mdt_allocfail); 19570 /* out_of_mem */ 19571 goto legacy_send; 19572 } 19573 zcopy = B_TRUE; 19574 } 19575 19576 /* 19577 * See comments above on the first call to 19578 * mmd_addpldbuf for explanation on the panic. 19579 */ 19580 pbuf_idx_nxt = mmd_addpldbuf(mmd, md_pbuf_nxt); 19581 if (pbuf_idx_nxt < 0) { 19582 panic("tcp_multisend: " 19583 "next payload buffer logic error " 19584 "detected for tcp %p mmd %p " 19585 "pbuf %p (%d)\n", 19586 (void *)tcp, (void *)mmd, 19587 (void *)md_pbuf_nxt, pbuf_idx_nxt); 19588 } 19589 19590 ASSERT(max_pld > 0); 19591 --max_pld; 19592 } else if (spill > 0) { 19593 /* 19594 * If there's a spillover, but the following 19595 * xmit_tail couldn't give us enough octets 19596 * to reach "len", then stop the current 19597 * Multidata creation and let the legacy 19598 * tcp_send() path take over. We don't want 19599 * to send the tiny segment as part of this 19600 * Multidata for performance reasons; instead, 19601 * we let the legacy path deal with grouping 19602 * it with the subsequent small mblks. 19603 */ 19604 if (*usable >= len && 19605 MBLKL((*xmit_tail)->b_cont) < spill) { 19606 max_pld = 0; 19607 break; /* done */ 19608 } 19609 19610 /* 19611 * We can't spillover, and we are near 19612 * the end of the current payload buffer, 19613 * so send what's left. 19614 */ 19615 ASSERT(*tail_unsent > 0); 19616 len = *tail_unsent; 19617 } 19618 19619 /* tail_unsent is negated if there is a spillover */ 19620 *tail_unsent -= len; 19621 *usable -= len; 19622 ASSERT(*usable >= 0); 19623 19624 if (*usable < mss) 19625 seg_len = *usable; 19626 /* 19627 * Sender SWS avoidance; see comments in tcp_send(); 19628 * everything else is the same, except that we only 19629 * do this here if there is no more data to be sent 19630 * following the current xmit_tail. We don't check 19631 * for 1-byte urgent data because we shouldn't get 19632 * here if TCP_URG_VALID is set. 
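 * In other words, we hold back only while the would-be segment is
 * smaller than half of the largest window the peer has ever offered
 * and more data than we are currently allowed to send is still queued.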
19633 */ 19634 if (*usable > 0 && *usable < mss && 19635 ((md_pbuf_nxt == NULL && 19636 (*xmit_tail)->b_cont == NULL) || 19637 (md_pbuf_nxt != NULL && 19638 (*xmit_tail)->b_cont->b_cont == NULL)) && 19639 seg_len < (tcp->tcp_max_swnd >> 1) && 19640 (tcp->tcp_unsent - 19641 ((*snxt + len) - tcp->tcp_snxt)) > seg_len && 19642 !tcp->tcp_zero_win_probe) { 19643 if ((*snxt + len) == tcp->tcp_snxt && 19644 (*snxt + len) == tcp->tcp_suna) { 19645 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19646 } 19647 done = B_TRUE; 19648 } 19649 19650 /* 19651 * Prime pump for IP's checksumming on our behalf; 19652 * include the adjustment for a source route if any. 19653 * Do this only for software/partial hardware checksum 19654 * offload, as this field gets zeroed out later for 19655 * the full hardware checksum offload case. 19656 */ 19657 if (!(hwcksum_flags & HCK_FULLCKSUM)) { 19658 cksum = len + tcp_tcp_hdr_len + tcp->tcp_sum; 19659 cksum = (cksum >> 16) + (cksum & 0xFFFF); 19660 U16_TO_ABE16(cksum, tcp->tcp_tcph->th_sum); 19661 } 19662 19663 U32_TO_ABE32(*snxt, tcp->tcp_tcph->th_seq); 19664 *snxt += len; 19665 19666 tcp->tcp_tcph->th_flags[0] = TH_ACK; 19667 /* 19668 * We set the PUSH bit only if TCP has no more buffered 19669 * data to be transmitted (or if sender SWS avoidance 19670 * takes place), as opposed to setting it for every 19671 * last packet in the burst. 19672 */ 19673 if (done || 19674 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) == 0) 19675 tcp->tcp_tcph->th_flags[0] |= TH_PUSH; 19676 19677 /* 19678 * Set FIN bit if this is our last segment; snxt 19679 * already includes its length, and it will not 19680 * be adjusted after this point. 19681 */ 19682 if (tcp->tcp_valid_bits == TCP_FSS_VALID && 19683 *snxt == tcp->tcp_fss) { 19684 if (!tcp->tcp_fin_acked) { 19685 tcp->tcp_tcph->th_flags[0] |= TH_FIN; 19686 BUMP_MIB(&tcp_mib, tcpOutControl); 19687 } 19688 if (!tcp->tcp_fin_sent) { 19689 tcp->tcp_fin_sent = B_TRUE; 19690 /* 19691 * tcp state must be ESTABLISHED 19692 * in order for us to get here in 19693 * the first place. 19694 */ 19695 tcp->tcp_state = TCPS_FIN_WAIT_1; 19696 19697 /* 19698 * Upon returning from this routine, 19699 * tcp_wput_data() will set tcp_snxt 19700 * to be equal to snxt + tcp_fin_sent. 19701 * This is essentially the same as 19702 * setting it to tcp_fss + 1. 
19703 */ 19704 } 19705 } 19706 19707 tcp->tcp_last_sent_len = (ushort_t)len; 19708 19709 len += tcp_hdr_len; 19710 if (tcp->tcp_ipversion == IPV4_VERSION) 19711 tcp->tcp_ipha->ipha_length = htons(len); 19712 else 19713 tcp->tcp_ip6h->ip6_plen = htons(len - 19714 ((char *)&tcp->tcp_ip6h[1] - 19715 tcp->tcp_iphc)); 19716 19717 pkt_info->flags = (PDESC_HBUF_REF | PDESC_PBUF_REF); 19718 19719 /* setup header fragment */ 19720 PDESC_HDR_ADD(pkt_info, 19721 md_hbuf->b_rptr + cur_hdr_off, /* base */ 19722 tcp->tcp_mdt_hdr_head, /* head room */ 19723 tcp_hdr_len, /* len */ 19724 tcp->tcp_mdt_hdr_tail); /* tail room */ 19725 19726 ASSERT(pkt_info->hdr_lim - pkt_info->hdr_base == 19727 hdr_frag_sz); 19728 ASSERT(MBLKIN(md_hbuf, 19729 (pkt_info->hdr_base - md_hbuf->b_rptr), 19730 PDESC_HDRSIZE(pkt_info))); 19731 19732 /* setup first payload fragment */ 19733 PDESC_PLD_INIT(pkt_info); 19734 PDESC_PLD_SPAN_ADD(pkt_info, 19735 pbuf_idx, /* index */ 19736 md_pbuf->b_rptr + cur_pld_off, /* start */ 19737 tcp->tcp_last_sent_len); /* len */ 19738 19739 /* create a split-packet in case of a spillover */ 19740 if (md_pbuf_nxt != NULL) { 19741 ASSERT(spill > 0); 19742 ASSERT(pbuf_idx_nxt > pbuf_idx); 19743 ASSERT(!add_buffer); 19744 19745 md_pbuf = md_pbuf_nxt; 19746 md_pbuf_nxt = NULL; 19747 pbuf_idx = pbuf_idx_nxt; 19748 pbuf_idx_nxt = -1; 19749 cur_pld_off = spill; 19750 19751 /* trim out first payload fragment */ 19752 PDESC_PLD_SPAN_TRIM(pkt_info, 0, spill); 19753 19754 /* setup second payload fragment */ 19755 PDESC_PLD_SPAN_ADD(pkt_info, 19756 pbuf_idx, /* index */ 19757 md_pbuf->b_rptr, /* start */ 19758 spill); /* len */ 19759 19760 if ((*xmit_tail)->b_next == NULL) { 19761 /* 19762 * Store the lbolt used for RTT 19763 * estimation. We can only record one 19764 * timestamp per mblk so we do it when 19765 * we reach the end of the payload 19766 * buffer. Also we only take a new 19767 * timestamp sample when the previous 19768 * timed data from the same mblk has 19769 * been ack'ed. 19770 */ 19771 (*xmit_tail)->b_prev = local_time; 19772 (*xmit_tail)->b_next = 19773 (mblk_t *)(uintptr_t)first_snxt; 19774 } 19775 19776 first_snxt = *snxt - spill; 19777 19778 /* 19779 * Advance xmit_tail; usable could be 0 by 19780 * the time we got here, but we made sure 19781 * above that we would only spillover to 19782 * the next data block if usable includes 19783 * the spilled-over amount prior to the 19784 * subtraction. Therefore, we are sure 19785 * that xmit_tail->b_cont can't be NULL. 19786 */ 19787 ASSERT((*xmit_tail)->b_cont != NULL); 19788 *xmit_tail = (*xmit_tail)->b_cont; 19789 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 19790 (uintptr_t)INT_MAX); 19791 *tail_unsent = (int)MBLKL(*xmit_tail) - spill; 19792 } else { 19793 cur_pld_off += tcp->tcp_last_sent_len; 19794 } 19795 19796 /* 19797 * Fill in the header using the template header, and 19798 * add options such as time-stamp, ECN and/or SACK, 19799 * as needed. 19800 */ 19801 tcp_fill_header(tcp, pkt_info->hdr_rptr, 19802 (clock_t)local_time, num_sack_blk); 19803 19804 /* take care of some IP header businesses */ 19805 if (af == AF_INET) { 19806 ipha = (ipha_t *)pkt_info->hdr_rptr; 19807 19808 ASSERT(OK_32PTR((uchar_t *)ipha)); 19809 ASSERT(PDESC_HDRL(pkt_info) >= 19810 IP_SIMPLE_HDR_LENGTH); 19811 ASSERT(ipha->ipha_version_and_hdr_length == 19812 IP_SIMPLE_HDR_VERSION); 19813 19814 /* 19815 * Assign ident value for current packet; see 19816 * related comments in ip_wput_ire() about the 19817 * contract private interface with clustering 19818 * group. 
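 * When no cluster hook is registered, the ident simply comes from the
 * per-ire counter (atomic_add_32_nv on ire_ident), byte-swapped on
 * little-endian hosts just as in tcp_send_data() above.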
19819 */ 19820 clusterwide = B_FALSE; 19821 if (cl_inet_ipident != NULL) { 19822 ASSERT(cl_inet_isclusterwide != NULL); 19823 if ((*cl_inet_isclusterwide)(IPPROTO_IP, 19824 AF_INET, 19825 (uint8_t *)(uintptr_t)src)) { 19826 ipha->ipha_ident = 19827 (*cl_inet_ipident) 19828 (IPPROTO_IP, AF_INET, 19829 (uint8_t *)(uintptr_t)src, 19830 (uint8_t *)(uintptr_t)dst); 19831 clusterwide = B_TRUE; 19832 } 19833 } 19834 19835 if (!clusterwide) { 19836 ipha->ipha_ident = (uint16_t) 19837 atomic_add_32_nv( 19838 &ire->ire_ident, 1); 19839 } 19840 #ifndef _BIG_ENDIAN 19841 ipha->ipha_ident = (ipha->ipha_ident << 8) | 19842 (ipha->ipha_ident >> 8); 19843 #endif 19844 } else { 19845 ip6h = (ip6_t *)pkt_info->hdr_rptr; 19846 19847 ASSERT(OK_32PTR((uchar_t *)ip6h)); 19848 ASSERT(IPVER(ip6h) == IPV6_VERSION); 19849 ASSERT(ip6h->ip6_nxt == IPPROTO_TCP); 19850 ASSERT(PDESC_HDRL(pkt_info) >= 19851 (IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET + 19852 TCP_CHECKSUM_SIZE)); 19853 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 19854 19855 if (tcp->tcp_ip_forward_progress) { 19856 rconfirm = B_TRUE; 19857 tcp->tcp_ip_forward_progress = B_FALSE; 19858 } 19859 } 19860 19861 /* at least one payload span, and at most two */ 19862 ASSERT(pkt_info->pld_cnt > 0 && pkt_info->pld_cnt < 3); 19863 19864 /* add the packet descriptor to Multidata */ 19865 if ((pkt = mmd_addpdesc(mmd, pkt_info, &err, 19866 KM_NOSLEEP)) == NULL) { 19867 /* 19868 * Any failure other than ENOMEM indicates 19869 * that we have passed in invalid pkt_info 19870 * or parameters to mmd_addpdesc, which must 19871 * not happen. 19872 * 19873 * EINVAL is a result of failure on boundary 19874 * checks against the pkt_info contents. It 19875 * should not happen, and we panic because 19876 * either there's horrible heap corruption, 19877 * and/or programming mistake. 
19878 */ 19879 if (err != ENOMEM) { 19880 cmn_err(CE_PANIC, "tcp_multisend: " 19881 "pdesc logic error detected for " 19882 "tcp %p mmd %p pinfo %p (%d)\n", 19883 (void *)tcp, (void *)mmd, 19884 (void *)pkt_info, err); 19885 } 19886 TCP_STAT(tcp_mdt_addpdescfail); 19887 goto legacy_send; /* out_of_mem */ 19888 } 19889 ASSERT(pkt != NULL); 19890 19891 /* calculate IP header and TCP checksums */ 19892 if (af == AF_INET) { 19893 /* calculate pseudo-header checksum */ 19894 cksum = (dst >> 16) + (dst & 0xFFFF) + 19895 (src >> 16) + (src & 0xFFFF); 19896 19897 /* offset for TCP header checksum */ 19898 up = IPH_TCPH_CHECKSUMP(ipha, 19899 IP_SIMPLE_HDR_LENGTH); 19900 } else { 19901 up = (uint16_t *)&ip6h->ip6_src; 19902 19903 /* calculate pseudo-header checksum */ 19904 cksum = up[0] + up[1] + up[2] + up[3] + 19905 up[4] + up[5] + up[6] + up[7] + 19906 up[8] + up[9] + up[10] + up[11] + 19907 up[12] + up[13] + up[14] + up[15]; 19908 19909 /* Fold the initial sum */ 19910 cksum = (cksum & 0xffff) + (cksum >> 16); 19911 19912 up = (uint16_t *)(((uchar_t *)ip6h) + 19913 IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET); 19914 } 19915 19916 if (hwcksum_flags & HCK_FULLCKSUM) { 19917 /* clear checksum field for hardware */ 19918 *up = 0; 19919 } else if (hwcksum_flags & HCK_PARTIALCKSUM) { 19920 uint32_t sum; 19921 19922 /* pseudo-header checksumming */ 19923 sum = *up + cksum + IP_TCP_CSUM_COMP; 19924 sum = (sum & 0xFFFF) + (sum >> 16); 19925 *up = (sum & 0xFFFF) + (sum >> 16); 19926 } else { 19927 /* software checksumming */ 19928 TCP_STAT(tcp_out_sw_cksum); 19929 TCP_STAT_UPDATE(tcp_out_sw_cksum_bytes, 19930 tcp->tcp_hdr_len + tcp->tcp_last_sent_len); 19931 *up = IP_MD_CSUM(pkt, tcp->tcp_ip_hdr_len, 19932 cksum + IP_TCP_CSUM_COMP); 19933 if (*up == 0) 19934 *up = 0xFFFF; 19935 } 19936 19937 /* IPv4 header checksum */ 19938 if (af == AF_INET) { 19939 ipha->ipha_fragment_offset_and_flags |= 19940 (uint32_t)htons(ire->ire_frag_flag); 19941 19942 if (hwcksum_flags & HCK_IPV4_HDRCKSUM) { 19943 ipha->ipha_hdr_checksum = 0; 19944 } else { 19945 IP_HDR_CKSUM(ipha, cksum, 19946 ((uint32_t *)ipha)[0], 19947 ((uint16_t *)ipha)[4]); 19948 } 19949 } 19950 19951 /* advance header offset */ 19952 cur_hdr_off += hdr_frag_sz; 19953 19954 obbytes += tcp->tcp_last_sent_len; 19955 ++obsegs; 19956 } while (!done && *usable > 0 && --num_burst_seg > 0 && 19957 *tail_unsent > 0); 19958 19959 if ((*xmit_tail)->b_next == NULL) { 19960 /* 19961 * Store the lbolt used for RTT estimation. We can only 19962 * record one timestamp per mblk so we do it when we 19963 * reach the end of the payload buffer. Also we only 19964 * take a new timestamp sample when the previous timed 19965 * data from the same mblk has been ack'ed. 19966 */ 19967 (*xmit_tail)->b_prev = local_time; 19968 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)first_snxt; 19969 } 19970 19971 ASSERT(*tail_unsent >= 0); 19972 if (*tail_unsent > 0) { 19973 /* 19974 * We got here because we broke out of the above 19975 * loop due to of one of the following cases: 19976 * 19977 * 1. len < adjusted MSS (i.e. small), 19978 * 2. Sender SWS avoidance, 19979 * 3. max_pld is zero. 19980 * 19981 * We are done for this Multidata, so trim our 19982 * last payload buffer (if any) accordingly. 
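 * (md_pbuf is a dupb() of xmit_tail, so trimming its b_wptr only
 * changes what this Multidata describes; the mblk that remains on the
 * transmit list is untouched.)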
19983 */ 19984 if (md_pbuf != NULL) 19985 md_pbuf->b_wptr -= *tail_unsent; 19986 } else if (*usable > 0) { 19987 *xmit_tail = (*xmit_tail)->b_cont; 19988 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 19989 (uintptr_t)INT_MAX); 19990 *tail_unsent = (int)MBLKL(*xmit_tail); 19991 add_buffer = B_TRUE; 19992 } 19993 } while (!done && *usable > 0 && num_burst_seg > 0 && 19994 (tcp_mdt_chain || max_pld > 0)); 19995 19996 /* send everything down */ 19997 tcp_multisend_data(tcp, ire, ill, md_mp_head, obsegs, obbytes, 19998 &rconfirm); 19999 20000 #undef PREP_NEW_MULTIDATA 20001 #undef PREP_NEW_PBUF 20002 #undef IPVER 20003 20004 IRE_REFRELE(ire); 20005 return (0); 20006 } 20007 20008 /* 20009 * A wrapper function for sending one or more Multidata messages down to 20010 * the module below ip; this routine does not release the reference of the 20011 * IRE (caller does that). This routine is analogous to tcp_send_data(). 20012 */ 20013 static void 20014 tcp_multisend_data(tcp_t *tcp, ire_t *ire, const ill_t *ill, mblk_t *md_mp_head, 20015 const uint_t obsegs, const uint_t obbytes, boolean_t *rconfirm) 20016 { 20017 uint64_t delta; 20018 nce_t *nce; 20019 20020 ASSERT(ire != NULL && ill != NULL); 20021 ASSERT(ire->ire_stq != NULL); 20022 ASSERT(md_mp_head != NULL); 20023 ASSERT(rconfirm != NULL); 20024 20025 /* adjust MIBs and IRE timestamp */ 20026 TCP_RECORD_TRACE(tcp, md_mp_head, TCP_TRACE_SEND_PKT); 20027 tcp->tcp_obsegs += obsegs; 20028 UPDATE_MIB(&tcp_mib, tcpOutDataSegs, obsegs); 20029 UPDATE_MIB(&tcp_mib, tcpOutDataBytes, obbytes); 20030 TCP_STAT_UPDATE(tcp_mdt_pkt_out, obsegs); 20031 20032 if (tcp->tcp_ipversion == IPV4_VERSION) { 20033 TCP_STAT_UPDATE(tcp_mdt_pkt_out_v4, obsegs); 20034 UPDATE_MIB(&ip_mib, ipOutRequests, obsegs); 20035 } else { 20036 TCP_STAT_UPDATE(tcp_mdt_pkt_out_v6, obsegs); 20037 UPDATE_MIB(&ip6_mib, ipv6OutRequests, obsegs); 20038 } 20039 20040 ire->ire_ob_pkt_count += obsegs; 20041 if (ire->ire_ipif != NULL) 20042 atomic_add_32(&ire->ire_ipif->ipif_ob_pkt_count, obsegs); 20043 ire->ire_last_used_time = lbolt; 20044 20045 /* send it down */ 20046 putnext(ire->ire_stq, md_mp_head); 20047 20048 /* we're done for TCP/IPv4 */ 20049 if (tcp->tcp_ipversion == IPV4_VERSION) 20050 return; 20051 20052 nce = ire->ire_nce; 20053 20054 ASSERT(nce != NULL); 20055 ASSERT(!(nce->nce_flags & (NCE_F_NONUD|NCE_F_PERMANENT))); 20056 ASSERT(nce->nce_state != ND_INCOMPLETE); 20057 20058 /* reachability confirmation? */ 20059 if (*rconfirm) { 20060 nce->nce_last = TICK_TO_MSEC(lbolt64); 20061 if (nce->nce_state != ND_REACHABLE) { 20062 mutex_enter(&nce->nce_lock); 20063 nce->nce_state = ND_REACHABLE; 20064 nce->nce_pcnt = ND_MAX_UNICAST_SOLICIT; 20065 mutex_exit(&nce->nce_lock); 20066 (void) untimeout(nce->nce_timeout_id); 20067 if (ip_debug > 2) { 20068 /* ip1dbg */ 20069 pr_addr_dbg("tcp_multisend_data: state " 20070 "for %s changed to REACHABLE\n", 20071 AF_INET6, &ire->ire_addr_v6); 20072 } 20073 } 20074 /* reset transport reachability confirmation */ 20075 *rconfirm = B_FALSE; 20076 } 20077 20078 delta = TICK_TO_MSEC(lbolt64) - nce->nce_last; 20079 ip1dbg(("tcp_multisend_data: delta = %" PRId64 20080 " ill_reachable_time = %d \n", delta, ill->ill_reachable_time)); 20081 20082 if (delta > (uint64_t)ill->ill_reachable_time) { 20083 mutex_enter(&nce->nce_lock); 20084 switch (nce->nce_state) { 20085 case ND_REACHABLE: 20086 case ND_STALE: 20087 /* 20088 * ND_REACHABLE is identical to ND_STALE in this 20089 * specific case. 
If reachable time has expired for 20090 * this neighbor (delta is greater than reachable 20091 * time), conceptually, the neighbor cache is no 20092 * longer in REACHABLE state, but already in STALE 20093 * state. So the correct transition here is to 20094 * ND_DELAY. 20095 */ 20096 nce->nce_state = ND_DELAY; 20097 mutex_exit(&nce->nce_lock); 20098 NDP_RESTART_TIMER(nce, delay_first_probe_time); 20099 if (ip_debug > 3) { 20100 /* ip2dbg */ 20101 pr_addr_dbg("tcp_multisend_data: state " 20102 "for %s changed to DELAY\n", 20103 AF_INET6, &ire->ire_addr_v6); 20104 } 20105 break; 20106 case ND_DELAY: 20107 case ND_PROBE: 20108 mutex_exit(&nce->nce_lock); 20109 /* Timers have already started */ 20110 break; 20111 case ND_UNREACHABLE: 20112 /* 20113 * ndp timer has detected that this nce is 20114 * unreachable and initiated deleting this nce 20115 * and all its associated IREs. This is a race 20116 * where we found the ire before it was deleted 20117 * and have just sent out a packet using this 20118 * unreachable nce. 20119 */ 20120 mutex_exit(&nce->nce_lock); 20121 break; 20122 default: 20123 ASSERT(0); 20124 } 20125 } 20126 } 20127 20128 /* 20129 * tcp_send() is called by tcp_wput_data() for non-Multidata transmission 20130 * scheme, and returns one of the following: 20131 * 20132 * -1 = failed allocation. 20133 * 0 = success; burst count reached, or usable send window is too small, 20134 * and that we'd rather wait until later before sending again. 20135 * 1 = success; we are called from tcp_multisend(), and both usable send 20136 * window and tail_unsent are greater than the MDT threshold, and thus 20137 * Multidata Transmit should be used instead. 20138 */ 20139 static int 20140 tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len, 20141 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable, 20142 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time, 20143 const int mdt_thres) 20144 { 20145 int num_burst_seg = tcp->tcp_snd_burst; 20146 20147 for (;;) { 20148 struct datab *db; 20149 tcph_t *tcph; 20150 uint32_t sum; 20151 mblk_t *mp, *mp1; 20152 uchar_t *rptr; 20153 int len; 20154 20155 /* 20156 * If we're called by tcp_multisend(), and the amount of 20157 * sendable data as well as the size of current xmit_tail 20158 * is beyond the MDT threshold, return to the caller and 20159 * let the large data transmit be done using MDT. 20160 */ 20161 if (*usable > 0 && *usable > mdt_thres && 20162 (*tail_unsent > mdt_thres || (*tail_unsent == 0 && 20163 MBLKL((*xmit_tail)->b_cont) > mdt_thres))) { 20164 ASSERT(tcp->tcp_mdt); 20165 return (1); /* success; do large send */ 20166 } 20167 20168 if (num_burst_seg-- == 0) 20169 break; /* success; burst count reached */ 20170 20171 len = mss; 20172 if (len > *usable) { 20173 len = *usable; 20174 if (len <= 0) { 20175 /* Terminate the loop */ 20176 break; /* success; too small */ 20177 } 20178 /* 20179 * Sender silly-window avoidance. 20180 * Ignore this if we are going to send a 20181 * zero window probe out. 20182 * 20183 * TODO: force data into microscopic window? 20184 * ==> (!pushed || (unsent > usable)) 20185 */ 20186 if (len < (tcp->tcp_max_swnd >> 1) && 20187 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len && 20188 !((tcp->tcp_valid_bits & TCP_URG_VALID) && 20189 len == 1) && (! tcp->tcp_zero_win_probe)) { 20190 /* 20191 * If the retransmit timer is not running 20192 * we start it so that we will retransmit 20193 * in the case when the the receiver has 20194 * decremented the window. 
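 *
 * Illustrative sketch (not part of the original source): stripped of the
 * urgent-data and zero-window-probe special cases, the sender-side silly
 * window avoidance test above reduces to a predicate like the following
 * (names made up):
 *
 *	static boolean_t
 *	sws_defer_send(uint32_t len, uint32_t max_swnd, uint32_t still_unsent)
 *	{
 *		// defer when what we could send now is less than half the
 *		// largest window the peer has ever offered, and more data
 *		// than that is still waiting to be sent
 *		return (len < (max_swnd >> 1) && still_unsent > len);
 *	}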
20195 */ 20196 if (*snxt == tcp->tcp_snxt && 20197 *snxt == tcp->tcp_suna) { 20198 /* 20199 * We are not supposed to send 20200 * anything. So let's wait a little 20201 * bit longer before breaking SWS 20202 * avoidance. 20203 * 20204 * What should the value be? 20205 * Suggestion: MAX(init rexmit time, 20206 * tcp->tcp_rto) 20207 */ 20208 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 20209 } 20210 break; /* success; too small */ 20211 } 20212 } 20213 20214 tcph = tcp->tcp_tcph; 20215 20216 *usable -= len; /* Approximate - can be adjusted later */ 20217 if (*usable > 0) 20218 tcph->th_flags[0] = TH_ACK; 20219 else 20220 tcph->th_flags[0] = (TH_ACK | TH_PUSH); 20221 20222 /* 20223 * Prime pump for IP's checksumming on our behalf 20224 * Include the adjustment for a source route if any. 20225 */ 20226 sum = len + tcp_tcp_hdr_len + tcp->tcp_sum; 20227 sum = (sum >> 16) + (sum & 0xFFFF); 20228 U16_TO_ABE16(sum, tcph->th_sum); 20229 20230 U32_TO_ABE32(*snxt, tcph->th_seq); 20231 20232 /* 20233 * Branch off to tcp_xmit_mp() if any of the VALID bits is 20234 * set. For the case when TCP_FSS_VALID is the only valid 20235 * bit (normal active close), branch off only when we think 20236 * that the FIN flag needs to be set. Note for this case, 20237 * that (snxt + len) may not reflect the actual seg_len, 20238 * as len may be further reduced in tcp_xmit_mp(). If len 20239 * gets modified, we will end up here again. 20240 */ 20241 if (tcp->tcp_valid_bits != 0 && 20242 (tcp->tcp_valid_bits != TCP_FSS_VALID || 20243 ((*snxt + len) == tcp->tcp_fss))) { 20244 uchar_t *prev_rptr; 20245 uint32_t prev_snxt = tcp->tcp_snxt; 20246 20247 if (*tail_unsent == 0) { 20248 ASSERT((*xmit_tail)->b_cont != NULL); 20249 *xmit_tail = (*xmit_tail)->b_cont; 20250 prev_rptr = (*xmit_tail)->b_rptr; 20251 *tail_unsent = (int)((*xmit_tail)->b_wptr - 20252 (*xmit_tail)->b_rptr); 20253 } else { 20254 prev_rptr = (*xmit_tail)->b_rptr; 20255 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr - 20256 *tail_unsent; 20257 } 20258 mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL, 20259 *snxt, B_FALSE, (uint32_t *)&len, B_FALSE); 20260 /* Restore tcp_snxt so we get amount sent right. */ 20261 tcp->tcp_snxt = prev_snxt; 20262 if (prev_rptr == (*xmit_tail)->b_rptr) { 20263 /* 20264 * If the previous timestamp is still in use, 20265 * don't stomp on it. 20266 */ 20267 if ((*xmit_tail)->b_next == NULL) { 20268 (*xmit_tail)->b_prev = local_time; 20269 (*xmit_tail)->b_next = 20270 (mblk_t *)(uintptr_t)(*snxt); 20271 } 20272 } else 20273 (*xmit_tail)->b_rptr = prev_rptr; 20274 20275 if (mp == NULL) 20276 return (-1); 20277 mp1 = mp->b_cont; 20278 20279 tcp->tcp_last_sent_len = (ushort_t)len; 20280 while (mp1->b_cont) { 20281 *xmit_tail = (*xmit_tail)->b_cont; 20282 (*xmit_tail)->b_prev = local_time; 20283 (*xmit_tail)->b_next = 20284 (mblk_t *)(uintptr_t)(*snxt); 20285 mp1 = mp1->b_cont; 20286 } 20287 *snxt += len; 20288 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr; 20289 BUMP_LOCAL(tcp->tcp_obsegs); 20290 BUMP_MIB(&tcp_mib, tcpOutDataSegs); 20291 UPDATE_MIB(&tcp_mib, tcpOutDataBytes, len); 20292 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 20293 tcp_send_data(tcp, q, mp); 20294 continue; 20295 } 20296 20297 *snxt += len; /* Adjust later if we don't send all of len */ 20298 BUMP_MIB(&tcp_mib, tcpOutDataSegs); 20299 UPDATE_MIB(&tcp_mib, tcpOutDataBytes, len); 20300 20301 if (*tail_unsent) { 20302 /* Are the bytes above us in flight? 
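 *
 * Explanatory note (not part of the original source): "above us" refers to
 * the bytes between b_rptr and the start of the still-unsent data.  If the
 * two differ, earlier segments dup'd from this data block may still be
 * queued below us, so the space in front of the unsent bytes cannot be
 * reused for a TCP header.  A hypothetical helper expressing the test:
 *
 *	static boolean_t
 *	front_bytes_in_flight(const uchar_t *blk_start, const uchar_t *blk_end,
 *	    int tail_unsent)
 *	{
 *		// the unsent bytes sit at the very end of the block; any
 *		// bytes ahead of them were already carried by segments
 *		// that may still reference this data block
 *		return (blk_end - tail_unsent != blk_start);
 *	}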
*/ 20303 rptr = (*xmit_tail)->b_wptr - *tail_unsent; 20304 if (rptr != (*xmit_tail)->b_rptr) { 20305 *tail_unsent -= len; 20306 tcp->tcp_last_sent_len = (ushort_t)len; 20307 len += tcp_hdr_len; 20308 if (tcp->tcp_ipversion == IPV4_VERSION) 20309 tcp->tcp_ipha->ipha_length = htons(len); 20310 else 20311 tcp->tcp_ip6h->ip6_plen = 20312 htons(len - 20313 ((char *)&tcp->tcp_ip6h[1] - 20314 tcp->tcp_iphc)); 20315 mp = dupb(*xmit_tail); 20316 if (!mp) 20317 return (-1); /* out_of_mem */ 20318 mp->b_rptr = rptr; 20319 /* 20320 * If the old timestamp is no longer in use, 20321 * sample a new timestamp now. 20322 */ 20323 if ((*xmit_tail)->b_next == NULL) { 20324 (*xmit_tail)->b_prev = local_time; 20325 (*xmit_tail)->b_next = 20326 (mblk_t *)(uintptr_t)(*snxt-len); 20327 } 20328 goto must_alloc; 20329 } 20330 } else { 20331 *xmit_tail = (*xmit_tail)->b_cont; 20332 ASSERT((uintptr_t)((*xmit_tail)->b_wptr - 20333 (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX); 20334 *tail_unsent = (int)((*xmit_tail)->b_wptr - 20335 (*xmit_tail)->b_rptr); 20336 } 20337 20338 (*xmit_tail)->b_prev = local_time; 20339 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len); 20340 20341 *tail_unsent -= len; 20342 tcp->tcp_last_sent_len = (ushort_t)len; 20343 20344 len += tcp_hdr_len; 20345 if (tcp->tcp_ipversion == IPV4_VERSION) 20346 tcp->tcp_ipha->ipha_length = htons(len); 20347 else 20348 tcp->tcp_ip6h->ip6_plen = htons(len - 20349 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 20350 20351 mp = dupb(*xmit_tail); 20352 if (!mp) 20353 return (-1); /* out_of_mem */ 20354 20355 len = tcp_hdr_len; 20356 /* 20357 * There are four reasons to allocate a new hdr mblk: 20358 * 1) The bytes above us are in use by another packet 20359 * 2) We don't have good alignment 20360 * 3) The mblk is being shared 20361 * 4) We don't have enough room for a header 20362 */ 20363 rptr = mp->b_rptr - len; 20364 if (!OK_32PTR(rptr) || 20365 ((db = mp->b_datap), db->db_ref != 2) || 20366 rptr < db->db_base) { 20367 /* NOTE: we assume allocb returns an OK_32PTR */ 20368 20369 must_alloc:; 20370 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 20371 tcp_wroff_xtra, BPRI_MED); 20372 if (!mp1) { 20373 freemsg(mp); 20374 return (-1); /* out_of_mem */ 20375 } 20376 mp1->b_cont = mp; 20377 mp = mp1; 20378 /* Leave room for Link Level header */ 20379 len = tcp_hdr_len; 20380 rptr = &mp->b_rptr[tcp_wroff_xtra]; 20381 mp->b_wptr = &rptr[len]; 20382 } 20383 20384 /* 20385 * Fill in the header using the template header, and add 20386 * options such as time-stamp, ECN and/or SACK, as needed. 20387 */ 20388 tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk); 20389 20390 mp->b_rptr = rptr; 20391 20392 if (*tail_unsent) { 20393 int spill = *tail_unsent; 20394 20395 mp1 = mp->b_cont; 20396 if (!mp1) 20397 mp1 = mp; 20398 20399 /* 20400 * If we're a little short, tack on more mblks until 20401 * there is no more spillover. 20402 */ 20403 while (spill < 0) { 20404 mblk_t *nmp; 20405 int nmpsz; 20406 20407 nmp = (*xmit_tail)->b_cont; 20408 nmpsz = MBLKL(nmp); 20409 20410 /* 20411 * Excess data in mblk; can we split it? 20412 * If MDT is enabled for the connection, 20413 * keep on splitting as this is a transient 20414 * send path. 20415 */ 20416 if (!tcp->tcp_mdt && (spill + nmpsz > 0)) { 20417 /* 20418 * Don't split if stream head was 20419 * told to break up larger writes 20420 * into smaller ones. 
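 *
 * Illustrative sketch (not part of the original source): the four reasons
 * listed further above for allocating a new header mblk amount to a
 * reusability test on the space in front of the payload.  A simplified,
 * made-up predicate:
 *
 *	static boolean_t
 *	can_prepend_header(uchar_t *rptr, uchar_t *base, size_t hdrlen,
 *	    boolean_t shared)
 *	{
 *		uchar_t *hdr = rptr - hdrlen;
 *
 *		// need enough room in front of the data, 32-bit alignment
 *		// for the header, and no other packet sharing the buffer
 *		return (hdr >= base && ((uintptr_t)hdr & 0x3) == 0 && !shared);
 *	}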
20421 */ 20422 if (tcp->tcp_maxpsz > 0) 20423 break; 20424 20425 /* 20426 * Next mblk is less than SMSS/2 20427 * rounded up to nearest 64-byte; 20428 * let it get sent as part of the 20429 * next segment. 20430 */ 20431 if (tcp->tcp_localnet && 20432 !tcp->tcp_cork && 20433 (nmpsz < roundup((mss >> 1), 64))) 20434 break; 20435 } 20436 20437 *xmit_tail = nmp; 20438 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX); 20439 /* Stash for rtt use later */ 20440 (*xmit_tail)->b_prev = local_time; 20441 (*xmit_tail)->b_next = 20442 (mblk_t *)(uintptr_t)(*snxt - len); 20443 mp1->b_cont = dupb(*xmit_tail); 20444 mp1 = mp1->b_cont; 20445 20446 spill += nmpsz; 20447 if (mp1 == NULL) { 20448 *tail_unsent = spill; 20449 freemsg(mp); 20450 return (-1); /* out_of_mem */ 20451 } 20452 } 20453 20454 /* Trim back any surplus on the last mblk */ 20455 if (spill >= 0) { 20456 mp1->b_wptr -= spill; 20457 *tail_unsent = spill; 20458 } else { 20459 /* 20460 * We did not send everything we could in 20461 * order to remain within the b_cont limit. 20462 */ 20463 *usable -= spill; 20464 *snxt += spill; 20465 tcp->tcp_last_sent_len += spill; 20466 UPDATE_MIB(&tcp_mib, tcpOutDataBytes, spill); 20467 /* 20468 * Adjust the checksum 20469 */ 20470 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 20471 sum += spill; 20472 sum = (sum >> 16) + (sum & 0xFFFF); 20473 U16_TO_ABE16(sum, tcph->th_sum); 20474 if (tcp->tcp_ipversion == IPV4_VERSION) { 20475 sum = ntohs( 20476 ((ipha_t *)rptr)->ipha_length) + 20477 spill; 20478 ((ipha_t *)rptr)->ipha_length = 20479 htons(sum); 20480 } else { 20481 sum = ntohs( 20482 ((ip6_t *)rptr)->ip6_plen) + 20483 spill; 20484 ((ip6_t *)rptr)->ip6_plen = 20485 htons(sum); 20486 } 20487 *tail_unsent = 0; 20488 } 20489 } 20490 if (tcp->tcp_ip_forward_progress) { 20491 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 20492 *(uint32_t *)mp->b_rptr |= IP_FORWARD_PROG; 20493 tcp->tcp_ip_forward_progress = B_FALSE; 20494 } 20495 20496 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 20497 tcp_send_data(tcp, q, mp); 20498 BUMP_LOCAL(tcp->tcp_obsegs); 20499 } 20500 20501 return (0); 20502 } 20503 20504 /* Unlink and return any mblk that looks like it contains a MDT info */ 20505 static mblk_t * 20506 tcp_mdt_info_mp(mblk_t *mp) 20507 { 20508 mblk_t *prev_mp; 20509 20510 for (;;) { 20511 prev_mp = mp; 20512 /* no more to process? */ 20513 if ((mp = mp->b_cont) == NULL) 20514 break; 20515 20516 switch (DB_TYPE(mp)) { 20517 case M_CTL: 20518 if (*(uint32_t *)mp->b_rptr != MDT_IOC_INFO_UPDATE) 20519 continue; 20520 ASSERT(prev_mp != NULL); 20521 prev_mp->b_cont = mp->b_cont; 20522 mp->b_cont = NULL; 20523 return (mp); 20524 default: 20525 break; 20526 } 20527 } 20528 return (mp); 20529 } 20530 20531 /* MDT info update routine, called when IP notifies us about MDT */ 20532 static void 20533 tcp_mdt_update(tcp_t *tcp, ill_mdt_capab_t *mdt_capab, boolean_t first) 20534 { 20535 boolean_t prev_state; 20536 20537 /* 20538 * IP is telling us to abort MDT on this connection? We know 20539 * this because the capability is only turned off when IP 20540 * encounters some pathological cases, e.g. link-layer change 20541 * where the new driver doesn't support MDT, or in situation 20542 * where MDT usage on the link-layer has been switched off. 20543 * IP would not have sent us the initial MDT_IOC_INFO_UPDATE 20544 * if the link-layer doesn't support MDT, and if it does, it 20545 * will indicate that the feature is to be turned on. 
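 *
 * Illustrative sketch (not part of the original source): the way
 * tcp_mdt_info_mp() above unlinks the matching M_CTL block is ordinary
 * singly-linked-list removal; in generic, self-contained form (made-up
 * node type and tag):
 *
 *	struct node { struct node *next; int tag; };
 *
 *	static struct node *
 *	unlink_tagged(struct node *head, int tag)
 *	{
 *		struct node *prev, *np;
 *
 *		for (prev = head; (np = prev->next) != NULL; prev = np) {
 *			if (np->tag != tag)
 *				continue;
 *			prev->next = np->next;	// splice it out of the chain
 *			np->next = NULL;
 *			return (np);
 *		}
 *		return (NULL);
 *	}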
20546 */ 20547 prev_state = tcp->tcp_mdt; 20548 tcp->tcp_mdt = (mdt_capab->ill_mdt_on != 0); 20549 if (!tcp->tcp_mdt && !first) { 20550 TCP_STAT(tcp_mdt_conn_halted3); 20551 ip1dbg(("tcp_mdt_update: disabling MDT for connp %p\n", 20552 (void *)tcp->tcp_connp)); 20553 } 20554 20555 /* 20556 * We currently only support MDT on simple TCP/{IPv4,IPv6}, 20557 * so disable MDT otherwise. The checks are done here 20558 * and in tcp_wput_data(). 20559 */ 20560 if (tcp->tcp_mdt && 20561 (tcp->tcp_ipversion == IPV4_VERSION && 20562 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 20563 (tcp->tcp_ipversion == IPV6_VERSION && 20564 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)) 20565 tcp->tcp_mdt = B_FALSE; 20566 20567 if (tcp->tcp_mdt) { 20568 if (mdt_capab->ill_mdt_version != MDT_VERSION_2) { 20569 cmn_err(CE_NOTE, "tcp_mdt_update: unknown MDT " 20570 "version (%d), expected version is %d", 20571 mdt_capab->ill_mdt_version, MDT_VERSION_2); 20572 tcp->tcp_mdt = B_FALSE; 20573 return; 20574 } 20575 20576 /* 20577 * We need the driver to be able to handle at least three 20578 * spans per packet in order for tcp MDT to be utilized. 20579 * The first is for the header portion, while the rest are 20580 * needed to handle a packet that straddles across two 20581 * virtually non-contiguous buffers; a typical tcp packet 20582 * therefore consists of only two spans. Note that we take 20583 * a zero as "don't care". 20584 */ 20585 if (mdt_capab->ill_mdt_span_limit > 0 && 20586 mdt_capab->ill_mdt_span_limit < 3) { 20587 tcp->tcp_mdt = B_FALSE; 20588 return; 20589 } 20590 20591 /* a zero means driver wants default value */ 20592 tcp->tcp_mdt_max_pld = MIN(mdt_capab->ill_mdt_max_pld, 20593 tcp_mdt_max_pbufs); 20594 if (tcp->tcp_mdt_max_pld == 0) 20595 tcp->tcp_mdt_max_pld = tcp_mdt_max_pbufs; 20596 20597 /* ensure 32-bit alignment */ 20598 tcp->tcp_mdt_hdr_head = roundup(MAX(tcp_mdt_hdr_head_min, 20599 mdt_capab->ill_mdt_hdr_head), 4); 20600 tcp->tcp_mdt_hdr_tail = roundup(MAX(tcp_mdt_hdr_tail_min, 20601 mdt_capab->ill_mdt_hdr_tail), 4); 20602 20603 if (!first && !prev_state) { 20604 TCP_STAT(tcp_mdt_conn_resumed2); 20605 ip1dbg(("tcp_mdt_update: reenabling MDT for connp %p\n", 20606 (void *)tcp->tcp_connp)); 20607 } 20608 } 20609 } 20610 20611 static void 20612 tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_mdt) 20613 { 20614 conn_t *connp = tcp->tcp_connp; 20615 20616 ASSERT(ire != NULL); 20617 20618 /* 20619 * We may be in the fastpath here, and although we essentially do 20620 * similar checks as in ip_bind_connected{_v6}/ip_mdinfo_return, 20621 * we try to keep things as brief as possible. After all, these 20622 * are only best-effort checks, and we do more thorough ones prior 20623 * to calling tcp_multisend(). 20624 */ 20625 if (ip_multidata_outbound && check_mdt && 20626 !(ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) && 20627 ill != NULL && ILL_MDT_CAPABLE(ill) && 20628 !CONN_IPSEC_OUT_ENCAPSULATED(connp) && 20629 !(ire->ire_flags & RTF_MULTIRT) && 20630 !IPP_ENABLED(IPP_LOCAL_OUT) && 20631 CONN_IS_MD_FASTPATH(connp)) { 20632 /* Remember the result */ 20633 connp->conn_mdt_ok = B_TRUE; 20634 20635 ASSERT(ill->ill_mdt_capab != NULL); 20636 if (!ill->ill_mdt_capab->ill_mdt_on) { 20637 /* 20638 * If MDT has been previously turned off in the past, 20639 * and we currently can do MDT (due to IPQoS policy 20640 * removal, etc.) then enable it for this interface. 
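 *
 * Illustrative sketch (not part of the original source): the capability
 * screening performed in tcp_mdt_update() above boils down to a small
 * test.  In the made-up helper below, expected_version stands for
 * MDT_VERSION_2, and a span limit of zero means the driver states no
 * limit:
 *
 *	static boolean_t
 *	mdt_capab_usable(uint_t version, uint_t expected_version,
 *	    uint_t span_limit)
 *	{
 *		// need the interface version we understand, plus room for
 *		// a header span and at least two payload spans per packet
 *		return (version == expected_version &&
 *		    (span_limit == 0 || span_limit >= 3));
 *	}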
20641 */
20642 ill->ill_mdt_capab->ill_mdt_on = 1;
20643 ip1dbg(("tcp_ire_ill_check: connp %p enables MDT for "
20644 "interface %s\n", (void *)connp, ill->ill_name));
20645 }
20646 tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE);
20647 }
20648
20649 /*
20650 * The goal is to reduce the number of generated tcp segments by
20651 * setting the maxpsz multiplier to 0; this will have an effect on
20652 * tcp_maxpsz_set(). With this behavior, tcp will pack more data
20653 * into each packet, up to SMSS bytes. Doing this reduces the number
20654 * of outbound segments and incoming ACKs, thus allowing for better
20655 * network and system performance. In contrast, the legacy behavior
20656 * may result in sending less than SMSS size, because the last mblk
20657 * for some packets may have more data than needed to make up SMSS,
20658 * and the legacy code refused to "split" it.
20659 *
20660 * We apply the new behavior in the following situations:
20661 *
20662 * 1) Loopback connections,
20663 * 2) Connections in which the remote peer is not on the local subnet,
20664 * 3) Local subnet connections over the bge interface (see below).
20665 *
20666 * Ideally, we would like this behavior to apply for interfaces other
20667 * than bge. However, doing so would negatively impact drivers which
20668 * perform dynamic mapping and unmapping of DMA resources, which are
20669 * increased by setting the maxpsz multiplier to 0 (more mblks per
20670 * packet will be generated by tcp). The bge driver does not suffer
20671 * from this, as it copies the mblks into pre-mapped buffers, and
20672 * therefore does not require more I/O resources than before.
20673 *
20674 * Otherwise, this behavior is present on all network interfaces when
20675 * the destination endpoint is non-local, since reducing the number
20676 * of packets in general is good for the network.
20677 *
20678 * TODO We need to remove this hard-coded conditional for bge once
20679 * a better "self-tuning" mechanism, or a way to comprehend
20680 * the driver transmit strategy, is devised. Until the solution
20681 * is found and well understood, we live with this hack.
20682 */
20683 if (!tcp_static_maxpsz &&
20684 (tcp->tcp_loopback || !tcp->tcp_localnet ||
20685 (ill->ill_name_length > 3 && bcmp(ill->ill_name, "bge", 3) == 0))) {
20686 /* override the default value */
20687 tcp->tcp_maxpsz = 0;
20688
20689 ip3dbg(("tcp_ire_ill_check: connp %p tcp_maxpsz %d on "
20690 "interface %s\n", (void *)connp, tcp->tcp_maxpsz,
20691 ill != NULL ? ill->ill_name : ipif_loopback_name));
20692 }
20693
20694 /* set the stream head parameters accordingly */
20695 (void) tcp_maxpsz_set(tcp, B_TRUE);
20696 }
20697
20698 /* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
20699 static void
20700 tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
20701 {
20702 uchar_t fval = *mp->b_rptr;
20703 mblk_t *tail;
20704 queue_t *q = tcp->tcp_wq;
20705
20706 /* TODO: How should flush interact with urgent data? */
20707 if ((fval & FLUSHW) && tcp->tcp_xmit_head &&
20708 !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
20709 /*
20710 * Flush only data that has not yet been put on the wire. If
20711 * we flush data that we have already transmitted, life, as we
20712 * know it, may come to an end.
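 *
 * Simplified sketch (not part of the original source): the essence of the
 * FLUSHW handling below is to shorten the partially sent buffer so that it
 * ends at the last byte already handed to IP and to discard everything
 * queued after it.  The real code additionally relinks tcp_xmit_tail and
 * tcp_xmit_last and re-enables flow control; made-up helper:
 *
 *	static void
 *	flush_unsent(mblk_t *tail, int *unsent_in_tail)
 *	{
 *		// keep only the bytes of this buffer that are on the wire
 *		tail->b_wptr -= *unsent_in_tail;
 *		*unsent_in_tail = 0;
 *
 *		// buffers chained after it were never transmitted at all
 *		if (tail->b_cont != NULL) {
 *			freemsg(tail->b_cont);
 *			tail->b_cont = NULL;
 *		}
 *	}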
20713 */ 20714 tail = tcp->tcp_xmit_tail; 20715 tail->b_wptr -= tcp->tcp_xmit_tail_unsent; 20716 tcp->tcp_xmit_tail_unsent = 0; 20717 tcp->tcp_unsent = 0; 20718 if (tail->b_wptr != tail->b_rptr) 20719 tail = tail->b_cont; 20720 if (tail) { 20721 mblk_t **excess = &tcp->tcp_xmit_head; 20722 for (;;) { 20723 mblk_t *mp1 = *excess; 20724 if (mp1 == tail) 20725 break; 20726 tcp->tcp_xmit_tail = mp1; 20727 tcp->tcp_xmit_last = mp1; 20728 excess = &mp1->b_cont; 20729 } 20730 *excess = NULL; 20731 tcp_close_mpp(&tail); 20732 if (tcp->tcp_snd_zcopy_aware) 20733 tcp_zcopy_notify(tcp); 20734 } 20735 /* 20736 * We have no unsent data, so unsent must be less than 20737 * tcp_xmit_lowater, so re-enable flow. 20738 */ 20739 if (tcp->tcp_flow_stopped) { 20740 tcp_clrqfull(tcp); 20741 } 20742 } 20743 /* 20744 * TODO: you can't just flush these, you have to increase rwnd for one 20745 * thing. For another, how should urgent data interact? 20746 */ 20747 if (fval & FLUSHR) { 20748 *mp->b_rptr = fval & ~FLUSHW; 20749 /* XXX */ 20750 qreply(q, mp); 20751 return; 20752 } 20753 freemsg(mp); 20754 } 20755 20756 /* 20757 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA 20758 * messages. 20759 */ 20760 static void 20761 tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp) 20762 { 20763 mblk_t *mp1; 20764 STRUCT_HANDLE(strbuf, sb); 20765 uint16_t port; 20766 queue_t *q = tcp->tcp_wq; 20767 in6_addr_t v6addr; 20768 ipaddr_t v4addr; 20769 uint32_t flowinfo = 0; 20770 int addrlen; 20771 20772 /* Make sure it is one of ours. */ 20773 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 20774 case TI_GETMYNAME: 20775 case TI_GETPEERNAME: 20776 break; 20777 default: 20778 CALL_IP_WPUT(tcp->tcp_connp, q, mp); 20779 return; 20780 } 20781 switch (mi_copy_state(q, mp, &mp1)) { 20782 case -1: 20783 return; 20784 case MI_COPY_CASE(MI_COPY_IN, 1): 20785 break; 20786 case MI_COPY_CASE(MI_COPY_OUT, 1): 20787 /* Copy out the strbuf. */ 20788 mi_copyout(q, mp); 20789 return; 20790 case MI_COPY_CASE(MI_COPY_OUT, 2): 20791 /* All done. */ 20792 mi_copy_done(q, mp, 0); 20793 return; 20794 default: 20795 mi_copy_done(q, mp, EPROTO); 20796 return; 20797 } 20798 /* Check alignment of the strbuf */ 20799 if (!OK_32PTR(mp1->b_rptr)) { 20800 mi_copy_done(q, mp, EINVAL); 20801 return; 20802 } 20803 20804 STRUCT_SET_HANDLE(sb, ((struct iocblk *)mp->b_rptr)->ioc_flag, 20805 (void *)mp1->b_rptr); 20806 addrlen = tcp->tcp_family == AF_INET ? 
sizeof (sin_t) : sizeof (sin6_t); 20807 20808 if (STRUCT_FGET(sb, maxlen) < addrlen) { 20809 mi_copy_done(q, mp, EINVAL); 20810 return; 20811 } 20812 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 20813 case TI_GETMYNAME: 20814 if (tcp->tcp_family == AF_INET) { 20815 if (tcp->tcp_ipversion == IPV4_VERSION) { 20816 v4addr = tcp->tcp_ipha->ipha_src; 20817 } else { 20818 /* can't return an address in this case */ 20819 v4addr = 0; 20820 } 20821 } else { 20822 /* tcp->tcp_family == AF_INET6 */ 20823 if (tcp->tcp_ipversion == IPV4_VERSION) { 20824 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 20825 &v6addr); 20826 } else { 20827 v6addr = tcp->tcp_ip6h->ip6_src; 20828 } 20829 } 20830 port = tcp->tcp_lport; 20831 break; 20832 case TI_GETPEERNAME: 20833 if (tcp->tcp_family == AF_INET) { 20834 if (tcp->tcp_ipversion == IPV4_VERSION) { 20835 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_remote_v6, 20836 v4addr); 20837 } else { 20838 /* can't return an address in this case */ 20839 v4addr = 0; 20840 } 20841 } else { 20842 /* tcp->tcp_family == AF_INET6) */ 20843 v6addr = tcp->tcp_remote_v6; 20844 if (tcp->tcp_ipversion == IPV6_VERSION) { 20845 /* 20846 * No flowinfo if tcp->tcp_ipversion is v4. 20847 * 20848 * flowinfo was already initialized to zero 20849 * where it was declared above, so only 20850 * set it if ipversion is v6. 20851 */ 20852 flowinfo = tcp->tcp_ip6h->ip6_vcf & 20853 ~IPV6_VERS_AND_FLOW_MASK; 20854 } 20855 } 20856 port = tcp->tcp_fport; 20857 break; 20858 default: 20859 mi_copy_done(q, mp, EPROTO); 20860 return; 20861 } 20862 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 20863 if (!mp1) 20864 return; 20865 20866 if (tcp->tcp_family == AF_INET) { 20867 sin_t *sin; 20868 20869 STRUCT_FSET(sb, len, (int)sizeof (sin_t)); 20870 sin = (sin_t *)mp1->b_rptr; 20871 mp1->b_wptr = (uchar_t *)&sin[1]; 20872 *sin = sin_null; 20873 sin->sin_family = AF_INET; 20874 sin->sin_addr.s_addr = v4addr; 20875 sin->sin_port = port; 20876 } else { 20877 /* tcp->tcp_family == AF_INET6 */ 20878 sin6_t *sin6; 20879 20880 STRUCT_FSET(sb, len, (int)sizeof (sin6_t)); 20881 sin6 = (sin6_t *)mp1->b_rptr; 20882 mp1->b_wptr = (uchar_t *)&sin6[1]; 20883 *sin6 = sin6_null; 20884 sin6->sin6_family = AF_INET6; 20885 sin6->sin6_flowinfo = flowinfo; 20886 sin6->sin6_addr = v6addr; 20887 sin6->sin6_port = port; 20888 } 20889 /* Copy out the address */ 20890 mi_copyout(q, mp); 20891 } 20892 20893 /* 20894 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL 20895 * messages. 20896 */ 20897 /* ARGSUSED */ 20898 static void 20899 tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2) 20900 { 20901 conn_t *connp = (conn_t *)arg; 20902 tcp_t *tcp = connp->conn_tcp; 20903 queue_t *q = tcp->tcp_wq; 20904 struct iocblk *iocp; 20905 20906 ASSERT(DB_TYPE(mp) == M_IOCTL); 20907 /* 20908 * Try and ASSERT the minimum possible references on the 20909 * conn early enough. Since we are executing on write side, 20910 * the connection is obviously not detached and that means 20911 * there is a ref each for TCP and IP. Since we are behind 20912 * the squeue, the minimum references needed are 3. If the 20913 * conn is in classifier hash list, there should be an 20914 * extra ref for that (we check both the possibilities). 20915 */ 20916 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 20917 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 20918 20919 iocp = (struct iocblk *)mp->b_rptr; 20920 switch (iocp->ioc_cmd) { 20921 case TCP_IOC_DEFAULT_Q: 20922 /* Wants to be the default wq. 
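 *
 * Illustrative sketch (not part of the original source): the address
 * reporting for TI_GETMYNAME and TI_GETPEERNAME above ultimately just
 * fills in a sockaddr from state that is kept in network byte order.  A
 * userland-style equivalent with a made-up helper name:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *
 *	static void
 *	fill_sin(struct sockaddr_in *sin, in_addr_t addr_nbo,
 *	    in_port_t port_nbo)
 *	{
 *		// both the address and the port stay in network byte order
 *		memset(sin, 0, sizeof (*sin));
 *		sin->sin_family = AF_INET;
 *		sin->sin_addr.s_addr = addr_nbo;
 *		sin->sin_port = port_nbo;
 *	}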
*/ 20923 if (secpolicy_net_config(iocp->ioc_cr, B_FALSE) != 0) { 20924 iocp->ioc_error = EPERM; 20925 iocp->ioc_count = 0; 20926 mp->b_datap->db_type = M_IOCACK; 20927 qreply(q, mp); 20928 return; 20929 } 20930 tcp_def_q_set(tcp, mp); 20931 return; 20932 case _SIOCSOCKFALLBACK: 20933 /* 20934 * Either sockmod is about to be popped and the socket 20935 * would now be treated as a plain stream, or a module 20936 * is about to be pushed so we could no longer use read- 20937 * side synchronous streams for fused loopback tcp. 20938 * Drain any queued data and disable direct sockfs 20939 * interface from now on. 20940 */ 20941 if (!tcp->tcp_issocket) { 20942 DB_TYPE(mp) = M_IOCNAK; 20943 iocp->ioc_error = EINVAL; 20944 } else { 20945 #ifdef _ILP32 20946 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 20947 #else 20948 tcp->tcp_acceptor_id = tcp->tcp_connp->conn_dev; 20949 #endif 20950 /* 20951 * Insert this socket into the acceptor hash. 20952 * We might need it for T_CONN_RES message 20953 */ 20954 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 20955 20956 if (tcp->tcp_fused) { 20957 /* 20958 * This is a fused loopback tcp; disable 20959 * read-side synchronous streams interface 20960 * and drain any queued data. It is okay 20961 * to do this for non-synchronous streams 20962 * fused tcp as well. 20963 */ 20964 tcp_fuse_disable_pair(tcp, B_FALSE); 20965 } 20966 tcp->tcp_issocket = B_FALSE; 20967 TCP_STAT(tcp_sock_fallback); 20968 20969 DB_TYPE(mp) = M_IOCACK; 20970 iocp->ioc_error = 0; 20971 } 20972 iocp->ioc_count = 0; 20973 iocp->ioc_rval = 0; 20974 qreply(q, mp); 20975 return; 20976 } 20977 CALL_IP_WPUT(connp, q, mp); 20978 } 20979 20980 /* 20981 * This routine is called by tcp_wput() to handle all TPI requests. 20982 */ 20983 /* ARGSUSED */ 20984 static void 20985 tcp_wput_proto(void *arg, mblk_t *mp, void *arg2) 20986 { 20987 conn_t *connp = (conn_t *)arg; 20988 tcp_t *tcp = connp->conn_tcp; 20989 union T_primitives *tprim = (union T_primitives *)mp->b_rptr; 20990 uchar_t *rptr; 20991 t_scalar_t type; 20992 int len; 20993 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 20994 20995 /* 20996 * Try and ASSERT the minimum possible references on the 20997 * conn early enough. Since we are executing on write side, 20998 * the connection is obviously not detached and that means 20999 * there is a ref each for TCP and IP. Since we are behind 21000 * the squeue, the minimum references needed are 3. If the 21001 * conn is in classifier hash list, there should be an 21002 * extra ref for that (we check both the possibilities). 21003 */ 21004 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 21005 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 21006 21007 rptr = mp->b_rptr; 21008 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 21009 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 21010 type = ((union T_primitives *)rptr)->type; 21011 if (type == T_EXDATA_REQ) { 21012 uint32_t msize = msgdsize(mp->b_cont); 21013 21014 len = msize - 1; 21015 if (len < 0) { 21016 freemsg(mp); 21017 return; 21018 } 21019 /* 21020 * Try to force urgent data out on the wire. 21021 * Even if we have unsent data this will 21022 * at least send the urgent flag. 21023 * XXX does not handle more flag correctly. 
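 *
 * Explanatory note (not part of the original source): the arithmetic around
 * this comment computes the sequence number of the last byte of the new
 * urgent data.  With made-up variable names:
 *
 *	// msize is the length of the T_EXDATA_REQ payload
 *	uint32_t urg_seq = snxt + unsent + msize - 1;
 *
 * When the segment is eventually built in tcp_xmit_mp() below, the urgent
 * pointer field carries the offset of that byte from the segment's own
 * sequence number, adjusted by TCP_OLD_URP_INTERPRETATION for the
 * traditional BSD reading of the urgent pointer.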
21024 */ 21025 len += tcp->tcp_unsent; 21026 len += tcp->tcp_snxt; 21027 tcp->tcp_urg = len; 21028 tcp->tcp_valid_bits |= TCP_URG_VALID; 21029 21030 /* Bypass tcp protocol for fused tcp loopback */ 21031 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 21032 return; 21033 } else if (type != T_DATA_REQ) { 21034 goto non_urgent_data; 21035 } 21036 /* TODO: options, flags, ... from user */ 21037 /* Set length to zero for reclamation below */ 21038 tcp_wput_data(tcp, mp->b_cont, B_TRUE); 21039 freeb(mp); 21040 return; 21041 } else { 21042 if (tcp->tcp_debug) { 21043 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 21044 "tcp_wput_proto, dropping one..."); 21045 } 21046 freemsg(mp); 21047 return; 21048 } 21049 21050 non_urgent_data: 21051 21052 switch ((int)tprim->type) { 21053 case T_SSL_PROXY_BIND_REQ: /* an SSL proxy endpoint bind request */ 21054 /* 21055 * save the kssl_ent_t from the next block, and convert this 21056 * back to a normal bind_req. 21057 */ 21058 if (mp->b_cont != NULL) { 21059 ASSERT(MBLKL(mp->b_cont) >= sizeof (kssl_ent_t)); 21060 21061 if (tcp->tcp_kssl_ent != NULL) { 21062 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 21063 KSSL_NO_PROXY); 21064 tcp->tcp_kssl_ent = NULL; 21065 } 21066 bcopy(mp->b_cont->b_rptr, &tcp->tcp_kssl_ent, 21067 sizeof (kssl_ent_t)); 21068 kssl_hold_ent(tcp->tcp_kssl_ent); 21069 freemsg(mp->b_cont); 21070 mp->b_cont = NULL; 21071 } 21072 tprim->type = T_BIND_REQ; 21073 21074 /* FALLTHROUGH */ 21075 case O_T_BIND_REQ: /* bind request */ 21076 case T_BIND_REQ: /* new semantics bind request */ 21077 tcp_bind(tcp, mp); 21078 break; 21079 case T_UNBIND_REQ: /* unbind request */ 21080 tcp_unbind(tcp, mp); 21081 break; 21082 case O_T_CONN_RES: /* old connection response XXX */ 21083 case T_CONN_RES: /* connection response */ 21084 tcp_accept(tcp, mp); 21085 break; 21086 case T_CONN_REQ: /* connection request */ 21087 tcp_connect(tcp, mp); 21088 break; 21089 case T_DISCON_REQ: /* disconnect request */ 21090 tcp_disconnect(tcp, mp); 21091 break; 21092 case T_CAPABILITY_REQ: 21093 tcp_capability_req(tcp, mp); /* capability request */ 21094 break; 21095 case T_INFO_REQ: /* information request */ 21096 tcp_info_req(tcp, mp); 21097 break; 21098 case T_SVR4_OPTMGMT_REQ: /* manage options req */ 21099 /* Only IP is allowed to return meaningful value */ 21100 (void) svr4_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj); 21101 break; 21102 case T_OPTMGMT_REQ: 21103 /* 21104 * Note: no support for snmpcom_req() through new 21105 * T_OPTMGMT_REQ. See comments in ip.c 21106 */ 21107 /* Only IP is allowed to return meaningful value */ 21108 (void) tpi_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj); 21109 break; 21110 21111 case T_UNITDATA_REQ: /* unitdata request */ 21112 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0); 21113 break; 21114 case T_ORDREL_REQ: /* orderly release req */ 21115 freemsg(mp); 21116 21117 if (tcp->tcp_fused) 21118 tcp_unfuse(tcp); 21119 21120 if (tcp_xmit_end(tcp) != 0) { 21121 /* 21122 * We were crossing FINs and got a reset from 21123 * the other side. Just ignore it. 
21124 */ 21125 if (tcp->tcp_debug) { 21126 (void) strlog(TCP_MOD_ID, 0, 1, 21127 SL_ERROR|SL_TRACE, 21128 "tcp_wput_proto, T_ORDREL_REQ out of " 21129 "state %s", 21130 tcp_display(tcp, NULL, 21131 DISP_ADDR_AND_PORT)); 21132 } 21133 } 21134 break; 21135 case T_ADDR_REQ: 21136 tcp_addr_req(tcp, mp); 21137 break; 21138 default: 21139 if (tcp->tcp_debug) { 21140 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 21141 "tcp_wput_proto, bogus TPI msg, type %d", 21142 tprim->type); 21143 } 21144 /* 21145 * We used to M_ERROR. Sending TNOTSUPPORT gives the user 21146 * to recover. 21147 */ 21148 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0); 21149 break; 21150 } 21151 } 21152 21153 /* 21154 * The TCP write service routine should never be called... 21155 */ 21156 /* ARGSUSED */ 21157 static void 21158 tcp_wsrv(queue_t *q) 21159 { 21160 TCP_STAT(tcp_wsrv_called); 21161 } 21162 21163 /* Non overlapping byte exchanger */ 21164 static void 21165 tcp_xchg(uchar_t *a, uchar_t *b, int len) 21166 { 21167 uchar_t uch; 21168 21169 while (len-- > 0) { 21170 uch = a[len]; 21171 a[len] = b[len]; 21172 b[len] = uch; 21173 } 21174 } 21175 21176 /* 21177 * Send out a control packet on the tcp connection specified. This routine 21178 * is typically called where we need a simple ACK or RST generated. 21179 */ 21180 static void 21181 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl) 21182 { 21183 uchar_t *rptr; 21184 tcph_t *tcph; 21185 ipha_t *ipha = NULL; 21186 ip6_t *ip6h = NULL; 21187 uint32_t sum; 21188 int tcp_hdr_len; 21189 int tcp_ip_hdr_len; 21190 mblk_t *mp; 21191 21192 /* 21193 * Save sum for use in source route later. 21194 */ 21195 ASSERT(tcp != NULL); 21196 sum = tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 21197 tcp_hdr_len = tcp->tcp_hdr_len; 21198 tcp_ip_hdr_len = tcp->tcp_ip_hdr_len; 21199 21200 /* If a text string is passed in with the request, pass it to strlog. */ 21201 if (str != NULL && tcp->tcp_debug) { 21202 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 21203 "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x", 21204 str, seq, ack, ctl); 21205 } 21206 mp = allocb(tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcp_wroff_xtra, 21207 BPRI_MED); 21208 if (mp == NULL) { 21209 return; 21210 } 21211 rptr = &mp->b_rptr[tcp_wroff_xtra]; 21212 mp->b_rptr = rptr; 21213 mp->b_wptr = &rptr[tcp_hdr_len]; 21214 bcopy(tcp->tcp_iphc, rptr, tcp_hdr_len); 21215 21216 if (tcp->tcp_ipversion == IPV4_VERSION) { 21217 ipha = (ipha_t *)rptr; 21218 ipha->ipha_length = htons(tcp_hdr_len); 21219 } else { 21220 ip6h = (ip6_t *)rptr; 21221 ASSERT(tcp != NULL); 21222 ip6h->ip6_plen = htons(tcp->tcp_hdr_len - 21223 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 21224 } 21225 tcph = (tcph_t *)&rptr[tcp_ip_hdr_len]; 21226 tcph->th_flags[0] = (uint8_t)ctl; 21227 if (ctl & TH_RST) { 21228 BUMP_MIB(&tcp_mib, tcpOutRsts); 21229 BUMP_MIB(&tcp_mib, tcpOutControl); 21230 /* 21231 * Don't send TSopt w/ TH_RST packets per RFC 1323. 
21232 */ 21233 if (tcp->tcp_snd_ts_ok && 21234 tcp->tcp_state > TCPS_SYN_SENT) { 21235 mp->b_wptr = &rptr[tcp_hdr_len - TCPOPT_REAL_TS_LEN]; 21236 *(mp->b_wptr) = TCPOPT_EOL; 21237 if (tcp->tcp_ipversion == IPV4_VERSION) { 21238 ipha->ipha_length = htons(tcp_hdr_len - 21239 TCPOPT_REAL_TS_LEN); 21240 } else { 21241 ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - 21242 TCPOPT_REAL_TS_LEN); 21243 } 21244 tcph->th_offset_and_rsrvd[0] -= (3 << 4); 21245 sum -= TCPOPT_REAL_TS_LEN; 21246 } 21247 } 21248 if (ctl & TH_ACK) { 21249 if (tcp->tcp_snd_ts_ok) { 21250 U32_TO_BE32(lbolt, 21251 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 21252 U32_TO_BE32(tcp->tcp_ts_recent, 21253 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 21254 } 21255 21256 /* Update the latest receive window size in TCP header. */ 21257 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 21258 tcph->th_win); 21259 tcp->tcp_rack = ack; 21260 tcp->tcp_rack_cnt = 0; 21261 BUMP_MIB(&tcp_mib, tcpOutAck); 21262 } 21263 BUMP_LOCAL(tcp->tcp_obsegs); 21264 U32_TO_BE32(seq, tcph->th_seq); 21265 U32_TO_BE32(ack, tcph->th_ack); 21266 /* 21267 * Include the adjustment for a source route if any. 21268 */ 21269 sum = (sum >> 16) + (sum & 0xFFFF); 21270 U16_TO_BE16(sum, tcph->th_sum); 21271 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21272 tcp_send_data(tcp, tcp->tcp_wq, mp); 21273 } 21274 21275 /* 21276 * If this routine returns B_TRUE, TCP can generate a RST in response 21277 * to a segment. If it returns B_FALSE, TCP should not respond. 21278 */ 21279 static boolean_t 21280 tcp_send_rst_chk(void) 21281 { 21282 clock_t now; 21283 21284 /* 21285 * TCP needs to protect itself from generating too many RSTs. 21286 * This can be a DoS attack by sending us random segments 21287 * soliciting RSTs. 21288 * 21289 * What we do here is to have a limit of tcp_rst_sent_rate RSTs 21290 * in each 1 second interval. In this way, TCP still generate 21291 * RSTs in normal cases but when under attack, the impact is 21292 * limited. 21293 */ 21294 if (tcp_rst_sent_rate_enabled != 0) { 21295 now = lbolt; 21296 /* lbolt can wrap around. */ 21297 if ((tcp_last_rst_intrvl > now) || 21298 (TICK_TO_MSEC(now - tcp_last_rst_intrvl) > 1*SECONDS)) { 21299 tcp_last_rst_intrvl = now; 21300 tcp_rst_cnt = 1; 21301 } else if (++tcp_rst_cnt > tcp_rst_sent_rate) { 21302 return (B_FALSE); 21303 } 21304 } 21305 return (B_TRUE); 21306 } 21307 21308 /* 21309 * Send down the advice IP ioctl to tell IP to mark an IRE temporary. 21310 */ 21311 static void 21312 tcp_ip_ire_mark_advice(tcp_t *tcp) 21313 { 21314 mblk_t *mp; 21315 ipic_t *ipic; 21316 21317 if (tcp->tcp_ipversion == IPV4_VERSION) { 21318 mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN, 21319 &ipic); 21320 } else { 21321 mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN, 21322 &ipic); 21323 } 21324 if (mp == NULL) 21325 return; 21326 ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY; 21327 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 21328 } 21329 21330 /* 21331 * Return an IP advice ioctl mblk and set ipic to be the pointer 21332 * to the advice structure. 
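 *
 * Illustrative sketch (not part of the original source): the RST
 * throttling in tcp_send_rst_chk() above is a per-interval counter.  The
 * same idea in a self-contained, seconds-based form (names made up; the
 * real code works in lbolt ticks):
 *
 *	static boolean_t
 *	rst_rate_ok(time_t now, time_t *window_start, uint_t *sent_in_window,
 *	    uint_t max_per_window)
 *	{
 *		// open a new window if the old one expired or time jumped
 *		if (*window_start > now || now - *window_start >= 1) {
 *			*window_start = now;
 *			*sent_in_window = 0;
 *		}
 *		return (++(*sent_in_window) <= max_per_window);
 *	}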
21333 */ 21334 static mblk_t * 21335 tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic) 21336 { 21337 struct iocblk *ioc; 21338 mblk_t *mp, *mp1; 21339 21340 mp = allocb(sizeof (ipic_t) + addr_len, BPRI_HI); 21341 if (mp == NULL) 21342 return (NULL); 21343 bzero(mp->b_rptr, sizeof (ipic_t) + addr_len); 21344 *ipic = (ipic_t *)mp->b_rptr; 21345 (*ipic)->ipic_cmd = IP_IOC_IRE_ADVISE_NO_REPLY; 21346 (*ipic)->ipic_addr_offset = sizeof (ipic_t); 21347 21348 bcopy(addr, *ipic + 1, addr_len); 21349 21350 (*ipic)->ipic_addr_length = addr_len; 21351 mp->b_wptr = &mp->b_rptr[sizeof (ipic_t) + addr_len]; 21352 21353 mp1 = mkiocb(IP_IOCTL); 21354 if (mp1 == NULL) { 21355 freemsg(mp); 21356 return (NULL); 21357 } 21358 mp1->b_cont = mp; 21359 ioc = (struct iocblk *)mp1->b_rptr; 21360 ioc->ioc_count = sizeof (ipic_t) + addr_len; 21361 21362 return (mp1); 21363 } 21364 21365 /* 21366 * Generate a reset based on an inbound packet for which there is no active 21367 * tcp state that we can find. 21368 * 21369 * IPSEC NOTE : Try to send the reply with the same protection as it came 21370 * in. We still have the ipsec_mp that the packet was attached to. Thus 21371 * the packet will go out at the same level of protection as it came in by 21372 * converting the IPSEC_IN to IPSEC_OUT. 21373 */ 21374 static void 21375 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, 21376 uint32_t ack, int ctl, uint_t ip_hdr_len) 21377 { 21378 ipha_t *ipha = NULL; 21379 ip6_t *ip6h = NULL; 21380 ushort_t len; 21381 tcph_t *tcph; 21382 int i; 21383 mblk_t *ipsec_mp; 21384 boolean_t mctl_present; 21385 ipic_t *ipic; 21386 ipaddr_t v4addr; 21387 in6_addr_t v6addr; 21388 int addr_len; 21389 void *addr; 21390 queue_t *q = tcp_g_q; 21391 tcp_t *tcp = Q_TO_TCP(q); 21392 cred_t *cr; 21393 21394 if (!tcp_send_rst_chk()) { 21395 tcp_rst_unsent++; 21396 freemsg(mp); 21397 return; 21398 } 21399 21400 if (mp->b_datap->db_type == M_CTL) { 21401 ipsec_mp = mp; 21402 mp = mp->b_cont; 21403 mctl_present = B_TRUE; 21404 } else { 21405 ipsec_mp = mp; 21406 mctl_present = B_FALSE; 21407 } 21408 21409 if (str && q && tcp_dbg) { 21410 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 21411 "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, " 21412 "flags 0x%x", 21413 str, seq, ack, ctl); 21414 } 21415 if (mp->b_datap->db_ref != 1) { 21416 mblk_t *mp1 = copyb(mp); 21417 freemsg(mp); 21418 mp = mp1; 21419 if (!mp) { 21420 if (mctl_present) 21421 freeb(ipsec_mp); 21422 return; 21423 } else { 21424 if (mctl_present) { 21425 ipsec_mp->b_cont = mp; 21426 } else { 21427 ipsec_mp = mp; 21428 } 21429 } 21430 } else if (mp->b_cont) { 21431 freemsg(mp->b_cont); 21432 mp->b_cont = NULL; 21433 } 21434 /* 21435 * We skip reversing source route here. 21436 * (for now we replace all IP options with EOL) 21437 */ 21438 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 21439 ipha = (ipha_t *)mp->b_rptr; 21440 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++) 21441 mp->b_rptr[i] = IPOPT_EOL; 21442 /* 21443 * Make sure that src address isn't flagrantly invalid. 21444 * Not all broadcast address checking for the src address 21445 * is possible, since we don't know the netmask of the src 21446 * addr. No check for destination address is done, since 21447 * IP will not pass up a packet with a broadcast dest 21448 * address to TCP. Similar checks are done below for IPv6. 
21449 */ 21450 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST || 21451 CLASSD(ipha->ipha_src)) { 21452 freemsg(ipsec_mp); 21453 BUMP_MIB(&ip_mib, ipInDiscards); 21454 return; 21455 } 21456 } else { 21457 ip6h = (ip6_t *)mp->b_rptr; 21458 21459 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) || 21460 IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) { 21461 freemsg(ipsec_mp); 21462 BUMP_MIB(&ip6_mib, ipv6InDiscards); 21463 return; 21464 } 21465 21466 /* Remove any extension headers assuming partial overlay */ 21467 if (ip_hdr_len > IPV6_HDR_LEN) { 21468 uint8_t *to; 21469 21470 to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN; 21471 ovbcopy(ip6h, to, IPV6_HDR_LEN); 21472 mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN; 21473 ip_hdr_len = IPV6_HDR_LEN; 21474 ip6h = (ip6_t *)mp->b_rptr; 21475 ip6h->ip6_nxt = IPPROTO_TCP; 21476 } 21477 } 21478 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 21479 if (tcph->th_flags[0] & TH_RST) { 21480 freemsg(ipsec_mp); 21481 return; 21482 } 21483 tcph->th_offset_and_rsrvd[0] = (5 << 4); 21484 len = ip_hdr_len + sizeof (tcph_t); 21485 mp->b_wptr = &mp->b_rptr[len]; 21486 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 21487 ipha->ipha_length = htons(len); 21488 /* Swap addresses */ 21489 v4addr = ipha->ipha_src; 21490 ipha->ipha_src = ipha->ipha_dst; 21491 ipha->ipha_dst = v4addr; 21492 ipha->ipha_ident = 0; 21493 ipha->ipha_ttl = (uchar_t)tcp_ipv4_ttl; 21494 addr_len = IP_ADDR_LEN; 21495 addr = &v4addr; 21496 } else { 21497 /* No ip6i_t in this case */ 21498 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN); 21499 /* Swap addresses */ 21500 v6addr = ip6h->ip6_src; 21501 ip6h->ip6_src = ip6h->ip6_dst; 21502 ip6h->ip6_dst = v6addr; 21503 ip6h->ip6_hops = (uchar_t)tcp_ipv6_hoplimit; 21504 addr_len = IPV6_ADDR_LEN; 21505 addr = &v6addr; 21506 } 21507 tcp_xchg(tcph->th_fport, tcph->th_lport, 2); 21508 U32_TO_BE32(ack, tcph->th_ack); 21509 U32_TO_BE32(seq, tcph->th_seq); 21510 U16_TO_BE16(0, tcph->th_win); 21511 U16_TO_BE16(sizeof (tcph_t), tcph->th_sum); 21512 tcph->th_flags[0] = (uint8_t)ctl; 21513 if (ctl & TH_RST) { 21514 BUMP_MIB(&tcp_mib, tcpOutRsts); 21515 BUMP_MIB(&tcp_mib, tcpOutControl); 21516 } 21517 21518 /* IP trusts us to set up labels when required. */ 21519 if (is_system_labeled() && (cr = DB_CRED(mp)) != NULL && 21520 crgetlabel(cr) != NULL) { 21521 int err, adjust; 21522 21523 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) 21524 err = tsol_check_label(cr, &mp, &adjust, 21525 tcp->tcp_connp->conn_mac_exempt); 21526 else 21527 err = tsol_check_label_v6(cr, &mp, &adjust, 21528 tcp->tcp_connp->conn_mac_exempt); 21529 if (mctl_present) 21530 ipsec_mp->b_cont = mp; 21531 else 21532 ipsec_mp = mp; 21533 if (err != 0) { 21534 freemsg(ipsec_mp); 21535 return; 21536 } 21537 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 21538 ipha = (ipha_t *)mp->b_rptr; 21539 adjust += ntohs(ipha->ipha_length); 21540 ipha->ipha_length = htons(adjust); 21541 } else { 21542 ip6h = (ip6_t *)mp->b_rptr; 21543 } 21544 } 21545 21546 if (mctl_present) { 21547 ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr; 21548 21549 ASSERT(ii->ipsec_in_type == IPSEC_IN); 21550 if (!ipsec_in_to_out(ipsec_mp, ipha, ip6h)) { 21551 return; 21552 } 21553 } 21554 /* 21555 * NOTE: one might consider tracing a TCP packet here, but 21556 * this function has no active TCP state and no tcp structure 21557 * that has a trace buffer. If we traced here, we would have 21558 * to keep a local trace buffer in tcp_record_trace(). 
21559 *
21560 * TSol note: The mblk that contains the incoming packet was
21561 * reused by tcp_xmit_listeners_reset, so it already contains
21562 * the right credentials and we don't need to call mblk_setcred.
21563 * Also the conn's cred is not right since it is associated
21564 * with tcp_g_q.
21565 */
21566 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, ipsec_mp);
21567
21568 /*
21569 * Tell IP to mark the IRE used for this destination temporary.
21570 * This way, we can limit our exposure to DoS attack because IP
21571 * creates an IRE for each destination. If there are too many,
21572 * the time to do any routing lookup will be extremely long. And
21573 * the lookup can be in interrupt context.
21574 *
21575 * Note that in normal circumstances, this marking should not
21576 * affect anything. It would be nice if only 1 message is
21577 * needed to inform IP that the IRE created for this RST should
21578 * not be added to the cache table. But there is currently
21579 * no such communication mechanism between TCP and IP. So
21580 * the best we can do now is to send the advice ioctl to IP
21581 * to mark the IRE temporary.
21582 */
21583 if ((mp = tcp_ip_advise_mblk(addr, addr_len, &ipic)) != NULL) {
21584 ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY;
21585 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
21586 }
21587 }
21588
21589 /*
21590 * Initiate closedown sequence on an active connection. (May be called as
21591 * writer.) Return value zero for OK return, non-zero for error return.
21592 */
21593 static int
21594 tcp_xmit_end(tcp_t *tcp)
21595 {
21596 ipic_t *ipic;
21597 mblk_t *mp;
21598
21599 if (tcp->tcp_state < TCPS_SYN_RCVD ||
21600 tcp->tcp_state > TCPS_CLOSE_WAIT) {
21601 /*
21602 * Invalid state, only states TCPS_SYN_RCVD,
21603 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
21604 */
21605 return (-1);
21606 }
21607
21608 tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
21609 tcp->tcp_valid_bits |= TCP_FSS_VALID;
21610 /*
21611 * If there is nothing more unsent, send the FIN now.
21612 * Otherwise, it will go out with the last segment.
21613 */
21614 if (tcp->tcp_unsent == 0) {
21615 mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
21616 tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
21617
21618 if (mp) {
21619 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
21620 tcp_send_data(tcp, tcp->tcp_wq, mp);
21621 } else {
21622 /*
21623 * Couldn't allocate msg. Pretend we got it out.
21624 * Wait for rexmit timeout.
21625 */
21626 tcp->tcp_snxt = tcp->tcp_fss + 1;
21627 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
21628 }
21629
21630 /*
21631 * If needed, update tcp_rexmit_snxt as tcp_snxt is
21632 * changed.
21633 */
21634 if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
21635 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
21636 }
21637 } else {
21638 /*
21639 * If tcp->tcp_cork is set, then the data will not get sent,
21640 * so we have to check that and unset it first.
21641 */
21642 if (tcp->tcp_cork)
21643 tcp->tcp_cork = B_FALSE;
21644 tcp_wput_data(tcp, NULL, B_FALSE);
21645 }
21646
21647 /*
21648 * If TCP does not get enough samples of RTT or tcp_rtt_updates
21649 * is 0, don't update the cache.
21650 */
21651 if (tcp_rtt_updates == 0 || tcp->tcp_rtt_update < tcp_rtt_updates)
21652 return (0);
21653
21654 /*
21655 * NOTE: should not update if source routes i.e. if tcp_remote is
21656 * different from the destination.
21657 */ 21658 if (tcp->tcp_ipversion == IPV4_VERSION) { 21659 if (tcp->tcp_remote != tcp->tcp_ipha->ipha_dst) { 21660 return (0); 21661 } 21662 mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN, 21663 &ipic); 21664 } else { 21665 if (!(IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6, 21666 &tcp->tcp_ip6h->ip6_dst))) { 21667 return (0); 21668 } 21669 mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN, 21670 &ipic); 21671 } 21672 21673 /* Record route attributes in the IRE for use by future connections. */ 21674 if (mp == NULL) 21675 return (0); 21676 21677 /* 21678 * We do not have a good algorithm to update ssthresh at this time. 21679 * So don't do any update. 21680 */ 21681 ipic->ipic_rtt = tcp->tcp_rtt_sa; 21682 ipic->ipic_rtt_sd = tcp->tcp_rtt_sd; 21683 21684 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 21685 return (0); 21686 } 21687 21688 /* 21689 * Generate a "no listener here" RST in response to an "unknown" segment. 21690 * Note that we are reusing the incoming mp to construct the outgoing 21691 * RST. 21692 */ 21693 void 21694 tcp_xmit_listeners_reset(mblk_t *mp, uint_t ip_hdr_len) 21695 { 21696 uchar_t *rptr; 21697 uint32_t seg_len; 21698 tcph_t *tcph; 21699 uint32_t seg_seq; 21700 uint32_t seg_ack; 21701 uint_t flags; 21702 mblk_t *ipsec_mp; 21703 ipha_t *ipha; 21704 ip6_t *ip6h; 21705 boolean_t mctl_present = B_FALSE; 21706 boolean_t check = B_TRUE; 21707 boolean_t policy_present; 21708 21709 TCP_STAT(tcp_no_listener); 21710 21711 ipsec_mp = mp; 21712 21713 if (mp->b_datap->db_type == M_CTL) { 21714 ipsec_in_t *ii; 21715 21716 mctl_present = B_TRUE; 21717 mp = mp->b_cont; 21718 21719 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 21720 ASSERT(ii->ipsec_in_type == IPSEC_IN); 21721 if (ii->ipsec_in_dont_check) { 21722 check = B_FALSE; 21723 if (!ii->ipsec_in_secure) { 21724 freeb(ipsec_mp); 21725 mctl_present = B_FALSE; 21726 ipsec_mp = mp; 21727 } 21728 } 21729 } 21730 21731 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 21732 policy_present = ipsec_inbound_v4_policy_present; 21733 ipha = (ipha_t *)mp->b_rptr; 21734 ip6h = NULL; 21735 } else { 21736 policy_present = ipsec_inbound_v6_policy_present; 21737 ipha = NULL; 21738 ip6h = (ip6_t *)mp->b_rptr; 21739 } 21740 21741 if (check && policy_present) { 21742 /* 21743 * The conn_t parameter is NULL because we already know 21744 * nobody's home. 21745 */ 21746 ipsec_mp = ipsec_check_global_policy( 21747 ipsec_mp, (conn_t *)NULL, ipha, ip6h, mctl_present); 21748 if (ipsec_mp == NULL) 21749 return; 21750 } 21751 if (is_system_labeled() && !tsol_can_reply_error(mp)) { 21752 DTRACE_PROBE2( 21753 tx__ip__log__error__nolistener__tcp, 21754 char *, "Could not reply with RST to mp(1)", 21755 mblk_t *, mp); 21756 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n")); 21757 freemsg(ipsec_mp); 21758 return; 21759 } 21760 21761 rptr = mp->b_rptr; 21762 21763 tcph = (tcph_t *)&rptr[ip_hdr_len]; 21764 seg_seq = BE32_TO_U32(tcph->th_seq); 21765 seg_ack = BE32_TO_U32(tcph->th_ack); 21766 flags = tcph->th_flags[0]; 21767 21768 seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcph) + ip_hdr_len); 21769 if (flags & TH_RST) { 21770 freemsg(ipsec_mp); 21771 } else if (flags & TH_ACK) { 21772 tcp_xmit_early_reset("no tcp, reset", 21773 ipsec_mp, seg_ack, 0, TH_RST, ip_hdr_len); 21774 } else { 21775 if (flags & TH_SYN) { 21776 seg_len++; 21777 } else { 21778 /* 21779 * Here we violate the RFC. Note that a normal 21780 * TCP will never send a segment without the ACK 21781 * flag, except for RST or SYN segment. This 21782 * segment is neither. 
Just drop it on the 21783 * floor. 21784 */ 21785 freemsg(ipsec_mp); 21786 tcp_rst_unsent++; 21787 return; 21788 } 21789 21790 tcp_xmit_early_reset("no tcp, reset/ack", 21791 ipsec_mp, 0, seg_seq + seg_len, 21792 TH_RST | TH_ACK, ip_hdr_len); 21793 } 21794 } 21795 21796 /* 21797 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with 21798 * ip and tcp header ready to pass down to IP. If the mp passed in is 21799 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that 21800 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary 21801 * otherwise it will dup partial mblks.) 21802 * Otherwise, an appropriate ACK packet will be generated. This 21803 * routine is not usually called to send new data for the first time. It 21804 * is mostly called out of the timer for retransmits, and to generate ACKs. 21805 * 21806 * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will 21807 * be adjusted by *offset. And after dupb(), the offset and the ending mblk 21808 * of the original mblk chain will be returned in *offset and *end_mp. 21809 */ 21810 static mblk_t * 21811 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset, 21812 mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len, 21813 boolean_t rexmit) 21814 { 21815 int data_length; 21816 int32_t off = 0; 21817 uint_t flags; 21818 mblk_t *mp1; 21819 mblk_t *mp2; 21820 uchar_t *rptr; 21821 tcph_t *tcph; 21822 int32_t num_sack_blk = 0; 21823 int32_t sack_opt_len = 0; 21824 21825 /* Allocate for our maximum TCP header + link-level */ 21826 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcp_wroff_xtra, 21827 BPRI_MED); 21828 if (!mp1) 21829 return (NULL); 21830 data_length = 0; 21831 21832 /* 21833 * Note that tcp_mss has been adjusted to take into account the 21834 * timestamp option if applicable. Because SACK options do not 21835 * appear in every TCP segments and they are of variable lengths, 21836 * they cannot be included in tcp_mss. Thus we need to calculate 21837 * the actual segment length when we need to send a segment which 21838 * includes SACK options. 21839 */ 21840 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 21841 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 21842 tcp->tcp_num_sack_blk); 21843 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 21844 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 21845 if (max_to_send + sack_opt_len > tcp->tcp_mss) 21846 max_to_send -= sack_opt_len; 21847 } 21848 21849 if (offset != NULL) { 21850 off = *offset; 21851 /* We use offset as an indicator that end_mp is not NULL. */ 21852 *end_mp = NULL; 21853 } 21854 for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) { 21855 /* This could be faster with cooperation from downstream */ 21856 if (mp2 != mp1 && !sendall && 21857 data_length + (int)(mp->b_wptr - mp->b_rptr) > 21858 max_to_send) 21859 /* 21860 * Don't send the next mblk since the whole mblk 21861 * does not fit. 
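 *
 * Illustrative sketch (not part of the original source): the SACK option
 * length adjustment made above follows from the option layout, i.e. a
 * 2-byte kind/length header padded to a 32-bit boundary by two NOPs, plus
 * one pair of 32-bit sequence numbers per block.  As a made-up helper:
 *
 *	static int
 *	sack_option_len(int nblks)
 *	{
 *		// 2 NOPs + 2-byte header + 8 bytes per SACK block
 *		return (2 + 2 + nblks * 8);
 *	}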
21862 */ 21863 break; 21864 mp2->b_cont = dupb(mp); 21865 mp2 = mp2->b_cont; 21866 if (!mp2) { 21867 freemsg(mp1); 21868 return (NULL); 21869 } 21870 mp2->b_rptr += off; 21871 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 21872 (uintptr_t)INT_MAX); 21873 21874 data_length += (int)(mp2->b_wptr - mp2->b_rptr); 21875 if (data_length > max_to_send) { 21876 mp2->b_wptr -= data_length - max_to_send; 21877 data_length = max_to_send; 21878 off = mp2->b_wptr - mp->b_rptr; 21879 break; 21880 } else { 21881 off = 0; 21882 } 21883 } 21884 if (offset != NULL) { 21885 *offset = off; 21886 *end_mp = mp; 21887 } 21888 if (seg_len != NULL) { 21889 *seg_len = data_length; 21890 } 21891 21892 /* Update the latest receive window size in TCP header. */ 21893 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 21894 tcp->tcp_tcph->th_win); 21895 21896 rptr = mp1->b_rptr + tcp_wroff_xtra; 21897 mp1->b_rptr = rptr; 21898 mp1->b_wptr = rptr + tcp->tcp_hdr_len + sack_opt_len; 21899 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 21900 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 21901 U32_TO_ABE32(seq, tcph->th_seq); 21902 21903 /* 21904 * Use tcp_unsent to determine if the PUSH bit should be used assumes 21905 * that this function was called from tcp_wput_data. Thus, when called 21906 * to retransmit data the setting of the PUSH bit may appear some 21907 * what random in that it might get set when it should not. This 21908 * should not pose any performance issues. 21909 */ 21910 if (data_length != 0 && (tcp->tcp_unsent == 0 || 21911 tcp->tcp_unsent == data_length)) { 21912 flags = TH_ACK | TH_PUSH; 21913 } else { 21914 flags = TH_ACK; 21915 } 21916 21917 if (tcp->tcp_ecn_ok) { 21918 if (tcp->tcp_ecn_echo_on) 21919 flags |= TH_ECE; 21920 21921 /* 21922 * Only set ECT bit and ECN_CWR if a segment contains new data. 21923 * There is no TCP flow control for non-data segments, and 21924 * only data segment is transmitted reliably. 21925 */ 21926 if (data_length > 0 && !rexmit) { 21927 SET_ECT(tcp, rptr); 21928 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 21929 flags |= TH_CWR; 21930 tcp->tcp_ecn_cwr_sent = B_TRUE; 21931 } 21932 } 21933 } 21934 21935 if (tcp->tcp_valid_bits) { 21936 uint32_t u1; 21937 21938 if ((tcp->tcp_valid_bits & TCP_ISS_VALID) && 21939 seq == tcp->tcp_iss) { 21940 uchar_t *wptr; 21941 21942 /* 21943 * If TCP_ISS_VALID and the seq number is tcp_iss, 21944 * TCP can only be in SYN-SENT, SYN-RCVD or 21945 * FIN-WAIT-1 state. It can be FIN-WAIT-1 if 21946 * our SYN is not ack'ed but the app closes this 21947 * TCP connection. 21948 */ 21949 ASSERT(tcp->tcp_state == TCPS_SYN_SENT || 21950 tcp->tcp_state == TCPS_SYN_RCVD || 21951 tcp->tcp_state == TCPS_FIN_WAIT_1); 21952 21953 /* 21954 * Tack on the MSS option. It is always needed 21955 * for both active and passive open. 21956 * 21957 * MSS option value should be interface MTU - MIN 21958 * TCP/IP header according to RFC 793 as it means 21959 * the maximum segment size TCP can receive. But 21960 * to get around some broken middle boxes/end hosts 21961 * out there, we allow the option value to be the 21962 * same as the MSS option size on the peer side. 21963 * In this way, the other side will not send 21964 * anything larger than they can receive. 21965 * 21966 * Note that for SYN_SENT state, the ndd param 21967 * tcp_use_smss_as_mss_opt has no effect as we 21968 * don't know the peer's MSS option value. So 21969 * the only case we need to take care of is in 21970 * SYN_RCVD state, which is done later. 
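 *
 * Illustrative sketch (not part of the original source): the MSS option
 * discussed above is encoded just below as the usual kind-2/length-4
 * option carrying the value described in the comment (interface MTU minus
 * the minimum IP and TCP headers).  A made-up encoder:
 *
 *	static uint8_t *
 *	put_mss_option(uint8_t *wp, uint16_t mss)
 *	{
 *		// kind 2, length 4, value in network byte order
 *		wp[0] = 2;
 *		wp[1] = 4;
 *		wp[2] = (uint8_t)(mss >> 8);
 *		wp[3] = (uint8_t)(mss & 0xff);
 *		return (wp + 4);
 *	}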
21971 */ 21972 wptr = mp1->b_wptr; 21973 wptr[0] = TCPOPT_MAXSEG; 21974 wptr[1] = TCPOPT_MAXSEG_LEN; 21975 wptr += 2; 21976 u1 = tcp->tcp_if_mtu - 21977 (tcp->tcp_ipversion == IPV4_VERSION ? 21978 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) - 21979 TCP_MIN_HEADER_LENGTH; 21980 U16_TO_BE16(u1, wptr); 21981 mp1->b_wptr = wptr + 2; 21982 /* Update the offset to cover the additional word */ 21983 tcph->th_offset_and_rsrvd[0] += (1 << 4); 21984 21985 /* 21986 * Note that the following way of filling in 21987 * TCP options are not optimal. Some NOPs can 21988 * be saved. But there is no need at this time 21989 * to optimize it. When it is needed, we will 21990 * do it. 21991 */ 21992 switch (tcp->tcp_state) { 21993 case TCPS_SYN_SENT: 21994 flags = TH_SYN; 21995 21996 if (tcp->tcp_snd_ts_ok) { 21997 uint32_t llbolt = (uint32_t)lbolt; 21998 21999 wptr = mp1->b_wptr; 22000 wptr[0] = TCPOPT_NOP; 22001 wptr[1] = TCPOPT_NOP; 22002 wptr[2] = TCPOPT_TSTAMP; 22003 wptr[3] = TCPOPT_TSTAMP_LEN; 22004 wptr += 4; 22005 U32_TO_BE32(llbolt, wptr); 22006 wptr += 4; 22007 ASSERT(tcp->tcp_ts_recent == 0); 22008 U32_TO_BE32(0L, wptr); 22009 mp1->b_wptr += TCPOPT_REAL_TS_LEN; 22010 tcph->th_offset_and_rsrvd[0] += 22011 (3 << 4); 22012 } 22013 22014 /* 22015 * Set up all the bits to tell other side 22016 * we are ECN capable. 22017 */ 22018 if (tcp->tcp_ecn_ok) { 22019 flags |= (TH_ECE | TH_CWR); 22020 } 22021 break; 22022 case TCPS_SYN_RCVD: 22023 flags |= TH_SYN; 22024 22025 /* 22026 * Reset the MSS option value to be SMSS 22027 * We should probably add back the bytes 22028 * for timestamp option and IPsec. We 22029 * don't do that as this is a workaround 22030 * for broken middle boxes/end hosts, it 22031 * is better for us to be more cautious. 22032 * They may not take these things into 22033 * account in their SMSS calculation. Thus 22034 * the peer's calculated SMSS may be smaller 22035 * than what it can be. This should be OK. 22036 */ 22037 if (tcp_use_smss_as_mss_opt) { 22038 u1 = tcp->tcp_mss; 22039 U16_TO_BE16(u1, wptr); 22040 } 22041 22042 /* 22043 * If the other side is ECN capable, reply 22044 * that we are also ECN capable. 22045 */ 22046 if (tcp->tcp_ecn_ok) 22047 flags |= TH_ECE; 22048 break; 22049 default: 22050 /* 22051 * The above ASSERT() makes sure that this 22052 * must be FIN-WAIT-1 state. Our SYN has 22053 * not been ack'ed so retransmit it. 22054 */ 22055 flags |= TH_SYN; 22056 break; 22057 } 22058 22059 if (tcp->tcp_snd_ws_ok) { 22060 wptr = mp1->b_wptr; 22061 wptr[0] = TCPOPT_NOP; 22062 wptr[1] = TCPOPT_WSCALE; 22063 wptr[2] = TCPOPT_WS_LEN; 22064 wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 22065 mp1->b_wptr += TCPOPT_REAL_WS_LEN; 22066 tcph->th_offset_and_rsrvd[0] += (1 << 4); 22067 } 22068 22069 if (tcp->tcp_snd_sack_ok) { 22070 wptr = mp1->b_wptr; 22071 wptr[0] = TCPOPT_NOP; 22072 wptr[1] = TCPOPT_NOP; 22073 wptr[2] = TCPOPT_SACK_PERMITTED; 22074 wptr[3] = TCPOPT_SACK_OK_LEN; 22075 mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 22076 tcph->th_offset_and_rsrvd[0] += (1 << 4); 22077 } 22078 22079 /* allocb() of adequate mblk assures space */ 22080 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 22081 (uintptr_t)INT_MAX); 22082 u1 = (int)(mp1->b_wptr - mp1->b_rptr); 22083 /* 22084 * Get IP set to checksum on our behalf 22085 * Include the adjustment for a source route if any. 
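			 *
			 * The addition below is folded one's-complement
			 * style. For instance (hypothetical values): if the
			 * header built so far is 44 bytes (0x2c) and tcp_sum
			 * is 0x1fffe, u1 becomes 0x2002a and the fold yields
			 * (0x2002a >> 16) + (0x2002a & 0xffff) = 0x2 + 0x2a
			 * = 0x2c, which is what ends up in th_sum.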
22086 */ 22087 u1 += tcp->tcp_sum; 22088 u1 = (u1 >> 16) + (u1 & 0xFFFF); 22089 U16_TO_BE16(u1, tcph->th_sum); 22090 BUMP_MIB(&tcp_mib, tcpOutControl); 22091 } 22092 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 22093 (seq + data_length) == tcp->tcp_fss) { 22094 if (!tcp->tcp_fin_acked) { 22095 flags |= TH_FIN; 22096 BUMP_MIB(&tcp_mib, tcpOutControl); 22097 } 22098 if (!tcp->tcp_fin_sent) { 22099 tcp->tcp_fin_sent = B_TRUE; 22100 switch (tcp->tcp_state) { 22101 case TCPS_SYN_RCVD: 22102 case TCPS_ESTABLISHED: 22103 tcp->tcp_state = TCPS_FIN_WAIT_1; 22104 break; 22105 case TCPS_CLOSE_WAIT: 22106 tcp->tcp_state = TCPS_LAST_ACK; 22107 break; 22108 } 22109 if (tcp->tcp_suna == tcp->tcp_snxt) 22110 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 22111 tcp->tcp_snxt = tcp->tcp_fss + 1; 22112 } 22113 } 22114 /* 22115 * Note the trick here. u1 is unsigned. When tcp_urg 22116 * is smaller than seq, u1 will become a very huge value. 22117 * So the comparison will fail. Also note that tcp_urp 22118 * should be positive, see RFC 793 page 17. 22119 */ 22120 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION; 22121 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 && 22122 u1 < (uint32_t)(64 * 1024)) { 22123 flags |= TH_URG; 22124 BUMP_MIB(&tcp_mib, tcpOutUrg); 22125 U32_TO_ABE16(u1, tcph->th_urp); 22126 } 22127 } 22128 tcph->th_flags[0] = (uchar_t)flags; 22129 tcp->tcp_rack = tcp->tcp_rnxt; 22130 tcp->tcp_rack_cnt = 0; 22131 22132 if (tcp->tcp_snd_ts_ok) { 22133 if (tcp->tcp_state != TCPS_SYN_SENT) { 22134 uint32_t llbolt = (uint32_t)lbolt; 22135 22136 U32_TO_BE32(llbolt, 22137 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 22138 U32_TO_BE32(tcp->tcp_ts_recent, 22139 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 22140 } 22141 } 22142 22143 if (num_sack_blk > 0) { 22144 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 22145 sack_blk_t *tmp; 22146 int32_t i; 22147 22148 wptr[0] = TCPOPT_NOP; 22149 wptr[1] = TCPOPT_NOP; 22150 wptr[2] = TCPOPT_SACK; 22151 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 22152 sizeof (sack_blk_t); 22153 wptr += TCPOPT_REAL_SACK_LEN; 22154 22155 tmp = tcp->tcp_sack_list; 22156 for (i = 0; i < num_sack_blk; i++) { 22157 U32_TO_BE32(tmp[i].begin, wptr); 22158 wptr += sizeof (tcp_seq); 22159 U32_TO_BE32(tmp[i].end, wptr); 22160 wptr += sizeof (tcp_seq); 22161 } 22162 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) << 4); 22163 } 22164 ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX); 22165 data_length += (int)(mp1->b_wptr - rptr); 22166 if (tcp->tcp_ipversion == IPV4_VERSION) { 22167 ((ipha_t *)rptr)->ipha_length = htons(data_length); 22168 } else { 22169 ip6_t *ip6 = (ip6_t *)(rptr + 22170 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 22171 sizeof (ip6i_t) : 0)); 22172 22173 ip6->ip6_plen = htons(data_length - 22174 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 22175 } 22176 22177 /* 22178 * Prime pump for IP 22179 * Include the adjustment for a source route if any. 22180 */ 22181 data_length -= tcp->tcp_ip_hdr_len; 22182 data_length += tcp->tcp_sum; 22183 data_length = (data_length >> 16) + (data_length & 0xFFFF); 22184 U16_TO_ABE16(data_length, tcph->th_sum); 22185 if (tcp->tcp_ip_forward_progress) { 22186 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 22187 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 22188 tcp->tcp_ip_forward_progress = B_FALSE; 22189 } 22190 return (mp1); 22191 } 22192 22193 /* This function handles the push timeout. 
*/ 22194 void 22195 tcp_push_timer(void *arg) 22196 { 22197 conn_t *connp = (conn_t *)arg; 22198 tcp_t *tcp = connp->conn_tcp; 22199 22200 TCP_DBGSTAT(tcp_push_timer_cnt); 22201 22202 ASSERT(tcp->tcp_listener == NULL); 22203 22204 /* 22205 * We need to stop synchronous streams temporarily to prevent a race 22206 * with tcp_fuse_rrw() or tcp_fusion rinfop(). It is safe to access 22207 * tcp_rcv_list here because those entry points will return right 22208 * away when synchronous streams is stopped. 22209 */ 22210 TCP_FUSE_SYNCSTR_STOP(tcp); 22211 tcp->tcp_push_tid = 0; 22212 if ((tcp->tcp_rcv_list != NULL) && 22213 (tcp_rcv_drain(tcp->tcp_rq, tcp) == TH_ACK_NEEDED)) 22214 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 22215 TCP_FUSE_SYNCSTR_RESUME(tcp); 22216 } 22217 22218 /* 22219 * This function handles delayed ACK timeout. 22220 */ 22221 static void 22222 tcp_ack_timer(void *arg) 22223 { 22224 conn_t *connp = (conn_t *)arg; 22225 tcp_t *tcp = connp->conn_tcp; 22226 mblk_t *mp; 22227 22228 TCP_DBGSTAT(tcp_ack_timer_cnt); 22229 22230 tcp->tcp_ack_tid = 0; 22231 22232 if (tcp->tcp_fused) 22233 return; 22234 22235 /* 22236 * Do not send ACK if there is no outstanding unack'ed data. 22237 */ 22238 if (tcp->tcp_rnxt == tcp->tcp_rack) { 22239 return; 22240 } 22241 22242 if ((tcp->tcp_rnxt - tcp->tcp_rack) > tcp->tcp_mss) { 22243 /* 22244 * Make sure we don't allow deferred ACKs to result in 22245 * timer-based ACKing. If we have held off an ACK 22246 * when there was more than an mss here, and the timer 22247 * goes off, we have to worry about the possibility 22248 * that the sender isn't doing slow-start, or is out 22249 * of step with us for some other reason. We fall 22250 * permanently back in the direction of 22251 * ACK-every-other-packet as suggested in RFC 1122. 22252 */ 22253 if (tcp->tcp_rack_abs_max > 2) 22254 tcp->tcp_rack_abs_max--; 22255 tcp->tcp_rack_cur_max = 2; 22256 } 22257 mp = tcp_ack_mp(tcp); 22258 22259 if (mp != NULL) { 22260 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 22261 BUMP_LOCAL(tcp->tcp_obsegs); 22262 BUMP_MIB(&tcp_mib, tcpOutAck); 22263 BUMP_MIB(&tcp_mib, tcpOutAckDelayed); 22264 tcp_send_data(tcp, tcp->tcp_wq, mp); 22265 } 22266 } 22267 22268 22269 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 22270 static mblk_t * 22271 tcp_ack_mp(tcp_t *tcp) 22272 { 22273 uint32_t seq_no; 22274 22275 /* 22276 * There are a few cases to be considered while setting the sequence no. 22277 * Essentially, we can come here while processing an unacceptable pkt 22278 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 22279 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 22280 * If we are here for a zero window probe, stick with suna. In all 22281 * other cases, we check if suna + swnd encompasses snxt and set 22282 * the sequence number to snxt, if so. If snxt falls outside the 22283 * window (the receiver probably shrunk its window), we will go with 22284 * suna + swnd, otherwise the sequence no will be unacceptable to the 22285 * receiver. 22286 */ 22287 if (tcp->tcp_zero_win_probe) { 22288 seq_no = tcp->tcp_suna; 22289 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 22290 ASSERT(tcp->tcp_swnd == 0); 22291 seq_no = tcp->tcp_snxt; 22292 } else { 22293 seq_no = SEQ_GT(tcp->tcp_snxt, 22294 (tcp->tcp_suna + tcp->tcp_swnd)) ? 
22295 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 22296 } 22297 22298 if (tcp->tcp_valid_bits) { 22299 /* 22300 * For the complex case where we have to send some 22301 * controls (FIN or SYN), let tcp_xmit_mp do it. 22302 */ 22303 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 22304 NULL, B_FALSE)); 22305 } else { 22306 /* Generate a simple ACK */ 22307 int data_length; 22308 uchar_t *rptr; 22309 tcph_t *tcph; 22310 mblk_t *mp1; 22311 int32_t tcp_hdr_len; 22312 int32_t tcp_tcp_hdr_len; 22313 int32_t num_sack_blk = 0; 22314 int32_t sack_opt_len; 22315 22316 /* 22317 * Allocate space for TCP + IP headers 22318 * and link-level header 22319 */ 22320 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 22321 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 22322 tcp->tcp_num_sack_blk); 22323 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 22324 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 22325 tcp_hdr_len = tcp->tcp_hdr_len + sack_opt_len; 22326 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + sack_opt_len; 22327 } else { 22328 tcp_hdr_len = tcp->tcp_hdr_len; 22329 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 22330 } 22331 mp1 = allocb(tcp_hdr_len + tcp_wroff_xtra, BPRI_MED); 22332 if (!mp1) 22333 return (NULL); 22334 22335 /* Update the latest receive window size in TCP header. */ 22336 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 22337 tcp->tcp_tcph->th_win); 22338 /* copy in prototype TCP + IP header */ 22339 rptr = mp1->b_rptr + tcp_wroff_xtra; 22340 mp1->b_rptr = rptr; 22341 mp1->b_wptr = rptr + tcp_hdr_len; 22342 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 22343 22344 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 22345 22346 /* Set the TCP sequence number. */ 22347 U32_TO_ABE32(seq_no, tcph->th_seq); 22348 22349 /* Set up the TCP flag field. */ 22350 tcph->th_flags[0] = (uchar_t)TH_ACK; 22351 if (tcp->tcp_ecn_echo_on) 22352 tcph->th_flags[0] |= TH_ECE; 22353 22354 tcp->tcp_rack = tcp->tcp_rnxt; 22355 tcp->tcp_rack_cnt = 0; 22356 22357 /* fill in timestamp option if in use */ 22358 if (tcp->tcp_snd_ts_ok) { 22359 uint32_t llbolt = (uint32_t)lbolt; 22360 22361 U32_TO_BE32(llbolt, 22362 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 22363 U32_TO_BE32(tcp->tcp_ts_recent, 22364 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 22365 } 22366 22367 /* Fill in SACK options */ 22368 if (num_sack_blk > 0) { 22369 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 22370 sack_blk_t *tmp; 22371 int32_t i; 22372 22373 wptr[0] = TCPOPT_NOP; 22374 wptr[1] = TCPOPT_NOP; 22375 wptr[2] = TCPOPT_SACK; 22376 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 22377 sizeof (sack_blk_t); 22378 wptr += TCPOPT_REAL_SACK_LEN; 22379 22380 tmp = tcp->tcp_sack_list; 22381 for (i = 0; i < num_sack_blk; i++) { 22382 U32_TO_BE32(tmp[i].begin, wptr); 22383 wptr += sizeof (tcp_seq); 22384 U32_TO_BE32(tmp[i].end, wptr); 22385 wptr += sizeof (tcp_seq); 22386 } 22387 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 22388 << 4); 22389 } 22390 22391 if (tcp->tcp_ipversion == IPV4_VERSION) { 22392 ((ipha_t *)rptr)->ipha_length = htons(tcp_hdr_len); 22393 } else { 22394 /* Check for ip6i_t header in sticky hdrs */ 22395 ip6_t *ip6 = (ip6_t *)(rptr + 22396 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 22397 sizeof (ip6i_t) : 0)); 22398 22399 ip6->ip6_plen = htons(tcp_hdr_len - 22400 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 22401 } 22402 22403 /* 22404 * Prime pump for checksum calculation in IP. Include the 22405 * adjustment for a source route if any. 
22406 */ 22407 data_length = tcp_tcp_hdr_len + tcp->tcp_sum; 22408 data_length = (data_length >> 16) + (data_length & 0xFFFF); 22409 U16_TO_ABE16(data_length, tcph->th_sum); 22410 22411 if (tcp->tcp_ip_forward_progress) { 22412 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 22413 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 22414 tcp->tcp_ip_forward_progress = B_FALSE; 22415 } 22416 return (mp1); 22417 } 22418 } 22419 22420 /* 22421 * To create a temporary tcp structure for inserting into bind hash list. 22422 * The parameter is assumed to be in network byte order, ready for use. 22423 */ 22424 /* ARGSUSED */ 22425 static tcp_t * 22426 tcp_alloc_temp_tcp(in_port_t port) 22427 { 22428 conn_t *connp; 22429 tcp_t *tcp; 22430 22431 connp = ipcl_conn_create(IPCL_TCPCONN, KM_SLEEP); 22432 if (connp == NULL) 22433 return (NULL); 22434 22435 tcp = connp->conn_tcp; 22436 22437 /* 22438 * Only initialize the necessary info in those structures. Note 22439 * that since INADDR_ANY is all 0, we do not need to set 22440 * tcp_bound_source to INADDR_ANY here. 22441 */ 22442 tcp->tcp_state = TCPS_BOUND; 22443 tcp->tcp_lport = port; 22444 tcp->tcp_exclbind = 1; 22445 tcp->tcp_reserved_port = 1; 22446 22447 /* Just for place holding... */ 22448 tcp->tcp_ipversion = IPV4_VERSION; 22449 22450 return (tcp); 22451 } 22452 22453 /* 22454 * To remove a port range specified by lo_port and hi_port from the 22455 * reserved port ranges. This is one of the three public functions of 22456 * the reserved port interface. Note that a port range has to be removed 22457 * as a whole. Ports in a range cannot be removed individually. 22458 * 22459 * Params: 22460 * in_port_t lo_port: the beginning port of the reserved port range to 22461 * be deleted. 22462 * in_port_t hi_port: the ending port of the reserved port range to 22463 * be deleted. 22464 * 22465 * Return: 22466 * B_TRUE if the deletion is successful, B_FALSE otherwise. 22467 */ 22468 boolean_t 22469 tcp_reserved_port_del(in_port_t lo_port, in_port_t hi_port) 22470 { 22471 int i, j; 22472 int size; 22473 tcp_t **temp_tcp_array; 22474 tcp_t *tcp; 22475 22476 rw_enter(&tcp_reserved_port_lock, RW_WRITER); 22477 22478 /* First make sure that the port ranage is indeed reserved. */ 22479 for (i = 0; i < tcp_reserved_port_array_size; i++) { 22480 if (tcp_reserved_port[i].lo_port == lo_port) { 22481 hi_port = tcp_reserved_port[i].hi_port; 22482 temp_tcp_array = tcp_reserved_port[i].temp_tcp_array; 22483 break; 22484 } 22485 } 22486 if (i == tcp_reserved_port_array_size) { 22487 rw_exit(&tcp_reserved_port_lock); 22488 return (B_FALSE); 22489 } 22490 22491 /* 22492 * Remove the range from the array. This simple loop is possible 22493 * because port ranges are inserted in ascending order. 22494 */ 22495 for (j = i; j < tcp_reserved_port_array_size - 1; j++) { 22496 tcp_reserved_port[j].lo_port = tcp_reserved_port[j+1].lo_port; 22497 tcp_reserved_port[j].hi_port = tcp_reserved_port[j+1].hi_port; 22498 tcp_reserved_port[j].temp_tcp_array = 22499 tcp_reserved_port[j+1].temp_tcp_array; 22500 } 22501 22502 /* Remove all the temporary tcp structures. 
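	 * Each placeholder tcp_t in temp_tcp_array was inserted into the
	 * bind hash with its own conn reference, so both the hash entry and
	 * the reference are released below before the array itself is freed.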
*/ 22503 size = hi_port - lo_port + 1; 22504 while (size > 0) { 22505 tcp = temp_tcp_array[size - 1]; 22506 ASSERT(tcp != NULL); 22507 tcp_bind_hash_remove(tcp); 22508 CONN_DEC_REF(tcp->tcp_connp); 22509 size--; 22510 } 22511 kmem_free(temp_tcp_array, (hi_port - lo_port + 1) * sizeof (tcp_t *)); 22512 tcp_reserved_port_array_size--; 22513 rw_exit(&tcp_reserved_port_lock); 22514 return (B_TRUE); 22515 } 22516 22517 /* 22518 * Macro to remove temporary tcp structure from the bind hash list. The 22519 * first parameter is the list of tcp to be removed. The second parameter 22520 * is the number of tcps in the array. 22521 */ 22522 #define TCP_TMP_TCP_REMOVE(tcp_array, num) \ 22523 { \ 22524 while ((num) > 0) { \ 22525 tcp_t *tcp = (tcp_array)[(num) - 1]; \ 22526 tf_t *tbf; \ 22527 tcp_t *tcpnext; \ 22528 tbf = &tcp_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)]; \ 22529 mutex_enter(&tbf->tf_lock); \ 22530 tcpnext = tcp->tcp_bind_hash; \ 22531 if (tcpnext) { \ 22532 tcpnext->tcp_ptpbhn = \ 22533 tcp->tcp_ptpbhn; \ 22534 } \ 22535 *tcp->tcp_ptpbhn = tcpnext; \ 22536 mutex_exit(&tbf->tf_lock); \ 22537 kmem_free(tcp, sizeof (tcp_t)); \ 22538 (tcp_array)[(num) - 1] = NULL; \ 22539 (num)--; \ 22540 } \ 22541 } 22542 22543 /* 22544 * The public interface for other modules to call to reserve a port range 22545 * in TCP. The caller passes in how large a port range it wants. TCP 22546 * will try to find a range and return it via lo_port and hi_port. This is 22547 * used by NCA's nca_conn_init. 22548 * NCA can only be used in the global zone so this only affects the global 22549 * zone's ports. 22550 * 22551 * Params: 22552 * int size: the size of the port range to be reserved. 22553 * in_port_t *lo_port (referenced): returns the beginning port of the 22554 * reserved port range added. 22555 * in_port_t *hi_port (referenced): returns the ending port of the 22556 * reserved port range added. 22557 * 22558 * Return: 22559 * B_TRUE if the port reservation is successful, B_FALSE otherwise. 22560 */ 22561 boolean_t 22562 tcp_reserved_port_add(int size, in_port_t *lo_port, in_port_t *hi_port) 22563 { 22564 tcp_t *tcp; 22565 tcp_t *tmp_tcp; 22566 tcp_t **temp_tcp_array; 22567 tf_t *tbf; 22568 in_port_t net_port; 22569 in_port_t port; 22570 int32_t cur_size; 22571 int i, j; 22572 boolean_t used; 22573 tcp_rport_t tmp_ports[TCP_RESERVED_PORTS_ARRAY_MAX_SIZE]; 22574 zoneid_t zoneid = GLOBAL_ZONEID; 22575 22576 /* Sanity check. */ 22577 if (size <= 0 || size > TCP_RESERVED_PORTS_RANGE_MAX) { 22578 return (B_FALSE); 22579 } 22580 22581 rw_enter(&tcp_reserved_port_lock, RW_WRITER); 22582 if (tcp_reserved_port_array_size == TCP_RESERVED_PORTS_ARRAY_MAX_SIZE) { 22583 rw_exit(&tcp_reserved_port_lock); 22584 return (B_FALSE); 22585 } 22586 22587 /* 22588 * Find the starting port to try. Since the port ranges are ordered 22589 * in the reserved port array, we can do a simple search here. 22590 */ 22591 *lo_port = TCP_SMALLEST_RESERVED_PORT; 22592 *hi_port = TCP_LARGEST_RESERVED_PORT; 22593 for (i = 0; i < tcp_reserved_port_array_size; 22594 *lo_port = tcp_reserved_port[i].hi_port + 1, i++) { 22595 if (tcp_reserved_port[i].lo_port - *lo_port >= size) { 22596 *hi_port = tcp_reserved_port[i].lo_port - 1; 22597 break; 22598 } 22599 } 22600 /* No available port range. 
*/ 22601 if (i == tcp_reserved_port_array_size && *hi_port - *lo_port < size) { 22602 rw_exit(&tcp_reserved_port_lock); 22603 return (B_FALSE); 22604 } 22605 22606 temp_tcp_array = kmem_zalloc(size * sizeof (tcp_t *), KM_NOSLEEP); 22607 if (temp_tcp_array == NULL) { 22608 rw_exit(&tcp_reserved_port_lock); 22609 return (B_FALSE); 22610 } 22611 22612 /* Go thru the port range to see if some ports are already bound. */ 22613 for (port = *lo_port, cur_size = 0; 22614 cur_size < size && port <= *hi_port; 22615 cur_size++, port++) { 22616 used = B_FALSE; 22617 net_port = htons(port); 22618 tbf = &tcp_bind_fanout[TCP_BIND_HASH(net_port)]; 22619 mutex_enter(&tbf->tf_lock); 22620 for (tcp = tbf->tf_tcp; tcp != NULL; 22621 tcp = tcp->tcp_bind_hash) { 22622 if (IPCL_ZONE_MATCH(tcp->tcp_connp, zoneid) && 22623 net_port == tcp->tcp_lport) { 22624 /* 22625 * A port is already bound. Search again 22626 * starting from port + 1. Release all 22627 * temporary tcps. 22628 */ 22629 mutex_exit(&tbf->tf_lock); 22630 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size); 22631 *lo_port = port + 1; 22632 cur_size = -1; 22633 used = B_TRUE; 22634 break; 22635 } 22636 } 22637 if (!used) { 22638 if ((tmp_tcp = tcp_alloc_temp_tcp(net_port)) == NULL) { 22639 /* 22640 * Allocation failure. Just fail the request. 22641 * Need to remove all those temporary tcp 22642 * structures. 22643 */ 22644 mutex_exit(&tbf->tf_lock); 22645 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size); 22646 rw_exit(&tcp_reserved_port_lock); 22647 kmem_free(temp_tcp_array, 22648 (hi_port - lo_port + 1) * 22649 sizeof (tcp_t *)); 22650 return (B_FALSE); 22651 } 22652 temp_tcp_array[cur_size] = tmp_tcp; 22653 tcp_bind_hash_insert(tbf, tmp_tcp, B_TRUE); 22654 mutex_exit(&tbf->tf_lock); 22655 } 22656 } 22657 22658 /* 22659 * The current range is not large enough. We can actually do another 22660 * search if this search is done between 2 reserved port ranges. But 22661 * for first release, we just stop here and return saying that no port 22662 * range is available. 22663 */ 22664 if (cur_size < size) { 22665 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size); 22666 rw_exit(&tcp_reserved_port_lock); 22667 kmem_free(temp_tcp_array, size * sizeof (tcp_t *)); 22668 return (B_FALSE); 22669 } 22670 *hi_port = port - 1; 22671 22672 /* 22673 * Insert range into array in ascending order. Since this function 22674 * must not be called often, we choose to use the simplest method. 22675 * The above array should not consume excessive stack space as 22676 * the size must be very small. If in future releases, we find 22677 * that we should provide more reserved port ranges, this function 22678 * has to be modified to be more efficient. 
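	 *
	 * For illustration (hypothetical ranges): if the array already
	 * holds [10050-10059] and [10200-10209] and the new range is
	 * [10100-10109], the pass below produces [10050-10059],
	 * [10100-10109], [10200-10209], keeping the entries sorted by
	 * lo_port.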
22679 */ 22680 if (tcp_reserved_port_array_size == 0) { 22681 tcp_reserved_port[0].lo_port = *lo_port; 22682 tcp_reserved_port[0].hi_port = *hi_port; 22683 tcp_reserved_port[0].temp_tcp_array = temp_tcp_array; 22684 } else { 22685 for (i = 0, j = 0; i < tcp_reserved_port_array_size; i++, j++) { 22686 if (*lo_port < tcp_reserved_port[i].lo_port && i == j) { 22687 tmp_ports[j].lo_port = *lo_port; 22688 tmp_ports[j].hi_port = *hi_port; 22689 tmp_ports[j].temp_tcp_array = temp_tcp_array; 22690 j++; 22691 } 22692 tmp_ports[j].lo_port = tcp_reserved_port[i].lo_port; 22693 tmp_ports[j].hi_port = tcp_reserved_port[i].hi_port; 22694 tmp_ports[j].temp_tcp_array = 22695 tcp_reserved_port[i].temp_tcp_array; 22696 } 22697 if (j == i) { 22698 tmp_ports[j].lo_port = *lo_port; 22699 tmp_ports[j].hi_port = *hi_port; 22700 tmp_ports[j].temp_tcp_array = temp_tcp_array; 22701 } 22702 bcopy(tmp_ports, tcp_reserved_port, sizeof (tmp_ports)); 22703 } 22704 tcp_reserved_port_array_size++; 22705 rw_exit(&tcp_reserved_port_lock); 22706 return (B_TRUE); 22707 } 22708 22709 /* 22710 * Check to see if a port is in any reserved port range. 22711 * 22712 * Params: 22713 * in_port_t port: the port to be verified. 22714 * 22715 * Return: 22716 * B_TRUE is the port is inside a reserved port range, B_FALSE otherwise. 22717 */ 22718 boolean_t 22719 tcp_reserved_port_check(in_port_t port) 22720 { 22721 int i; 22722 22723 rw_enter(&tcp_reserved_port_lock, RW_READER); 22724 for (i = 0; i < tcp_reserved_port_array_size; i++) { 22725 if (port >= tcp_reserved_port[i].lo_port || 22726 port <= tcp_reserved_port[i].hi_port) { 22727 rw_exit(&tcp_reserved_port_lock); 22728 return (B_TRUE); 22729 } 22730 } 22731 rw_exit(&tcp_reserved_port_lock); 22732 return (B_FALSE); 22733 } 22734 22735 /* 22736 * To list all reserved port ranges. This is the function to handle 22737 * ndd tcp_reserved_port_list. 22738 */ 22739 /* ARGSUSED */ 22740 static int 22741 tcp_reserved_port_list(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 22742 { 22743 int i; 22744 22745 rw_enter(&tcp_reserved_port_lock, RW_READER); 22746 if (tcp_reserved_port_array_size > 0) 22747 (void) mi_mpprintf(mp, "The following ports are reserved:"); 22748 else 22749 (void) mi_mpprintf(mp, "No port is reserved."); 22750 for (i = 0; i < tcp_reserved_port_array_size; i++) { 22751 (void) mi_mpprintf(mp, "%d-%d", 22752 tcp_reserved_port[i].lo_port, tcp_reserved_port[i].hi_port); 22753 } 22754 rw_exit(&tcp_reserved_port_lock); 22755 return (0); 22756 } 22757 22758 /* 22759 * Hash list insertion routine for tcp_t structures. 22760 * Inserts entries with the ones bound to a specific IP address first 22761 * followed by those bound to INADDR_ANY. 22762 */ 22763 static void 22764 tcp_bind_hash_insert(tf_t *tbf, tcp_t *tcp, int caller_holds_lock) 22765 { 22766 tcp_t **tcpp; 22767 tcp_t *tcpnext; 22768 22769 if (tcp->tcp_ptpbhn != NULL) { 22770 ASSERT(!caller_holds_lock); 22771 tcp_bind_hash_remove(tcp); 22772 } 22773 tcpp = &tbf->tf_tcp; 22774 if (!caller_holds_lock) { 22775 mutex_enter(&tbf->tf_lock); 22776 } else { 22777 ASSERT(MUTEX_HELD(&tbf->tf_lock)); 22778 } 22779 tcpnext = tcpp[0]; 22780 if (tcpnext) { 22781 /* 22782 * If the new tcp bound to the INADDR_ANY address 22783 * and the first one in the list is not bound to 22784 * INADDR_ANY we skip all entries until we find the 22785 * first one bound to INADDR_ANY. 22786 * This makes sure that applications binding to a 22787 * specific address get preference over those binding to 22788 * INADDR_ANY. 
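		 *
		 * For illustration (hypothetical endpoints): if the chain is
		 * currently {10.1.1.1:8080, 10.1.1.2:8080, INADDR_ANY:8080}
		 * and another INADDR_ANY:8080 endpoint is inserted, it is
		 * linked in just ahead of the existing INADDR_ANY entry,
		 * i.e. after both entries bound to specific addresses.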
22789 */ 22790 if (V6_OR_V4_INADDR_ANY(tcp->tcp_bound_source_v6) && 22791 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) { 22792 while ((tcpnext = tcpp[0]) != NULL && 22793 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) 22794 tcpp = &(tcpnext->tcp_bind_hash); 22795 if (tcpnext) 22796 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 22797 } else 22798 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 22799 } 22800 tcp->tcp_bind_hash = tcpnext; 22801 tcp->tcp_ptpbhn = tcpp; 22802 tcpp[0] = tcp; 22803 if (!caller_holds_lock) 22804 mutex_exit(&tbf->tf_lock); 22805 } 22806 22807 /* 22808 * Hash list removal routine for tcp_t structures. 22809 */ 22810 static void 22811 tcp_bind_hash_remove(tcp_t *tcp) 22812 { 22813 tcp_t *tcpnext; 22814 kmutex_t *lockp; 22815 22816 if (tcp->tcp_ptpbhn == NULL) 22817 return; 22818 22819 /* 22820 * Extract the lock pointer in case there are concurrent 22821 * hash_remove's for this instance. 22822 */ 22823 ASSERT(tcp->tcp_lport != 0); 22824 lockp = &tcp_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)].tf_lock; 22825 22826 ASSERT(lockp != NULL); 22827 mutex_enter(lockp); 22828 if (tcp->tcp_ptpbhn) { 22829 tcpnext = tcp->tcp_bind_hash; 22830 if (tcpnext) { 22831 tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn; 22832 tcp->tcp_bind_hash = NULL; 22833 } 22834 *tcp->tcp_ptpbhn = tcpnext; 22835 tcp->tcp_ptpbhn = NULL; 22836 } 22837 mutex_exit(lockp); 22838 } 22839 22840 22841 /* 22842 * Hash list lookup routine for tcp_t structures. 22843 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF. 22844 */ 22845 static tcp_t * 22846 tcp_acceptor_hash_lookup(t_uscalar_t id) 22847 { 22848 tf_t *tf; 22849 tcp_t *tcp; 22850 22851 tf = &tcp_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 22852 mutex_enter(&tf->tf_lock); 22853 for (tcp = tf->tf_tcp; tcp != NULL; 22854 tcp = tcp->tcp_acceptor_hash) { 22855 if (tcp->tcp_acceptor_id == id) { 22856 CONN_INC_REF(tcp->tcp_connp); 22857 mutex_exit(&tf->tf_lock); 22858 return (tcp); 22859 } 22860 } 22861 mutex_exit(&tf->tf_lock); 22862 return (NULL); 22863 } 22864 22865 22866 /* 22867 * Hash list insertion routine for tcp_t structures. 22868 */ 22869 void 22870 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp) 22871 { 22872 tf_t *tf; 22873 tcp_t **tcpp; 22874 tcp_t *tcpnext; 22875 22876 tf = &tcp_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 22877 22878 if (tcp->tcp_ptpahn != NULL) 22879 tcp_acceptor_hash_remove(tcp); 22880 tcpp = &tf->tf_tcp; 22881 mutex_enter(&tf->tf_lock); 22882 tcpnext = tcpp[0]; 22883 if (tcpnext) 22884 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash; 22885 tcp->tcp_acceptor_hash = tcpnext; 22886 tcp->tcp_ptpahn = tcpp; 22887 tcpp[0] = tcp; 22888 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */ 22889 mutex_exit(&tf->tf_lock); 22890 } 22891 22892 /* 22893 * Hash list removal routine for tcp_t structures. 22894 */ 22895 static void 22896 tcp_acceptor_hash_remove(tcp_t *tcp) 22897 { 22898 tcp_t *tcpnext; 22899 kmutex_t *lockp; 22900 22901 /* 22902 * Extract the lock pointer in case there are concurrent 22903 * hash_remove's for this instance. 
22904 */ 22905 lockp = tcp->tcp_acceptor_lockp; 22906 22907 if (tcp->tcp_ptpahn == NULL) 22908 return; 22909 22910 ASSERT(lockp != NULL); 22911 mutex_enter(lockp); 22912 if (tcp->tcp_ptpahn) { 22913 tcpnext = tcp->tcp_acceptor_hash; 22914 if (tcpnext) { 22915 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn; 22916 tcp->tcp_acceptor_hash = NULL; 22917 } 22918 *tcp->tcp_ptpahn = tcpnext; 22919 tcp->tcp_ptpahn = NULL; 22920 } 22921 mutex_exit(lockp); 22922 tcp->tcp_acceptor_lockp = NULL; 22923 } 22924 22925 /* ARGSUSED */ 22926 static int 22927 tcp_host_param_setvalue(queue_t *q, mblk_t *mp, char *value, caddr_t cp, int af) 22928 { 22929 int error = 0; 22930 int retval; 22931 char *end; 22932 22933 tcp_hsp_t *hsp; 22934 tcp_hsp_t *hspprev; 22935 22936 ipaddr_t addr = 0; /* Address we're looking for */ 22937 in6_addr_t v6addr; /* Address we're looking for */ 22938 uint32_t hash; /* Hash of that address */ 22939 22940 /* 22941 * If the following variables are still zero after parsing the input 22942 * string, the user didn't specify them and we don't change them in 22943 * the HSP. 22944 */ 22945 22946 ipaddr_t mask = 0; /* Subnet mask */ 22947 in6_addr_t v6mask; 22948 long sendspace = 0; /* Send buffer size */ 22949 long recvspace = 0; /* Receive buffer size */ 22950 long timestamp = 0; /* Originate TCP TSTAMP option, 1 = yes */ 22951 boolean_t delete = B_FALSE; /* User asked to delete this HSP */ 22952 22953 rw_enter(&tcp_hsp_lock, RW_WRITER); 22954 22955 /* Parse and validate address */ 22956 if (af == AF_INET) { 22957 retval = inet_pton(af, value, &addr); 22958 if (retval == 1) 22959 IN6_IPADDR_TO_V4MAPPED(addr, &v6addr); 22960 } else if (af == AF_INET6) { 22961 retval = inet_pton(af, value, &v6addr); 22962 } else { 22963 error = EINVAL; 22964 goto done; 22965 } 22966 if (retval == 0) { 22967 error = EINVAL; 22968 goto done; 22969 } 22970 22971 while ((*value) && *value != ' ') 22972 value++; 22973 22974 /* Parse individual keywords, set variables if found */ 22975 while (*value) { 22976 /* Skip leading blanks */ 22977 22978 while (*value == ' ' || *value == '\t') 22979 value++; 22980 22981 /* If at end of string, we're done */ 22982 22983 if (!*value) 22984 break; 22985 22986 /* We have a word, figure out what it is */ 22987 22988 if (strncmp("mask", value, 4) == 0) { 22989 value += 4; 22990 while (*value == ' ' || *value == '\t') 22991 value++; 22992 /* Parse subnet mask */ 22993 if (af == AF_INET) { 22994 retval = inet_pton(af, value, &mask); 22995 if (retval == 1) { 22996 V4MASK_TO_V6(mask, v6mask); 22997 } 22998 } else if (af == AF_INET6) { 22999 retval = inet_pton(af, value, &v6mask); 23000 } 23001 if (retval != 1) { 23002 error = EINVAL; 23003 goto done; 23004 } 23005 while ((*value) && *value != ' ') 23006 value++; 23007 } else if (strncmp("sendspace", value, 9) == 0) { 23008 value += 9; 23009 23010 if (ddi_strtol(value, &end, 0, &sendspace) != 0 || 23011 sendspace < TCP_XMIT_HIWATER || 23012 sendspace >= (1L<<30)) { 23013 error = EINVAL; 23014 goto done; 23015 } 23016 value = end; 23017 } else if (strncmp("recvspace", value, 9) == 0) { 23018 value += 9; 23019 23020 if (ddi_strtol(value, &end, 0, &recvspace) != 0 || 23021 recvspace < TCP_RECV_HIWATER || 23022 recvspace >= (1L<<30)) { 23023 error = EINVAL; 23024 goto done; 23025 } 23026 value = end; 23027 } else if (strncmp("timestamp", value, 9) == 0) { 23028 value += 9; 23029 23030 if (ddi_strtol(value, &end, 0, ×tamp) != 0 || 23031 timestamp < 0 || timestamp > 1) { 23032 error = EINVAL; 23033 goto done; 23034 } 23035 23036 /* 23037 * We 
increment timestamp so we know it's been set; 23038 * this is undone when we put it in the HSP 23039 */ 23040 timestamp++; 23041 value = end; 23042 } else if (strncmp("delete", value, 6) == 0) { 23043 value += 6; 23044 delete = B_TRUE; 23045 } else { 23046 error = EINVAL; 23047 goto done; 23048 } 23049 } 23050 23051 /* Hash address for lookup */ 23052 23053 hash = TCP_HSP_HASH(addr); 23054 23055 if (delete) { 23056 /* 23057 * Note that deletes don't return an error if the thing 23058 * we're trying to delete isn't there. 23059 */ 23060 if (tcp_hsp_hash == NULL) 23061 goto done; 23062 hsp = tcp_hsp_hash[hash]; 23063 23064 if (hsp) { 23065 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 23066 &v6addr)) { 23067 tcp_hsp_hash[hash] = hsp->tcp_hsp_next; 23068 mi_free((char *)hsp); 23069 } else { 23070 hspprev = hsp; 23071 while ((hsp = hsp->tcp_hsp_next) != NULL) { 23072 if (IN6_ARE_ADDR_EQUAL( 23073 &hsp->tcp_hsp_addr_v6, &v6addr)) { 23074 hspprev->tcp_hsp_next = 23075 hsp->tcp_hsp_next; 23076 mi_free((char *)hsp); 23077 break; 23078 } 23079 hspprev = hsp; 23080 } 23081 } 23082 } 23083 } else { 23084 /* 23085 * We're adding/modifying an HSP. If we haven't already done 23086 * so, allocate the hash table. 23087 */ 23088 23089 if (!tcp_hsp_hash) { 23090 tcp_hsp_hash = (tcp_hsp_t **) 23091 mi_zalloc(sizeof (tcp_hsp_t *) * TCP_HSP_HASH_SIZE); 23092 if (!tcp_hsp_hash) { 23093 error = EINVAL; 23094 goto done; 23095 } 23096 } 23097 23098 /* Get head of hash chain */ 23099 23100 hsp = tcp_hsp_hash[hash]; 23101 23102 /* Try to find pre-existing hsp on hash chain */ 23103 /* Doesn't handle CIDR prefixes. */ 23104 while (hsp) { 23105 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, &v6addr)) 23106 break; 23107 hsp = hsp->tcp_hsp_next; 23108 } 23109 23110 /* 23111 * If we didn't, create one with default values and put it 23112 * at head of hash chain 23113 */ 23114 23115 if (!hsp) { 23116 hsp = (tcp_hsp_t *)mi_zalloc(sizeof (tcp_hsp_t)); 23117 if (!hsp) { 23118 error = EINVAL; 23119 goto done; 23120 } 23121 hsp->tcp_hsp_next = tcp_hsp_hash[hash]; 23122 tcp_hsp_hash[hash] = hsp; 23123 } 23124 23125 /* Set values that the user asked us to change */ 23126 23127 hsp->tcp_hsp_addr_v6 = v6addr; 23128 if (IN6_IS_ADDR_V4MAPPED(&v6addr)) 23129 hsp->tcp_hsp_vers = IPV4_VERSION; 23130 else 23131 hsp->tcp_hsp_vers = IPV6_VERSION; 23132 hsp->tcp_hsp_subnet_v6 = v6mask; 23133 if (sendspace > 0) 23134 hsp->tcp_hsp_sendspace = sendspace; 23135 if (recvspace > 0) 23136 hsp->tcp_hsp_recvspace = recvspace; 23137 if (timestamp > 0) 23138 hsp->tcp_hsp_tstamp = timestamp - 1; 23139 } 23140 23141 done: 23142 rw_exit(&tcp_hsp_lock); 23143 return (error); 23144 } 23145 23146 /* Set callback routine passed to nd_load by tcp_param_register. */ 23147 /* ARGSUSED */ 23148 static int 23149 tcp_host_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 23150 { 23151 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET)); 23152 } 23153 /* ARGSUSED */ 23154 static int 23155 tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 23156 cred_t *cr) 23157 { 23158 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET6)); 23159 } 23160 23161 /* TCP host parameters report triggered via the Named Dispatch mechanism. 
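 *
 * Typical usage, for illustration only (assuming the set/report pair is
 * registered under the ndd name tcp_host_param; the address and sizes
 * here are made up):
 *	ndd -set /dev/tcp tcp_host_param \
 *	    '192.0.2.5 sendspace 65536 recvspace 65536 timestamp 1'
 * adds an entry that this report then prints, and
 *	ndd -set /dev/tcp tcp_host_param '192.0.2.5 delete'
 * removes it again.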
*/ 23162 /* ARGSUSED */ 23163 static int 23164 tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 23165 { 23166 tcp_hsp_t *hsp; 23167 int i; 23168 char addrbuf[INET6_ADDRSTRLEN], subnetbuf[INET6_ADDRSTRLEN]; 23169 23170 rw_enter(&tcp_hsp_lock, RW_READER); 23171 (void) mi_mpprintf(mp, 23172 "Hash HSP " MI_COL_HDRPAD_STR 23173 "Address Subnet Mask Send Receive TStamp"); 23174 if (tcp_hsp_hash) { 23175 for (i = 0; i < TCP_HSP_HASH_SIZE; i++) { 23176 hsp = tcp_hsp_hash[i]; 23177 while (hsp) { 23178 if (hsp->tcp_hsp_vers == IPV4_VERSION) { 23179 (void) inet_ntop(AF_INET, 23180 &hsp->tcp_hsp_addr, 23181 addrbuf, sizeof (addrbuf)); 23182 (void) inet_ntop(AF_INET, 23183 &hsp->tcp_hsp_subnet, 23184 subnetbuf, sizeof (subnetbuf)); 23185 } else { 23186 (void) inet_ntop(AF_INET6, 23187 &hsp->tcp_hsp_addr_v6, 23188 addrbuf, sizeof (addrbuf)); 23189 (void) inet_ntop(AF_INET6, 23190 &hsp->tcp_hsp_subnet_v6, 23191 subnetbuf, sizeof (subnetbuf)); 23192 } 23193 (void) mi_mpprintf(mp, 23194 " %03d " MI_COL_PTRFMT_STR 23195 "%s %s %010d %010d %d", 23196 i, 23197 (void *)hsp, 23198 addrbuf, 23199 subnetbuf, 23200 hsp->tcp_hsp_sendspace, 23201 hsp->tcp_hsp_recvspace, 23202 hsp->tcp_hsp_tstamp); 23203 23204 hsp = hsp->tcp_hsp_next; 23205 } 23206 } 23207 } 23208 rw_exit(&tcp_hsp_lock); 23209 return (0); 23210 } 23211 23212 23213 /* Data for fast netmask macro used by tcp_hsp_lookup */ 23214 23215 static ipaddr_t netmasks[] = { 23216 IN_CLASSA_NET, IN_CLASSA_NET, IN_CLASSB_NET, 23217 IN_CLASSC_NET | IN_CLASSD_NET /* Class C,D,E */ 23218 }; 23219 23220 #define netmask(addr) (netmasks[(ipaddr_t)(addr) >> 30]) 23221 23222 /* 23223 * XXX This routine should go away and instead we should use the metrics 23224 * associated with the routes to determine the default sndspace and rcvspace. 23225 */ 23226 static tcp_hsp_t * 23227 tcp_hsp_lookup(ipaddr_t addr) 23228 { 23229 tcp_hsp_t *hsp = NULL; 23230 23231 /* Quick check without acquiring the lock. */ 23232 if (tcp_hsp_hash == NULL) 23233 return (NULL); 23234 23235 rw_enter(&tcp_hsp_lock, RW_READER); 23236 23237 /* This routine finds the best-matching HSP for address addr. */ 23238 23239 if (tcp_hsp_hash) { 23240 int i; 23241 ipaddr_t srchaddr; 23242 tcp_hsp_t *hsp_net; 23243 23244 /* We do three passes: host, network, and subnet. */ 23245 23246 srchaddr = addr; 23247 23248 for (i = 1; i <= 3; i++) { 23249 /* Look for exact match on srchaddr */ 23250 23251 hsp = tcp_hsp_hash[TCP_HSP_HASH(srchaddr)]; 23252 while (hsp) { 23253 if (hsp->tcp_hsp_vers == IPV4_VERSION && 23254 hsp->tcp_hsp_addr == srchaddr) 23255 break; 23256 hsp = hsp->tcp_hsp_next; 23257 } 23258 ASSERT(hsp == NULL || 23259 hsp->tcp_hsp_vers == IPV4_VERSION); 23260 23261 /* 23262 * If this is the first pass: 23263 * If we found a match, great, return it. 23264 * If not, search for the network on the second pass. 23265 */ 23266 23267 if (i == 1) 23268 if (hsp) 23269 break; 23270 else 23271 { 23272 srchaddr = addr & netmask(addr); 23273 continue; 23274 } 23275 23276 /* 23277 * If this is the second pass: 23278 * If we found a match, but there's a subnet mask, 23279 * save the match but try again using the subnet 23280 * mask on the third pass. 23281 * Otherwise, return whatever we found. 23282 */ 23283 23284 if (i == 2) { 23285 if (hsp && hsp->tcp_hsp_subnet) { 23286 hsp_net = hsp; 23287 srchaddr = addr & hsp->tcp_hsp_subnet; 23288 continue; 23289 } else { 23290 break; 23291 } 23292 } 23293 23294 /* 23295 * This must be the third pass. 
If we didn't find 23296 * anything, return the saved network HSP instead. 23297 */ 23298 23299 if (!hsp) 23300 hsp = hsp_net; 23301 } 23302 } 23303 23304 rw_exit(&tcp_hsp_lock); 23305 return (hsp); 23306 } 23307 23308 /* 23309 * XXX Equally broken as the IPv4 routine. Doesn't handle longest 23310 * match lookup. 23311 */ 23312 static tcp_hsp_t * 23313 tcp_hsp_lookup_ipv6(in6_addr_t *v6addr) 23314 { 23315 tcp_hsp_t *hsp = NULL; 23316 23317 /* Quick check without acquiring the lock. */ 23318 if (tcp_hsp_hash == NULL) 23319 return (NULL); 23320 23321 rw_enter(&tcp_hsp_lock, RW_READER); 23322 23323 /* This routine finds the best-matching HSP for address addr. */ 23324 23325 if (tcp_hsp_hash) { 23326 int i; 23327 in6_addr_t v6srchaddr; 23328 tcp_hsp_t *hsp_net; 23329 23330 /* We do three passes: host, network, and subnet. */ 23331 23332 v6srchaddr = *v6addr; 23333 23334 for (i = 1; i <= 3; i++) { 23335 /* Look for exact match on srchaddr */ 23336 23337 hsp = tcp_hsp_hash[TCP_HSP_HASH( 23338 V4_PART_OF_V6(v6srchaddr))]; 23339 while (hsp) { 23340 if (hsp->tcp_hsp_vers == IPV6_VERSION && 23341 IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 23342 &v6srchaddr)) 23343 break; 23344 hsp = hsp->tcp_hsp_next; 23345 } 23346 23347 /* 23348 * If this is the first pass: 23349 * If we found a match, great, return it. 23350 * If not, search for the network on the second pass. 23351 */ 23352 23353 if (i == 1) 23354 if (hsp) 23355 break; 23356 else { 23357 /* Assume a 64 bit mask */ 23358 v6srchaddr.s6_addr32[0] = 23359 v6addr->s6_addr32[0]; 23360 v6srchaddr.s6_addr32[1] = 23361 v6addr->s6_addr32[1]; 23362 v6srchaddr.s6_addr32[2] = 0; 23363 v6srchaddr.s6_addr32[3] = 0; 23364 continue; 23365 } 23366 23367 /* 23368 * If this is the second pass: 23369 * If we found a match, but there's a subnet mask, 23370 * save the match but try again using the subnet 23371 * mask on the third pass. 23372 * Otherwise, return whatever we found. 23373 */ 23374 23375 if (i == 2) { 23376 ASSERT(hsp == NULL || 23377 hsp->tcp_hsp_vers == IPV6_VERSION); 23378 if (hsp && 23379 !IN6_IS_ADDR_UNSPECIFIED( 23380 &hsp->tcp_hsp_subnet_v6)) { 23381 hsp_net = hsp; 23382 V6_MASK_COPY(*v6addr, 23383 hsp->tcp_hsp_subnet_v6, v6srchaddr); 23384 continue; 23385 } else { 23386 break; 23387 } 23388 } 23389 23390 /* 23391 * This must be the third pass. If we didn't find 23392 * anything, return the saved network HSP instead. 23393 */ 23394 23395 if (!hsp) 23396 hsp = hsp_net; 23397 } 23398 } 23399 23400 rw_exit(&tcp_hsp_lock); 23401 return (hsp); 23402 } 23403 23404 /* 23405 * Type three generator adapted from the random() function in 4.4 BSD: 23406 */ 23407 23408 /* 23409 * Copyright (c) 1983, 1993 23410 * The Regents of the University of California. All rights reserved. 23411 * 23412 * Redistribution and use in source and binary forms, with or without 23413 * modification, are permitted provided that the following conditions 23414 * are met: 23415 * 1. Redistributions of source code must retain the above copyright 23416 * notice, this list of conditions and the following disclaimer. 23417 * 2. Redistributions in binary form must reproduce the above copyright 23418 * notice, this list of conditions and the following disclaimer in the 23419 * documentation and/or other materials provided with the distribution. 23420 * 3. All advertising materials mentioning features or use of this software 23421 * must display the following acknowledgement: 23422 * This product includes software developed by the University of 23423 * California, Berkeley and its contributors. 
23424 * 4. Neither the name of the University nor the names of its contributors 23425 * may be used to endorse or promote products derived from this software 23426 * without specific prior written permission. 23427 * 23428 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23429 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23430 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23431 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23432 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23433 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23434 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23435 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23436 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23437 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23438 * SUCH DAMAGE. 23439 */ 23440 23441 /* Type 3 -- x**31 + x**3 + 1 */ 23442 #define DEG_3 31 23443 #define SEP_3 3 23444 23445 23446 /* Protected by tcp_random_lock */ 23447 static int tcp_randtbl[DEG_3 + 1]; 23448 23449 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1]; 23450 static int *tcp_random_rptr = &tcp_randtbl[1]; 23451 23452 static int *tcp_random_state = &tcp_randtbl[1]; 23453 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1]; 23454 23455 kmutex_t tcp_random_lock; 23456 23457 void 23458 tcp_random_init(void) 23459 { 23460 int i; 23461 hrtime_t hrt; 23462 time_t wallclock; 23463 uint64_t result; 23464 23465 /* 23466 * Use high-res timer and current time for seed. Gethrtime() returns 23467 * a longlong, which may contain resolution down to nanoseconds. 23468 * The current time will either be a 32-bit or a 64-bit quantity. 23469 * XOR the two together in a 64-bit result variable. 23470 * Convert the result to a 32-bit value by multiplying the high-order 23471 * 32-bits by the low-order 32-bits. 23472 */ 23473 23474 hrt = gethrtime(); 23475 (void) drv_getparm(TIME, &wallclock); 23476 result = (uint64_t)wallclock ^ (uint64_t)hrt; 23477 mutex_enter(&tcp_random_lock); 23478 tcp_random_state[0] = ((result >> 32) & 0xffffffff) * 23479 (result & 0xffffffff); 23480 23481 for (i = 1; i < DEG_3; i++) 23482 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1] 23483 + 12345; 23484 tcp_random_fptr = &tcp_random_state[SEP_3]; 23485 tcp_random_rptr = &tcp_random_state[0]; 23486 mutex_exit(&tcp_random_lock); 23487 for (i = 0; i < 10 * DEG_3; i++) 23488 (void) tcp_random(); 23489 } 23490 23491 /* 23492 * tcp_random: Return a random number in the range [1 - (128K + 1)]. 23493 * This range is selected to be approximately centered on TCP_ISS / 2, 23494 * and easy to compute. We get this value by generating a 32-bit random 23495 * number, selecting out the high-order 17 bits, and then adding one so 23496 * that we never return zero. 23497 */ 23498 int 23499 tcp_random(void) 23500 { 23501 int i; 23502 23503 mutex_enter(&tcp_random_lock); 23504 *tcp_random_fptr += *tcp_random_rptr; 23505 23506 /* 23507 * The high-order bits are more random than the low-order bits, 23508 * so we select out the high-order 17 bits and add one so that 23509 * we never return zero. 
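	 *
	 * Worked example (hypothetical state): if *tcp_random_fptr ends up
	 * as 0x12345678, then (0x12345678 >> 15) & 0x1ffff is 0x2468 and
	 * the function returns 0x2469 (9321), well within the documented
	 * range.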
23510 */ 23511 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1; 23512 if (++tcp_random_fptr >= tcp_random_end_ptr) { 23513 tcp_random_fptr = tcp_random_state; 23514 ++tcp_random_rptr; 23515 } else if (++tcp_random_rptr >= tcp_random_end_ptr) 23516 tcp_random_rptr = tcp_random_state; 23517 23518 mutex_exit(&tcp_random_lock); 23519 return (i); 23520 } 23521 23522 /* 23523 * XXX This will go away when TPI is extended to send 23524 * info reqs to sockfs/timod ..... 23525 * Given a queue, set the max packet size for the write 23526 * side of the queue below stream head. This value is 23527 * cached on the stream head. 23528 * Returns 1 on success, 0 otherwise. 23529 */ 23530 static int 23531 setmaxps(queue_t *q, int maxpsz) 23532 { 23533 struct stdata *stp; 23534 queue_t *wq; 23535 stp = STREAM(q); 23536 23537 /* 23538 * At this point change of a queue parameter is not allowed 23539 * when a multiplexor is sitting on top. 23540 */ 23541 if (stp->sd_flag & STPLEX) 23542 return (0); 23543 23544 claimstr(stp->sd_wrq); 23545 wq = stp->sd_wrq->q_next; 23546 ASSERT(wq != NULL); 23547 (void) strqset(wq, QMAXPSZ, 0, maxpsz); 23548 releasestr(stp->sd_wrq); 23549 return (1); 23550 } 23551 23552 static int 23553 tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp, 23554 int *t_errorp, int *sys_errorp) 23555 { 23556 int error; 23557 int is_absreq_failure; 23558 t_scalar_t *opt_lenp; 23559 t_scalar_t opt_offset; 23560 int prim_type; 23561 struct T_conn_req *tcreqp; 23562 struct T_conn_res *tcresp; 23563 cred_t *cr; 23564 23565 cr = DB_CREDDEF(mp, tcp->tcp_cred); 23566 23567 prim_type = ((union T_primitives *)mp->b_rptr)->type; 23568 ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES || 23569 prim_type == T_CONN_RES); 23570 23571 switch (prim_type) { 23572 case T_CONN_REQ: 23573 tcreqp = (struct T_conn_req *)mp->b_rptr; 23574 opt_offset = tcreqp->OPT_offset; 23575 opt_lenp = (t_scalar_t *)&tcreqp->OPT_length; 23576 break; 23577 case O_T_CONN_RES: 23578 case T_CONN_RES: 23579 tcresp = (struct T_conn_res *)mp->b_rptr; 23580 opt_offset = tcresp->OPT_offset; 23581 opt_lenp = (t_scalar_t *)&tcresp->OPT_length; 23582 break; 23583 } 23584 23585 *t_errorp = 0; 23586 *sys_errorp = 0; 23587 *do_disconnectp = 0; 23588 23589 error = tpi_optcom_buf(tcp->tcp_wq, mp, opt_lenp, 23590 opt_offset, cr, &tcp_opt_obj, 23591 NULL, &is_absreq_failure); 23592 23593 switch (error) { 23594 case 0: /* no error */ 23595 ASSERT(is_absreq_failure == 0); 23596 return (0); 23597 case ENOPROTOOPT: 23598 *t_errorp = TBADOPT; 23599 break; 23600 case EACCES: 23601 *t_errorp = TACCES; 23602 break; 23603 default: 23604 *t_errorp = TSYSERR; *sys_errorp = error; 23605 break; 23606 } 23607 if (is_absreq_failure != 0) { 23608 /* 23609 * The connection request should get the local ack 23610 * T_OK_ACK and then a T_DISCON_IND. 23611 */ 23612 *do_disconnectp = 1; 23613 } 23614 return (-1); 23615 } 23616 23617 /* 23618 * Split this function out so that if the secret changes, I'm okay. 23619 * 23620 * Initialize the tcp_iss_cookie and tcp_iss_key. 23621 */ 23622 23623 #define PASSWD_SIZE 16 /* MUST be multiple of 4 */ 23624 23625 static void 23626 tcp_iss_key_init(uint8_t *phrase, int len) 23627 { 23628 struct { 23629 int32_t current_time; 23630 uint32_t randnum; 23631 uint16_t pad; 23632 uint8_t ether[6]; 23633 uint8_t passwd[PASSWD_SIZE]; 23634 } tcp_iss_cookie; 23635 time_t t; 23636 23637 /* 23638 * Start with the current absolute time. 
23639 */ 23640 (void) drv_getparm(TIME, &t); 23641 tcp_iss_cookie.current_time = t; 23642 23643 /* 23644 * XXX - Need a more random number per RFC 1750, not this crap. 23645 * OTOH, if what follows is pretty random, then I'm in better shape. 23646 */ 23647 tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random()); 23648 tcp_iss_cookie.pad = 0x365c; /* Picked from HMAC pad values. */ 23649 23650 /* 23651 * The cpu_type_info is pretty non-random. Ugggh. It does serve 23652 * as a good template. 23653 */ 23654 bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd, 23655 min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info))); 23656 23657 /* 23658 * The pass-phrase. Normally this is supplied by user-called NDD. 23659 */ 23660 bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len)); 23661 23662 /* 23663 * See 4010593 if this section becomes a problem again, 23664 * but the local ethernet address is useful here. 23665 */ 23666 (void) localetheraddr(NULL, 23667 (struct ether_addr *)&tcp_iss_cookie.ether); 23668 23669 /* 23670 * Hash 'em all together. The MD5Final is called per-connection. 23671 */ 23672 mutex_enter(&tcp_iss_key_lock); 23673 MD5Init(&tcp_iss_key); 23674 MD5Update(&tcp_iss_key, (uchar_t *)&tcp_iss_cookie, 23675 sizeof (tcp_iss_cookie)); 23676 mutex_exit(&tcp_iss_key_lock); 23677 } 23678 23679 /* 23680 * Set the RFC 1948 pass phrase 23681 */ 23682 /* ARGSUSED */ 23683 static int 23684 tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 23685 cred_t *cr) 23686 { 23687 /* 23688 * Basically, value contains a new pass phrase. Pass it along! 23689 */ 23690 tcp_iss_key_init((uint8_t *)value, strlen(value)); 23691 return (0); 23692 } 23693 23694 /* ARGSUSED */ 23695 static int 23696 tcp_sack_info_constructor(void *buf, void *cdrarg, int kmflags) 23697 { 23698 bzero(buf, sizeof (tcp_sack_info_t)); 23699 return (0); 23700 } 23701 23702 /* ARGSUSED */ 23703 static int 23704 tcp_iphc_constructor(void *buf, void *cdrarg, int kmflags) 23705 { 23706 bzero(buf, TCP_MAX_COMBINED_HEADER_LENGTH); 23707 return (0); 23708 } 23709 23710 void 23711 tcp_ddi_init(void) 23712 { 23713 int i; 23714 23715 /* Initialize locks */ 23716 rw_init(&tcp_hsp_lock, NULL, RW_DEFAULT, NULL); 23717 mutex_init(&tcp_g_q_lock, NULL, MUTEX_DEFAULT, NULL); 23718 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL); 23719 mutex_init(&tcp_iss_key_lock, NULL, MUTEX_DEFAULT, NULL); 23720 mutex_init(&tcp_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 23721 rw_init(&tcp_reserved_port_lock, NULL, RW_DEFAULT, NULL); 23722 23723 for (i = 0; i < A_CNT(tcp_bind_fanout); i++) { 23724 mutex_init(&tcp_bind_fanout[i].tf_lock, NULL, 23725 MUTEX_DEFAULT, NULL); 23726 } 23727 23728 for (i = 0; i < A_CNT(tcp_acceptor_fanout); i++) { 23729 mutex_init(&tcp_acceptor_fanout[i].tf_lock, NULL, 23730 MUTEX_DEFAULT, NULL); 23731 } 23732 23733 /* TCP's IPsec code calls the packet dropper. */ 23734 ip_drop_register(&tcp_dropper, "TCP IPsec policy enforcement"); 23735 23736 if (!tcp_g_nd) { 23737 if (!tcp_param_register(tcp_param_arr, A_CNT(tcp_param_arr))) { 23738 nd_free(&tcp_g_nd); 23739 } 23740 } 23741 23742 /* 23743 * Note: To really walk the device tree you need the devinfo 23744 * pointer to your device which is only available after probe/attach. 
23745 * The following is safe only because it uses ddi_root_node() 23746 */ 23747 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr, 23748 tcp_opt_obj.odb_opt_arr_cnt); 23749 23750 tcp_timercache = kmem_cache_create("tcp_timercache", 23751 sizeof (tcp_timer_t) + sizeof (mblk_t), 0, 23752 NULL, NULL, NULL, NULL, NULL, 0); 23753 23754 tcp_sack_info_cache = kmem_cache_create("tcp_sack_info_cache", 23755 sizeof (tcp_sack_info_t), 0, 23756 tcp_sack_info_constructor, NULL, NULL, NULL, NULL, 0); 23757 23758 tcp_iphc_cache = kmem_cache_create("tcp_iphc_cache", 23759 TCP_MAX_COMBINED_HEADER_LENGTH, 0, 23760 tcp_iphc_constructor, NULL, NULL, NULL, NULL, 0); 23761 23762 tcp_squeue_wput_proc = tcp_squeue_switch(tcp_squeue_wput); 23763 tcp_squeue_close_proc = tcp_squeue_switch(tcp_squeue_close); 23764 23765 ip_squeue_init(tcp_squeue_add); 23766 23767 /* Initialize the random number generator */ 23768 tcp_random_init(); 23769 23770 /* 23771 * Initialize RFC 1948 secret values. This will probably be reset once 23772 * by the boot scripts. 23773 * 23774 * Use NULL name, as the name is caught by the new lockstats. 23775 * 23776 * Initialize with some random, non-guessable string, like the global 23777 * T_INFO_ACK. 23778 */ 23779 23780 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack, 23781 sizeof (tcp_g_t_info_ack)); 23782 23783 if ((tcp_kstat = kstat_create(TCP_MOD_NAME, 0, "tcpstat", 23784 "net", KSTAT_TYPE_NAMED, 23785 sizeof (tcp_statistics) / sizeof (kstat_named_t), 23786 KSTAT_FLAG_VIRTUAL)) != NULL) { 23787 tcp_kstat->ks_data = &tcp_statistics; 23788 kstat_install(tcp_kstat); 23789 } 23790 23791 tcp_kstat_init(); 23792 } 23793 23794 void 23795 tcp_ddi_destroy(void) 23796 { 23797 int i; 23798 23799 nd_free(&tcp_g_nd); 23800 23801 for (i = 0; i < A_CNT(tcp_bind_fanout); i++) { 23802 mutex_destroy(&tcp_bind_fanout[i].tf_lock); 23803 } 23804 23805 for (i = 0; i < A_CNT(tcp_acceptor_fanout); i++) { 23806 mutex_destroy(&tcp_acceptor_fanout[i].tf_lock); 23807 } 23808 23809 mutex_destroy(&tcp_iss_key_lock); 23810 rw_destroy(&tcp_hsp_lock); 23811 mutex_destroy(&tcp_g_q_lock); 23812 mutex_destroy(&tcp_random_lock); 23813 mutex_destroy(&tcp_epriv_port_lock); 23814 rw_destroy(&tcp_reserved_port_lock); 23815 23816 ip_drop_unregister(&tcp_dropper); 23817 23818 kmem_cache_destroy(tcp_timercache); 23819 kmem_cache_destroy(tcp_sack_info_cache); 23820 kmem_cache_destroy(tcp_iphc_cache); 23821 23822 tcp_kstat_fini(); 23823 } 23824 23825 /* 23826 * Generate ISS, taking into account NDD changes may happen halfway through. 23827 * (If the iss is not zero, set it.) 
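 *
 * The tcp_strong_iss settings handled by the switch below: 2 uses the
 * RFC 1948 scheme, mixing the connection's ports and addresses into the
 * tcp_iss_key MD5 context and then also applying the case-1 increment;
 * 1 adds a random, time-derived increment; any other value falls back to
 * the classic increment of ISS_INCR per second of wall-clock time.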
23828 */ 23829 23830 static void 23831 tcp_iss_init(tcp_t *tcp) 23832 { 23833 MD5_CTX context; 23834 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg; 23835 uint32_t answer[4]; 23836 23837 tcp_iss_incr_extra += (ISS_INCR >> 1); 23838 tcp->tcp_iss = tcp_iss_incr_extra; 23839 switch (tcp_strong_iss) { 23840 case 2: 23841 mutex_enter(&tcp_iss_key_lock); 23842 context = tcp_iss_key; 23843 mutex_exit(&tcp_iss_key_lock); 23844 arg.ports = tcp->tcp_ports; 23845 if (tcp->tcp_ipversion == IPV4_VERSION) { 23846 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 23847 &arg.src); 23848 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_dst, 23849 &arg.dst); 23850 } else { 23851 arg.src = tcp->tcp_ip6h->ip6_src; 23852 arg.dst = tcp->tcp_ip6h->ip6_dst; 23853 } 23854 MD5Update(&context, (uchar_t *)&arg, sizeof (arg)); 23855 MD5Final((uchar_t *)answer, &context); 23856 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3]; 23857 /* 23858 * Now that we've hashed into a unique per-connection sequence 23859 * space, add a random increment per strong_iss == 1. So I 23860 * guess we'll have to... 23861 */ 23862 /* FALLTHRU */ 23863 case 1: 23864 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random(); 23865 break; 23866 default: 23867 tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 23868 break; 23869 } 23870 tcp->tcp_valid_bits = TCP_ISS_VALID; 23871 tcp->tcp_fss = tcp->tcp_iss - 1; 23872 tcp->tcp_suna = tcp->tcp_iss; 23873 tcp->tcp_snxt = tcp->tcp_iss + 1; 23874 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 23875 tcp->tcp_csuna = tcp->tcp_snxt; 23876 } 23877 23878 /* 23879 * Exported routine for extracting active tcp connection status. 23880 * 23881 * This is used by the Solaris Cluster Networking software to 23882 * gather a list of connections that need to be forwarded to 23883 * specific nodes in the cluster when configuration changes occur. 23884 * 23885 * The callback is invoked for each tcp_t structure. Returning 23886 * non-zero from the callback routine terminates the search. 23887 */ 23888 int 23889 cl_tcp_walk_list(int (*callback)(cl_tcp_info_t *, void *), void *arg) 23890 { 23891 tcp_t *tcp; 23892 cl_tcp_info_t cl_tcpi; 23893 connf_t *connfp; 23894 conn_t *connp; 23895 int i; 23896 23897 ASSERT(callback != NULL); 23898 23899 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 23900 23901 connfp = &ipcl_globalhash_fanout[i]; 23902 connp = NULL; 23903 23904 while ((connp = 23905 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 23906 23907 tcp = connp->conn_tcp; 23908 cl_tcpi.cl_tcpi_version = CL_TCPI_V1; 23909 cl_tcpi.cl_tcpi_ipversion = tcp->tcp_ipversion; 23910 cl_tcpi.cl_tcpi_state = tcp->tcp_state; 23911 cl_tcpi.cl_tcpi_lport = tcp->tcp_lport; 23912 cl_tcpi.cl_tcpi_fport = tcp->tcp_fport; 23913 /* 23914 * The macros tcp_laddr and tcp_faddr give the IPv4 23915 * addresses. They are copied implicitly below as 23916 * mapped addresses. 23917 */ 23918 cl_tcpi.cl_tcpi_laddr_v6 = tcp->tcp_ip_src_v6; 23919 if (tcp->tcp_ipversion == IPV4_VERSION) { 23920 cl_tcpi.cl_tcpi_faddr = 23921 tcp->tcp_ipha->ipha_dst; 23922 } else { 23923 cl_tcpi.cl_tcpi_faddr_v6 = 23924 tcp->tcp_ip6h->ip6_dst; 23925 } 23926 23927 /* 23928 * If the callback returns non-zero 23929 * we terminate the traversal. 23930 */ 23931 if ((*callback)(&cl_tcpi, arg) != 0) { 23932 CONN_DEC_REF(tcp->tcp_connp); 23933 return (1); 23934 } 23935 } 23936 } 23937 23938 return (0); 23939 } 23940 23941 /* 23942 * Macros used for accessing the different types of sockaddr 23943 * structures inside a tcp_ioc_abort_conn_t. 
23944 */ 23945 #define TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local) 23946 #define TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote) 23947 #define TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr) 23948 #define TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr) 23949 #define TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port) 23950 #define TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port) 23951 #define TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local) 23952 #define TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote) 23953 #define TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr) 23954 #define TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr) 23955 #define TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port) 23956 #define TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port) 23957 23958 /* 23959 * Return the correct error code to mimic the behavior 23960 * of a connection reset. 23961 */ 23962 #define TCP_AC_GET_ERRCODE(state, err) { \ 23963 switch ((state)) { \ 23964 case TCPS_SYN_SENT: \ 23965 case TCPS_SYN_RCVD: \ 23966 (err) = ECONNREFUSED; \ 23967 break; \ 23968 case TCPS_ESTABLISHED: \ 23969 case TCPS_FIN_WAIT_1: \ 23970 case TCPS_FIN_WAIT_2: \ 23971 case TCPS_CLOSE_WAIT: \ 23972 (err) = ECONNRESET; \ 23973 break; \ 23974 case TCPS_CLOSING: \ 23975 case TCPS_LAST_ACK: \ 23976 case TCPS_TIME_WAIT: \ 23977 (err) = 0; \ 23978 break; \ 23979 default: \ 23980 (err) = ENXIO; \ 23981 } \ 23982 } 23983 23984 /* 23985 * Check if a tcp structure matches the info in acp. 23986 */ 23987 #define TCP_AC_ADDR_MATCH(acp, tcp) \ 23988 (((acp)->ac_local.ss_family == AF_INET) ? \ 23989 ((TCP_AC_V4LOCAL((acp)) == INADDR_ANY || \ 23990 TCP_AC_V4LOCAL((acp)) == (tcp)->tcp_ip_src) && \ 23991 (TCP_AC_V4REMOTE((acp)) == INADDR_ANY || \ 23992 TCP_AC_V4REMOTE((acp)) == (tcp)->tcp_remote) && \ 23993 (TCP_AC_V4LPORT((acp)) == 0 || \ 23994 TCP_AC_V4LPORT((acp)) == (tcp)->tcp_lport) && \ 23995 (TCP_AC_V4RPORT((acp)) == 0 || \ 23996 TCP_AC_V4RPORT((acp)) == (tcp)->tcp_fport) && \ 23997 (acp)->ac_start <= (tcp)->tcp_state && \ 23998 (acp)->ac_end >= (tcp)->tcp_state) : \ 23999 ((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) || \ 24000 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)), \ 24001 &(tcp)->tcp_ip_src_v6)) && \ 24002 (IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) || \ 24003 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)), \ 24004 &(tcp)->tcp_remote_v6)) && \ 24005 (TCP_AC_V6LPORT((acp)) == 0 || \ 24006 TCP_AC_V6LPORT((acp)) == (tcp)->tcp_lport) && \ 24007 (TCP_AC_V6RPORT((acp)) == 0 || \ 24008 TCP_AC_V6RPORT((acp)) == (tcp)->tcp_fport) && \ 24009 (acp)->ac_start <= (tcp)->tcp_state && \ 24010 (acp)->ac_end >= (tcp)->tcp_state)) 24011 24012 #define TCP_AC_MATCH(acp, tcp) \ 24013 (((acp)->ac_zoneid == ALL_ZONES || \ 24014 (acp)->ac_zoneid == tcp->tcp_connp->conn_zoneid) ? \ 24015 TCP_AC_ADDR_MATCH(acp, tcp) : 0) 24016 24017 /* 24018 * Build a message containing a tcp_ioc_abort_conn_t structure 24019 * which is filled in with information from acp and tp. 
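 *
 * The M_CTL message assembled below has the following layout (a sketch
 * of what tcp_ioctl_abort_build_msg() produces; addresses and ports are
 * copied from tp, the state range and zoneid from acp):
 *
 *	b_rptr ->	uint32_t		TCP_IOC_ABORT_CONN
 *			tcp_ioc_abort_conn_t	filled-in abort record
 *	b_wptr ->	(end of record)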
24020 */ 24021 static mblk_t * 24022 tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp) 24023 { 24024 mblk_t *mp; 24025 tcp_ioc_abort_conn_t *tacp; 24026 24027 mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO); 24028 if (mp == NULL) 24029 return (NULL); 24030 24031 mp->b_datap->db_type = M_CTL; 24032 24033 *((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN; 24034 tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr + 24035 sizeof (uint32_t)); 24036 24037 tacp->ac_start = acp->ac_start; 24038 tacp->ac_end = acp->ac_end; 24039 tacp->ac_zoneid = acp->ac_zoneid; 24040 24041 if (acp->ac_local.ss_family == AF_INET) { 24042 tacp->ac_local.ss_family = AF_INET; 24043 tacp->ac_remote.ss_family = AF_INET; 24044 TCP_AC_V4LOCAL(tacp) = tp->tcp_ip_src; 24045 TCP_AC_V4REMOTE(tacp) = tp->tcp_remote; 24046 TCP_AC_V4LPORT(tacp) = tp->tcp_lport; 24047 TCP_AC_V4RPORT(tacp) = tp->tcp_fport; 24048 } else { 24049 tacp->ac_local.ss_family = AF_INET6; 24050 tacp->ac_remote.ss_family = AF_INET6; 24051 TCP_AC_V6LOCAL(tacp) = tp->tcp_ip_src_v6; 24052 TCP_AC_V6REMOTE(tacp) = tp->tcp_remote_v6; 24053 TCP_AC_V6LPORT(tacp) = tp->tcp_lport; 24054 TCP_AC_V6RPORT(tacp) = tp->tcp_fport; 24055 } 24056 mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp); 24057 return (mp); 24058 } 24059 24060 /* 24061 * Print a tcp_ioc_abort_conn_t structure. 24062 */ 24063 static void 24064 tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp) 24065 { 24066 char lbuf[128]; 24067 char rbuf[128]; 24068 sa_family_t af; 24069 in_port_t lport, rport; 24070 ushort_t logflags; 24071 24072 af = acp->ac_local.ss_family; 24073 24074 if (af == AF_INET) { 24075 (void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp), 24076 lbuf, 128); 24077 (void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp), 24078 rbuf, 128); 24079 lport = ntohs(TCP_AC_V4LPORT(acp)); 24080 rport = ntohs(TCP_AC_V4RPORT(acp)); 24081 } else { 24082 (void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp), 24083 lbuf, 128); 24084 (void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp), 24085 rbuf, 128); 24086 lport = ntohs(TCP_AC_V6LPORT(acp)); 24087 rport = ntohs(TCP_AC_V6RPORT(acp)); 24088 } 24089 24090 logflags = SL_TRACE | SL_NOTE; 24091 /* 24092 * Don't print this message to the console if the operation was done 24093 * to a non-global zone. 24094 */ 24095 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES) 24096 logflags |= SL_CONSOLE; 24097 (void) strlog(TCP_MOD_ID, 0, 1, logflags, 24098 "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, " 24099 "start = %d, end = %d\n", lbuf, lport, rbuf, rport, 24100 acp->ac_start, acp->ac_end); 24101 } 24102 24103 /* 24104 * Called inside tcp_rput when a message built using 24105 * tcp_ioctl_abort_build_msg is put into a queue. 24106 * Note that when we get here there is no wildcard in acp any more. 24107 */ 24108 static void 24109 tcp_ioctl_abort_handler(tcp_t *tcp, mblk_t *mp) 24110 { 24111 tcp_ioc_abort_conn_t *acp; 24112 24113 acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t)); 24114 if (tcp->tcp_state <= acp->ac_end) { 24115 /* 24116 * If we get here, we are already on the correct 24117 * squeue. 
This ioctl follows the following path 24118 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn 24119 * ->tcp_ioctl_abort->squeue_fill (if on a 24120 * different squeue) 24121 */ 24122 int errcode; 24123 24124 TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode); 24125 (void) tcp_clean_death(tcp, errcode, 26); 24126 } 24127 freemsg(mp); 24128 } 24129 24130 /* 24131 * Abort all matching connections on a hash chain. 24132 */ 24133 static int 24134 tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count, 24135 boolean_t exact) 24136 { 24137 int nmatch, err = 0; 24138 tcp_t *tcp; 24139 MBLKP mp, last, listhead = NULL; 24140 conn_t *tconnp; 24141 connf_t *connfp = &ipcl_conn_fanout[index]; 24142 24143 startover: 24144 nmatch = 0; 24145 24146 mutex_enter(&connfp->connf_lock); 24147 for (tconnp = connfp->connf_head; tconnp != NULL; 24148 tconnp = tconnp->conn_next) { 24149 tcp = tconnp->conn_tcp; 24150 if (TCP_AC_MATCH(acp, tcp)) { 24151 CONN_INC_REF(tcp->tcp_connp); 24152 mp = tcp_ioctl_abort_build_msg(acp, tcp); 24153 if (mp == NULL) { 24154 err = ENOMEM; 24155 CONN_DEC_REF(tcp->tcp_connp); 24156 break; 24157 } 24158 mp->b_prev = (mblk_t *)tcp; 24159 24160 if (listhead == NULL) { 24161 listhead = mp; 24162 last = mp; 24163 } else { 24164 last->b_next = mp; 24165 last = mp; 24166 } 24167 nmatch++; 24168 if (exact) 24169 break; 24170 } 24171 24172 /* Avoid holding lock for too long. */ 24173 if (nmatch >= 500) 24174 break; 24175 } 24176 mutex_exit(&connfp->connf_lock); 24177 24178 /* Pass mp into the correct tcp */ 24179 while ((mp = listhead) != NULL) { 24180 listhead = listhead->b_next; 24181 tcp = (tcp_t *)mp->b_prev; 24182 mp->b_next = mp->b_prev = NULL; 24183 squeue_fill(tcp->tcp_connp->conn_sqp, mp, 24184 tcp_input, tcp->tcp_connp, SQTAG_TCP_ABORT_BUCKET); 24185 } 24186 24187 *count += nmatch; 24188 if (nmatch >= 500 && err == 0) 24189 goto startover; 24190 return (err); 24191 } 24192 24193 /* 24194 * Abort all connections that matches the attributes specified in acp. 24195 */ 24196 static int 24197 tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp) 24198 { 24199 sa_family_t af; 24200 uint32_t ports; 24201 uint16_t *pports; 24202 int err = 0, count = 0; 24203 boolean_t exact = B_FALSE; /* set when there is no wildcard */ 24204 int index = -1; 24205 ushort_t logflags; 24206 24207 af = acp->ac_local.ss_family; 24208 24209 if (af == AF_INET) { 24210 if (TCP_AC_V4REMOTE(acp) != INADDR_ANY && 24211 TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) { 24212 pports = (uint16_t *)&ports; 24213 pports[1] = TCP_AC_V4LPORT(acp); 24214 pports[0] = TCP_AC_V4RPORT(acp); 24215 exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY); 24216 } 24217 } else { 24218 if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) && 24219 TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) { 24220 pports = (uint16_t *)&ports; 24221 pports[1] = TCP_AC_V6LPORT(acp); 24222 pports[0] = TCP_AC_V6RPORT(acp); 24223 exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp)); 24224 } 24225 } 24226 24227 /* 24228 * For cases where remote addr, local port, and remote port are non- 24229 * wildcards, tcp_ioctl_abort_bucket will only be called once. 
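 *
 * In that exact-match case the bucket index is expected to be the
 * ipclassifier conn-fanout hash of the remote address and the ports
 * word assembled above; a sketch, assuming the IPCL_CONN_HASH macros
 * from <inet/ipclassifier.h>:
 *
 *	index = (af == AF_INET) ?
 *	    IPCL_CONN_HASH(TCP_AC_V4REMOTE(acp), ports) :
 *	    IPCL_CONN_HASH_V6(TCP_AC_V6REMOTE(acp), ports);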
24230 */ 24231 if (index != -1) { 24232 err = tcp_ioctl_abort_bucket(acp, index, 24233 &count, exact); 24234 } else { 24235 /* 24236 * loop through all entries for wildcard case 24237 */ 24238 for (index = 0; index < ipcl_conn_fanout_size; index++) { 24239 err = tcp_ioctl_abort_bucket(acp, index, 24240 &count, exact); 24241 if (err != 0) 24242 break; 24243 } 24244 } 24245 24246 logflags = SL_TRACE | SL_NOTE; 24247 /* 24248 * Don't print this message to the console if the operation was done 24249 * to a non-global zone. 24250 */ 24251 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES) 24252 logflags |= SL_CONSOLE; 24253 (void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: " 24254 "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' ')); 24255 if (err == 0 && count == 0) 24256 err = ENOENT; 24257 return (err); 24258 } 24259 24260 /* 24261 * Process the TCP_IOC_ABORT_CONN ioctl request. 24262 */ 24263 static void 24264 tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp) 24265 { 24266 int err; 24267 IOCP iocp; 24268 MBLKP mp1; 24269 sa_family_t laf, raf; 24270 tcp_ioc_abort_conn_t *acp; 24271 zone_t *zptr; 24272 zoneid_t zoneid = Q_TO_CONN(q)->conn_zoneid; 24273 24274 iocp = (IOCP)mp->b_rptr; 24275 24276 if ((mp1 = mp->b_cont) == NULL || 24277 iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) { 24278 err = EINVAL; 24279 goto out; 24280 } 24281 24282 /* check permissions */ 24283 if (secpolicy_net_config(iocp->ioc_cr, B_FALSE) != 0) { 24284 err = EPERM; 24285 goto out; 24286 } 24287 24288 if (mp1->b_cont != NULL) { 24289 freemsg(mp1->b_cont); 24290 mp1->b_cont = NULL; 24291 } 24292 24293 acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr; 24294 laf = acp->ac_local.ss_family; 24295 raf = acp->ac_remote.ss_family; 24296 24297 /* check that a zone with the supplied zoneid exists */ 24298 if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) { 24299 zptr = zone_find_by_id(zoneid); 24300 if (zptr != NULL) { 24301 zone_rele(zptr); 24302 } else { 24303 err = EINVAL; 24304 goto out; 24305 } 24306 } 24307 24308 if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT || 24309 acp->ac_start > acp->ac_end || laf != raf || 24310 (laf != AF_INET && laf != AF_INET6)) { 24311 err = EINVAL; 24312 goto out; 24313 } 24314 24315 tcp_ioctl_abort_dump(acp); 24316 err = tcp_ioctl_abort(acp); 24317 24318 out: 24319 if (mp1 != NULL) { 24320 freemsg(mp1); 24321 mp->b_cont = NULL; 24322 } 24323 24324 if (err != 0) 24325 miocnak(q, mp, 0, err); 24326 else 24327 miocack(q, mp, 0, 0); 24328 } 24329 24330 /* 24331 * tcp_time_wait_processing() handles processing of incoming packets when 24332 * the tcp is in the TIME_WAIT state. 24333 * A TIME_WAIT tcp that has an associated open TCP stream is never put 24334 * on the time wait list. 24335 */ 24336 void 24337 tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq, 24338 uint32_t seg_ack, int seg_len, tcph_t *tcph) 24339 { 24340 int32_t bytes_acked; 24341 int32_t gap; 24342 int32_t rgap; 24343 tcp_opt_t tcpopt; 24344 uint_t flags; 24345 uint32_t new_swnd = 0; 24346 conn_t *connp; 24347 24348 BUMP_LOCAL(tcp->tcp_ibsegs); 24349 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 24350 24351 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 24352 new_swnd = BE16_TO_U16(tcph->th_win) << 24353 ((tcph->th_flags[0] & TH_SYN) ? 
0 : tcp->tcp_snd_ws); 24354 if (tcp->tcp_snd_ts_ok) { 24355 if (!tcp_paws_check(tcp, tcph, &tcpopt)) { 24356 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 24357 tcp->tcp_rnxt, TH_ACK); 24358 goto done; 24359 } 24360 } 24361 gap = seg_seq - tcp->tcp_rnxt; 24362 rgap = tcp->tcp_rwnd - (gap + seg_len); 24363 if (gap < 0) { 24364 BUMP_MIB(&tcp_mib, tcpInDataDupSegs); 24365 UPDATE_MIB(&tcp_mib, tcpInDataDupBytes, 24366 (seg_len > -gap ? -gap : seg_len)); 24367 seg_len += gap; 24368 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 24369 if (flags & TH_RST) { 24370 goto done; 24371 } 24372 if ((flags & TH_FIN) && seg_len == -1) { 24373 /* 24374 * When TCP receives a duplicate FIN in 24375 * TIME_WAIT state, restart the 2 MSL timer. 24376 * See page 73 in RFC 793. Make sure this TCP 24377 * is already on the TIME_WAIT list. If not, 24378 * just restart the timer. 24379 */ 24380 if (TCP_IS_DETACHED(tcp)) { 24381 tcp_time_wait_remove(tcp, NULL); 24382 tcp_time_wait_append(tcp); 24383 TCP_DBGSTAT(tcp_rput_time_wait); 24384 } else { 24385 ASSERT(tcp != NULL); 24386 TCP_TIMER_RESTART(tcp, 24387 tcp_time_wait_interval); 24388 } 24389 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 24390 tcp->tcp_rnxt, TH_ACK); 24391 goto done; 24392 } 24393 flags |= TH_ACK_NEEDED; 24394 seg_len = 0; 24395 goto process_ack; 24396 } 24397 24398 /* Fix seg_seq, and chew the gap off the front. */ 24399 seg_seq = tcp->tcp_rnxt; 24400 } 24401 24402 if ((flags & TH_SYN) && gap > 0 && rgap < 0) { 24403 /* 24404 * Make sure that when we accept the connection, pick 24405 * an ISS greater than (tcp_snxt + ISS_INCR/2) for the 24406 * old connection. 24407 * 24408 * The next ISS generated is equal to tcp_iss_incr_extra 24409 * + ISS_INCR/2 + other components depending on the 24410 * value of tcp_strong_iss. We pre-calculate the new 24411 * ISS here and compare with tcp_snxt to determine if 24412 * we need to make adjustment to tcp_iss_incr_extra. 24413 * 24414 * The above calculation is ugly and is a 24415 * waste of CPU cycles... 24416 */ 24417 uint32_t new_iss = tcp_iss_incr_extra; 24418 int32_t adj; 24419 24420 switch (tcp_strong_iss) { 24421 case 2: { 24422 /* Add time and MD5 components. */ 24423 uint32_t answer[4]; 24424 struct { 24425 uint32_t ports; 24426 in6_addr_t src; 24427 in6_addr_t dst; 24428 } arg; 24429 MD5_CTX context; 24430 24431 mutex_enter(&tcp_iss_key_lock); 24432 context = tcp_iss_key; 24433 mutex_exit(&tcp_iss_key_lock); 24434 arg.ports = tcp->tcp_ports; 24435 /* We use MAPPED addresses in tcp_iss_init */ 24436 arg.src = tcp->tcp_ip_src_v6; 24437 if (tcp->tcp_ipversion == IPV4_VERSION) { 24438 IN6_IPADDR_TO_V4MAPPED( 24439 tcp->tcp_ipha->ipha_dst, 24440 &arg.dst); 24441 } else { 24442 arg.dst = 24443 tcp->tcp_ip6h->ip6_dst; 24444 } 24445 MD5Update(&context, (uchar_t *)&arg, 24446 sizeof (arg)); 24447 MD5Final((uchar_t *)answer, &context); 24448 answer[0] ^= answer[1] ^ answer[2] ^ answer[3]; 24449 new_iss += (gethrtime() >> ISS_NSEC_SHT) + answer[0]; 24450 break; 24451 } 24452 case 1: 24453 /* Add time component and min random (i.e. 1). */ 24454 new_iss += (gethrtime() >> ISS_NSEC_SHT) + 1; 24455 break; 24456 default: 24457 /* Add only time component. */ 24458 new_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 24459 break; 24460 } 24461 if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) { 24462 /* 24463 * New ISS not guaranteed to be ISS_INCR/2 24464 * ahead of the current tcp_snxt, so add the 24465 * difference to tcp_iss_incr_extra. 
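 *
 * Worked example, illustrative numbers only: if the dying connection
 * has tcp_snxt == 5000 while the pre-computed new_iss comes out to
 * 4200, then adj == 800 and tcp_iss_incr_extra is bumped by 800, so
 * the ISS eventually handed out for the new incarnation stays ahead
 * of the old connection's sequence space.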
24466 */ 24467 tcp_iss_incr_extra += adj; 24468 } 24469 /* 24470 * If tcp_clean_death() can not perform the task now, 24471 * drop the SYN packet and let the other side re-xmit. 24472 * Otherwise pass the SYN packet back in, since the 24473 * old tcp state has been cleaned up or freed. 24474 */ 24475 if (tcp_clean_death(tcp, 0, 27) == -1) 24476 goto done; 24477 /* 24478 * We will come back to tcp_rput_data 24479 * on the global queue. Packets destined 24480 * for the global queue will be checked 24481 * with global policy. But the policy for 24482 * this packet has already been checked as 24483 * this was destined for the detached 24484 * connection. We need to bypass policy 24485 * check this time by attaching a dummy 24486 * ipsec_in with ipsec_in_dont_check set. 24487 */ 24488 if ((connp = ipcl_classify(mp, tcp->tcp_connp->conn_zoneid)) != 24489 NULL) { 24490 TCP_STAT(tcp_time_wait_syn_success); 24491 tcp_reinput(connp, mp, tcp->tcp_connp->conn_sqp); 24492 return; 24493 } 24494 goto done; 24495 } 24496 24497 /* 24498 * rgap is the amount of stuff received out of window. A negative 24499 * value is the amount out of window. 24500 */ 24501 if (rgap < 0) { 24502 BUMP_MIB(&tcp_mib, tcpInDataPastWinSegs); 24503 UPDATE_MIB(&tcp_mib, tcpInDataPastWinBytes, -rgap); 24504 /* Fix seg_len and make sure there is something left. */ 24505 seg_len += rgap; 24506 if (seg_len <= 0) { 24507 if (flags & TH_RST) { 24508 goto done; 24509 } 24510 flags |= TH_ACK_NEEDED; 24511 seg_len = 0; 24512 goto process_ack; 24513 } 24514 } 24515 /* 24516 * Check whether we can update tcp_ts_recent. This test is 24517 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 24518 * Extensions for High Performance: An Update", Internet Draft. 24519 */ 24520 if (tcp->tcp_snd_ts_ok && 24521 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 24522 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 24523 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 24524 tcp->tcp_last_rcv_lbolt = lbolt64; 24525 } 24526 24527 if (seg_seq != tcp->tcp_rnxt && seg_len > 0) { 24528 /* Always ack out of order packets */ 24529 flags |= TH_ACK_NEEDED; 24530 seg_len = 0; 24531 } else if (seg_len > 0) { 24532 BUMP_MIB(&tcp_mib, tcpInClosed); 24533 BUMP_MIB(&tcp_mib, tcpInDataInorderSegs); 24534 UPDATE_MIB(&tcp_mib, tcpInDataInorderBytes, seg_len); 24535 } 24536 if (flags & TH_RST) { 24537 (void) tcp_clean_death(tcp, 0, 28); 24538 goto done; 24539 } 24540 if (flags & TH_SYN) { 24541 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 24542 TH_RST|TH_ACK); 24543 /* 24544 * Do not delete the TCP structure if it is in 24545 * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13. 24546 */ 24547 goto done; 24548 } 24549 process_ack: 24550 if (flags & TH_ACK) { 24551 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 24552 if (bytes_acked <= 0) { 24553 if (bytes_acked == 0 && seg_len == 0 && 24554 new_swnd == tcp->tcp_swnd) 24555 BUMP_MIB(&tcp_mib, tcpInDupAck); 24556 } else { 24557 /* Acks something not sent */ 24558 flags |= TH_ACK_NEEDED; 24559 } 24560 } 24561 if (flags & TH_ACK_NEEDED) { 24562 /* 24563 * Time to send an ack for some reason. 24564 */ 24565 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 24566 tcp->tcp_rnxt, TH_ACK); 24567 } 24568 done: 24569 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 24570 DB_CKSUMSTART(mp) = 0; 24571 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 24572 TCP_STAT(tcp_time_wait_syn_fail); 24573 } 24574 freemsg(mp); 24575 } 24576 24577 /* 24578 * Allocate a T_SVR4_OPTMGMT_REQ. 
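 * The M_PROTO message assembled below is laid out as follows (a sketch
 * of what tcp_setsockopt_mp() produces):
 *
 *	struct T_optmgmt_req	PRIM_type = T_SVR4_OPTMGMT_REQ,
 *				MGMT_flags = T_NEGOTIATE
 *	struct opthdr		level, name (cmd) and len of the option
 *	option value		optlen bytes copied from 'opt'
 *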
24579 * The caller needs to increment tcp_drop_opt_ack_cnt when sending these so 24580 * that tcp_rput_other can drop the acks. 24581 */ 24582 static mblk_t * 24583 tcp_setsockopt_mp(int level, int cmd, char *opt, int optlen) 24584 { 24585 mblk_t *mp; 24586 struct T_optmgmt_req *tor; 24587 struct opthdr *oh; 24588 uint_t size; 24589 char *optptr; 24590 24591 size = sizeof (*tor) + sizeof (*oh) + optlen; 24592 mp = allocb(size, BPRI_MED); 24593 if (mp == NULL) 24594 return (NULL); 24595 24596 mp->b_wptr += size; 24597 mp->b_datap->db_type = M_PROTO; 24598 tor = (struct T_optmgmt_req *)mp->b_rptr; 24599 tor->PRIM_type = T_SVR4_OPTMGMT_REQ; 24600 tor->MGMT_flags = T_NEGOTIATE; 24601 tor->OPT_length = sizeof (*oh) + optlen; 24602 tor->OPT_offset = (t_scalar_t)sizeof (*tor); 24603 24604 oh = (struct opthdr *)&tor[1]; 24605 oh->level = level; 24606 oh->name = cmd; 24607 oh->len = optlen; 24608 if (optlen != 0) { 24609 optptr = (char *)&oh[1]; 24610 bcopy(opt, optptr, optlen); 24611 } 24612 return (mp); 24613 } 24614 24615 /* 24616 * TCP Timers Implementation. 24617 */ 24618 timeout_id_t 24619 tcp_timeout(conn_t *connp, void (*f)(void *), clock_t tim) 24620 { 24621 mblk_t *mp; 24622 tcp_timer_t *tcpt; 24623 tcp_t *tcp = connp->conn_tcp; 24624 24625 ASSERT(connp->conn_sqp != NULL); 24626 24627 TCP_DBGSTAT(tcp_timeout_calls); 24628 24629 if (tcp->tcp_timercache == NULL) { 24630 mp = tcp_timermp_alloc(KM_NOSLEEP | KM_PANIC); 24631 } else { 24632 TCP_DBGSTAT(tcp_timeout_cached_alloc); 24633 mp = tcp->tcp_timercache; 24634 tcp->tcp_timercache = mp->b_next; 24635 mp->b_next = NULL; 24636 ASSERT(mp->b_wptr == NULL); 24637 } 24638 24639 CONN_INC_REF(connp); 24640 tcpt = (tcp_timer_t *)mp->b_rptr; 24641 tcpt->connp = connp; 24642 tcpt->tcpt_proc = f; 24643 tcpt->tcpt_tid = timeout(tcp_timer_callback, mp, tim); 24644 return ((timeout_id_t)mp); 24645 } 24646 24647 static void 24648 tcp_timer_callback(void *arg) 24649 { 24650 mblk_t *mp = (mblk_t *)arg; 24651 tcp_timer_t *tcpt; 24652 conn_t *connp; 24653 24654 tcpt = (tcp_timer_t *)mp->b_rptr; 24655 connp = tcpt->connp; 24656 squeue_fill(connp->conn_sqp, mp, 24657 tcp_timer_handler, connp, SQTAG_TCP_TIMER); 24658 } 24659 24660 static void 24661 tcp_timer_handler(void *arg, mblk_t *mp, void *arg2) 24662 { 24663 tcp_timer_t *tcpt; 24664 conn_t *connp = (conn_t *)arg; 24665 tcp_t *tcp = connp->conn_tcp; 24666 24667 tcpt = (tcp_timer_t *)mp->b_rptr; 24668 ASSERT(connp == tcpt->connp); 24669 ASSERT((squeue_t *)arg2 == connp->conn_sqp); 24670 24671 /* 24672 * If the TCP has reached the closed state, don't proceed any 24673 * further. This TCP logically does not exist on the system. 24674 * tcpt_proc could for example access queues, that have already 24675 * been qprocoff'ed off. Also see comments at the start of tcp_input 24676 */ 24677 if (tcp->tcp_state != TCPS_CLOSED) { 24678 (*tcpt->tcpt_proc)(connp); 24679 } else { 24680 tcp->tcp_timer_tid = 0; 24681 } 24682 tcp_timer_free(connp->conn_tcp, mp); 24683 } 24684 24685 /* 24686 * There is potential race with untimeout and the handler firing at the same 24687 * time. The mblock may be freed by the handler while we are trying to use 24688 * it. But since both should execute on the same squeue, this race should not 24689 * occur. 
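 *
 * Minimal usage sketch of this timer pair (my_rto_handler is a
 * hypothetical callback with the void (*)(void *) signature that
 * tcp_timeout() expects; it runs on the connection's squeue):
 *
 *	timeout_id_t tid;
 *
 *	tid = tcp_timeout(connp, my_rto_handler, MSEC_TO_TICK(500));
 *	...
 *	if (tcp_timeout_cancel(connp, tid) >= 0) {
 *		timer had not fired yet and is now cancelled
 *	}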
24690 */ 24691 clock_t 24692 tcp_timeout_cancel(conn_t *connp, timeout_id_t id) 24693 { 24694 mblk_t *mp = (mblk_t *)id; 24695 tcp_timer_t *tcpt; 24696 clock_t delta; 24697 24698 TCP_DBGSTAT(tcp_timeout_cancel_reqs); 24699 24700 if (mp == NULL) 24701 return (-1); 24702 24703 tcpt = (tcp_timer_t *)mp->b_rptr; 24704 ASSERT(tcpt->connp == connp); 24705 24706 delta = untimeout(tcpt->tcpt_tid); 24707 24708 if (delta >= 0) { 24709 TCP_DBGSTAT(tcp_timeout_canceled); 24710 tcp_timer_free(connp->conn_tcp, mp); 24711 CONN_DEC_REF(connp); 24712 } 24713 24714 return (delta); 24715 } 24716 24717 /* 24718 * Allocate space for the timer event. The allocation looks like mblk, but it is 24719 * not a proper mblk. To avoid confusion we set b_wptr to NULL. 24720 * 24721 * Dealing with failures: If we can't allocate from the timer cache we try 24722 * allocating from dblock caches using allocb_tryhard(). In this case b_wptr 24723 * points to b_rptr. 24724 * If we can't allocate anything using allocb_tryhard(), we perform a last 24725 * attempt and use kmem_alloc_tryhard(). In this case we set b_wptr to -1 and 24726 * save the actual allocation size in b_datap. 24727 */ 24728 mblk_t * 24729 tcp_timermp_alloc(int kmflags) 24730 { 24731 mblk_t *mp = (mblk_t *)kmem_cache_alloc(tcp_timercache, 24732 kmflags & ~KM_PANIC); 24733 24734 if (mp != NULL) { 24735 mp->b_next = mp->b_prev = NULL; 24736 mp->b_rptr = (uchar_t *)(&mp[1]); 24737 mp->b_wptr = NULL; 24738 mp->b_datap = NULL; 24739 mp->b_queue = NULL; 24740 } else if (kmflags & KM_PANIC) { 24741 /* 24742 * Failed to allocate memory for the timer. Try allocating from 24743 * dblock caches. 24744 */ 24745 TCP_STAT(tcp_timermp_allocfail); 24746 mp = allocb_tryhard(sizeof (tcp_timer_t)); 24747 if (mp == NULL) { 24748 size_t size = 0; 24749 /* 24750 * Memory is really low. Try tryhard allocation. 24751 */ 24752 TCP_STAT(tcp_timermp_allocdblfail); 24753 mp = kmem_alloc_tryhard(sizeof (mblk_t) + 24754 sizeof (tcp_timer_t), &size, kmflags); 24755 mp->b_rptr = (uchar_t *)(&mp[1]); 24756 mp->b_next = mp->b_prev = NULL; 24757 mp->b_wptr = (uchar_t *)-1; 24758 mp->b_datap = (dblk_t *)size; 24759 mp->b_queue = NULL; 24760 } 24761 ASSERT(mp->b_wptr != NULL); 24762 } 24763 TCP_DBGSTAT(tcp_timermp_alloced); 24764 24765 return (mp); 24766 } 24767 24768 /* 24769 * Free per-tcp timer cache. 24770 * It can only contain entries from tcp_timercache. 24771 */ 24772 void 24773 tcp_timermp_free(tcp_t *tcp) 24774 { 24775 mblk_t *mp; 24776 24777 while ((mp = tcp->tcp_timercache) != NULL) { 24778 ASSERT(mp->b_wptr == NULL); 24779 tcp->tcp_timercache = tcp->tcp_timercache->b_next; 24780 kmem_cache_free(tcp_timercache, mp); 24781 } 24782 } 24783 24784 /* 24785 * Free timer event. Put it on the per-tcp timer cache if there is not too many 24786 * events there already (currently at most two events are cached). 24787 * If the event is not allocated from the timer cache, free it right away. 24788 */ 24789 static void 24790 tcp_timer_free(tcp_t *tcp, mblk_t *mp) 24791 { 24792 mblk_t *mp1 = tcp->tcp_timercache; 24793 24794 if (mp->b_wptr != NULL) { 24795 /* 24796 * This allocation is not from a timer cache, free it right 24797 * away. 
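 *
 * Recap of the b_wptr encoding established by tcp_timermp_alloc():
 *	NULL		- block came from tcp_timercache
 *	(uchar_t *)-1	- kmem_alloc_tryhard() block, size saved in b_datap
 *	anything else	- real mblk from allocb_tryhard(), freed with freeb()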
24798 */ 24799 if (mp->b_wptr != (uchar_t *)-1) 24800 freeb(mp); 24801 else 24802 kmem_free(mp, (size_t)mp->b_datap); 24803 } else if (mp1 == NULL || mp1->b_next == NULL) { 24804 /* Cache this timer block for future allocations */ 24805 mp->b_rptr = (uchar_t *)(&mp[1]); 24806 mp->b_next = mp1; 24807 tcp->tcp_timercache = mp; 24808 } else { 24809 kmem_cache_free(tcp_timercache, mp); 24810 TCP_DBGSTAT(tcp_timermp_freed); 24811 } 24812 } 24813 24814 /* 24815 * End of TCP Timers implementation. 24816 */ 24817 24818 /* 24819 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL 24820 * on the specified backing STREAMS q. Note, the caller may make the 24821 * decision to call based on the tcp_t.tcp_flow_stopped value which 24822 * when check outside the q's lock is only an advisory check ... 24823 */ 24824 24825 void 24826 tcp_setqfull(tcp_t *tcp) 24827 { 24828 queue_t *q = tcp->tcp_wq; 24829 24830 if (!(q->q_flag & QFULL)) { 24831 mutex_enter(QLOCK(q)); 24832 if (!(q->q_flag & QFULL)) { 24833 /* still need to set QFULL */ 24834 q->q_flag |= QFULL; 24835 tcp->tcp_flow_stopped = B_TRUE; 24836 mutex_exit(QLOCK(q)); 24837 TCP_STAT(tcp_flwctl_on); 24838 } else { 24839 mutex_exit(QLOCK(q)); 24840 } 24841 } 24842 } 24843 24844 void 24845 tcp_clrqfull(tcp_t *tcp) 24846 { 24847 queue_t *q = tcp->tcp_wq; 24848 24849 if (q->q_flag & QFULL) { 24850 mutex_enter(QLOCK(q)); 24851 if (q->q_flag & QFULL) { 24852 q->q_flag &= ~QFULL; 24853 tcp->tcp_flow_stopped = B_FALSE; 24854 mutex_exit(QLOCK(q)); 24855 if (q->q_flag & QWANTW) 24856 qbackenable(q, 0); 24857 } else { 24858 mutex_exit(QLOCK(q)); 24859 } 24860 } 24861 } 24862 24863 /* 24864 * TCP Kstats implementation 24865 */ 24866 static void 24867 tcp_kstat_init(void) 24868 { 24869 tcp_named_kstat_t template = { 24870 { "rtoAlgorithm", KSTAT_DATA_INT32, 0 }, 24871 { "rtoMin", KSTAT_DATA_INT32, 0 }, 24872 { "rtoMax", KSTAT_DATA_INT32, 0 }, 24873 { "maxConn", KSTAT_DATA_INT32, 0 }, 24874 { "activeOpens", KSTAT_DATA_UINT32, 0 }, 24875 { "passiveOpens", KSTAT_DATA_UINT32, 0 }, 24876 { "attemptFails", KSTAT_DATA_UINT32, 0 }, 24877 { "estabResets", KSTAT_DATA_UINT32, 0 }, 24878 { "currEstab", KSTAT_DATA_UINT32, 0 }, 24879 { "inSegs", KSTAT_DATA_UINT32, 0 }, 24880 { "outSegs", KSTAT_DATA_UINT32, 0 }, 24881 { "retransSegs", KSTAT_DATA_UINT32, 0 }, 24882 { "connTableSize", KSTAT_DATA_INT32, 0 }, 24883 { "outRsts", KSTAT_DATA_UINT32, 0 }, 24884 { "outDataSegs", KSTAT_DATA_UINT32, 0 }, 24885 { "outDataBytes", KSTAT_DATA_UINT32, 0 }, 24886 { "retransBytes", KSTAT_DATA_UINT32, 0 }, 24887 { "outAck", KSTAT_DATA_UINT32, 0 }, 24888 { "outAckDelayed", KSTAT_DATA_UINT32, 0 }, 24889 { "outUrg", KSTAT_DATA_UINT32, 0 }, 24890 { "outWinUpdate", KSTAT_DATA_UINT32, 0 }, 24891 { "outWinProbe", KSTAT_DATA_UINT32, 0 }, 24892 { "outControl", KSTAT_DATA_UINT32, 0 }, 24893 { "outFastRetrans", KSTAT_DATA_UINT32, 0 }, 24894 { "inAckSegs", KSTAT_DATA_UINT32, 0 }, 24895 { "inAckBytes", KSTAT_DATA_UINT32, 0 }, 24896 { "inDupAck", KSTAT_DATA_UINT32, 0 }, 24897 { "inAckUnsent", KSTAT_DATA_UINT32, 0 }, 24898 { "inDataInorderSegs", KSTAT_DATA_UINT32, 0 }, 24899 { "inDataInorderBytes", KSTAT_DATA_UINT32, 0 }, 24900 { "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 }, 24901 { "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 }, 24902 { "inDataDupSegs", KSTAT_DATA_UINT32, 0 }, 24903 { "inDataDupBytes", KSTAT_DATA_UINT32, 0 }, 24904 { "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 }, 24905 { "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 }, 24906 { "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 }, 24907 { 
"inDataPastWinBytes", KSTAT_DATA_UINT32, 0 }, 24908 { "inWinProbe", KSTAT_DATA_UINT32, 0 }, 24909 { "inWinUpdate", KSTAT_DATA_UINT32, 0 }, 24910 { "inClosed", KSTAT_DATA_UINT32, 0 }, 24911 { "rttUpdate", KSTAT_DATA_UINT32, 0 }, 24912 { "rttNoUpdate", KSTAT_DATA_UINT32, 0 }, 24913 { "timRetrans", KSTAT_DATA_UINT32, 0 }, 24914 { "timRetransDrop", KSTAT_DATA_UINT32, 0 }, 24915 { "timKeepalive", KSTAT_DATA_UINT32, 0 }, 24916 { "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 }, 24917 { "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 }, 24918 { "listenDrop", KSTAT_DATA_UINT32, 0 }, 24919 { "listenDropQ0", KSTAT_DATA_UINT32, 0 }, 24920 { "halfOpenDrop", KSTAT_DATA_UINT32, 0 }, 24921 { "outSackRetransSegs", KSTAT_DATA_UINT32, 0 }, 24922 { "connTableSize6", KSTAT_DATA_INT32, 0 } 24923 }; 24924 24925 tcp_mibkp = kstat_create(TCP_MOD_NAME, 0, TCP_MOD_NAME, 24926 "mib2", KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0); 24927 24928 if (tcp_mibkp == NULL) 24929 return; 24930 24931 template.rtoAlgorithm.value.ui32 = 4; 24932 template.rtoMin.value.ui32 = tcp_rexmit_interval_min; 24933 template.rtoMax.value.ui32 = tcp_rexmit_interval_max; 24934 template.maxConn.value.i32 = -1; 24935 24936 bcopy(&template, tcp_mibkp->ks_data, sizeof (template)); 24937 24938 tcp_mibkp->ks_update = tcp_kstat_update; 24939 24940 kstat_install(tcp_mibkp); 24941 } 24942 24943 static void 24944 tcp_kstat_fini(void) 24945 { 24946 24947 if (tcp_mibkp != NULL) { 24948 kstat_delete(tcp_mibkp); 24949 tcp_mibkp = NULL; 24950 } 24951 } 24952 24953 static int 24954 tcp_kstat_update(kstat_t *kp, int rw) 24955 { 24956 tcp_named_kstat_t *tcpkp; 24957 tcp_t *tcp; 24958 connf_t *connfp; 24959 conn_t *connp; 24960 int i; 24961 24962 if (!kp || !kp->ks_data) 24963 return (EIO); 24964 24965 if (rw == KSTAT_WRITE) 24966 return (EACCES); 24967 24968 tcpkp = (tcp_named_kstat_t *)kp->ks_data; 24969 24970 tcpkp->currEstab.value.ui32 = 0; 24971 24972 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 24973 connfp = &ipcl_globalhash_fanout[i]; 24974 connp = NULL; 24975 while ((connp = 24976 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 24977 tcp = connp->conn_tcp; 24978 switch (tcp_snmp_state(tcp)) { 24979 case MIB2_TCP_established: 24980 case MIB2_TCP_closeWait: 24981 tcpkp->currEstab.value.ui32++; 24982 break; 24983 } 24984 } 24985 } 24986 24987 tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens; 24988 tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens; 24989 tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails; 24990 tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets; 24991 tcpkp->inSegs.value.ui32 = tcp_mib.tcpInSegs; 24992 tcpkp->outSegs.value.ui32 = tcp_mib.tcpOutSegs; 24993 tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs; 24994 tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize; 24995 tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts; 24996 tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs; 24997 tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes; 24998 tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes; 24999 tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck; 25000 tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed; 25001 tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg; 25002 tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate; 25003 tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe; 25004 tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl; 25005 tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans; 25006 tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs; 25007 
tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes; 25008 tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck; 25009 tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent; 25010 tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs; 25011 tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes; 25012 tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs; 25013 tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes; 25014 tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs; 25015 tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes; 25016 tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs; 25017 tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes; 25018 tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs; 25019 tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes; 25020 tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe; 25021 tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate; 25022 tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed; 25023 tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate; 25024 tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate; 25025 tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans; 25026 tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop; 25027 tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive; 25028 tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe; 25029 tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop; 25030 tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop; 25031 tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0; 25032 tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop; 25033 tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs; 25034 tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize; 25035 25036 return (0); 25037 } 25038 25039 void 25040 tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp) 25041 { 25042 uint16_t hdr_len; 25043 ipha_t *ipha; 25044 uint8_t *nexthdrp; 25045 tcph_t *tcph; 25046 25047 /* Already has an eager */ 25048 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 25049 TCP_STAT(tcp_reinput_syn); 25050 squeue_enter(connp->conn_sqp, mp, connp->conn_recv, 25051 connp, SQTAG_TCP_REINPUT_EAGER); 25052 return; 25053 } 25054 25055 switch (IPH_HDR_VERSION(mp->b_rptr)) { 25056 case IPV4_VERSION: 25057 ipha = (ipha_t *)mp->b_rptr; 25058 hdr_len = IPH_HDR_LENGTH(ipha); 25059 break; 25060 case IPV6_VERSION: 25061 if (!ip_hdr_length_nexthdr_v6(mp, (ip6_t *)mp->b_rptr, 25062 &hdr_len, &nexthdrp)) { 25063 CONN_DEC_REF(connp); 25064 freemsg(mp); 25065 return; 25066 } 25067 break; 25068 } 25069 25070 tcph = (tcph_t *)&mp->b_rptr[hdr_len]; 25071 if ((tcph->th_flags[0] & (TH_SYN|TH_ACK|TH_RST|TH_URG)) == TH_SYN) { 25072 mp->b_datap->db_struioflag |= STRUIO_EAGER; 25073 DB_CKSUMSTART(mp) = (intptr_t)sqp; 25074 } 25075 25076 squeue_fill(connp->conn_sqp, mp, connp->conn_recv, connp, 25077 SQTAG_TCP_REINPUT); 25078 } 25079 25080 static squeue_func_t 25081 tcp_squeue_switch(int val) 25082 { 25083 squeue_func_t rval = squeue_fill; 25084 25085 switch (val) { 25086 case 1: 25087 rval = squeue_enter_nodrain; 25088 break; 25089 case 2: 25090 rval = squeue_enter; 25091 break; 25092 default: 25093 break; 25094 } 25095 return (rval); 25096 } 25097 25098 static void 25099 tcp_squeue_add(squeue_t *sqp) 25100 { 25101 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc( 25102 sizeof (tcp_squeue_priv_t), KM_SLEEP); 25103 25104 *squeue_getprivate(sqp, 
SQPRIVATE_TCP) = (intptr_t)tcp_time_wait; 25105 tcp_time_wait->tcp_time_wait_tid = timeout(tcp_time_wait_collector, 25106 sqp, TCP_TIME_WAIT_DELAY); 25107 if (tcp_free_list_max_cnt == 0) { 25108 int tcp_ncpus = ((boot_max_ncpus == -1) ? 25109 max_ncpus : boot_max_ncpus); 25110 25111 /* 25112 * Limit number of entries to 1% of available memory / tcp_ncpus 25113 */ 25114 tcp_free_list_max_cnt = (freemem * PAGESIZE) / 25115 (tcp_ncpus * sizeof (tcp_t) * 100); 25116 } 25117 tcp_time_wait->tcp_free_list_cnt = 0; 25118 } 25119
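/*
 * Worked example of the per-squeue free list cap computed above
 * (illustrative numbers only): with 4 GB of free memory
 * (freemem * PAGESIZE), 8 CPUs and a tcp_t of roughly 2 KB,
 *
 *	tcp_free_list_max_cnt = 4G / (8 * 2K * 100) ~= 2600 entries,
 *
 * i.e. the tcp_t's cached across all squeues together stay around one
 * percent of free memory.
 */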