/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
const char tcp_version[] = "%Z%%M%	%I%	%E% SMI";


#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/multidata.h>
#include <sys/multidata_impl.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/zone.h>
#include <sys/sunldi.h>

#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/ipdrop.h>
#include <inet/tcp_trace.h>

#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue.h>
#include <inet/kssl/ksslapi.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>

/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in tcp_t and conn_t structures
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_CONNTCP as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t. The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time the
 * eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPSEC checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind squeue (via squeue_enter, squeue_enter_nodrain, or
 * squeue_fill). Since the squeues allow a direct function call, the caller
 * can pass any tcp function having a prototype of edesc_t as argument
 * (different from the traditional STREAMS model where packets come in only
 * through designated entry points). The list of functions that can be
 * directly called via squeue are listed before the usual function
 * prototypes.
 *
 * Referencing:
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear when it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on the write side or by doing a classify on the read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. For the
 * read side, the classifier itself puts the reference under the fanout lock
 * to make sure that the tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on the application CPU, which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out, but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * transmitted.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_conn_request(). But briefly, the squeue is picked by
 * ip_tcp_input()/ip_fanout_tcp_v6() based on the interrupted CPU.
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps
 * its reference but tcp_close() always drops IP's reference. So if the tcp
 * was not killed, it is sitting in the time_wait list with 2 references -
 * 1 for TCP and 1 because it is in the classifier's connected hash. This is
 * the condition we use to determine that it's OK to clean up the tcp outside
 * of squeue when the time wait expires (check the ref under fanout and
 * conn_lock and if it is 2, remove it from the fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provision for (AF_INET, SOCK_STREAM) sockets which
 * can't have 'ipv6_recvpktinfo' set and for these type of sockets, IP
 * will never send an M_CTL to TCP. As such, ip_tcp_input() which handles
 * all TCP packets from the wire makes an IPCL_IS_TCP4_CONNECTED_NO_POLICY
 * check to send packets directly to tcp_rput_data via squeue. Everyone
 * else comes through tcp_input() on the read side.
 *
 * We also make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in the acceptor hash since a sockfs listener can never
 * become an acceptor and also avoid allocating a tcp_t for the acceptor
 * STREAM since the eager has already been allocated and the accept now
 * happens on the acceptor STREAM. There is a big blob of comment on top
 * of tcp_conn_request explaining the new accept. When the socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to the old
 * behaviour. Once tcp_issocket is unset, it's never set for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPSEC policies for new connections. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 *
 * PFHooks notes :
 *
 * For the mdt case, one meta buffer contains multiple packets. Mblks for
 * every packet are assembled and passed to the hooks. When packets are
 * blocked, or the boundary of any packet is changed, the mdt processing
 * is stopped, and packets of the meta buffer are sent to the IP path one
 * by one.
 */

/*
 * Values for squeue switch:
 * 1: squeue_enter_nodrain
 * 2: squeue_enter
 * 3: squeue_fill
 */
int tcp_squeue_close = 2;	/* Settable in /etc/system */
int tcp_squeue_wput = 2;

squeue_func_t tcp_squeue_close_proc;
squeue_func_t tcp_squeue_wput_proc;

/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
int tcp_tx_pull_len = 16;

/*
 * TCP Statistics.
 *
 * How TCP statistics work.
 *
 * There are two types of statistics invoked by two macros.
 *
 * TCP_STAT(name) does non-atomic increment of a named stat counter. It is
 * supposed to be used in non MT-hot paths of the code.
 *
 * TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
 * supposed to be used for DEBUG purposes and may be used on a hot path.
 *
 * Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
 * (use "kstat tcp" to get them).
 *
 * There is also an additional debugging facility that marks
 * tcp_clean_death() instances and saves them in the tcp_t structure. It
 * is triggered by the TCP_TAG_CLEAN_DEATH define. Also, there is a global
 * array of counters for tcp_clean_death() calls that counts the number of
 * times each tag was hit. It is triggered by the TCP_CLD_COUNTERS define.
 *
 * How to add new counters.
 *
 * 1) Add a field in the tcp_stat structure describing your counter.
 * 2) Add a line in the template in tcp_kstat2_init() with the name
 *    of the counter.
 *
 *    IMPORTANT!! - make sure that both are in sync !!
 * 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
 *
 * Please avoid using private counters which are not kstat-exported.
 * (A sketch of these steps follows the statistics declarations below.)
 *
 * TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death()
 * instances in the tcp_t structure.
 *
 * TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death
 * tags.
 */

#ifndef TCP_DEBUG_COUNTER
#ifdef DEBUG
#define	TCP_DEBUG_COUNTER 1
#else
#define	TCP_DEBUG_COUNTER 0
#endif
#endif

#define	TCP_CLD_COUNTERS 0

#define	TCP_TAG_CLEAN_DEATH 1
#define	TCP_MAX_CLEAN_DEATH_TAG 32

#ifdef lint
static int _lint_dummy_;
#endif

#if TCP_CLD_COUNTERS
static uint_t tcp_clean_death_stat[TCP_MAX_CLEAN_DEATH_TAG];
#define	TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
#elif defined(lint)
#define	TCP_CLD_STAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_CLD_STAT(x)
#endif

#if TCP_DEBUG_COUNTER
#define	TCP_DBGSTAT(tcps, x)	\
	atomic_add_64(&((tcps)->tcps_statistics.x.value.ui64), 1)
#define	TCP_G_DBGSTAT(x)	\
	atomic_add_64(&(tcp_g_statistics.x.value.ui64), 1)
#elif defined(lint)
#define	TCP_DBGSTAT(tcps, x) ASSERT(_lint_dummy_ == 0);
#define	TCP_G_DBGSTAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_DBGSTAT(tcps, x)
#define	TCP_G_DBGSTAT(x)
#endif

#define	TCP_G_STAT(x)	(tcp_g_statistics.x.value.ui64++)

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;
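
/*
 * Illustrative sketch of the "how to add new counters" recipe above, for
 * a hypothetical counter tcp_example_stat (the field name and template
 * line are assumptions for illustration, not part of this file; compare
 * with the existing entries used by tcp_kstat2_init()).
 */
#if 0
	/* 1) In the tcp_stat_t structure: */
	kstat_named_t	tcp_example_stat;

	/* 2) In the template inside tcp_kstat2_init(): */
	{ "tcp_example_stat",	KSTAT_DATA_UINT64 },

	/* 3) At the point of interest (atomic, DEBUG-oriented flavour): */
	TCP_DBGSTAT(tcps, tcp_example_stat);
#endif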

/*
 * Call either ip_output or ip_output_v6. This replaces putnext() calls on
 * the tcp write side.
 */
#define	CALL_IP_WPUT(connp, q, mp) {					\
	tcp_stack_t	*tcps;						\
									\
	tcps = connp->conn_netstack->netstack_tcp;			\
	ASSERT(((q)->q_flag & QREADR) == 0);				\
	TCP_DBGSTAT(tcps, tcp_ip_output);				\
	connp->conn_send(connp, (mp), (q), IP_WPUT);			\
}

/* Macros for timestamp comparisons */
#define	TSTMP_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)
#define	TSTMP_LT(a, b)	((int32_t)((a)-(b)) < 0)
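
/*
 * A minimal sketch (not part of the implementation) of why the signed
 * subtraction above gives the right answer across 32-bit wraparound: the
 * comparison stays correct as long as the two timestamps are less than
 * 2^31 ticks apart.
 */
#if 0
	uint32_t old_ts = 0xFFFFFFF0;	/* shortly before the clock wraps */
	uint32_t new_ts = 0x00000010;	/* 0x20 ticks later, after the wrap */

	ASSERT(TSTMP_LT(old_ts, new_ts));	/* (int32_t)0xFFFFFFE0 < 0 */
	ASSERT(TSTMP_GEQ(new_ts, old_ts));	/* (int32_t)0x00000020 >= 0 */
	/* A naive unsigned "new_ts > old_ts" would conclude the opposite. */
#endif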

/*
 * Parameters for TCP Initial Send Sequence number (ISS) generation. When
 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
 * by adding three components: a time component which grows by 1 every 4096
 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
 * a per-connection component which grows by 125000 for every new
 * connection; and an "extra" component that grows by a random amount
 * centered approximately on 64000.  This causes the ISS generator to cycle
 * every 4.89 hours if no TCP connections are made, and faster if
 * connections are made.
 *
 * When tcp_strong_iss is set to 0, ISS is calculated by adding two
 * components: a time component which grows by 250000 every second; and
 * a per-connection component which grows by 125000 for every new
 * connection.
 *
 * A third method, when tcp_strong_iss is set to 2, for generating ISS is
 * prescribed by Steve Bellovin.  This involves adding time, the 125000 per
 * connection, and a one-way hash (MD5) of the connection ID <sport, dport,
 * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
 * password.
 */
#define	ISS_INCR	250000
#define	ISS_NSEC_SHT	12
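
/*
 * A minimal sketch of the tcp_strong_iss == 1 arithmetic described above.
 * The 'iss_incr_extra' accumulator and the random term are hypothetical
 * stand-ins for the state kept by the real code in tcp_iss_init().
 */
#if 0
	uint32_t iss;

	iss = (uint32_t)(gethrtime() >> ISS_NSEC_SHT); /* +1 every 4096 ns */
	iss_incr_extra += ISS_INCR / 2;		/* +125000 per connection */
	iss += iss_incr_extra + random_centered_on_64000;
#endif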

static sin_t	sin_null;	/* Zero address for quick clears */
static sin6_t	sin6_null;	/* Zero address for quick clears */

/*
 * This implementation follows the 4.3BSD interpretation of the urgent
 * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
 * incompatible changes in protocols like telnet and rlogin.
 */
#define	TCP_OLD_URP_INTERPRETATION	1

#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
	(TCP_IS_DETACHED(tcp) &&	\
	    (!(tcp)->tcp_hard_binding))

/*
 * TCP reassembly macros. We hide starting and ending sequence numbers in
 * b_next and b_prev of messages on the reassembly queue.  The messages are
 * chained using b_cont.  These macros are used in tcp_reass() so we don't
 * have to see the ugly casts and assignments.
 */
#define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
#define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
					(mblk_t *)(uintptr_t)(u))
#define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
#define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
					(mblk_t *)(uintptr_t)(u))
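
/*
 * Illustrative round trip (a sketch, not live code): stash and recover
 * the sequence numbers of a fragment sitting on the reassembly queue.
 */
#if 0
	uint32_t start = 0x89abcdef;	/* arbitrary starting sequence */

	TCP_REASS_SET_SEQ(mp, start);
	TCP_REASS_SET_END(mp, start + 1460);
	ASSERT(TCP_REASS_SEQ(mp) == start);		/* b_next round-trips */
	ASSERT(TCP_REASS_END(mp) == start + 1460);	/* b_prev round-trips */
#endif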

/*
 * Implementation of TCP Timers.
 * =============================
 *
 * INTERFACE:
 *
 * There are two basic functions dealing with tcp timers:
 *
 *	timeout_id_t	tcp_timeout(connp, func, time)
 * 	clock_t		tcp_timeout_cancel(connp, timeout_id)
 *	TCP_TIMER_RESTART(tcp, intvl)
 *
 * tcp_timeout() starts a timer for the 'tcp' instance arranging to call
 * 'func' after 'time' ticks have passed. The function called by timeout()
 * must adhere to the same restrictions as a driver soft interrupt handler
 * - it must not sleep or call other functions that might sleep. The value
 * returned is the opaque non-zero timeout identifier that can be passed
 * to tcp_timeout_cancel() to cancel the request. The call to tcp_timeout()
 * may fail in which case it returns zero. This is different from the
 * timeout(9F) function which never fails.
 *
 * The call-back function 'func' always receives 'connp' as its single
 * argument. It is always executed in the squeue corresponding to the tcp
 * structure. The tcp structure is guaranteed to be present at the time the
 * call-back is called.
 *
 * NOTE: The call-back function 'func' is never called if tcp is in
 * the TCPS_CLOSED state.
 *
 * tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
 * request. Locks acquired by the call-back routine should not be held
 * across the call to tcp_timeout_cancel() or a deadlock may result.
 *
 * tcp_timeout_cancel() returns -1 if it can not cancel the timeout request.
 * Otherwise, it returns an integer value greater than or equal to 0. In
 * particular, if the call-back function is already placed on the squeue,
 * it can not be canceled.
 *
 * NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
 * 	within squeue context corresponding to the tcp instance. Since the
 *	call-back is also called via the same squeue, there are no race
 *	conditions described in untimeout(9F) manual page since all calls
 *	are strictly serialized.
 *
 * TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
 *	stored in tcp_timer_tid and starts a new one using
 *	MSEC_TO_TICK(intvl). It always uses the tcp_timer() function as a
 *	call-back and stores the return value of tcp_timeout() in the
 *	tcp->tcp_timer_tid field.
 *
 * NOTE: since the timeout cancellation is not guaranteed, the cancelled
 *	call-back may still be called, so it is possible tcp_timer() will
 *	be called several times. This should not be a problem since
 *	tcp_timer() should always check the tcp instance state.
 *
 *
 * IMPLEMENTATION:
 *
 * TCP timers are implemented using a three-stage process. The call to
 * tcp_timeout() uses the timeout(9F) function to call the
 * tcp_timer_callback() function when the timer expires. The
 * tcp_timer_callback() arranges the call of the tcp_timer_handler()
 * function via a squeue corresponding to the tcp instance. The
 * tcp_timer_handler() calls the actual requested timeout call-back and
 * passes the tcp instance as an argument to it. Information is passed
 * between stages using the tcp_timer_t structure which contains the connp
 * pointer, the tcp call-back to call and the timeout id returned by the
 * timeout(9F).
 *
 * The tcp_timer_t structure is not used directly, it is embedded in an
 * mblk_t - like structure that is used to enter a squeue. The mp->b_rptr
 * of this pseudo mblk points to the beginning of the tcp_timer_t
 * structure. The tcp_timeout() returns the pointer to this mblk.
 *
 * The pseudo mblk is allocated from a special tcp_timer_cache kmem cache.
 * It looks like a normal mblk without an actual dblk attached to it.
 *
 * To optimize performance each tcp instance holds a small cache of timer
 * mblocks. In the current implementation it caches up to two timer mblocks
 * per tcp instance. The cache is preserved over tcp frees and is only
 * freed when the whole tcp structure is destroyed by its kmem destructor.
 * Since all tcp timer processing happens on the corresponding squeue, the
 * cache manipulation does not require any locks. Experiments show that
 * the majority of timer mblock allocations are satisfied from the tcp
 * cache and do not involve kmem calls.
 *
 * The tcp_timeout() places a refhold on the connp instance which
 * guarantees that it will be present at the time the call-back function
 * fires. The tcp_timer_handler() drops the reference after calling the
 * call-back, so the call-back function does not need to manipulate the
 * references explicitly.
 */

typedef struct tcp_timer_s {
	conn_t	*connp;
	void 	(*tcpt_proc)(void *);
	timeout_id_t   tcpt_tid;
} tcp_timer_t;

static kmem_cache_t *tcp_timercache;
kmem_cache_t	*tcp_sack_info_cache;
kmem_cache_t	*tcp_iphc_cache;
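
/*
 * Simplified model of the three stages (a sketch under assumptions, not
 * the actual implementation; 'timer_mp_of' is a hypothetical helper that
 * recovers the pseudo mblk embedding the tcp_timer_t).
 */
#if 0
/* Stage 1 + 2: runs in callout context, defers to the tcp's squeue. */
static void
model_timer_callback(void *arg)
{
	tcp_timer_t *tcpt = arg;

	squeue_fill(tcpt->connp->conn_sqp, timer_mp_of(tcpt),
	    model_timer_handler, tcpt->connp, SQTAG_TCP_TIMER);
}

/* Stage 3: runs inside the squeue, serialized with all other tcp work. */
static void
model_timer_handler(void *arg, mblk_t *mp, void *arg2)
{
	tcp_timer_t *tcpt = (tcp_timer_t *)mp->b_rptr;

	(*tcpt->tcpt_proc)(tcpt->connp);	/* the requested call-back */
	CONN_DEC_REF(tcpt->connp);		/* drop tcp_timeout()'s refhold */
}
#endif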

/*
 * For scalability, we must not run a timer for every TCP connection
 * in TIME_WAIT state.  To see why, consider (for time wait interval of
 * 4 minutes):
 *	1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
 *
 * This list is ordered by time, so you need only delete from the head
 * until you get to entries which aren't old enough to delete yet.
 * The list consists of only the detached TIME_WAIT connections.
 *
 * Note that the timer (tcp_time_wait_expire) is started when the tcp_t
 * becomes detached TIME_WAIT (either by changing the state and already
 * being detached or the other way around). This means that the TIME_WAIT
 * state can be extended (up to doubled) if the connection doesn't become
 * detached for a long time.
 *
 * The list manipulations (including tcp_time_wait_next/prev)
 * are protected by the tcp_time_wait_lock. The content of the
 * detached TIME_WAIT connections is protected by the normal perimeters.
 *
 * This list is per squeue and squeues are shared across the tcp_stack_t's.
 * Things on tcp_time_wait_head remain associated with the tcp_stack_t
 * and conn_netstack.
 * The tcp_t's that are added to tcp_free_list are disassociated and
 * have NULL tcp_tcps and conn_netstack pointers.
 */
typedef struct tcp_squeue_priv_s {
	kmutex_t	tcp_time_wait_lock;
	timeout_id_t	tcp_time_wait_tid;
	tcp_t		*tcp_time_wait_head;
	tcp_t		*tcp_time_wait_tail;
	tcp_t		*tcp_free_list;
	uint_t		tcp_free_list_cnt;
} tcp_squeue_priv_t;

/*
 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
 * Running it every 5 seconds seems to give the best results.
 */
#define	TCP_TIME_WAIT_DELAY drv_usectohz(5000000)

/*
 * To prevent memory hogging, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	49152

/*
 *  PAWS needs a timer for 24 days.  This is the number of ticks in 24 days
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Bind hash list size and hash function.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_BIND_FANOUT_SIZE	512
#define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
/*
 * Size of listen and acceptor hash list.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_FANOUT_SIZE		256

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_FANOUT_SIZE - 1))
#endif	/* _ILP32 */

#define	IP_ADDR_CACHE_SIZE	2048
#define	IP_ADDR_CACHE_HASH(faddr)					\
	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))

/* Hash for HSPs uses all 32 bits, since both networks and hosts are in table */
#define	TCP_HSP_HASH_SIZE 256

#define	TCP_HSP_HASH(addr)					\
	(((addr>>24) ^ (addr >>16) ^			\
	    (addr>>8) ^ (addr)) % TCP_HSP_HASH_SIZE)
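
/*
 * Example of the power-of-2 masking above (a sketch): TCP_BIND_HASH
 * converts the port to host order and keeps its low 9 bits.
 */
#if 0
	in_port_t lport = htons(8080);

	ASSERT(TCP_BIND_HASH(lport) == (8080 & (TCP_BIND_FANOUT_SIZE - 1)));
	ASSERT(TCP_BIND_HASH(lport) == 400);	/* 8080 mod 512 */
#endif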

/*
 * TCP options struct returned from tcp_parse_options.
 */
typedef struct tcp_opt_s {
	uint32_t	tcp_opt_mss;
	uint32_t	tcp_opt_wscale;
	uint32_t	tcp_opt_ts_val;
	uint32_t	tcp_opt_ts_ecr;
	tcp_t		*tcp;
} tcp_opt_t;

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif
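
/*
 * Sketch: on either endianness the constant lays out in memory as the
 * wire bytes { NOP, NOP, TSTAMP, 10 }, so the parser can compare a 32-bit
 * load at the start of the option block against it directly.
 */
#if 0
	uint32_t prefix = TCPOPT_NOP_NOP_TSTAMP;
	uchar_t wire[4] = { TCPOPT_NOP, TCPOPT_NOP, TCPOPT_TSTAMP, 10 };

	ASSERT(bcmp(&prefix, wire, sizeof (wire)) == 0);
#endif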

/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16

/* TCP option length */
#define	TCPOPT_NOP_LEN		1
#define	TCPOPT_MAXSEG_LEN	4
#define	TCPOPT_WS_LEN		3
#define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
#define	TCPOPT_TSTAMP_LEN	10
#define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
#define	TCPOPT_SACK_OK_LEN	2
#define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
#define	TCPOPT_REAL_SACK_LEN	4
#define	TCPOPT_MAX_SACK_LEN	36
#define	TCPOPT_HEADER_LEN	2

/* TCP cwnd burst factor. */
#define	TCP_CWND_INFINITE	65535
#define	TCP_CWND_SS		3
#define	TCP_CWND_NORMAL		5

/* Maximum TCP initial cwin (start/restart). */
#define	TCP_MAX_INIT_CWND	8

/*
 * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
 * either tcp_slow_start_initial or tcp_slow_start_after_idle
 * depending on the caller.  If the upper layer has not used the
 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
 * If the upper layer has set tcp_init_cwnd, just use it to
 * calculate the tcp_cwnd.
 */
#define	SET_TCP_INIT_CWND(tcp, mss, def_max_init_cwnd)			\
{									\
	if ((tcp)->tcp_init_cwnd == 0) {				\
		(tcp)->tcp_cwnd = MIN(def_max_init_cwnd * (mss),	\
		    MIN(4 * (mss), MAX(2 * (mss), 4380 / (mss) * (mss)))); \
	} else {							\
		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss);		\
	}								\
	tcp->tcp_cwnd_cnt = 0;						\
}
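
/*
 * Stand-alone version of the RFC 3390 clause above, for checking values
 * (a sketch, not used by the implementation):
 */
#if 0
static uint32_t
rfc3390_init_cwnd(uint32_t mss, uint32_t def_max_init_cwnd)
{
	uint32_t rfc = MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss));

	return (MIN(def_max_init_cwnd * mss, rfc));
}
/* rfc3390_init_cwnd(1460, 4) == 4380, i.e. 3 segments of 1460 bytes */
/* rfc3390_init_cwnd(536, 4) == 2144, i.e. 4 segments of 536 bytes */
#endif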

/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;

/* Host Specific Parameter structure */
typedef struct tcp_hsp {
	struct tcp_hsp	*tcp_hsp_next;
	in6_addr_t	tcp_hsp_addr_v6;
	in6_addr_t	tcp_hsp_subnet_v6;
	uint_t		tcp_hsp_vers;	/* IPV4_VERSION | IPV6_VERSION */
	int32_t		tcp_hsp_sendspace;
	int32_t		tcp_hsp_recvspace;
	int32_t		tcp_hsp_tstamp;
} tcp_hsp_t;
#define	tcp_hsp_addr	V4_PART_OF_V6(tcp_hsp_addr_v6)
#define	tcp_hsp_subnet	V4_PART_OF_V6(tcp_hsp_subnet_v6)

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_conn_request(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2);
void		tcp_accept_finish(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_proto(void *arg, mblk_t *mp, void *arg2);
void 		tcp_input(void *arg, mblk_t *mp, void *arg2);
void		tcp_rput_data(void *arg, mblk_t *mp, void *arg2);
static void	tcp_close_output(void *arg, mblk_t *mp, void *arg2);
void		tcp_output(void *arg, mblk_t *mp, void *arg2);
static void	tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2);
static void	tcp_timer_handler(void *arg, mblk_t *mp, void *arg2);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2);


/* Prototype for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static void	tcp_accept(tcp_t *tcp, mblk_t *mp);
static void	tcp_accept_swap(tcp_t *listener, tcp_t *acceptor,
		    tcp_t *eager);
static int	tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp);
static in_port_t tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
    int reuseaddr, boolean_t quick_connect, boolean_t bind_to_req_port_only,
    boolean_t user_specified);
static void	tcp_closei_local(tcp_t *tcp);
static void	tcp_close_detached(tcp_t *tcp);
static boolean_t tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph,
		    mblk_t *idmp, mblk_t **defermp);
static void	tcp_connect(tcp_t *tcp, mblk_t *mp);
static void	tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static void	tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo, uint_t srcid,
		    uint32_t scope_id);
static int	tcp_clean_death(tcp_t *tcp, int err, uint8_t tag);
static void	tcp_def_q_set(tcp_t *tcp, mblk_t *mp);
static void	tcp_disconnect(tcp_t *tcp, mblk_t *mp);
static char	*tcp_display(tcp_t *tcp, char *, char);
static boolean_t tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum);
static void	tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only);
static void	tcp_eager_unlink(tcp_t *tcp);
static void	tcp_err_ack(tcp_t *tcp, mblk_t *mp, int tlierr,
		    int unixerr);
static void	tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
		    int tlierr, int unixerr);
static int	tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_tpistate(tcp_t *tcp);
static void	tcp_bind_hash_insert(tf_t *tf, tcp_t *tcp,
    int caller_holds_lock);
static void	tcp_bind_hash_remove(tcp_t *tcp);
static tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *);
void		tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp);
static void	tcp_acceptor_hash_remove(tcp_t *tcp);
static void	tcp_capability_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_info_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *mp);
void		tcp_g_q_setup(tcp_stack_t *);
void		tcp_g_q_create(tcp_stack_t *);
void		tcp_g_q_destroy(tcp_stack_t *);
static int	tcp_header_init_ipv4(tcp_t *tcp);
static int	tcp_header_init_ipv6(tcp_t *tcp);
int		tcp_init(tcp_t *tcp, queue_t *q);
static int	tcp_init_values(tcp_t *tcp);
static mblk_t	*tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic);
static mblk_t	*tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim,
		    t_scalar_t addr_length);
static void	tcp_ip_ire_mark_advice(tcp_t *tcp);
static void	tcp_ip_notify(tcp_t *tcp);
static mblk_t	*tcp_ire_mp(mblk_t *mp);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_keepalive_killer(void *arg);
static int	tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt);
static void	tcp_mss_set(tcp_t *tcp, uint32_t size, boolean_t do_ss);
static int	tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp,
		    int *do_disconnectp, int *t_errorp, int *sys_errorp);
static boolean_t tcp_allow_connopt_set(int level, int name);
int		tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_set(queue_t *q, uint_t optset_context, int level,
		    int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
		    uchar_t *outvalp, void *thisdg_attrs, cred_t *cr,
		    mblk_t *mblk);
static void	tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha);
static int	tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly,
		    uchar_t *ptr, uint_t len);
static int	tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static boolean_t tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt,
		    tcp_stack_t *);
static int	tcp_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *);
static int	tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_cnt);
static mblk_t	*tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start);
static void	tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval,
		    tcp_t *thisstream, cred_t *cr);

static uint_t	tcp_rcv_drain(queue_t *q, tcp_t *tcp);
static void	tcp_sack_rxmit(tcp_t *tcp, uint_t *flags);
static boolean_t tcp_send_rst_chk(tcp_stack_t *);

static void	tcp_ss_rexmit(tcp_t *tcp);
static mblk_t	*tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp);
static void	tcp_process_options(tcp_t *, tcph_t *);
static void	tcp_rput_common(tcp_t *tcp, mblk_t *mp);
static void	tcp_rsrv(queue_t *q);
static int	tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd);
static int	tcp_snmp_state(tcp_t *tcp);
static int	tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_host_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static void	tcp_timer(void *arg);
static void	tcp_timer_callback(void *);
static in_port_t tcp_update_next_port(in_port_t port, const tcp_t *tcp,
    boolean_t random);
static in_port_t tcp_get_next_priv_port(const tcp_t *);
static void	tcp_wput_sock(queue_t *q, mblk_t *mp);
void		tcp_wput_accept(queue_t *q, mblk_t *mp);
static void	tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent);
static void	tcp_wput_flush(tcp_t *tcp, mblk_t *mp);
static void	tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int	tcp_send(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static int	tcp_multisend(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static void	tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now,
		    int num_sack_blk);
static void	tcp_wsrv(queue_t *q);
static int	tcp_xmit_end(tcp_t *tcp);
static void	tcp_ack_timer(void *arg);
static mblk_t	*tcp_ack_mp(tcp_t *tcp);
static void	tcp_xmit_early_reset(char *str, mblk_t *mp,
		    uint32_t seq, uint32_t ack, int ctl, uint_t ip_hdr_len,
		    zoneid_t zoneid, tcp_stack_t *, conn_t *connp);
static void	tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq,
		    uint32_t ack, int ctl);
static tcp_hsp_t *tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *);
static tcp_hsp_t *tcp_hsp_lookup_ipv6(in6_addr_t *addr, tcp_stack_t *);
static int	setmaxps(queue_t *q, int maxpsz);
static void	tcp_set_rto(tcp_t *, time_t);
static boolean_t tcp_check_policy(tcp_t *, mblk_t *, ipha_t *, ip6_t *,
		    boolean_t, boolean_t);
static void	tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp,
		    boolean_t ipsec_mctl);
static mblk_t	*tcp_setsockopt_mp(int level, int cmd,
		    char *opt, int optlen);
static int	tcp_build_hdrs(queue_t *, tcp_t *);
static void	tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp,
		    uint32_t seg_seq, uint32_t seg_ack, int seg_len,
		    tcph_t *tcph);
boolean_t	tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp);
boolean_t	tcp_reserved_port_add(int, in_port_t *, in_port_t *);
boolean_t	tcp_reserved_port_del(in_port_t, in_port_t);
boolean_t	tcp_reserved_port_check(in_port_t, tcp_stack_t *);
static tcp_t	*tcp_alloc_temp_tcp(in_port_t, tcp_stack_t *);
static int	tcp_reserved_port_list(queue_t *, mblk_t *, caddr_t, cred_t *);
static mblk_t	*tcp_mdt_info_mp(mblk_t *);
static void	tcp_mdt_update(tcp_t *, ill_mdt_capab_t *, boolean_t);
static int	tcp_mdt_add_attrs(multidata_t *, const mblk_t *,
		    const boolean_t, const uint32_t, const uint32_t,
		    const uint32_t, const uint32_t, tcp_stack_t *);
static void	tcp_multisend_data(tcp_t *, ire_t *, const ill_t *, mblk_t *,
		    const uint_t, const uint_t, boolean_t *);
static mblk_t	*tcp_lso_info_mp(mblk_t *);
static void	tcp_lso_update(tcp_t *, ill_lso_capab_t *);
static void	tcp_send_data(tcp_t *, queue_t *, mblk_t *);
extern mblk_t	*tcp_timermp_alloc(int);
extern void	tcp_timermp_free(tcp_t *);
static void	tcp_timer_free(tcp_t *tcp, mblk_t *mp);
static void	tcp_stop_lingering(tcp_t *tcp);
static void	tcp_close_linger_timeout(void *arg);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_shutdown(netstackid_t stackid, void *arg);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);
static void	*tcp_g_kstat_init(tcp_g_stat_t *);
static void	tcp_g_kstat_fini(kstat_t *);
static void	*tcp_kstat_init(netstackid_t, tcp_stack_t *);
static void	tcp_kstat_fini(netstackid_t, kstat_t *);
static void	*tcp_kstat2_init(netstackid_t, tcp_stat_t *);
static void	tcp_kstat2_fini(netstackid_t, kstat_t *);
static int	tcp_kstat_update(kstat_t *kp, int rw);
void		tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp);
static int	tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
		    tcph_t *tcph, uint_t ipvers, mblk_t *idmp);
static int	tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha,
		    tcph_t *tcph, mblk_t *idmp);
static squeue_func_t tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_close(queue_t *, int);
static int	tcpclose_accept(queue_t *);

static void	tcp_squeue_add(squeue_t *);
static boolean_t tcp_zcopy_check(tcp_t *);
static void	tcp_zcopy_notify(tcp_t *);
static mblk_t	*tcp_zcopy_disable(tcp_t *, mblk_t *);
static mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, int);
static void	tcp_ire_ill_check(tcp_t *, ire_t *, ill_t *, boolean_t);

extern void	tcp_kssl_input(tcp_t *, mblk_t *);

void tcp_eager_kill(void *arg, mblk_t *mp, void *arg2);
void tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2);

/*
 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
 *
 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
 * (defined in tcp.h) needs to be filled in and passed into the kernel
 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
 * structure contains the four-tuple of a TCP connection and a range of TCP
 * states (specified by ac_start and ac_end). The use of wildcard addresses
 * and ports is allowed. Connections with a matching four-tuple and a state
 * within the specified range will be aborted. The valid states for the
 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to
 * TCPS_TIME_WAIT, inclusive.
 *
 * An application which has its connection aborted by this ioctl will
 * receive an error that is dependent on the connection state at the time
 * of the abort. If the connection state is < TCPS_TIME_WAIT, an application
 * should behave as though a RST packet has been received. If the
 * connection state is equal to TCPS_TIME_WAIT, the 2MSL timeout will
 * immediately be canceled by the kernel and all resources associated with
 * the connection will be freed.
 */
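
/*
 * Sketch of a user-level caller (the ac_local/ac_remote member names for
 * the four-tuple are assumptions here; see the structure definition in
 * tcp.h for the authoritative layout):
 */
#if 0
	struct strioctl si;
	tcp_ioc_abort_conn_t conn;

	bzero(&conn, sizeof (conn));
	/* fill in conn.ac_local / conn.ac_remote with the four-tuple */
	conn.ac_start = TCPS_SYN_SENT;	/* abort any live state ... */
	conn.ac_end = TCPS_TIME_WAIT;	/* ... up to and including 2MSL */

	si.ic_cmd = TCP_IOC_ABORT_CONN;
	si.ic_timout = -1;
	si.ic_len = sizeof (conn);
	si.ic_dp = (char *)&conn;
	if (ioctl(tcp_fd, I_STR, &si) == -1)
		perror("TCP_IOC_ABORT_CONN");
#endif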

static mblk_t	*tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
static void	tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
static void	tcp_ioctl_abort_handler(tcp_t *, mblk_t *);
static int	tcp_ioctl_abort(tcp_ioc_abort_conn_t *, tcp_stack_t *tcps);
static void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
static int	tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
    boolean_t, tcp_stack_t *);

static struct module_info tcp_rinfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcpclose_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_wput_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP loopback (read side only)
 * The open routine is only used for reopens, thus no need to
 * have a separate one for tcp_openv6.
 */
struct qinit tcp_loopback_rinit = {
	(pfi_t)0, (pfi_t)tcp_rsrv, tcp_openv4, tcp_close, (pfi_t)0,
	&tcp_rinfo, NULL, tcp_fuse_rrw, tcp_fuse_rinfop, STRUIOT_STANDARD
};

/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};

/*
 * Have to ensure that tcp_g_q_close is not done by an
 * interrupt thread.
 */
static taskq_t *tcp_taskq;

/*
 * TCP has a private interface for other kernel modules to reserve a
 * port range for them to use.  Once reserved, TCP will not use any ports
 * in the range.  This interface relies on the TCP_EXCLBIND feature.  If
 * the semantics of TCP_EXCLBIND is changed, implementation of this
 * interface has to be verified.
 *
 * There can be TCP_RESERVED_PORTS_ARRAY_MAX_SIZE port ranges.  Each port
 * range can cover at most TCP_RESERVED_PORTS_RANGE_MAX ports.  A port
 * range is [port a, port b] inclusive.  And each port range is between
 * TCP_SMALLEST_RESERVED_PORT and TCP_LARGEST_RESERVED_PORT inclusive.
 *
 * Note that the default anonymous port range starts from 32768.  There is
 * no port "collision" between that and the reserved port range.  If there
 * is port collision (because the default smallest anonymous port is lowered
 * or some apps specifically bind to ports in the reserved port range), the
 * system may not be able to reserve a port range even if there are enough
 * unbound ports, as a reserved port range contains consecutive ports.
 */
#define	TCP_RESERVED_PORTS_ARRAY_MAX_SIZE	5
#define	TCP_RESERVED_PORTS_RANGE_MAX		1000
#define	TCP_SMALLEST_RESERVED_PORT		10240
#define	TCP_LARGEST_RESERVED_PORT		20480

/* Structure to represent those reserved port ranges. */
typedef struct tcp_rport_s {
	in_port_t	lo_port;
	in_port_t	hi_port;
	tcp_t		**temp_tcp_array;
} tcp_rport_t;

/* Settable only in /etc/system.  Move to ndd? */
boolean_t tcp_icmp_source_quench = B_FALSE;

/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))

/* Template for response to info request. */
static struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

static struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

#define	MS	1L
#define	SECONDS	(1000 * MS)
#define	MINUTES	(60 * SECONDS)
#define	HOURS	(60 * MINUTES)
#define	DAYS	(24 * HOURS)

#define	PARAM_MAX (~(uint32_t)0)

/* Max size IP datagram is 64k - 1 */
#define	TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (ipha_t) + sizeof (tcph_t)))
#define	TCP_MSS_MAX_IPV6 (IP_MAXPACKET - (sizeof (ip6_t) + sizeof (tcph_t)))
/* Max of the above */
#define	TCP_MSS_MAX	TCP_MSS_MAX_IPV4

/* Largest TCP port number */
#define	TCP_MAX_PORT	(64 * 1024 - 1)

/*
 * tcp_wroff_xtra is the extra space in front of TCP/IP header for link
 * layer header.  It has to be a multiple of 4.
 */
static tcpparam_t lcl_tcp_wroff_xtra_param = { 0, 256, 32, "tcp_wroff_xtra" };
#define	tcps_wroff_xtra	tcps_wroff_xtra_param->tcp_param_val

/*
 * All of these are alterable, within the min/max values given, at run time.
 * Note that the default value of "tcp_time_wait_interval" is four minutes,
 * per the TCP spec.
 */
/* BEGIN CSTYLED */
static tcpparam_t	lcl_tcp_param_arr[] = {
 /*min		max		value		name */
 { 1*SECONDS,	10*MINUTES,	1*MINUTES,	"tcp_time_wait_interval"},
 { 1,		PARAM_MAX,	128,		"tcp_conn_req_max_q" },
 { 0,		PARAM_MAX,	1024,		"tcp_conn_req_max_q0" },
 { 1,		1024,		1,		"tcp_conn_req_min" },
 { 0*MS,	20*SECONDS,	0*MS,		"tcp_conn_grace_period" },
 { 128,		(1<<30),	1024*1024,	"tcp_cwnd_max" },
 { 0,		10,		0,		"tcp_debug" },
 { 1024,	(32*1024),	1024,		"tcp_smallest_nonpriv_port"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_cinterval"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_linterval"},
 { 500*MS,	PARAM_MAX,	8*MINUTES,	"tcp_ip_abort_interval"},
 { 1*SECONDS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_cinterval"},
 { 500*MS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_interval"},
 { 1,		255,		64,		"tcp_ipv4_ttl"},
 { 10*SECONDS,	10*DAYS,	2*HOURS,	"tcp_keepalive_interval"},
 { 0,		100,		10,		"tcp_maxpsz_multiplier" },
 { 1,		TCP_MSS_MAX_IPV4, 536,		"tcp_mss_def_ipv4"},
 { 1,		TCP_MSS_MAX_IPV4, TCP_MSS_MAX_IPV4, "tcp_mss_max_ipv4"},
 { 1,		TCP_MSS_MAX,	108,		"tcp_mss_min"},
 { 1,		(64*1024)-1,	(4*1024)-1,	"tcp_naglim_def"},
 { 1*MS,	20*SECONDS,	3*SECONDS,	"tcp_rexmit_interval_initial"},
 { 1*MS,	2*HOURS,	60*SECONDS,	"tcp_rexmit_interval_max"},
 { 1*MS,	2*HOURS,	400*MS,		"tcp_rexmit_interval_min"},
 { 1*MS,	1*MINUTES,	100*MS,		"tcp_deferred_ack_interval" },
 { 0,		16,		0,		"tcp_snd_lowat_fraction" },
 { 0,		128000,		0,		"tcp_sth_rcv_hiwat" },
 { 0,		128000,		0,		"tcp_sth_rcv_lowat" },
 { 1,		10000,		3,		"tcp_dupack_fast_retransmit" },
 { 0,		1,		0,		"tcp_ignore_path_mtu" },
 { 1024,	TCP_MAX_PORT,	32*1024,	"tcp_smallest_anon_port"},
 { 1024,	TCP_MAX_PORT,	TCP_MAX_PORT,	"tcp_largest_anon_port"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_HIWATER,"tcp_xmit_hiwat"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_LOWATER,"tcp_xmit_lowat"},
 { TCP_RECV_LOWATER, (1<<30),	TCP_RECV_HIWATER,"tcp_recv_hiwat"},
 { 1,		65536,		4,		"tcp_recv_hiwat_minmss"},
 { 1*SECONDS,	PARAM_MAX,	675*SECONDS,	"tcp_fin_wait_2_flush_interval"},
 { 0,		TCP_MSS_MAX,	64,		"tcp_co_min"},
 { 8192,	(1<<30),	1024*1024,	"tcp_max_buf"},
/*
 * Question:  What default value should I set for tcp_strong_iss?
 */
 { 0,		2,		1,		"tcp_strong_iss"},
 { 0,		65536,		20,		"tcp_rtt_updates"},
 { 0,		1,		1,		"tcp_wscale_always"},
 { 0,		1,		0,		"tcp_tstamp_always"},
 { 0,		1,		1,		"tcp_tstamp_if_wscale"},
 { 0*MS,	2*HOURS,	0*MS,		"tcp_rexmit_interval_extra"},
 { 0,		16,		2,		"tcp_deferred_acks_max"},
 { 1,		16384,		4,		"tcp_slow_start_after_idle"},
 { 1,		4,		4,		"tcp_slow_start_initial"},
 { 10*MS,	50*MS,		20*MS,		"tcp_co_timer_interval"},
 { 0,		2,		2,		"tcp_sack_permitted"},
 { 0,		1,		0,		"tcp_trace"},
 { 0,		1,		1,		"tcp_compression_enabled"},
 { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS,	"tcp_ipv6_hoplimit"},
 { 1,		TCP_MSS_MAX_IPV6, 1220,		"tcp_mss_def_ipv6"},
 { 1,		TCP_MSS_MAX_IPV6, TCP_MSS_MAX_IPV6, "tcp_mss_max_ipv6"},
 { 0,		1,		0,		"tcp_rev_src_routes"},
 { 10*MS,	500*MS,		50*MS,		"tcp_local_dack_interval"},
 { 100*MS,	60*SECONDS,	1*SECONDS,	"tcp_ndd_get_info_interval"},
 { 0,		16,		8,		"tcp_local_dacks_max"},
 { 0,		2,		1,		"tcp_ecn_permitted"},
 { 0,		1,		1,		"tcp_rst_sent_rate_enabled"},
 { 0,		PARAM_MAX,	40,		"tcp_rst_sent_rate"},
 { 0,		100*MS,		50*MS,		"tcp_push_timer_interval"},
 { 0,		1,		0,		"tcp_use_smss_as_mss_opt"},
 { 0,		PARAM_MAX,	8*MINUTES,	"tcp_keepalive_abort_interval"},
};
/* END CSTYLED */

/*
 * tcp_mdt_hdr_{head,tail}_min are the leading and trailing spaces of
 * each header fragment in the header buffer.  Each parameter value has
 * to be a multiple of 4 (32-bit aligned).
 */
static tcpparam_t lcl_tcp_mdt_head_param =
	{ 32, 256, 32, "tcp_mdt_hdr_head_min" };
static tcpparam_t lcl_tcp_mdt_tail_param =
	{ 0, 256, 32, "tcp_mdt_hdr_tail_min" };
#define	tcps_mdt_hdr_head_min	tcps_mdt_head_param->tcp_param_val
#define	tcps_mdt_hdr_tail_min	tcps_mdt_tail_param->tcp_param_val

/*
 * tcp_mdt_max_pbufs is the upper limit value that tcp uses to figure out
 * the maximum number of payload buffers associated per Multidata.
 */
static tcpparam_t lcl_tcp_mdt_max_pbufs_param =
	{ 1, MULTIDATA_MAX_PBUFS, MULTIDATA_MAX_PBUFS, "tcp_mdt_max_pbufs" };
#define	tcps_mdt_max_pbufs	tcps_mdt_max_pbufs_param->tcp_param_val

/* Round up the value to the nearest mss. */
#define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))

/*
 * Set ECN capable transport (ECT) code point in IP header.
 *
 * Note that there are 2 ECT code points '01' and '10', which are called
 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
 * point ECT(0) for TCP as described in RFC 2481.
 */
#define	SET_ECT(tcp, iph) \
	if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
		/* We need to clear the code point first. */ \
		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
	} else { \
		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
	}

/*
 * The format argument to pass to tcp_display().
 * DISP_PORT_ONLY means that the returned string has only port info.
 * DISP_ADDR_AND_PORT means that the returned string also contains the
 * remote and local IP address.
 */
#define	DISP_PORT_ONLY		1
#define	DISP_ADDR_AND_PORT	2

#define	NDD_TOO_QUICK_MSG \
	"ndd get info rate too high for non-privileged users, try again " \
	"later.\n"
#define	NDD_OUT_OF_BUF_MSG	"<< Out of buffer >>\n"

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)


/* Enable or disable b_cont M_MULTIDATA chaining for MDT. */
boolean_t tcp_mdt_chain = B_TRUE;

/*
 * MDT threshold in the form of effective send MSS multiplier; we take
 * the MDT path if the amount of unsent data exceeds the threshold value
 * (default threshold is 1*SMSS).
 */
uint_t tcp_mdt_smss_threshold = 1;

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD.  Otherwise, the per-connection behavior is
 * determined dynamically during tcp_adapt_ire(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;

/* Settable in /etc/system */
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
uint32_t tcp_random_anon_port = 1;

/*
 * To reach an eager in Q0 which can be dropped due to an incoming
 * new SYN request when Q0 is full, a new doubly linked list is
 * introduced. This list makes it possible to select an eager from Q0
 * in O(1) time. This is needed to avoid spending too much time walking
 * through the long list of eagers in Q0 when tcp_drop_q0() is called.
 * Each member of this new list has to be a member of Q0.
 * This list is headed by the listener's tcp_t. When the list is empty,
 * both the pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0,
 * of the listener's tcp_t point to the listener's tcp_t itself.
 *
 * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
 * in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
 * These macros do not affect the eager's membership in Q0.
 * (A stand-alone model of the two macros follows them below.)
 */


#define	MAKE_DROPPABLE(listener, eager)					\
	if ((eager)->tcp_eager_next_drop_q0 == NULL) {			\
		(listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
		    = (eager);						\
		(eager)->tcp_eager_prev_drop_q0 = (listener);		\
		(eager)->tcp_eager_next_drop_q0 =			\
		    (listener)->tcp_eager_next_drop_q0;			\
		(listener)->tcp_eager_next_drop_q0 = (eager);		\
	}

#define	MAKE_UNDROPPABLE(eager)						\
	if ((eager)->tcp_eager_next_drop_q0 != NULL) {			\
		(eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0	\
		    = (eager)->tcp_eager_prev_drop_q0;			\
		(eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0	\
		    = (eager)->tcp_eager_next_drop_q0;			\
		(eager)->tcp_eager_prev_drop_q0 = NULL;			\
		(eager)->tcp_eager_next_drop_q0 = NULL;			\
	}
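
/*
 * Stand-alone model of the two macros above (a sketch with hypothetical
 * types): the list is circular through the listener, so both insertion
 * and unlinking are O(1).
 */
#if 0
typedef struct node {
	struct node *next_drop;		/* models tcp_eager_next_drop_q0 */
	struct node *prev_drop;		/* models tcp_eager_prev_drop_q0 */
} node_t;

static void
make_droppable(node_t *listener, node_t *eager)
{
	if (eager->next_drop == NULL) {		/* not in the list yet */
		listener->next_drop->prev_drop = eager;
		eager->prev_drop = listener;
		eager->next_drop = listener->next_drop;
		listener->next_drop = eager;	/* new head of the list */
	}
}

static void
make_undroppable(node_t *eager)
{
	if (eager->next_drop != NULL) {		/* currently in the list */
		eager->next_drop->prev_drop = eager->prev_drop;
		eager->prev_drop->next_drop = eager->next_drop;
		eager->prev_drop = NULL;
		eager->next_drop = NULL;
	}
}
#endif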
1323  */
1324 uint32_t tcp_drop_ack_unsent_cnt = 10;
1325 
1326 /*
1327  * Hook functions to enable cluster networking
1328  * On non-clustered systems these vectors must always be NULL.
1329  */
1330 
1331 void (*cl_inet_listen)(uint8_t protocol, sa_family_t addr_family,
1332 		    uint8_t *laddrp, in_port_t lport) = NULL;
1333 void (*cl_inet_unlisten)(uint8_t protocol, sa_family_t addr_family,
1334 		    uint8_t *laddrp, in_port_t lport) = NULL;
1335 void (*cl_inet_connect)(uint8_t protocol, sa_family_t addr_family,
1336 		    uint8_t *laddrp, in_port_t lport,
1337 		    uint8_t *faddrp, in_port_t fport) = NULL;
1338 void (*cl_inet_disconnect)(uint8_t protocol, sa_family_t addr_family,
1339 		    uint8_t *laddrp, in_port_t lport,
1340 		    uint8_t *faddrp, in_port_t fport) = NULL;
1341 
1342 /*
1343  * The following are defined in ip.c
1344  */
1345 extern int (*cl_inet_isclusterwide)(uint8_t protocol, sa_family_t addr_family,
1346 		    uint8_t *laddrp);
1347 extern uint32_t (*cl_inet_ipident)(uint8_t protocol, sa_family_t addr_family,
1348 		    uint8_t *laddrp, uint8_t *faddrp);
1349 
1350 #define	CL_INET_CONNECT(tcp)	{				\
1351 	if (cl_inet_connect != NULL) {				\
1352 		/*						\
1353 		 * Running in cluster mode - register active connection	\
1354 		 * information					\
1355 		 */						\
1356 		if ((tcp)->tcp_ipversion == IPV4_VERSION) {	\
1357 			if ((tcp)->tcp_ipha->ipha_src != 0) {	\
1358 				(*cl_inet_connect)(IPPROTO_TCP, AF_INET,\
1359 				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_src)),\
1360 				    (in_port_t)(tcp)->tcp_lport,	\
1361 				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_dst)),\
1362 				    (in_port_t)(tcp)->tcp_fport);	\
1363 			}					\
1364 		} else {					\
1365 			if (!IN6_IS_ADDR_UNSPECIFIED(		\
1366 			    &(tcp)->tcp_ip6h->ip6_src)) {	\
1367 				(*cl_inet_connect)(IPPROTO_TCP, AF_INET6,\
1368 				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_src)),\
1369 				    (in_port_t)(tcp)->tcp_lport,	\
1370 				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_dst)),\
1371 				    (in_port_t)(tcp)->tcp_fport);	\
1372 			}					\
1373 		}						\
1374 	}							\
1375 }
1376 
1377 #define	CL_INET_DISCONNECT(tcp)	{				\
1378 	if (cl_inet_disconnect != NULL) {			\
1379 		/*						\
1380 		 * Running in cluster mode - deregister active	\
1381 		 * connection information			\
1382 		 */						\
1383 		if ((tcp)->tcp_ipversion == IPV4_VERSION) {	\
1384 			if ((tcp)->tcp_ip_src != 0) {		\
1385 				(*cl_inet_disconnect)(IPPROTO_TCP,	\
1386 				    AF_INET,			\
1387 				    (uint8_t *)(&((tcp)->tcp_ip_src)),\
1388 				    (in_port_t)(tcp)->tcp_lport,	\
1389 				    (uint8_t *)			\
1390 				    (&((tcp)->tcp_ipha->ipha_dst)),\
1391 				    (in_port_t)(tcp)->tcp_fport);	\
1392 			}					\
1393 		} else {					\
1394 			if (!IN6_IS_ADDR_UNSPECIFIED(		\
1395 			    &(tcp)->tcp_ip_src_v6)) {		\
1396 				(*cl_inet_disconnect)(IPPROTO_TCP, AF_INET6,\
1397 				    (uint8_t *)(&((tcp)->tcp_ip_src_v6)),\
1398 				    (in_port_t)(tcp)->tcp_lport,	\
1399 				    (uint8_t *)			\
1400 				    (&((tcp)->tcp_ip6h->ip6_dst)),\
1401 				    (in_port_t)(tcp)->tcp_fport);	\
1402 			}					\
1403 		}						\
1404 	}							\
1405 }
1406 
1407 /*
1408  * Cluster networking hook for traversing current connection list.
1409  * This routine is used to extract the current list of live connections
1410  * which must continue to be dispatched to this node.
1411  */
1412 int cl_tcp_walk_list(int (*callback)(cl_tcp_info_t *, void *), void *arg);
1413 
1414 static int cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *),
1415     void *arg, tcp_stack_t *tcps);
1416 
1417 /*
1418  * Figure out the value of the window scale option.  Note that the rwnd is
1419  * ASSUMED to be rounded up to the nearest MSS before the calculation.
1420  * We cannot find the scale value and then do a round up of tcp_rwnd
1421  * because the scale value may not be correct after that.
1422  *
1423  * Set the compiler flag to make this function inline.
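 *
 * Worked example: with tcp_rwnd already rounded up to 1048576 (1 Mbyte)
 * and TCP_MAXWIN at 65535, the loop below shifts rwnd right five times
 * (1048576 >> 5 == 32768 <= 65535), so tcp_rcv_ws ends up as 5 and the
 * peer multiplies our advertised window field by 2^5.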
1424 */ 1425 static void 1426 tcp_set_ws_value(tcp_t *tcp) 1427 { 1428 int i; 1429 uint32_t rwnd = tcp->tcp_rwnd; 1430 1431 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT; 1432 i++, rwnd >>= 1) 1433 ; 1434 tcp->tcp_rcv_ws = i; 1435 } 1436 1437 /* 1438 * Remove a connection from the list of detached TIME_WAIT connections. 1439 * It returns B_FALSE if it can't remove the connection from the list 1440 * as the connection has already been removed from the list due to an 1441 * earlier call to tcp_time_wait_remove(); otherwise it returns B_TRUE. 1442 */ 1443 static boolean_t 1444 tcp_time_wait_remove(tcp_t *tcp, tcp_squeue_priv_t *tcp_time_wait) 1445 { 1446 boolean_t locked = B_FALSE; 1447 1448 if (tcp_time_wait == NULL) { 1449 tcp_time_wait = *((tcp_squeue_priv_t **) 1450 squeue_getprivate(tcp->tcp_connp->conn_sqp, SQPRIVATE_TCP)); 1451 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1452 locked = B_TRUE; 1453 } else { 1454 ASSERT(MUTEX_HELD(&tcp_time_wait->tcp_time_wait_lock)); 1455 } 1456 1457 if (tcp->tcp_time_wait_expire == 0) { 1458 ASSERT(tcp->tcp_time_wait_next == NULL); 1459 ASSERT(tcp->tcp_time_wait_prev == NULL); 1460 if (locked) 1461 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1462 return (B_FALSE); 1463 } 1464 ASSERT(TCP_IS_DETACHED(tcp)); 1465 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1466 1467 if (tcp == tcp_time_wait->tcp_time_wait_head) { 1468 ASSERT(tcp->tcp_time_wait_prev == NULL); 1469 tcp_time_wait->tcp_time_wait_head = tcp->tcp_time_wait_next; 1470 if (tcp_time_wait->tcp_time_wait_head != NULL) { 1471 tcp_time_wait->tcp_time_wait_head->tcp_time_wait_prev = 1472 NULL; 1473 } else { 1474 tcp_time_wait->tcp_time_wait_tail = NULL; 1475 } 1476 } else if (tcp == tcp_time_wait->tcp_time_wait_tail) { 1477 ASSERT(tcp != tcp_time_wait->tcp_time_wait_head); 1478 ASSERT(tcp->tcp_time_wait_next == NULL); 1479 tcp_time_wait->tcp_time_wait_tail = tcp->tcp_time_wait_prev; 1480 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1481 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = NULL; 1482 } else { 1483 ASSERT(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp); 1484 ASSERT(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp); 1485 tcp->tcp_time_wait_prev->tcp_time_wait_next = 1486 tcp->tcp_time_wait_next; 1487 tcp->tcp_time_wait_next->tcp_time_wait_prev = 1488 tcp->tcp_time_wait_prev; 1489 } 1490 tcp->tcp_time_wait_next = NULL; 1491 tcp->tcp_time_wait_prev = NULL; 1492 tcp->tcp_time_wait_expire = 0; 1493 1494 if (locked) 1495 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1496 return (B_TRUE); 1497 } 1498 1499 /* 1500 * Add a connection to the list of detached TIME_WAIT connections 1501 * and set its time to expire. 
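 *
 * Sketch of the expiry arithmetic done below, assuming the default
 * tcps_time_wait_interval of 60000 ms and hz set to 100:
 *
 *	tcp->tcp_time_wait_expire = ddi_get_lbolt() +
 *	    drv_usectohz(60000 * 1000);		-- now + 6000 ticks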
1502 */ 1503 static void 1504 tcp_time_wait_append(tcp_t *tcp) 1505 { 1506 tcp_stack_t *tcps = tcp->tcp_tcps; 1507 tcp_squeue_priv_t *tcp_time_wait = 1508 *((tcp_squeue_priv_t **)squeue_getprivate(tcp->tcp_connp->conn_sqp, 1509 SQPRIVATE_TCP)); 1510 1511 tcp_timers_stop(tcp); 1512 1513 /* Freed above */ 1514 ASSERT(tcp->tcp_timer_tid == 0); 1515 ASSERT(tcp->tcp_ack_tid == 0); 1516 1517 /* must have happened at the time of detaching the tcp */ 1518 ASSERT(tcp->tcp_ptpahn == NULL); 1519 ASSERT(tcp->tcp_flow_stopped == 0); 1520 ASSERT(tcp->tcp_time_wait_next == NULL); 1521 ASSERT(tcp->tcp_time_wait_prev == NULL); 1522 ASSERT(tcp->tcp_time_wait_expire == NULL); 1523 ASSERT(tcp->tcp_listener == NULL); 1524 1525 tcp->tcp_time_wait_expire = ddi_get_lbolt(); 1526 /* 1527 * The value computed below in tcp->tcp_time_wait_expire may 1528 * appear negative or wrap around. That is ok since our 1529 * interest is only in the difference between the current lbolt 1530 * value and tcp->tcp_time_wait_expire. But the value should not 1531 * be zero, since it means the tcp is not in the TIME_WAIT list. 1532 * The corresponding comparison in tcp_time_wait_collector() uses 1533 * modular arithmetic. 1534 */ 1535 tcp->tcp_time_wait_expire += 1536 drv_usectohz(tcps->tcps_time_wait_interval * 1000); 1537 if (tcp->tcp_time_wait_expire == 0) 1538 tcp->tcp_time_wait_expire = 1; 1539 1540 ASSERT(TCP_IS_DETACHED(tcp)); 1541 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1542 ASSERT(tcp->tcp_time_wait_next == NULL); 1543 ASSERT(tcp->tcp_time_wait_prev == NULL); 1544 TCP_DBGSTAT(tcps, tcp_time_wait); 1545 1546 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1547 if (tcp_time_wait->tcp_time_wait_head == NULL) { 1548 ASSERT(tcp_time_wait->tcp_time_wait_tail == NULL); 1549 tcp_time_wait->tcp_time_wait_head = tcp; 1550 } else { 1551 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1552 ASSERT(tcp_time_wait->tcp_time_wait_tail->tcp_state == 1553 TCPS_TIME_WAIT); 1554 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = tcp; 1555 tcp->tcp_time_wait_prev = tcp_time_wait->tcp_time_wait_tail; 1556 } 1557 tcp_time_wait->tcp_time_wait_tail = tcp; 1558 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1559 } 1560 1561 /* ARGSUSED */ 1562 void 1563 tcp_timewait_output(void *arg, mblk_t *mp, void *arg2) 1564 { 1565 conn_t *connp = (conn_t *)arg; 1566 tcp_t *tcp = connp->conn_tcp; 1567 tcp_stack_t *tcps = tcp->tcp_tcps; 1568 1569 ASSERT(tcp != NULL); 1570 if (tcp->tcp_state == TCPS_CLOSED) { 1571 return; 1572 } 1573 1574 ASSERT((tcp->tcp_family == AF_INET && 1575 tcp->tcp_ipversion == IPV4_VERSION) || 1576 (tcp->tcp_family == AF_INET6 && 1577 (tcp->tcp_ipversion == IPV4_VERSION || 1578 tcp->tcp_ipversion == IPV6_VERSION))); 1579 ASSERT(!tcp->tcp_listener); 1580 1581 TCP_STAT(tcps, tcp_time_wait_reap); 1582 ASSERT(TCP_IS_DETACHED(tcp)); 1583 1584 /* 1585 * Because they have no upstream client to rebind or tcp_close() 1586 * them later, we axe the connection here and now. 1587 */ 1588 tcp_close_detached(tcp); 1589 } 1590 1591 /* 1592 * Remove cached/latched IPsec references. 
1593  */
1594 void
1595 tcp_ipsec_cleanup(tcp_t *tcp)
1596 {
1597 	conn_t *connp = tcp->tcp_connp;
1598 
1599 	ASSERT(connp->conn_flags & IPCL_TCPCONN);
1600 
1601 	if (connp->conn_latch != NULL) {
1602 		IPLATCH_REFRELE(connp->conn_latch,
1603 		    connp->conn_netstack);
1604 		connp->conn_latch = NULL;
1605 	}
1606 	if (connp->conn_policy != NULL) {
1607 		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
1608 		connp->conn_policy = NULL;
1609 	}
1610 }
1611 
1612 /*
1613  * Cleanup before placing on free list.
1614  * Disassociate from the netstack/tcp_stack_t since the freelist
1615  * is per squeue and not per netstack.
1616  */
1617 void
1618 tcp_cleanup(tcp_t *tcp)
1619 {
1620 	mblk_t		*mp;
1621 	char		*tcp_iphc;
1622 	int		tcp_iphc_len;
1623 	int		tcp_hdr_grown;
1624 	tcp_sack_info_t	*tcp_sack_info;
1625 	conn_t		*connp = tcp->tcp_connp;
1626 	tcp_stack_t	*tcps = tcp->tcp_tcps;
1627 	netstack_t	*ns = tcps->tcps_netstack;
1628 
1629 	tcp_bind_hash_remove(tcp);
1630 
1631 	/* Cleanup that which needs the netstack first */
1632 	tcp_ipsec_cleanup(tcp);
1633 
1634 	tcp_free(tcp);
1635 
1636 	/* Release any SSL context */
1637 	if (tcp->tcp_kssl_ent != NULL) {
1638 		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
1639 		tcp->tcp_kssl_ent = NULL;
1640 	}
1641 
1642 	if (tcp->tcp_kssl_ctx != NULL) {
1643 		kssl_release_ctx(tcp->tcp_kssl_ctx);
1644 		tcp->tcp_kssl_ctx = NULL;
1645 	}
1646 	tcp->tcp_kssl_pending = B_FALSE;
1647 
1648 	conn_delete_ire(connp, NULL);
1649 
1650 	/*
1651 	 * Since we will bzero the entire structure, we need to
1652 	 * remove it and reinsert it in the global hash list. We
1653 	 * know the walkers can't get to this conn because we
1654 	 * had set the CONDEMNED flag earlier and checked the reference
1655 	 * under conn_lock, so a walker won't pick it, and when we
1656 	 * do the ipcl_globalhash_remove() below, no walker
1657 	 * can get to it.
1658 	 */
1659 	ipcl_globalhash_remove(connp);
1660 
1661 	/*
1662 	 * Now it is safe to decrement the reference counts.
1663 	 * This might be the last reference on the netstack and TCPS
1664 	 * in which case it will cause the tcp_g_q_close and
1665 	 * the freeing of the IP Instance.
1666 	 */
1667 	connp->conn_netstack = NULL;
1668 	netstack_rele(ns);
1669 	ASSERT(tcps != NULL);
1670 	tcp->tcp_tcps = NULL;
1671 	TCPS_REFRELE(tcps);
1672 
1673 	/* Save some state */
1674 	mp = tcp->tcp_timercache;
1675 
1676 	tcp_sack_info = tcp->tcp_sack_info;
1677 	tcp_iphc = tcp->tcp_iphc;
1678 	tcp_iphc_len = tcp->tcp_iphc_len;
1679 	tcp_hdr_grown = tcp->tcp_hdr_grown;
1680 
1681 	if (connp->conn_cred != NULL) {
1682 		crfree(connp->conn_cred);
1683 		connp->conn_cred = NULL;
1684 	}
1685 	if (connp->conn_peercred != NULL) {
1686 		crfree(connp->conn_peercred);
1687 		connp->conn_peercred = NULL;
1688 	}
1689 	ipcl_conn_cleanup(connp);
1690 	connp->conn_flags = IPCL_TCPCONN;
1691 	bzero(tcp, sizeof (tcp_t));
1692 
1693 	/* restore the state */
1694 	tcp->tcp_timercache = mp;
1695 
1696 	tcp->tcp_sack_info = tcp_sack_info;
1697 	tcp->tcp_iphc = tcp_iphc;
1698 	tcp->tcp_iphc_len = tcp_iphc_len;
1699 	tcp->tcp_hdr_grown = tcp_hdr_grown;
1700 
1701 	tcp->tcp_connp = connp;
1702 
1703 	ASSERT(connp->conn_tcp == tcp);
1704 	ASSERT(connp->conn_flags & IPCL_TCPCONN);
1705 	connp->conn_state_flags = CONN_INCIPIENT;
1706 	ASSERT(connp->conn_ulp == IPPROTO_TCP);
1707 	ASSERT(connp->conn_ref == 1);
1708 }
1709 
1710 /*
1711  * Blows away all tcps whose TIME_WAIT has expired. List traversal
1712  * is done forwards from the head.
1713  * This walks all stack instances since
1714  * tcp_time_wait remains global across all stacks.
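 *
 * Because lbolt can wrap, the expiry test below is done in modular
 * arithmetic, i.e.
 *
 *	if ((now - tcp->tcp_time_wait_expire) < 0)
 *		break;			-- head of list not yet expired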
1715 */ 1716 /* ARGSUSED */ 1717 void 1718 tcp_time_wait_collector(void *arg) 1719 { 1720 tcp_t *tcp; 1721 clock_t now; 1722 mblk_t *mp; 1723 conn_t *connp; 1724 kmutex_t *lock; 1725 boolean_t removed; 1726 1727 squeue_t *sqp = (squeue_t *)arg; 1728 tcp_squeue_priv_t *tcp_time_wait = 1729 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP)); 1730 1731 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1732 tcp_time_wait->tcp_time_wait_tid = 0; 1733 1734 if (tcp_time_wait->tcp_free_list != NULL && 1735 tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) { 1736 TCP_G_STAT(tcp_freelist_cleanup); 1737 while ((tcp = tcp_time_wait->tcp_free_list) != NULL) { 1738 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next; 1739 tcp->tcp_time_wait_next = NULL; 1740 tcp_time_wait->tcp_free_list_cnt--; 1741 ASSERT(tcp->tcp_tcps == NULL); 1742 CONN_DEC_REF(tcp->tcp_connp); 1743 } 1744 ASSERT(tcp_time_wait->tcp_free_list_cnt == 0); 1745 } 1746 1747 /* 1748 * In order to reap time waits reliably, we should use a 1749 * source of time that is not adjustable by the user -- hence 1750 * the call to ddi_get_lbolt(). 1751 */ 1752 now = ddi_get_lbolt(); 1753 while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) { 1754 /* 1755 * Compare times using modular arithmetic, since 1756 * lbolt can wrapover. 1757 */ 1758 if ((now - tcp->tcp_time_wait_expire) < 0) { 1759 break; 1760 } 1761 1762 removed = tcp_time_wait_remove(tcp, tcp_time_wait); 1763 ASSERT(removed); 1764 1765 connp = tcp->tcp_connp; 1766 ASSERT(connp->conn_fanout != NULL); 1767 lock = &connp->conn_fanout->connf_lock; 1768 /* 1769 * This is essentially a TW reclaim fast path optimization for 1770 * performance where the timewait collector checks under the 1771 * fanout lock (so that no one else can get access to the 1772 * conn_t) that the refcnt is 2 i.e. one for TCP and one for 1773 * the classifier hash list. If ref count is indeed 2, we can 1774 * just remove the conn under the fanout lock and avoid 1775 * cleaning up the conn under the squeue, provided that 1776 * clustering callbacks are not enabled. If clustering is 1777 * enabled, we need to make the clustering callback before 1778 * setting the CONDEMNED flag and after dropping all locks and 1779 * so we forego this optimization and fall back to the slow 1780 * path. Also please see the comments in tcp_closei_local 1781 * regarding the refcnt logic. 1782 * 1783 * Since we are holding the tcp_time_wait_lock, its better 1784 * not to block on the fanout_lock because other connections 1785 * can't add themselves to time_wait list. So we do a 1786 * tryenter instead of mutex_enter. 1787 */ 1788 if (mutex_tryenter(lock)) { 1789 mutex_enter(&connp->conn_lock); 1790 if ((connp->conn_ref == 2) && 1791 (cl_inet_disconnect == NULL)) { 1792 ipcl_hash_remove_locked(connp, 1793 connp->conn_fanout); 1794 /* 1795 * Set the CONDEMNED flag now itself so that 1796 * the refcnt cannot increase due to any 1797 * walker. But we have still not cleaned up 1798 * conn_ire_cache. 
This is still ok since 1799 * we are going to clean it up in tcp_cleanup 1800 * immediately and any interface unplumb 1801 * thread will wait till the ire is blown away 1802 */ 1803 connp->conn_state_flags |= CONN_CONDEMNED; 1804 mutex_exit(lock); 1805 mutex_exit(&connp->conn_lock); 1806 if (tcp_time_wait->tcp_free_list_cnt < 1807 tcp_free_list_max_cnt) { 1808 /* Add to head of tcp_free_list */ 1809 mutex_exit( 1810 &tcp_time_wait->tcp_time_wait_lock); 1811 tcp_cleanup(tcp); 1812 ASSERT(connp->conn_latch == NULL); 1813 ASSERT(connp->conn_policy == NULL); 1814 ASSERT(tcp->tcp_tcps == NULL); 1815 ASSERT(connp->conn_netstack == NULL); 1816 1817 mutex_enter( 1818 &tcp_time_wait->tcp_time_wait_lock); 1819 tcp->tcp_time_wait_next = 1820 tcp_time_wait->tcp_free_list; 1821 tcp_time_wait->tcp_free_list = tcp; 1822 tcp_time_wait->tcp_free_list_cnt++; 1823 continue; 1824 } else { 1825 /* Do not add to tcp_free_list */ 1826 mutex_exit( 1827 &tcp_time_wait->tcp_time_wait_lock); 1828 tcp_bind_hash_remove(tcp); 1829 conn_delete_ire(tcp->tcp_connp, NULL); 1830 tcp_ipsec_cleanup(tcp); 1831 CONN_DEC_REF(tcp->tcp_connp); 1832 } 1833 } else { 1834 CONN_INC_REF_LOCKED(connp); 1835 mutex_exit(lock); 1836 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1837 mutex_exit(&connp->conn_lock); 1838 /* 1839 * We can reuse the closemp here since conn has 1840 * detached (otherwise we wouldn't even be in 1841 * time_wait list). tcp_closemp_used can safely 1842 * be changed without taking a lock as no other 1843 * thread can concurrently access it at this 1844 * point in the connection lifecycle. 1845 */ 1846 1847 if (tcp->tcp_closemp.b_prev == NULL) 1848 tcp->tcp_closemp_used = B_TRUE; 1849 else 1850 cmn_err(CE_PANIC, 1851 "tcp_timewait_collector: " 1852 "concurrent use of tcp_closemp: " 1853 "connp %p tcp %p\n", (void *)connp, 1854 (void *)tcp); 1855 1856 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1857 mp = &tcp->tcp_closemp; 1858 squeue_fill(connp->conn_sqp, mp, 1859 tcp_timewait_output, connp, 1860 SQTAG_TCP_TIMEWAIT); 1861 } 1862 } else { 1863 mutex_enter(&connp->conn_lock); 1864 CONN_INC_REF_LOCKED(connp); 1865 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1866 mutex_exit(&connp->conn_lock); 1867 /* 1868 * We can reuse the closemp here since conn has 1869 * detached (otherwise we wouldn't even be in 1870 * time_wait list). tcp_closemp_used can safely 1871 * be changed without taking a lock as no other 1872 * thread can concurrently access it at this 1873 * point in the connection lifecycle. 1874 */ 1875 1876 if (tcp->tcp_closemp.b_prev == NULL) 1877 tcp->tcp_closemp_used = B_TRUE; 1878 else 1879 cmn_err(CE_PANIC, "tcp_timewait_collector: " 1880 "concurrent use of tcp_closemp: " 1881 "connp %p tcp %p\n", (void *)connp, 1882 (void *)tcp); 1883 1884 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1885 mp = &tcp->tcp_closemp; 1886 squeue_fill(connp->conn_sqp, mp, 1887 tcp_timewait_output, connp, 0); 1888 } 1889 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1890 } 1891 1892 if (tcp_time_wait->tcp_free_list != NULL) 1893 tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE; 1894 1895 tcp_time_wait->tcp_time_wait_tid = 1896 timeout(tcp_time_wait_collector, sqp, TCP_TIME_WAIT_DELAY); 1897 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1898 } 1899 /* 1900 * Reply to a clients T_CONN_RES TPI message. This function 1901 * is used only for TLI/XTI listener. Sockfs sends T_CONN_RES 1902 * on the acceptor STREAM and processed in tcp_wput_accept(). 1903 * Read the block comment on top of tcp_conn_request(). 
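 *
 * In outline, a TLI/XTI accept reaches this function as (sketch):
 *
 *	t_accept(listen_fd, accept_fd, &call)
 *	    -> T_CONN_RES { ACCEPTOR_id, SEQ_number, ... } sent down
 *	       the listener stream
 *		-> tcp_accept(listener, mp)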
1904  */
1905 static void
1906 tcp_accept(tcp_t *listener, mblk_t *mp)
1907 {
1908 	tcp_t	*acceptor;
1909 	tcp_t	*eager;
1910 	tcp_t	*tcp;
1911 	struct T_conn_res	*tcr;
1912 	t_uscalar_t	acceptor_id;
1913 	t_scalar_t	seqnum;
1914 	mblk_t	*opt_mp = NULL;	/* T_OPTMGMT_REQ messages */
1915 	mblk_t	*ok_mp;
1916 	mblk_t	*mp1;
1917 	tcp_stack_t	*tcps = listener->tcp_tcps;
1918 
1919 	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
1920 		tcp_err_ack(listener, mp, TPROTO, 0);
1921 		return;
1922 	}
1923 	tcr = (struct T_conn_res *)mp->b_rptr;
1924 
1925 	/*
1926 	 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the
1927 	 * read side queue of the streams device underneath us i.e. the
1928 	 * read side queue of 'ip'. Since we can't dereference QUEUE_ptr we
1929 	 * look it up in the queue_hash.  Under LP64 it sends down the
1930 	 * minor_t of the accepting endpoint.
1931 	 *
1932 	 * Once the acceptor/eager are modified (in tcp_accept_swap) the
1933 	 * fanout hash lock is held.
1934 	 * This prevents any thread from entering the acceptor queue from
1935 	 * below (since it has not been hard bound yet i.e. any inbound
1936 	 * packets will arrive on the listener or default tcp queue and
1937 	 * go through tcp_lookup).
1938 	 * The CONN_INC_REF will prevent the acceptor from closing.
1939 	 *
1940 	 * XXX It is still possible for a tli application to send down data
1941 	 * on the accepting stream while another thread calls t_accept.
1942 	 * This should not be a problem for well-behaved applications since
1943 	 * the T_OK_ACK is sent after the queue swapping is completed.
1944 	 *
1945 	 * If the accepting fd is the same as the listening fd, avoid
1946 	 * queue hash lookup since that will return an eager listener in an
1947 	 * already established state.
1948 	 */
1949 	acceptor_id = tcr->ACCEPTOR_id;
1950 	mutex_enter(&listener->tcp_eager_lock);
1951 	if (listener->tcp_acceptor_id == acceptor_id) {
1952 		eager = listener->tcp_eager_next_q;
1953 		/* only count how many T_CONN_INDs so don't count q0 */
1954 		if ((listener->tcp_conn_req_cnt_q != 1) ||
1955 		    (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) {
1956 			mutex_exit(&listener->tcp_eager_lock);
1957 			tcp_err_ack(listener, mp, TBADF, 0);
1958 			return;
1959 		}
1960 		if (listener->tcp_conn_req_cnt_q0 != 0) {
1961 			/* Throw away all the eagers on q0. */
1962 			tcp_eager_cleanup(listener, 1);
1963 		}
1964 		if (listener->tcp_syn_defense) {
1965 			listener->tcp_syn_defense = B_FALSE;
1966 			if (listener->tcp_ip_addr_cache != NULL) {
1967 				kmem_free(listener->tcp_ip_addr_cache,
1968 				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
1969 				listener->tcp_ip_addr_cache = NULL;
1970 			}
1971 		}
1972 		/*
1973 		 * Transfer tcp_conn_req_max to the eager so that when
1974 		 * a disconnect occurs we can revert the endpoint to the
1975 		 * listen state.
1976 		 */
1977 		eager->tcp_conn_req_max = listener->tcp_conn_req_max;
1978 		ASSERT(listener->tcp_conn_req_cnt_q0 == 0);
1979 		/*
1980 		 * Get a reference on the acceptor just like the
1981 		 * tcp_acceptor_hash_lookup below.
1982 		 */
1983 		acceptor = listener;
1984 		CONN_INC_REF(acceptor->tcp_connp);
1985 	} else {
1986 		acceptor = tcp_acceptor_hash_lookup(acceptor_id, tcps);
1987 		if (acceptor == NULL) {
1988 			if (listener->tcp_debug) {
1989 				(void) strlog(TCP_MOD_ID, 0, 1,
1990 				    SL_ERROR|SL_TRACE,
1991 				    "tcp_accept: did not find acceptor 0x%x\n",
1992 				    acceptor_id);
1993 			}
1994 			mutex_exit(&listener->tcp_eager_lock);
1995 			tcp_err_ack(listener, mp, TPROVMISMATCH, 0);
1996 			return;
1997 		}
1998 		/*
1999 		 * Verify acceptor state. The acceptable states for an acceptor
2000 		 * include TCPS_IDLE and TCPS_BOUND.
2001 */ 2002 switch (acceptor->tcp_state) { 2003 case TCPS_IDLE: 2004 /* FALLTHRU */ 2005 case TCPS_BOUND: 2006 break; 2007 default: 2008 CONN_DEC_REF(acceptor->tcp_connp); 2009 mutex_exit(&listener->tcp_eager_lock); 2010 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2011 return; 2012 } 2013 } 2014 2015 /* The listener must be in TCPS_LISTEN */ 2016 if (listener->tcp_state != TCPS_LISTEN) { 2017 CONN_DEC_REF(acceptor->tcp_connp); 2018 mutex_exit(&listener->tcp_eager_lock); 2019 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2020 return; 2021 } 2022 2023 /* 2024 * Rendezvous with an eager connection request packet hanging off 2025 * 'tcp' that has the 'seqnum' tag. We tagged the detached open 2026 * tcp structure when the connection packet arrived in 2027 * tcp_conn_request(). 2028 */ 2029 seqnum = tcr->SEQ_number; 2030 eager = listener; 2031 do { 2032 eager = eager->tcp_eager_next_q; 2033 if (eager == NULL) { 2034 CONN_DEC_REF(acceptor->tcp_connp); 2035 mutex_exit(&listener->tcp_eager_lock); 2036 tcp_err_ack(listener, mp, TBADSEQ, 0); 2037 return; 2038 } 2039 } while (eager->tcp_conn_req_seqnum != seqnum); 2040 mutex_exit(&listener->tcp_eager_lock); 2041 2042 /* 2043 * At this point, both acceptor and listener have 2 ref 2044 * that they begin with. Acceptor has one additional ref 2045 * we placed in lookup while listener has 3 additional 2046 * ref for being behind the squeue (tcp_accept() is 2047 * done on listener's squeue); being in classifier hash; 2048 * and eager's ref on listener. 2049 */ 2050 ASSERT(listener->tcp_connp->conn_ref >= 5); 2051 ASSERT(acceptor->tcp_connp->conn_ref >= 3); 2052 2053 /* 2054 * The eager at this point is set in its own squeue and 2055 * could easily have been killed (tcp_accept_finish will 2056 * deal with that) because of a TH_RST so we can only 2057 * ASSERT for a single ref. 2058 */ 2059 ASSERT(eager->tcp_connp->conn_ref >= 1); 2060 2061 /* Pre allocate the stroptions mblk also */ 2062 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 2063 if (opt_mp == NULL) { 2064 CONN_DEC_REF(acceptor->tcp_connp); 2065 CONN_DEC_REF(eager->tcp_connp); 2066 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2067 return; 2068 } 2069 DB_TYPE(opt_mp) = M_SETOPTS; 2070 opt_mp->b_wptr += sizeof (struct stroptions); 2071 2072 /* 2073 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 2074 * from listener to acceptor. The message is chained on opt_mp 2075 * which will be sent onto eager's squeue. 2076 */ 2077 if (listener->tcp_bound_if != 0) { 2078 /* allocate optmgmt req */ 2079 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2080 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 2081 sizeof (int)); 2082 if (mp1 != NULL) 2083 linkb(opt_mp, mp1); 2084 } 2085 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 2086 uint_t on = 1; 2087 2088 /* allocate optmgmt req */ 2089 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2090 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 2091 if (mp1 != NULL) 2092 linkb(opt_mp, mp1); 2093 } 2094 2095 /* Re-use mp1 to hold a copy of mp, in case reallocb fails */ 2096 if ((mp1 = copymsg(mp)) == NULL) { 2097 CONN_DEC_REF(acceptor->tcp_connp); 2098 CONN_DEC_REF(eager->tcp_connp); 2099 freemsg(opt_mp); 2100 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2101 return; 2102 } 2103 2104 tcr = (struct T_conn_res *)mp1->b_rptr; 2105 2106 /* 2107 * This is an expanded version of mi_tpi_ok_ack_alloc() 2108 * which allocates a larger mblk and appends the new 2109 * local address to the ok_ack. The address is copied by 2110 * soaccept() for getsockname(). 
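	 *
	 * The mblk returned by that helper is laid out roughly as:
	 *
	 *	+------------------+---------------------------------+
	 *	| struct T_ok_ack  | sin_t or sin6_t (local address) |
	 *	+------------------+---------------------------------+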
2111 	 */
2112 	{
2113 		int extra;
2114 
2115 		extra = (eager->tcp_family == AF_INET) ?
2116 		    sizeof (sin_t) : sizeof (sin6_t);
2117 
2118 		/*
2119 		 * Try to re-use mp, if possible.  Otherwise, allocate
2120 		 * an mblk and return it as ok_mp.  In any case, mp
2121 		 * is no longer usable upon return.
2122 		 */
2123 		if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) {
2124 			CONN_DEC_REF(acceptor->tcp_connp);
2125 			CONN_DEC_REF(eager->tcp_connp);
2126 			freemsg(opt_mp);
2127 			/* Original mp has been freed by now, so use mp1 */
2128 			tcp_err_ack(listener, mp1, TSYSERR, ENOMEM);
2129 			return;
2130 		}
2131 
2132 		mp = NULL;	/* We should never use mp after this point */
2133 
2134 		switch (extra) {
2135 		case sizeof (sin_t): {
2136 			sin_t *sin = (sin_t *)ok_mp->b_wptr;
2137 
2138 			ok_mp->b_wptr += extra;
2139 			sin->sin_family = AF_INET;
2140 			sin->sin_port = eager->tcp_lport;
2141 			sin->sin_addr.s_addr =
2142 			    eager->tcp_ipha->ipha_src;
2143 			break;
2144 		}
2145 		case sizeof (sin6_t): {
2146 			sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr;
2147 
2148 			ok_mp->b_wptr += extra;
2149 			sin6->sin6_family = AF_INET6;
2150 			sin6->sin6_port = eager->tcp_lport;
2151 			if (eager->tcp_ipversion == IPV4_VERSION) {
2152 				sin6->sin6_flowinfo = 0;
2153 				IN6_IPADDR_TO_V4MAPPED(
2154 				    eager->tcp_ipha->ipha_src,
2155 				    &sin6->sin6_addr);
2156 			} else {
2157 				ASSERT(eager->tcp_ip6h != NULL);
2158 				sin6->sin6_flowinfo =
2159 				    eager->tcp_ip6h->ip6_vcf &
2160 				    ~IPV6_VERS_AND_FLOW_MASK;
2161 				sin6->sin6_addr =
2162 				    eager->tcp_ip6h->ip6_src;
2163 			}
2164 			sin6->sin6_scope_id = 0;
2165 			sin6->__sin6_src_id = 0;
2166 			break;
2167 		}
2168 		default:
2169 			break;
2170 		}
2171 		ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim);
2172 	}
2173 
2174 	/*
2175 	 * If there are no options we know that the T_CONN_RES will
2176 	 * succeed. However, we can't send the T_OK_ACK upstream until
2177 	 * the tcp_accept_swap is done since it would be dangerous to
2178 	 * let the application start using the new fd prior to the swap.
2179 	 */
2180 	tcp_accept_swap(listener, acceptor, eager);
2181 
2182 	/*
2183 	 * tcp_accept_swap unlinks eager from listener but does not drop
2184 	 * the eager's reference on the listener.
2185 	 */
2186 	ASSERT(eager->tcp_listener == NULL);
2187 	ASSERT(listener->tcp_connp->conn_ref >= 5);
2188 
2189 	/*
2190 	 * The eager is now associated with its own queue. Insert in
2191 	 * the hash so that the connection can be reused for a future
2192 	 * T_CONN_RES.
2193 	 */
2194 	tcp_acceptor_hash_insert(acceptor_id, eager);
2195 
2196 	/*
2197 	 * We now do the processing of options with T_CONN_RES.
2198 	 * We delay till now since we wanted to have a queue to pass to
2199 	 * option processing routines that points back to the right
2200 	 * instance structure which does not happen until after
2201 	 * tcp_accept_swap().
2202 	 *
2203 	 * Note:
2204 	 * The sanity of the logic here assumes that whatever options
2205 	 * are appropriate to inherit from listener=>eager are done
2206 	 * before this point, and whatever were to be overridden (or not)
2207 	 * in transfer logic from eager=>acceptor in tcp_accept_swap().
2208 	 * [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it
2209 	 * before its ACCEPTOR_id comes down in T_CONN_RES ]
2210 	 * This may not be true at this point in time but can be fixed
2211 	 * independently.  This option processing code starts with
2212 	 * the instantiated acceptor instance and the final queue at
2213 	 * this point.
2214 */ 2215 2216 if (tcr->OPT_length != 0) { 2217 /* Options to process */ 2218 int t_error = 0; 2219 int sys_error = 0; 2220 int do_disconnect = 0; 2221 2222 if (tcp_conprim_opt_process(eager, mp1, 2223 &do_disconnect, &t_error, &sys_error) < 0) { 2224 eager->tcp_accept_error = 1; 2225 if (do_disconnect) { 2226 /* 2227 * An option failed which does not allow 2228 * connection to be accepted. 2229 * 2230 * We allow T_CONN_RES to succeed and 2231 * put a T_DISCON_IND on the eager queue. 2232 */ 2233 ASSERT(t_error == 0 && sys_error == 0); 2234 eager->tcp_send_discon_ind = 1; 2235 } else { 2236 ASSERT(t_error != 0); 2237 freemsg(ok_mp); 2238 /* 2239 * Original mp was either freed or set 2240 * to ok_mp above, so use mp1 instead. 2241 */ 2242 tcp_err_ack(listener, mp1, t_error, sys_error); 2243 goto finish; 2244 } 2245 } 2246 /* 2247 * Most likely success in setting options (except if 2248 * eager->tcp_send_discon_ind set). 2249 * mp1 option buffer represented by OPT_length/offset 2250 * potentially modified and contains results of setting 2251 * options at this point 2252 */ 2253 } 2254 2255 /* We no longer need mp1, since all options processing has passed */ 2256 freemsg(mp1); 2257 2258 putnext(listener->tcp_rq, ok_mp); 2259 2260 mutex_enter(&listener->tcp_eager_lock); 2261 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 2262 tcp_t *tail; 2263 mblk_t *conn_ind; 2264 2265 /* 2266 * This path should not be executed if listener and 2267 * acceptor streams are the same. 2268 */ 2269 ASSERT(listener != acceptor); 2270 2271 tcp = listener->tcp_eager_prev_q0; 2272 /* 2273 * listener->tcp_eager_prev_q0 points to the TAIL of the 2274 * deferred T_conn_ind queue. We need to get to the head of 2275 * the queue in order to send up T_conn_ind the same order as 2276 * how the 3WHS is completed. 2277 */ 2278 while (tcp != listener) { 2279 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0) 2280 break; 2281 else 2282 tcp = tcp->tcp_eager_prev_q0; 2283 } 2284 ASSERT(tcp != listener); 2285 conn_ind = tcp->tcp_conn.tcp_eager_conn_ind; 2286 ASSERT(conn_ind != NULL); 2287 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 2288 2289 /* Move from q0 to q */ 2290 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 2291 listener->tcp_conn_req_cnt_q0--; 2292 listener->tcp_conn_req_cnt_q++; 2293 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 2294 tcp->tcp_eager_prev_q0; 2295 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 2296 tcp->tcp_eager_next_q0; 2297 tcp->tcp_eager_prev_q0 = NULL; 2298 tcp->tcp_eager_next_q0 = NULL; 2299 tcp->tcp_conn_def_q0 = B_FALSE; 2300 2301 /* Make sure the tcp isn't in the list of droppables */ 2302 ASSERT(tcp->tcp_eager_next_drop_q0 == NULL && 2303 tcp->tcp_eager_prev_drop_q0 == NULL); 2304 2305 /* 2306 * Insert at end of the queue because sockfs sends 2307 * down T_CONN_RES in chronological order. Leaving 2308 * the older conn indications at front of the queue 2309 * helps reducing search time. 2310 */ 2311 tail = listener->tcp_eager_last_q; 2312 if (tail != NULL) 2313 tail->tcp_eager_next_q = tcp; 2314 else 2315 listener->tcp_eager_next_q = tcp; 2316 listener->tcp_eager_last_q = tcp; 2317 tcp->tcp_eager_next_q = NULL; 2318 mutex_exit(&listener->tcp_eager_lock); 2319 putnext(tcp->tcp_rq, conn_ind); 2320 } else { 2321 mutex_exit(&listener->tcp_eager_lock); 2322 } 2323 2324 /* 2325 * Done with the acceptor - free it 2326 * 2327 * Note: from this point on, no access to listener should be made 2328 * as listener can be equal to acceptor. 
2329 	 */
2330 finish:
2331 	ASSERT(acceptor->tcp_detached);
2332 	ASSERT(tcps->tcps_g_q != NULL);
2333 	acceptor->tcp_rq = tcps->tcps_g_q;
2334 	acceptor->tcp_wq = WR(tcps->tcps_g_q);
2335 	(void) tcp_clean_death(acceptor, 0, 2);
2336 	CONN_DEC_REF(acceptor->tcp_connp);
2337 
2338 	/*
2339 	 * In case we already received a FIN we have to make tcp_rput send
2340 	 * the ordrel_ind. This will also send up a window update if the window
2341 	 * has opened up.
2342 	 *
2343 	 * In the normal case of a successful connection acceptance
2344 	 * we give the O_T_BIND_REQ to the read side put procedure as an
2345 	 * indication that this was just accepted. This tells tcp_rput to
2346 	 * pass up any data queued in tcp_rcv_list.
2347 	 *
2348 	 * In the fringe case where options sent with T_CONN_RES fail and
2349 	 * a disconnect is required, we indicate a T_DISCON_IND to blow
2350 	 * away this connection.
2351 	 */
2352 
2353 	/*
2354 	 * XXX: we currently have a problem if an XTI application closes the
2355 	 * acceptor stream in between. This problem exists in on10-gate also
2356 	 * and is well known but nothing can be done short of a major rewrite
2357 	 * to fix it. Now it is possible to take care of it by assigning the
2358 	 * TLI/XTI eager the same squeue as the listener (we can distinguish
2359 	 * non-socket listeners at the time of handling a SYN in
2360 	 * tcp_conn_request) and do most of the work that tcp_accept_finish
2361 	 * does here itself and then get behind the acceptor squeue to access
2362 	 * the acceptor queue.
2363 	 */
2364 	/*
2365 	 * We already have a ref on tcp so no need to do one before squeue_fill
2366 	 */
2367 	squeue_fill(eager->tcp_connp->conn_sqp, opt_mp,
2368 	    tcp_accept_finish, eager->tcp_connp, SQTAG_TCP_ACCEPT_FINISH);
2369 }
2370 
2371 /*
2372  * Swap information between the eager and acceptor for a TLI/XTI client.
2373  * The sockfs accept is done on the acceptor stream and control goes
2374  * through tcp_wput_accept() and tcp_accept()/tcp_accept_swap() is not
2375  * called. In either case, both the eager and listener are in their own
2376  * perimeter (squeue) and the code has to deal with a potential race.
2377  *
2378  * See the block comment on top of tcp_accept() and tcp_wput_accept().
2379  */
2380 static void
2381 tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager)
2382 {
2383 	conn_t *econnp, *aconnp;
2384 
2385 	ASSERT(eager->tcp_rq == listener->tcp_rq);
2386 	ASSERT(eager->tcp_detached && !acceptor->tcp_detached);
2387 	ASSERT(!eager->tcp_hard_bound);
2388 	ASSERT(!TCP_IS_SOCKET(acceptor));
2389 	ASSERT(!TCP_IS_SOCKET(eager));
2390 	ASSERT(!TCP_IS_SOCKET(listener));
2391 
2392 	acceptor->tcp_detached = B_TRUE;
2393 	/*
2394 	 * To permit stream re-use by TLI/XTI, the eager needs a copy of
2395 	 * the acceptor id.
2396 	 */
2397 	eager->tcp_acceptor_id = acceptor->tcp_acceptor_id;
2398 
2399 	/* remove eager from listen list... */
2400 	mutex_enter(&listener->tcp_eager_lock);
2401 	tcp_eager_unlink(eager);
2402 	ASSERT(eager->tcp_eager_next_q == NULL &&
2403 	    eager->tcp_eager_last_q == NULL);
2404 	ASSERT(eager->tcp_eager_next_q0 == NULL &&
2405 	    eager->tcp_eager_prev_q0 == NULL);
2406 	mutex_exit(&listener->tcp_eager_lock);
2407 	eager->tcp_rq = acceptor->tcp_rq;
2408 	eager->tcp_wq = acceptor->tcp_wq;
2409 
2410 	econnp = eager->tcp_connp;
2411 	aconnp = acceptor->tcp_connp;
2412 
2413 	eager->tcp_rq->q_ptr = econnp;
2414 	eager->tcp_wq->q_ptr = econnp;
2415 
2416 	/*
2417 	 * In the TLI/XTI loopback case, we are inside the listener's squeue,
2418 	 * which might be a different squeue from our peer TCP instance.
2419 	 * For TCP Fusion, the peer expects that whenever tcp_detached is
2420 	 * clear, our TCP queues point to the acceptor's queues.  Thus, use
2421 	 * membar_producer() to ensure that the assignments of tcp_rq/tcp_wq
2422 	 * above reach global visibility prior to the clearing of tcp_detached.
2423 	 */
2424 	membar_producer();
2425 	eager->tcp_detached = B_FALSE;
2426 
2427 	ASSERT(eager->tcp_ack_tid == 0);
2428 
2429 	econnp->conn_dev = aconnp->conn_dev;
2430 	econnp->conn_minor_arena = aconnp->conn_minor_arena;
2431 	ASSERT(econnp->conn_minor_arena != NULL);
2432 	if (eager->tcp_cred != NULL)
2433 		crfree(eager->tcp_cred);
2434 	eager->tcp_cred = econnp->conn_cred = aconnp->conn_cred;
2435 	ASSERT(econnp->conn_netstack == aconnp->conn_netstack);
2436 	ASSERT(eager->tcp_tcps == acceptor->tcp_tcps);
2437 
2438 	aconnp->conn_cred = NULL;
2439 
2440 	econnp->conn_zoneid = aconnp->conn_zoneid;
2441 	econnp->conn_allzones = aconnp->conn_allzones;
2442 
2443 	econnp->conn_mac_exempt = aconnp->conn_mac_exempt;
2444 	aconnp->conn_mac_exempt = B_FALSE;
2445 
2446 	ASSERT(aconnp->conn_peercred == NULL);
2447 
2448 	/* Do the IPC initialization */
2449 	CONN_INC_REF(econnp);
2450 
2451 	econnp->conn_multicast_loop = aconnp->conn_multicast_loop;
2452 	econnp->conn_af_isv6 = aconnp->conn_af_isv6;
2453 	econnp->conn_pkt_isv6 = aconnp->conn_pkt_isv6;
2454 
2455 	/* Done with old IPC. Drop its ref on its connp */
2456 	CONN_DEC_REF(aconnp);
2457 }
2458 
2459 
2460 /*
2461  * Adapt to the information, such as rtt and rtt_sd, provided from the
2462  * ire cached in conn_cache_ire. If no ire is cached, do an ire lookup.
2463  *
2464  * Checks for multicast and broadcast destination address.
2465  * Returns zero on failure; non-zero if ok.
2466  *
2467  * Note that the MSS calculation here is based on the info given in
2468  * the IRE.  We do not do any calculation based on TCP options.  They
2469  * will be handled in tcp_rput_other() and tcp_rput_data() when TCP
2470  * knows which options to use.
2471  *
2472  * Note on how TCP gets its parameters for a connection.
2473  *
2474  * When a tcp_t structure is allocated, it gets all the default parameters.
2475  * In tcp_adapt_ire(), it gets those metric parameters, like rtt, rtt_sd,
2476  * spipe, rpipe, ... from the route metrics.  A route metric overrides the
2477  * default.  But if there is an associated tcp_host_param, it will override
2478  * the metrics.
2479  *
2480  * An incoming SYN with a multicast or broadcast destination address is
2481  * dropped in one of two places.
2482  *
2483  * 1. If the packet was received over the wire it is dropped in
2484  * ip_rput_process_broadcast()
2485  *
2486  * 2. If the packet was received through internal IP loopback, i.e. the packet
2487  * was generated and received on the same machine, it is dropped in
2488  * ip_wput_local()
2489  *
2490  * An incoming SYN with a multicast or broadcast source address is always
2491  * dropped in tcp_adapt_ire.  The same logic in tcp_adapt_ire also serves to
2492  * reject an attempt to connect to a broadcast or multicast (destination)
2493  * address.
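 *
 * For example, per-route metrics can be seeded from the routing table
 * with route(1M) (addresses and values below are purely illustrative):
 *
 *	# route add -host 192.0.2.1 10.0.0.1 -rtt 200 -sendpipe 65536
 *
 * after which the code below initializes tcp_rtt_sa and
 * tcp_xmit_hiwater from ire_uinfo instead of from the defaults.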
2494 */ 2495 static int 2496 tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp) 2497 { 2498 tcp_hsp_t *hsp; 2499 ire_t *ire; 2500 ire_t *sire = NULL; 2501 iulp_t *ire_uinfo = NULL; 2502 uint32_t mss_max; 2503 uint32_t mss; 2504 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 2505 conn_t *connp = tcp->tcp_connp; 2506 boolean_t ire_cacheable = B_FALSE; 2507 zoneid_t zoneid = connp->conn_zoneid; 2508 int match_flags = MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | 2509 MATCH_IRE_SECATTR; 2510 ts_label_t *tsl = crgetlabel(CONN_CRED(connp)); 2511 ill_t *ill = NULL; 2512 boolean_t incoming = (ire_mp == NULL); 2513 tcp_stack_t *tcps = tcp->tcp_tcps; 2514 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2515 2516 ASSERT(connp->conn_ire_cache == NULL); 2517 2518 if (tcp->tcp_ipversion == IPV4_VERSION) { 2519 2520 if (CLASSD(tcp->tcp_connp->conn_rem)) { 2521 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 2522 return (0); 2523 } 2524 /* 2525 * If IP_NEXTHOP is set, then look for an IRE_CACHE 2526 * for the destination with the nexthop as gateway. 2527 * ire_ctable_lookup() is used because this particular 2528 * ire, if it exists, will be marked private. 2529 * If that is not available, use the interface ire 2530 * for the nexthop. 2531 * 2532 * TSol: tcp_update_label will detect label mismatches based 2533 * only on the destination's label, but that would not 2534 * detect label mismatches based on the security attributes 2535 * of routes or next hop gateway. Hence we need to pass the 2536 * label to ire_ftable_lookup below in order to locate the 2537 * right prefix (and/or) ire cache. Similarly we also need 2538 * pass the label to the ire_cache_lookup below to locate 2539 * the right ire that also matches on the label. 2540 */ 2541 if (tcp->tcp_connp->conn_nexthop_set) { 2542 ire = ire_ctable_lookup(tcp->tcp_connp->conn_rem, 2543 tcp->tcp_connp->conn_nexthop_v4, 0, NULL, zoneid, 2544 tsl, MATCH_IRE_MARK_PRIVATE_ADDR | MATCH_IRE_GW, 2545 ipst); 2546 if (ire == NULL) { 2547 ire = ire_ftable_lookup( 2548 tcp->tcp_connp->conn_nexthop_v4, 2549 0, 0, IRE_INTERFACE, NULL, NULL, zoneid, 0, 2550 tsl, match_flags, ipst); 2551 if (ire == NULL) 2552 return (0); 2553 } else { 2554 ire_uinfo = &ire->ire_uinfo; 2555 } 2556 } else { 2557 ire = ire_cache_lookup(tcp->tcp_connp->conn_rem, 2558 zoneid, tsl, ipst); 2559 if (ire != NULL) { 2560 ire_cacheable = B_TRUE; 2561 ire_uinfo = (ire_mp != NULL) ? 2562 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2563 &ire->ire_uinfo; 2564 2565 } else { 2566 if (ire_mp == NULL) { 2567 ire = ire_ftable_lookup( 2568 tcp->tcp_connp->conn_rem, 2569 0, 0, 0, NULL, &sire, zoneid, 0, 2570 tsl, (MATCH_IRE_RECURSIVE | 2571 MATCH_IRE_DEFAULT), ipst); 2572 if (ire == NULL) 2573 return (0); 2574 ire_uinfo = (sire != NULL) ? 2575 &sire->ire_uinfo : 2576 &ire->ire_uinfo; 2577 } else { 2578 ire = (ire_t *)ire_mp->b_rptr; 2579 ire_uinfo = 2580 &((ire_t *) 2581 ire_mp->b_rptr)->ire_uinfo; 2582 } 2583 } 2584 } 2585 ASSERT(ire != NULL); 2586 2587 if ((ire->ire_src_addr == INADDR_ANY) || 2588 (ire->ire_type & IRE_BROADCAST)) { 2589 /* 2590 * ire->ire_mp is non null when ire_mp passed in is used 2591 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2592 */ 2593 if (ire->ire_mp == NULL) 2594 ire_refrele(ire); 2595 if (sire != NULL) 2596 ire_refrele(sire); 2597 return (0); 2598 } 2599 2600 if (tcp->tcp_ipha->ipha_src == INADDR_ANY) { 2601 ipaddr_t src_addr; 2602 2603 /* 2604 * ip_bind_connected() has stored the correct source 2605 * address in conn_src. 
2606 */ 2607 src_addr = tcp->tcp_connp->conn_src; 2608 tcp->tcp_ipha->ipha_src = src_addr; 2609 /* 2610 * Copy of the src addr. in tcp_t is needed 2611 * for the lookup funcs. 2612 */ 2613 IN6_IPADDR_TO_V4MAPPED(src_addr, &tcp->tcp_ip_src_v6); 2614 } 2615 /* 2616 * Set the fragment bit so that IP will tell us if the MTU 2617 * should change. IP tells us the latest setting of 2618 * ip_path_mtu_discovery through ire_frag_flag. 2619 */ 2620 if (ipst->ips_ip_path_mtu_discovery) { 2621 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 2622 htons(IPH_DF); 2623 } 2624 /* 2625 * If ire_uinfo is NULL, this is the IRE_INTERFACE case 2626 * for IP_NEXTHOP. No cache ire has been found for the 2627 * destination and we are working with the nexthop's 2628 * interface ire. Since we need to forward all packets 2629 * to the nexthop first, we "blindly" set tcp_localnet 2630 * to false, eventhough the destination may also be 2631 * onlink. 2632 */ 2633 if (ire_uinfo == NULL) 2634 tcp->tcp_localnet = 0; 2635 else 2636 tcp->tcp_localnet = (ire->ire_gateway_addr == 0); 2637 } else { 2638 /* 2639 * For incoming connection ire_mp = NULL 2640 * For outgoing connection ire_mp != NULL 2641 * Technically we should check conn_incoming_ill 2642 * when ire_mp is NULL and conn_outgoing_ill when 2643 * ire_mp is non-NULL. But this is performance 2644 * critical path and for IPV*_BOUND_IF, outgoing 2645 * and incoming ill are always set to the same value. 2646 */ 2647 ill_t *dst_ill = NULL; 2648 ipif_t *dst_ipif = NULL; 2649 2650 ASSERT(connp->conn_outgoing_ill == connp->conn_incoming_ill); 2651 2652 if (connp->conn_outgoing_ill != NULL) { 2653 /* Outgoing or incoming path */ 2654 int err; 2655 2656 dst_ill = conn_get_held_ill(connp, 2657 &connp->conn_outgoing_ill, &err); 2658 if (err == ILL_LOOKUP_FAILED || dst_ill == NULL) { 2659 ip1dbg(("tcp_adapt_ire: ill_lookup failed\n")); 2660 return (0); 2661 } 2662 match_flags |= MATCH_IRE_ILL; 2663 dst_ipif = dst_ill->ill_ipif; 2664 } 2665 ire = ire_ctable_lookup_v6(&tcp->tcp_connp->conn_remv6, 2666 0, 0, dst_ipif, zoneid, tsl, match_flags, ipst); 2667 2668 if (ire != NULL) { 2669 ire_cacheable = B_TRUE; 2670 ire_uinfo = (ire_mp != NULL) ? 2671 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2672 &ire->ire_uinfo; 2673 } else { 2674 if (ire_mp == NULL) { 2675 ire = ire_ftable_lookup_v6( 2676 &tcp->tcp_connp->conn_remv6, 2677 0, 0, 0, dst_ipif, &sire, zoneid, 2678 0, tsl, match_flags, ipst); 2679 if (ire == NULL) { 2680 if (dst_ill != NULL) 2681 ill_refrele(dst_ill); 2682 return (0); 2683 } 2684 ire_uinfo = (sire != NULL) ? &sire->ire_uinfo : 2685 &ire->ire_uinfo; 2686 } else { 2687 ire = (ire_t *)ire_mp->b_rptr; 2688 ire_uinfo = 2689 &((ire_t *)ire_mp->b_rptr)->ire_uinfo; 2690 } 2691 } 2692 if (dst_ill != NULL) 2693 ill_refrele(dst_ill); 2694 2695 ASSERT(ire != NULL); 2696 ASSERT(ire_uinfo != NULL); 2697 2698 if (IN6_IS_ADDR_UNSPECIFIED(&ire->ire_src_addr_v6) || 2699 IN6_IS_ADDR_MULTICAST(&ire->ire_addr_v6)) { 2700 /* 2701 * ire->ire_mp is non null when ire_mp passed in is used 2702 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2703 */ 2704 if (ire->ire_mp == NULL) 2705 ire_refrele(ire); 2706 if (sire != NULL) 2707 ire_refrele(sire); 2708 return (0); 2709 } 2710 2711 if (IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 2712 in6_addr_t src_addr; 2713 2714 /* 2715 * ip_bind_connected_v6() has stored the correct source 2716 * address per IPv6 addr. selection policy in 2717 * conn_src_v6. 
2718 */ 2719 src_addr = tcp->tcp_connp->conn_srcv6; 2720 2721 tcp->tcp_ip6h->ip6_src = src_addr; 2722 /* 2723 * Copy of the src addr. in tcp_t is needed 2724 * for the lookup funcs. 2725 */ 2726 tcp->tcp_ip_src_v6 = src_addr; 2727 ASSERT(IN6_ARE_ADDR_EQUAL(&tcp->tcp_ip6h->ip6_src, 2728 &connp->conn_srcv6)); 2729 } 2730 tcp->tcp_localnet = 2731 IN6_IS_ADDR_UNSPECIFIED(&ire->ire_gateway_addr_v6); 2732 } 2733 2734 /* 2735 * This allows applications to fail quickly when connections are made 2736 * to dead hosts. Hosts can be labeled dead by adding a reject route 2737 * with both the RTF_REJECT and RTF_PRIVATE flags set. 2738 */ 2739 if ((ire->ire_flags & RTF_REJECT) && 2740 (ire->ire_flags & RTF_PRIVATE)) 2741 goto error; 2742 2743 /* 2744 * Make use of the cached rtt and rtt_sd values to calculate the 2745 * initial RTO. Note that they are already initialized in 2746 * tcp_init_values(). 2747 * If ire_uinfo is NULL, i.e., we do not have a cache ire for 2748 * IP_NEXTHOP, but instead are using the interface ire for the 2749 * nexthop, then we do not use the ire_uinfo from that ire to 2750 * do any initializations. 2751 */ 2752 if (ire_uinfo != NULL) { 2753 if (ire_uinfo->iulp_rtt != 0) { 2754 clock_t rto; 2755 2756 tcp->tcp_rtt_sa = ire_uinfo->iulp_rtt; 2757 tcp->tcp_rtt_sd = ire_uinfo->iulp_rtt_sd; 2758 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 2759 tcps->tcps_rexmit_interval_extra + 2760 (tcp->tcp_rtt_sa >> 5); 2761 2762 if (rto > tcps->tcps_rexmit_interval_max) { 2763 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 2764 } else if (rto < tcps->tcps_rexmit_interval_min) { 2765 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 2766 } else { 2767 tcp->tcp_rto = rto; 2768 } 2769 } 2770 if (ire_uinfo->iulp_ssthresh != 0) 2771 tcp->tcp_cwnd_ssthresh = ire_uinfo->iulp_ssthresh; 2772 else 2773 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 2774 if (ire_uinfo->iulp_spipe > 0) { 2775 tcp->tcp_xmit_hiwater = MIN(ire_uinfo->iulp_spipe, 2776 tcps->tcps_max_buf); 2777 if (tcps->tcps_snd_lowat_fraction != 0) 2778 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2779 tcps->tcps_snd_lowat_fraction; 2780 (void) tcp_maxpsz_set(tcp, B_TRUE); 2781 } 2782 /* 2783 * Note that up till now, acceptor always inherits receive 2784 * window from the listener. But if there is a metrics 2785 * associated with a host, we should use that instead of 2786 * inheriting it from listener. Thus we need to pass this 2787 * info back to the caller. 2788 */ 2789 if (ire_uinfo->iulp_rpipe > 0) { 2790 tcp->tcp_rwnd = MIN(ire_uinfo->iulp_rpipe, 2791 tcps->tcps_max_buf); 2792 } 2793 2794 if (ire_uinfo->iulp_rtomax > 0) { 2795 tcp->tcp_second_timer_threshold = 2796 ire_uinfo->iulp_rtomax; 2797 } 2798 2799 /* 2800 * Use the metric option settings, iulp_tstamp_ok and 2801 * iulp_wscale_ok, only for active open. What this means 2802 * is that if the other side uses timestamp or window 2803 * scale option, TCP will also use those options. That 2804 * is for passive open. If the application sets a 2805 * large window, window scale is enabled regardless of 2806 * the value in iulp_wscale_ok. This is the behavior 2807 * since 2.6. So we keep it. 2808 * The only case left in passive open processing is the 2809 * check for SACK. 2810 * For ECN, it should probably be like SACK. But the 2811 * current value is binary, so we treat it like the other 2812 * cases. The metric only controls active open.For passive 2813 * open, the ndd param, tcp_ecn_permitted, controls the 2814 * behavior. 
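		 *
		 * For example (illustrative), the passive-open behavior
		 * for SACK and ECN can be tuned with:
		 *
		 *	# ndd -set /dev/tcp tcp_sack_permitted 2
		 *	# ndd -set /dev/tcp tcp_ecn_permitted 2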
2815 		 */
2816 		if (!tcp_detached) {
2817 			/*
2818 			 * The if check means that the following can only
2819 			 * be turned on by the metrics only IRE, but not off.
2820 			 */
2821 			if (ire_uinfo->iulp_tstamp_ok)
2822 				tcp->tcp_snd_ts_ok = B_TRUE;
2823 			if (ire_uinfo->iulp_wscale_ok)
2824 				tcp->tcp_snd_ws_ok = B_TRUE;
2825 			if (ire_uinfo->iulp_sack == 2)
2826 				tcp->tcp_snd_sack_ok = B_TRUE;
2827 			if (ire_uinfo->iulp_ecn_ok)
2828 				tcp->tcp_ecn_ok = B_TRUE;
2829 		} else {
2830 			/*
2831 			 * Passive open.
2832 			 *
2833 			 * As above, the if check means that SACK can only be
2834 			 * turned on by the metric only IRE.
2835 			 */
2836 			if (ire_uinfo->iulp_sack > 0) {
2837 				tcp->tcp_snd_sack_ok = B_TRUE;
2838 			}
2839 		}
2840 	}
2841 
2842 
2843 	/*
2844 	 * XXX: Note that currently, ire_max_frag can be as small as 68
2845 	 * because of PMTUd.  So tcp_mss may go negative if the combined
2846 	 * length of all those options exceeds 28 bytes.  But because
2847 	 * of the tcp_mss_min check below, we may not have a problem if
2848 	 * tcp_mss_min is of a reasonable value.  The default is 1 so
2849 	 * the negative problem still exists.  And the check defeats PMTUd.
2850 	 * In fact, if PMTUd finds that the MSS should be smaller than
2851 	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
2852 	 * value.
2853 	 *
2854 	 * We do not deal with that now.  All those problems related to
2855 	 * PMTUd will be fixed later.
2856 	 */
2857 	ASSERT(ire->ire_max_frag != 0);
2858 	mss = tcp->tcp_if_mtu = ire->ire_max_frag;
2859 	if (tcp->tcp_ipp_fields & IPPF_USE_MIN_MTU) {
2860 		if (tcp->tcp_ipp_use_min_mtu == IPV6_USE_MIN_MTU_NEVER) {
2861 			mss = MIN(mss, IPV6_MIN_MTU);
2862 		}
2863 	}
2864 
2865 	/* Sanity check for MSS value. */
2866 	if (tcp->tcp_ipversion == IPV4_VERSION)
2867 		mss_max = tcps->tcps_mss_max_ipv4;
2868 	else
2869 		mss_max = tcps->tcps_mss_max_ipv6;
2870 
2871 	if (tcp->tcp_ipversion == IPV6_VERSION &&
2872 	    (ire->ire_frag_flag & IPH_FRAG_HDR)) {
2873 		/*
2874 		 * After receiving an ICMPv6 "packet too big" message with an
2875 		 * MTU < 1280, and for multirouted IPv6 packets, the IP layer
2876 		 * will insert an 8-byte fragment header in every packet; we
2877 		 * reduce the MSS by that amount here.
2878 		 */
2879 		mss -= sizeof (ip6_frag_t);
2880 	}
2881 
2882 	if (tcp->tcp_ipsec_overhead == 0)
2883 		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);
2884 
2885 	mss -= tcp->tcp_ipsec_overhead;
2886 
2887 	if (mss < tcps->tcps_mss_min)
2888 		mss = tcps->tcps_mss_min;
2889 	if (mss > mss_max)
2890 		mss = mss_max;
2891 
2892 	/* Note that this is the maximum MSS, excluding all options. */
2893 	tcp->tcp_mss = mss;
2894 
2895 	/*
2896 	 * Initialize the ISS here now that we have the full connection ID.
2897 	 * The RFC 1948 method of initial sequence number generation requires
2898 	 * knowledge of the full connection ID before setting the ISS.
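	 *
	 * In outline, RFC 1948 computes
	 *
	 *	ISS = M(t) + F(laddr, lport, faddr, fport, secret)
	 *
	 * where M(t) is a clock-driven component and F is a keyed hash (an
	 * MD5 digest when tcp_strong_iss is set to 2); see tcp_iss_init().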
2899 */ 2900 2901 tcp_iss_init(tcp); 2902 2903 if (ire->ire_type & (IRE_LOOPBACK | IRE_LOCAL)) 2904 tcp->tcp_loopback = B_TRUE; 2905 2906 if (tcp->tcp_ipversion == IPV4_VERSION) { 2907 hsp = tcp_hsp_lookup(tcp->tcp_remote, tcps); 2908 } else { 2909 hsp = tcp_hsp_lookup_ipv6(&tcp->tcp_remote_v6, tcps); 2910 } 2911 2912 if (hsp != NULL) { 2913 /* Only modify if we're going to make them bigger */ 2914 if (hsp->tcp_hsp_sendspace > tcp->tcp_xmit_hiwater) { 2915 tcp->tcp_xmit_hiwater = hsp->tcp_hsp_sendspace; 2916 if (tcps->tcps_snd_lowat_fraction != 0) 2917 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2918 tcps->tcps_snd_lowat_fraction; 2919 } 2920 2921 if (hsp->tcp_hsp_recvspace > tcp->tcp_rwnd) { 2922 tcp->tcp_rwnd = hsp->tcp_hsp_recvspace; 2923 } 2924 2925 /* Copy timestamp flag only for active open */ 2926 if (!tcp_detached) 2927 tcp->tcp_snd_ts_ok = hsp->tcp_hsp_tstamp; 2928 } 2929 2930 if (sire != NULL) 2931 IRE_REFRELE(sire); 2932 2933 /* 2934 * If we got an IRE_CACHE and an ILL, go through their properties; 2935 * otherwise, this is deferred until later when we have an IRE_CACHE. 2936 */ 2937 if (tcp->tcp_loopback || 2938 (ire_cacheable && (ill = ire_to_ill(ire)) != NULL)) { 2939 /* 2940 * For incoming, see if this tcp may be MDT-capable. For 2941 * outgoing, this process has been taken care of through 2942 * tcp_rput_other. 2943 */ 2944 tcp_ire_ill_check(tcp, ire, ill, incoming); 2945 tcp->tcp_ire_ill_check_done = B_TRUE; 2946 } 2947 2948 mutex_enter(&connp->conn_lock); 2949 /* 2950 * Make sure that conn is not marked incipient 2951 * for incoming connections. A blind 2952 * removal of incipient flag is cheaper than 2953 * check and removal. 2954 */ 2955 connp->conn_state_flags &= ~CONN_INCIPIENT; 2956 2957 /* 2958 * Must not cache forwarding table routes 2959 * or recache an IRE after the conn_t has 2960 * had conn_ire_cache cleared and is flagged 2961 * unusable, (see the CONN_CACHE_IRE() macro). 2962 */ 2963 if (ire_cacheable && CONN_CACHE_IRE(connp)) { 2964 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 2965 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 2966 connp->conn_ire_cache = ire; 2967 IRE_UNTRACE_REF(ire); 2968 rw_exit(&ire->ire_bucket->irb_lock); 2969 mutex_exit(&connp->conn_lock); 2970 return (1); 2971 } 2972 rw_exit(&ire->ire_bucket->irb_lock); 2973 } 2974 mutex_exit(&connp->conn_lock); 2975 2976 if (ire->ire_mp == NULL) 2977 ire_refrele(ire); 2978 return (1); 2979 2980 error: 2981 if (ire->ire_mp == NULL) 2982 ire_refrele(ire); 2983 if (sire != NULL) 2984 ire_refrele(sire); 2985 return (0); 2986 } 2987 2988 /* 2989 * tcp_bind is called (holding the writer lock) by tcp_wput_proto to process a 2990 * O_T_BIND_REQ/T_BIND_REQ message. 
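 *
 * For reference, the TPI request parsed here is (from <sys/tihdr.h>):
 *
 *	struct T_bind_req {
 *		t_scalar_t	PRIM_type;	-- T_BIND_REQ
 *		t_scalar_t	ADDR_length;
 *		t_scalar_t	ADDR_offset;
 *		t_uscalar_t	CONIND_number;	-- listen backlog
 *	};
 *
 * with the sin_t/sin6_t address, if any, at ADDR_offset.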
2991  */
2992 static void
2993 tcp_bind(tcp_t *tcp, mblk_t *mp)
2994 {
2995 	sin_t	*sin;
2996 	sin6_t	*sin6;
2997 	mblk_t	*mp1;
2998 	in_port_t	requested_port;
2999 	in_port_t	allocated_port;
3000 	struct T_bind_req *tbr;
3001 	boolean_t	bind_to_req_port_only;
3002 	boolean_t	backlog_update = B_FALSE;
3003 	boolean_t	user_specified;
3004 	in6_addr_t	v6addr;
3005 	ipaddr_t	v4addr;
3006 	uint_t	origipversion;
3007 	int	err;
3008 	queue_t *q = tcp->tcp_wq;
3009 	conn_t	*connp = tcp->tcp_connp;
3010 	mlp_type_t addrtype, mlptype;
3011 	zone_t	*zone;
3012 	cred_t	*cr;
3013 	in_port_t mlp_port;
3014 	tcp_stack_t	*tcps = tcp->tcp_tcps;
3015 
3016 	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
3017 	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
3018 		if (tcp->tcp_debug) {
3019 			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
3020 			    "tcp_bind: bad req, len %u",
3021 			    (uint_t)(mp->b_wptr - mp->b_rptr));
3022 		}
3023 		tcp_err_ack(tcp, mp, TPROTO, 0);
3024 		return;
3025 	}
3026 	/* Make sure the largest address fits */
3027 	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1);
3028 	if (mp1 == NULL) {
3029 		tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
3030 		return;
3031 	}
3032 	mp = mp1;
3033 	tbr = (struct T_bind_req *)mp->b_rptr;
3034 	if (tcp->tcp_state >= TCPS_BOUND) {
3035 		if ((tcp->tcp_state == TCPS_BOUND ||
3036 		    tcp->tcp_state == TCPS_LISTEN) &&
3037 		    tcp->tcp_conn_req_max != tbr->CONIND_number &&
3038 		    tbr->CONIND_number > 0) {
3039 			/*
3040 			 * Handle listen() increasing CONIND_number.
3041 			 * This is more "liberal" than what the TPI spec
3042 			 * requires but is needed to avoid a t_unbind
3043 			 * when handling listen() since the port number
3044 			 * might be "stolen" between the unbind and bind.
3045 			 */
3046 			backlog_update = B_TRUE;
3047 			goto do_bind;
3048 		}
3049 		if (tcp->tcp_debug) {
3050 			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
3051 			    "tcp_bind: bad state, %d", tcp->tcp_state);
3052 		}
3053 		tcp_err_ack(tcp, mp, TOUTSTATE, 0);
3054 		return;
3055 	}
3056 	origipversion = tcp->tcp_ipversion;
3057 
3058 	switch (tbr->ADDR_length) {
3059 	case 0:			/* request for a generic port */
3060 		tbr->ADDR_offset = sizeof (struct T_bind_req);
3061 		if (tcp->tcp_family == AF_INET) {
3062 			tbr->ADDR_length = sizeof (sin_t);
3063 			sin = (sin_t *)&tbr[1];
3064 			*sin = sin_null;
3065 			sin->sin_family = AF_INET;
3066 			mp->b_wptr = (uchar_t *)&sin[1];
3067 			tcp->tcp_ipversion = IPV4_VERSION;
3068 			IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &v6addr);
3069 		} else {
3070 			ASSERT(tcp->tcp_family == AF_INET6);
3071 			tbr->ADDR_length = sizeof (sin6_t);
3072 			sin6 = (sin6_t *)&tbr[1];
3073 			*sin6 = sin6_null;
3074 			sin6->sin6_family = AF_INET6;
3075 			mp->b_wptr = (uchar_t *)&sin6[1];
3076 			tcp->tcp_ipversion = IPV6_VERSION;
3077 			V6_SET_ZERO(v6addr);
3078 		}
3079 		requested_port = 0;
3080 		break;
3081 
3082 	case sizeof (sin_t):	/* Complete IPv4 address */
3083 		sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset,
3084 		    sizeof (sin_t));
3085 		if (sin == NULL || !OK_32PTR((char *)sin)) {
3086 			if (tcp->tcp_debug) {
3087 				(void) strlog(TCP_MOD_ID, 0, 1,
3088 				    SL_ERROR|SL_TRACE,
3089 				    "tcp_bind: bad address parameter, "
3090 				    "offset %d, len %d",
3091 				    tbr->ADDR_offset, tbr->ADDR_length);
3092 			}
3093 			tcp_err_ack(tcp, mp, TPROTO, 0);
3094 			return;
3095 		}
3096 		/*
3097 		 * With sockets, sockfs will accept a bogus sin_family in
3098 		 * bind() and replace it with the family used in the socket
3099 		 * call.
3100 */ 3101 if (sin->sin_family != AF_INET || 3102 tcp->tcp_family != AF_INET) { 3103 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3104 return; 3105 } 3106 requested_port = ntohs(sin->sin_port); 3107 tcp->tcp_ipversion = IPV4_VERSION; 3108 v4addr = sin->sin_addr.s_addr; 3109 IN6_IPADDR_TO_V4MAPPED(v4addr, &v6addr); 3110 break; 3111 3112 case sizeof (sin6_t): /* Complete IPv6 address */ 3113 sin6 = (sin6_t *)mi_offset_param(mp, 3114 tbr->ADDR_offset, sizeof (sin6_t)); 3115 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 3116 if (tcp->tcp_debug) { 3117 (void) strlog(TCP_MOD_ID, 0, 1, 3118 SL_ERROR|SL_TRACE, 3119 "tcp_bind: bad IPv6 address parameter, " 3120 "offset %d, len %d", tbr->ADDR_offset, 3121 tbr->ADDR_length); 3122 } 3123 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 3124 return; 3125 } 3126 if (sin6->sin6_family != AF_INET6 || 3127 tcp->tcp_family != AF_INET6) { 3128 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3129 return; 3130 } 3131 requested_port = ntohs(sin6->sin6_port); 3132 tcp->tcp_ipversion = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) ? 3133 IPV4_VERSION : IPV6_VERSION; 3134 v6addr = sin6->sin6_addr; 3135 break; 3136 3137 default: 3138 if (tcp->tcp_debug) { 3139 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3140 "tcp_bind: bad address length, %d", 3141 tbr->ADDR_length); 3142 } 3143 tcp_err_ack(tcp, mp, TBADADDR, 0); 3144 return; 3145 } 3146 tcp->tcp_bound_source_v6 = v6addr; 3147 3148 /* Check for change in ipversion */ 3149 if (origipversion != tcp->tcp_ipversion) { 3150 ASSERT(tcp->tcp_family == AF_INET6); 3151 err = tcp->tcp_ipversion == IPV6_VERSION ? 3152 tcp_header_init_ipv6(tcp) : tcp_header_init_ipv4(tcp); 3153 if (err) { 3154 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3155 return; 3156 } 3157 } 3158 3159 /* 3160 * Initialize family specific fields. Copy of the src addr. 3161 * in tcp_t is needed for the lookup funcs. 3162 */ 3163 if (tcp->tcp_ipversion == IPV6_VERSION) { 3164 tcp->tcp_ip6h->ip6_src = v6addr; 3165 } else { 3166 IN6_V4MAPPED_TO_IPADDR(&v6addr, tcp->tcp_ipha->ipha_src); 3167 } 3168 tcp->tcp_ip_src_v6 = v6addr; 3169 3170 /* 3171 * For O_T_BIND_REQ: 3172 * Verify that the target port/addr is available, or choose 3173 * another. 3174 * For T_BIND_REQ: 3175 * Verify that the target port/addr is available or fail. 3176 * In both cases when it succeeds the tcp is inserted in the 3177 * bind hash table. This ensures that the operation is atomic 3178 * under the lock on the hash bucket. 3179 */ 3180 bind_to_req_port_only = requested_port != 0 && 3181 tbr->PRIM_type != O_T_BIND_REQ; 3182 /* 3183 * Get a valid port (within the anonymous range and should not 3184 * be a privileged one) to use if the user has not given a port. 3185 * If multiple threads are here, they may all start with 3186 * with the same initial port. But, it should be fine as long as 3187 * tcp_bindi will ensure that no two threads will be assigned 3188 * the same port. 3189 * 3190 * NOTE: XXX If a privileged process asks for an anonymous port, we 3191 * still check for ports only in the range > tcp_smallest_non_priv_port, 3192 * unless TCP_ANONPRIVBIND option is set. 3193 */ 3194 mlptype = mlptSingle; 3195 mlp_port = requested_port; 3196 if (requested_port == 0) { 3197 requested_port = tcp->tcp_anon_priv_bind ? 
3198 tcp_get_next_priv_port(tcp) : 3199 tcp_update_next_port(tcps->tcps_next_port_to_try, 3200 tcp, B_TRUE); 3201 if (requested_port == 0) { 3202 tcp_err_ack(tcp, mp, TNOADDR, 0); 3203 return; 3204 } 3205 user_specified = B_FALSE; 3206 3207 /* 3208 * If the user went through one of the RPC interfaces to create 3209 * this socket and RPC is MLP in this zone, then give him an 3210 * anonymous MLP. 3211 */ 3212 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3213 if (connp->conn_anon_mlp && is_system_labeled()) { 3214 zone = crgetzone(cr); 3215 addrtype = tsol_mlp_addr_type(zone->zone_id, 3216 IPV6_VERSION, &v6addr, 3217 tcps->tcps_netstack->netstack_ip); 3218 if (addrtype == mlptSingle) { 3219 tcp_err_ack(tcp, mp, TNOADDR, 0); 3220 return; 3221 } 3222 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3223 PMAPPORT, addrtype); 3224 mlp_port = PMAPPORT; 3225 } 3226 } else { 3227 int i; 3228 boolean_t priv = B_FALSE; 3229 3230 /* 3231 * If the requested_port is in the well-known privileged range, 3232 * verify that the stream was opened by a privileged user. 3233 * Note: No locks are held when inspecting tcp_g_*epriv_ports 3234 * but instead the code relies on: 3235 * - the fact that the address of the array and its size never 3236 * changes 3237 * - the atomic assignment of the elements of the array 3238 */ 3239 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3240 if (requested_port < tcps->tcps_smallest_nonpriv_port) { 3241 priv = B_TRUE; 3242 } else { 3243 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 3244 if (requested_port == 3245 tcps->tcps_g_epriv_ports[i]) { 3246 priv = B_TRUE; 3247 break; 3248 } 3249 } 3250 } 3251 if (priv) { 3252 if (secpolicy_net_privaddr(cr, requested_port, 3253 IPPROTO_TCP) != 0) { 3254 if (tcp->tcp_debug) { 3255 (void) strlog(TCP_MOD_ID, 0, 1, 3256 SL_ERROR|SL_TRACE, 3257 "tcp_bind: no priv for port %d", 3258 requested_port); 3259 } 3260 tcp_err_ack(tcp, mp, TACCES, 0); 3261 return; 3262 } 3263 } 3264 user_specified = B_TRUE; 3265 3266 if (is_system_labeled()) { 3267 zone = crgetzone(cr); 3268 addrtype = tsol_mlp_addr_type(zone->zone_id, 3269 IPV6_VERSION, &v6addr, 3270 tcps->tcps_netstack->netstack_ip); 3271 if (addrtype == mlptSingle) { 3272 tcp_err_ack(tcp, mp, TNOADDR, 0); 3273 return; 3274 } 3275 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3276 requested_port, addrtype); 3277 } 3278 } 3279 3280 if (mlptype != mlptSingle) { 3281 if (secpolicy_net_bindmlp(cr) != 0) { 3282 if (tcp->tcp_debug) { 3283 (void) strlog(TCP_MOD_ID, 0, 1, 3284 SL_ERROR|SL_TRACE, 3285 "tcp_bind: no priv for multilevel port %d", 3286 requested_port); 3287 } 3288 tcp_err_ack(tcp, mp, TACCES, 0); 3289 return; 3290 } 3291 3292 /* 3293 * If we're specifically binding a shared IP address and the 3294 * port is MLP on shared addresses, then check to see if this 3295 * zone actually owns the MLP. Reject if not. 3296 */ 3297 if (mlptype == mlptShared && addrtype == mlptShared) { 3298 /* 3299 * No need to handle exclusive-stack zones since 3300 * ALL_ZONES only applies to the shared stack. 
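 *
 * A worked illustration (port 8080 is a made-up example): if the
 * administrator has made port 8080 a shared-address MLP owned by some
 * other zone, then for a bind from this zone
 *
 *	mlpzone = tsol_mlp_findzone(IPPROTO_TCP, htons(8080));
 *
 * returns that other zone's id, connp->conn_zoneid != mlpzone, and
 * the bind is refused with TACCES below.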
3301 */ 3302 zoneid_t mlpzone; 3303 3304 mlpzone = tsol_mlp_findzone(IPPROTO_TCP, 3305 htons(mlp_port)); 3306 if (connp->conn_zoneid != mlpzone) { 3307 if (tcp->tcp_debug) { 3308 (void) strlog(TCP_MOD_ID, 0, 1, 3309 SL_ERROR|SL_TRACE, 3310 "tcp_bind: attempt to bind port " 3311 "%d on shared addr in zone %d " 3312 "(should be %d)", 3313 mlp_port, connp->conn_zoneid, 3314 mlpzone); 3315 } 3316 tcp_err_ack(tcp, mp, TACCES, 0); 3317 return; 3318 } 3319 } 3320 3321 if (!user_specified) { 3322 err = tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3323 requested_port, B_TRUE); 3324 if (err != 0) { 3325 if (tcp->tcp_debug) { 3326 (void) strlog(TCP_MOD_ID, 0, 1, 3327 SL_ERROR|SL_TRACE, 3328 "tcp_bind: cannot establish anon " 3329 "MLP for port %d", 3330 requested_port); 3331 } 3332 tcp_err_ack(tcp, mp, TSYSERR, err); 3333 return; 3334 } 3335 connp->conn_anon_port = B_TRUE; 3336 } 3337 connp->conn_mlp_type = mlptype; 3338 } 3339 3340 allocated_port = tcp_bindi(tcp, requested_port, &v6addr, 3341 tcp->tcp_reuseaddr, B_FALSE, bind_to_req_port_only, user_specified); 3342 3343 if (allocated_port == 0) { 3344 connp->conn_mlp_type = mlptSingle; 3345 if (connp->conn_anon_port) { 3346 connp->conn_anon_port = B_FALSE; 3347 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3348 requested_port, B_FALSE); 3349 } 3350 if (bind_to_req_port_only) { 3351 if (tcp->tcp_debug) { 3352 (void) strlog(TCP_MOD_ID, 0, 1, 3353 SL_ERROR|SL_TRACE, 3354 "tcp_bind: requested addr busy"); 3355 } 3356 tcp_err_ack(tcp, mp, TADDRBUSY, 0); 3357 } else { 3358 /* If we are out of ports, fail the bind. */ 3359 if (tcp->tcp_debug) { 3360 (void) strlog(TCP_MOD_ID, 0, 1, 3361 SL_ERROR|SL_TRACE, 3362 "tcp_bind: out of ports?"); 3363 } 3364 tcp_err_ack(tcp, mp, TNOADDR, 0); 3365 } 3366 return; 3367 } 3368 ASSERT(tcp->tcp_state == TCPS_BOUND); 3369 do_bind: 3370 if (!backlog_update) { 3371 if (tcp->tcp_family == AF_INET) 3372 sin->sin_port = htons(allocated_port); 3373 else 3374 sin6->sin6_port = htons(allocated_port); 3375 } 3376 if (tcp->tcp_family == AF_INET) { 3377 if (tbr->CONIND_number != 0) { 3378 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3379 sizeof (sin_t)); 3380 } else { 3381 /* Just verify the local IP address */ 3382 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, IP_ADDR_LEN); 3383 } 3384 } else { 3385 if (tbr->CONIND_number != 0) { 3386 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3387 sizeof (sin6_t)); 3388 } else { 3389 /* Just verify the local IP address */ 3390 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3391 IPV6_ADDR_LEN); 3392 } 3393 } 3394 if (mp1 == NULL) { 3395 if (connp->conn_anon_port) { 3396 connp->conn_anon_port = B_FALSE; 3397 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3398 requested_port, B_FALSE); 3399 } 3400 connp->conn_mlp_type = mlptSingle; 3401 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3402 return; 3403 } 3404 3405 tbr->PRIM_type = T_BIND_ACK; 3406 mp->b_datap->db_type = M_PCPROTO; 3407 3408 /* Chain in the reply mp for tcp_rput() */ 3409 mp1->b_cont = mp; 3410 mp = mp1; 3411 3412 tcp->tcp_conn_req_max = tbr->CONIND_number; 3413 if (tcp->tcp_conn_req_max) { 3414 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min) 3415 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min; 3416 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q) 3417 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q; 3418 /* 3419 * If this is a listener, do not reset the eager list 3420 * and other stuffs. Note that we don't check if the 3421 * existing eager list meets the new tcp_conn_req_max 3422 * requirement. 
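 *
 * To illustrate the clamping just above with the (assumed) default
 * tunables tcps_conn_req_min = 1 and tcps_conn_req_max_q = 128:
 *
 *	listen(fd, 1024)  ->  CONIND_number = 1024  ->
 *	    tcp_conn_req_max = 128
 *
 * while a nonzero backlog below tcps_conn_req_min is silently raised
 * to that minimum.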
*/
3424 if (tcp->tcp_state != TCPS_LISTEN) {
3425 tcp->tcp_state = TCPS_LISTEN;
3426 /* Initialize the chain. Don't need the eager_lock */
3427 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
3428 tcp->tcp_eager_next_drop_q0 = tcp;
3429 tcp->tcp_eager_prev_drop_q0 = tcp;
3430 tcp->tcp_second_ctimer_threshold =
3431 tcps->tcps_ip_abort_linterval;
3432 }
3433 }
3434
3435 /*
3436 * We can call ip_bind directly which returns a T_BIND_ACK mp. The
3437 * processing continues in tcp_rput_other().
3438 *
3439 * We need to make sure that the conn_recv is set to a non-null
3440 * value before we insert the conn into the classifier table.
3441 * This is to avoid a race with an incoming packet which does an
3442 * ipcl_classify().
3443 */
3444 connp->conn_recv = tcp_conn_request;
3445 if (tcp->tcp_family == AF_INET6) {
3446 ASSERT(tcp->tcp_connp->conn_af_isv6);
3447 mp = ip_bind_v6(q, mp, tcp->tcp_connp, &tcp->tcp_sticky_ipp);
3448 } else {
3449 ASSERT(!tcp->tcp_connp->conn_af_isv6);
3450 mp = ip_bind_v4(q, mp, tcp->tcp_connp);
3451 }
3452 /*
3453 * If the bind cannot complete immediately,
3454 * IP will arrange to call tcp_rput_other
3455 * when the bind completes.
3456 */
3457 if (mp != NULL) {
3458 tcp_rput_other(tcp, mp);
3459 } else {
3460 /*
3461 * Bind will be resumed later. Need to ensure
3462 * that conn doesn't disappear when that happens.
3463 * This will be decremented in ip_resume_tcp_bind().
3464 */
3465 CONN_INC_REF(tcp->tcp_connp);
3466 }
3467 }
3468
3469
3470 /*
3471 * If the "bind_to_req_port_only" parameter is set and the requested port
3472 * number is available, return it; if not, return 0.
3473 *
3474 * If the "bind_to_req_port_only" parameter is not set: if the requested
3475 * port number is available, return it. If not, return
3476 * the first anonymous port we happen across. If no anonymous ports are
3477 * available, return 0. addr is the requested local address, if any.
3478 *
3479 * In either case, on success update the tcp_t to record the port number
3480 * and insert it in the bind hash table.
3481 *
3482 * Note that TCP over IPv4 and IPv6 sockets can use the same port number
3483 * without setting SO_REUSEADDR. This is needed so that they
3484 * can be viewed as two independent transport protocols.
3485 */
3486 static in_port_t
3487 tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
3488 int reuseaddr, boolean_t quick_connect,
3489 boolean_t bind_to_req_port_only, boolean_t user_specified)
3490 {
3491 /* number of times we have run around the loop */
3492 int count = 0;
3493 /* maximum number of times to run around the loop */
3494 int loopmax;
3495 conn_t *connp = tcp->tcp_connp;
3496 zoneid_t zoneid = connp->conn_zoneid;
3497 tcp_stack_t *tcps = tcp->tcp_tcps;
3498
3499 /*
3500 * The lookup for a free address is done in a loop, and "loopmax"
3501 * bounds how long we spin in that loop.
3502 */
3503 if (bind_to_req_port_only) {
3504 /*
3505 * If the requested port is busy, don't bother to look
3506 * for a new one. Setting the loop maximum count to 1 has
3507 * that effect.
3508 */
3509 loopmax = 1;
3510 } else {
3511 /*
3512 * If the requested port is busy, look for a free one
3513 * in the anonymous port range.
3514 * Set loopmax appropriately so that we do not look
3515 * forever in the case where all of the anonymous ports are in use.
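 *
 * Illustrative arithmetic, assuming the default tunables: with
 * tcps_smallest_anon_port = 32768 and tcps_largest_anon_port = 65535,
 *
 *	loopmax = 65535 - 32768 + 1 = 32768 candidate ports;
 *
 * for the privileged-anonymous case below, IPPORT_RESERVED (1024)
 * minus a tcps_min_anonpriv_port of 512 leaves 512 candidates.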
3516 */
3517 if (tcp->tcp_anon_priv_bind) {
3518 /*
3519 * loopmax =
3520 * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1
3521 */
3522 loopmax = IPPORT_RESERVED -
3523 tcps->tcps_min_anonpriv_port;
3524 } else {
3525 loopmax = (tcps->tcps_largest_anon_port -
3526 tcps->tcps_smallest_anon_port + 1);
3527 }
3528 }
3529 do {
3530 uint16_t lport;
3531 tf_t *tbf;
3532 tcp_t *ltcp;
3533 conn_t *lconnp;
3534
3535 lport = htons(port);
3536
3537 /*
3538 * Ensure that the tcp_t is not currently in the bind hash.
3539 * Hold the lock on the hash bucket to ensure that
3540 * the duplicate check plus the insertion is an atomic
3541 * operation.
3542 *
3543 * This function does an inline lookup on the bind hash list.
3544 * Make sure that we access only members of tcp_t
3545 * and that we don't look at tcp_tcp, since we are not
3546 * doing a CONN_INC_REF.
3547 */
3548 tcp_bind_hash_remove(tcp);
3549 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(lport)];
3550 mutex_enter(&tbf->tf_lock);
3551 for (ltcp = tbf->tf_tcp; ltcp != NULL;
3552 ltcp = ltcp->tcp_bind_hash) {
3553 boolean_t not_socket;
3554 boolean_t exclbind;
3555
3556 if (lport != ltcp->tcp_lport)
3557 continue;
3558
3559 lconnp = ltcp->tcp_connp;
3560
3561 /*
3562 * On a labeled system, we must treat bindings to ports
3563 * on shared IP addresses by sockets with MAC exemption
3564 * privilege as being in all zones, as there's
3565 * otherwise no way to identify the right receiver.
3566 */
3567 if (!(IPCL_ZONE_MATCH(ltcp->tcp_connp, zoneid) ||
3568 IPCL_ZONE_MATCH(connp,
3569 ltcp->tcp_connp->conn_zoneid)) &&
3570 !lconnp->conn_mac_exempt &&
3571 !connp->conn_mac_exempt)
3572 continue;
3573
3574 /*
3575 * If TCP_EXCLBIND is set for either the bound or
3576 * binding endpoint, the semantics of bind
3577 * are changed as follows.
3578 *
3579 * spec = specified address (v4 or v6)
3580 * unspec = unspecified address (v4 or v6)
3581 * A = specified addresses are different for endpoints
3582 *
3583 * bound bind to allowed
3584 * -------------------------------------
3585 * unspec unspec no
3586 * unspec spec no
3587 * spec unspec no
3588 * spec spec yes if A
3589 *
3590 * For labeled systems, SO_MAC_EXEMPT behaves the same
3591 * as TCP_EXCLBIND, except that zoneid is ignored.
3592 *
3593 * Note:
3594 *
3595 * 1. Because of TLI semantics, an endpoint can go
3596 * back from, say, TCPS_ESTABLISHED to TCPS_LISTEN or
3597 * TCPS_BOUND, depending on whether it is originally
3598 * a listener or not. That is why we need to check
3599 * for states greater than or equal to TCPS_BOUND
3600 * here.
3601 *
3602 * 2. Ideally, we should only check for state equal
3603 * to TCPS_LISTEN, and the following check should be
3604 * added.
3605 *
3606 * if (ltcp->tcp_state == TCPS_LISTEN ||
3607 * !reuseaddr || !ltcp->tcp_reuseaddr) {
3608 * ...
3609 * }
3610 *
3611 * The semantics would then become: if the
3612 * endpoint on the list is in a state other than
3613 * TCPS_LISTEN and both endpoints have SO_REUSEADDR
3614 * set, let the bind succeed.
3615 *
3616 * Because of (1), we cannot do that for TLI
3617 * endpoints. But we can do that for socket endpoints.
3618 * If, in the future, we can change these going-back
3619 * semantics, we can use the above check for TLI also.
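 *
 * For completeness, the exclusive semantics in the table above are
 * what a (typically privileged) application requests from userland
 * with a sketch like this (illustrative only):
 *
 *	int on = 1;
 *
 *	(void) setsockopt(fd, IPPROTO_TCP, TCP_EXCLBIND, &on,
 *	    sizeof (on));
 *	(void) bind(fd, (struct sockaddr *)&sin, sizeof (sin));
 *
 * after which a second bind to the same port is refused per the
 * table, even if both endpoints set SO_REUSEADDR.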
3620 */
3621 not_socket = !(TCP_IS_SOCKET(ltcp) &&
3622 TCP_IS_SOCKET(tcp));
3623 exclbind = ltcp->tcp_exclbind || tcp->tcp_exclbind;
3624
3625 if (lconnp->conn_mac_exempt || connp->conn_mac_exempt ||
3626 (exclbind && (not_socket ||
3627 ltcp->tcp_state <= TCPS_ESTABLISHED))) {
3628 if (V6_OR_V4_INADDR_ANY(
3629 ltcp->tcp_bound_source_v6) ||
3630 V6_OR_V4_INADDR_ANY(*laddr) ||
3631 IN6_ARE_ADDR_EQUAL(laddr,
3632 &ltcp->tcp_bound_source_v6)) {
3633 break;
3634 }
3635 continue;
3636 }
3637
3638 /*
3639 * Check ipversion to allow IPv4 and IPv6 sockets to
3640 * have disjoint port number spaces, if *_EXCLBIND
3641 * is not set and only if the application binds to a
3642 * specific port. We use the same autoassigned port
3643 * number space for IPv4 and IPv6 sockets.
3644 */
3645 if (tcp->tcp_ipversion != ltcp->tcp_ipversion &&
3646 bind_to_req_port_only)
3647 continue;
3648
3649 /*
3650 * Ideally, we should make sure that the source
3651 * address, remote address, and remote port in the
3652 * four tuple for this TCP connection are unique.
3653 * However, trying to find out the local source
3654 * address would require too much code duplication
3655 * with IP, since IP needs to have that code
3656 * to support userland TCP implementations.
3657 */
3658 if (quick_connect &&
3659 (ltcp->tcp_state > TCPS_LISTEN) &&
3660 ((tcp->tcp_fport != ltcp->tcp_fport) ||
3661 !IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6,
3662 &ltcp->tcp_remote_v6)))
3663 continue;
3664
3665 if (!reuseaddr) {
3666 /*
3667 * No socket option SO_REUSEADDR.
3668 * If the existing port is bound to
3669 * a non-wildcard IP address
3670 * and the requesting stream is
3671 * bound to a distinct
3672 * (also non-wildcard) IP
3673 * address, keep
3674 * going.
3675 */
3676 if (!V6_OR_V4_INADDR_ANY(*laddr) &&
3677 !V6_OR_V4_INADDR_ANY(
3678 ltcp->tcp_bound_source_v6) &&
3679 !IN6_ARE_ADDR_EQUAL(laddr,
3680 &ltcp->tcp_bound_source_v6))
3681 continue;
3682 if (ltcp->tcp_state >= TCPS_BOUND) {
3683 /*
3684 * This port is being used and
3685 * its state is >= TCPS_BOUND,
3686 * so we can't bind to it.
3687 */
3688 break;
3689 }
3690 } else {
3691 /*
3692 * The socket option SO_REUSEADDR is set on the
3693 * binding tcp_t.
3694 *
3695 * If two streams are bound to
3696 * the same IP address or both addr
3697 * and bound source are wildcards
3698 * (INADDR_ANY), we want to stop
3699 * searching.
3700 * We have found a match of IP source
3701 * address and source port, which is
3702 * refused regardless of the
3703 * SO_REUSEADDR setting, so we break.
3704 */
3705 if (IN6_ARE_ADDR_EQUAL(laddr,
3706 &ltcp->tcp_bound_source_v6) &&
3707 (ltcp->tcp_state == TCPS_LISTEN ||
3708 ltcp->tcp_state == TCPS_BOUND))
3709 break;
3710 }
3711 }
3712 if (ltcp != NULL) {
3713 /* The port number is busy */
3714 mutex_exit(&tbf->tf_lock);
3715 } else {
3716 /*
3717 * This port is ours. Insert in fanout and mark as
3718 * bound to prevent others from getting the port
3719 * number.
3720 */
3721 tcp->tcp_state = TCPS_BOUND;
3722 tcp->tcp_lport = htons(port);
3723 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport;
3724
3725 ASSERT(&tcps->tcps_bind_fanout[TCP_BIND_HASH(
3726 tcp->tcp_lport)] == tbf);
3727 tcp_bind_hash_insert(tbf, tcp, 1);
3728
3729 mutex_exit(&tbf->tf_lock);
3730
3731 /*
3732 * We don't want tcp_next_port_to_try to "inherit"
3733 * a port number supplied by the user in a bind.
3734 */
3735 if (user_specified)
3736 return (port);
3737
3738 /*
3739 * This is the only place where tcp_next_port_to_try
3740 * is updated.
After the update, it may or may not 3741 * be in the valid range. 3742 */ 3743 if (!tcp->tcp_anon_priv_bind) 3744 tcps->tcps_next_port_to_try = port + 1; 3745 return (port); 3746 } 3747 3748 if (tcp->tcp_anon_priv_bind) { 3749 port = tcp_get_next_priv_port(tcp); 3750 } else { 3751 if (count == 0 && user_specified) { 3752 /* 3753 * We may have to return an anonymous port. So 3754 * get one to start with. 3755 */ 3756 port = 3757 tcp_update_next_port( 3758 tcps->tcps_next_port_to_try, 3759 tcp, B_TRUE); 3760 user_specified = B_FALSE; 3761 } else { 3762 port = tcp_update_next_port(port + 1, tcp, 3763 B_FALSE); 3764 } 3765 } 3766 if (port == 0) 3767 break; 3768 3769 /* 3770 * Don't let this loop run forever in the case where 3771 * all of the anonymous ports are in use. 3772 */ 3773 } while (++count < loopmax); 3774 return (0); 3775 } 3776 3777 /* 3778 * tcp_clean_death / tcp_close_detached must not be called more than once 3779 * on a tcp. Thus every function that potentially calls tcp_clean_death 3780 * must check for the tcp state before calling tcp_clean_death. 3781 * Eg. tcp_input, tcp_rput_data, tcp_eager_kill, tcp_clean_death_wrapper, 3782 * tcp_timer_handler, all check for the tcp state. 3783 */ 3784 /* ARGSUSED */ 3785 void 3786 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2) 3787 { 3788 tcp_t *tcp = ((conn_t *)arg)->conn_tcp; 3789 3790 freemsg(mp); 3791 if (tcp->tcp_state > TCPS_BOUND) 3792 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, 3793 ETIMEDOUT, 5); 3794 } 3795 3796 /* 3797 * We are dying for some reason. Try to do it gracefully. (May be called 3798 * as writer.) 3799 * 3800 * Return -1 if the structure was not cleaned up (if the cleanup had to be 3801 * done by a service procedure). 3802 * TBD - Should the return value distinguish between the tcp_t being 3803 * freed and it being reinitialized? 3804 */ 3805 static int 3806 tcp_clean_death(tcp_t *tcp, int err, uint8_t tag) 3807 { 3808 mblk_t *mp; 3809 queue_t *q; 3810 tcp_stack_t *tcps = tcp->tcp_tcps; 3811 3812 TCP_CLD_STAT(tag); 3813 3814 #if TCP_TAG_CLEAN_DEATH 3815 tcp->tcp_cleandeathtag = tag; 3816 #endif 3817 3818 if (tcp->tcp_fused) 3819 tcp_unfuse(tcp); 3820 3821 if (tcp->tcp_linger_tid != 0 && 3822 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 3823 tcp_stop_lingering(tcp); 3824 } 3825 3826 ASSERT(tcp != NULL); 3827 ASSERT((tcp->tcp_family == AF_INET && 3828 tcp->tcp_ipversion == IPV4_VERSION) || 3829 (tcp->tcp_family == AF_INET6 && 3830 (tcp->tcp_ipversion == IPV4_VERSION || 3831 tcp->tcp_ipversion == IPV6_VERSION))); 3832 3833 if (TCP_IS_DETACHED(tcp)) { 3834 if (tcp->tcp_hard_binding) { 3835 /* 3836 * Its an eager that we are dealing with. We close the 3837 * eager but in case a conn_ind has already gone to the 3838 * listener, let tcp_accept_finish() send a discon_ind 3839 * to the listener and drop the last reference. If the 3840 * listener doesn't even know about the eager i.e. the 3841 * conn_ind hasn't gone up, blow away the eager and drop 3842 * the last reference as well. If the conn_ind has gone 3843 * up, state should be BOUND. tcp_accept_finish 3844 * will figure out that the connection has received a 3845 * RST and will send a DISCON_IND to the application. 
3846 */ 3847 tcp_closei_local(tcp); 3848 if (!tcp->tcp_tconnind_started) { 3849 CONN_DEC_REF(tcp->tcp_connp); 3850 } else { 3851 tcp->tcp_state = TCPS_BOUND; 3852 } 3853 } else { 3854 tcp_close_detached(tcp); 3855 } 3856 return (0); 3857 } 3858 3859 TCP_STAT(tcps, tcp_clean_death_nondetached); 3860 3861 /* 3862 * If T_ORDREL_IND has not been sent yet (done when service routine 3863 * is run) postpone cleaning up the endpoint until service routine 3864 * has sent up the T_ORDREL_IND. Avoid clearing out an existing 3865 * client_errno since tcp_close uses the client_errno field. 3866 */ 3867 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 3868 if (err != 0) 3869 tcp->tcp_client_errno = err; 3870 3871 tcp->tcp_deferred_clean_death = B_TRUE; 3872 return (-1); 3873 } 3874 3875 q = tcp->tcp_rq; 3876 3877 /* Trash all inbound data */ 3878 flushq(q, FLUSHALL); 3879 3880 /* 3881 * If we are at least part way open and there is error 3882 * (err==0 implies no error) 3883 * notify our client by a T_DISCON_IND. 3884 */ 3885 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) { 3886 if (tcp->tcp_state >= TCPS_ESTABLISHED && 3887 !TCP_IS_SOCKET(tcp)) { 3888 /* 3889 * Send M_FLUSH according to TPI. Because sockets will 3890 * (and must) ignore FLUSHR we do that only for TPI 3891 * endpoints and sockets in STREAMS mode. 3892 */ 3893 (void) putnextctl1(q, M_FLUSH, FLUSHR); 3894 } 3895 if (tcp->tcp_debug) { 3896 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 3897 "tcp_clean_death: discon err %d", err); 3898 } 3899 mp = mi_tpi_discon_ind(NULL, err, 0); 3900 if (mp != NULL) { 3901 putnext(q, mp); 3902 } else { 3903 if (tcp->tcp_debug) { 3904 (void) strlog(TCP_MOD_ID, 0, 1, 3905 SL_ERROR|SL_TRACE, 3906 "tcp_clean_death, sending M_ERROR"); 3907 } 3908 (void) putnextctl1(q, M_ERROR, EPROTO); 3909 } 3910 if (tcp->tcp_state <= TCPS_SYN_RCVD) { 3911 /* SYN_SENT or SYN_RCVD */ 3912 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 3913 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) { 3914 /* ESTABLISHED or CLOSE_WAIT */ 3915 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 3916 } 3917 } 3918 3919 tcp_reinit(tcp); 3920 return (-1); 3921 } 3922 3923 /* 3924 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout 3925 * to expire, stop the wait and finish the close. 3926 */ 3927 static void 3928 tcp_stop_lingering(tcp_t *tcp) 3929 { 3930 clock_t delta = 0; 3931 tcp_stack_t *tcps = tcp->tcp_tcps; 3932 3933 tcp->tcp_linger_tid = 0; 3934 if (tcp->tcp_state > TCPS_LISTEN) { 3935 tcp_acceptor_hash_remove(tcp); 3936 mutex_enter(&tcp->tcp_non_sq_lock); 3937 if (tcp->tcp_flow_stopped) { 3938 tcp_clrqfull(tcp); 3939 } 3940 mutex_exit(&tcp->tcp_non_sq_lock); 3941 3942 if (tcp->tcp_timer_tid != 0) { 3943 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 3944 tcp->tcp_timer_tid = 0; 3945 } 3946 /* 3947 * Need to cancel those timers which will not be used when 3948 * TCP is detached. This has to be done before the tcp_wq 3949 * is set to the global queue. 3950 */ 3951 tcp_timers_stop(tcp); 3952 3953 3954 tcp->tcp_detached = B_TRUE; 3955 ASSERT(tcps->tcps_g_q != NULL); 3956 tcp->tcp_rq = tcps->tcps_g_q; 3957 tcp->tcp_wq = WR(tcps->tcps_g_q); 3958 3959 if (tcp->tcp_state == TCPS_TIME_WAIT) { 3960 tcp_time_wait_append(tcp); 3961 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 3962 goto finish; 3963 } 3964 3965 /* 3966 * If delta is zero the timer event wasn't executed and was 3967 * successfully canceled. In this case we need to restart it 3968 * with the minimal delta possible. 
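 *
 * A sketch of the contract with TCP_TIMER_CANCEL (a wrapper around
 * untimeout(9F)) as used here:
 *
 *	delta < 0	timer did not exist or has already fired;
 *			nothing to restart
 *	delta == 0	canceled with no time left; rearm with 1 tick
 *	delta > 0	canceled early; rearm with the remaining ticks
 *
 * which is exactly the "delta ? delta : 1" below.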
3969 */ 3970 if (delta >= 0) { 3971 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 3972 delta ? delta : 1); 3973 } 3974 } else { 3975 tcp_closei_local(tcp); 3976 CONN_DEC_REF(tcp->tcp_connp); 3977 } 3978 finish: 3979 /* Signal closing thread that it can complete close */ 3980 mutex_enter(&tcp->tcp_closelock); 3981 tcp->tcp_detached = B_TRUE; 3982 ASSERT(tcps->tcps_g_q != NULL); 3983 tcp->tcp_rq = tcps->tcps_g_q; 3984 tcp->tcp_wq = WR(tcps->tcps_g_q); 3985 tcp->tcp_closed = 1; 3986 cv_signal(&tcp->tcp_closecv); 3987 mutex_exit(&tcp->tcp_closelock); 3988 } 3989 3990 /* 3991 * Handle lingering timeouts. This function is called when the SO_LINGER timeout 3992 * expires. 3993 */ 3994 static void 3995 tcp_close_linger_timeout(void *arg) 3996 { 3997 conn_t *connp = (conn_t *)arg; 3998 tcp_t *tcp = connp->conn_tcp; 3999 4000 tcp->tcp_client_errno = ETIMEDOUT; 4001 tcp_stop_lingering(tcp); 4002 } 4003 4004 static int 4005 tcp_close(queue_t *q, int flags) 4006 { 4007 conn_t *connp = Q_TO_CONN(q); 4008 tcp_t *tcp = connp->conn_tcp; 4009 mblk_t *mp = &tcp->tcp_closemp; 4010 boolean_t conn_ioctl_cleanup_reqd = B_FALSE; 4011 mblk_t *bp; 4012 4013 ASSERT(WR(q)->q_next == NULL); 4014 ASSERT(connp->conn_ref >= 2); 4015 4016 /* 4017 * We are being closed as /dev/tcp or /dev/tcp6. 4018 * 4019 * Mark the conn as closing. ill_pending_mp_add will not 4020 * add any mp to the pending mp list, after this conn has 4021 * started closing. Same for sq_pending_mp_add 4022 */ 4023 mutex_enter(&connp->conn_lock); 4024 connp->conn_state_flags |= CONN_CLOSING; 4025 if (connp->conn_oper_pending_ill != NULL) 4026 conn_ioctl_cleanup_reqd = B_TRUE; 4027 CONN_INC_REF_LOCKED(connp); 4028 mutex_exit(&connp->conn_lock); 4029 tcp->tcp_closeflags = (uint8_t)flags; 4030 ASSERT(connp->conn_ref >= 3); 4031 4032 /* 4033 * tcp_closemp_used is used below without any protection of a lock 4034 * as we don't expect any one else to use it concurrently at this 4035 * point otherwise it would be a major defect. 4036 */ 4037 4038 if (mp->b_prev == NULL) 4039 tcp->tcp_closemp_used = B_TRUE; 4040 else 4041 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: " 4042 "connp %p tcp %p\n", (void *)connp, (void *)tcp); 4043 4044 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 4045 4046 (*tcp_squeue_close_proc)(connp->conn_sqp, mp, 4047 tcp_close_output, connp, SQTAG_IP_TCP_CLOSE); 4048 4049 mutex_enter(&tcp->tcp_closelock); 4050 while (!tcp->tcp_closed) { 4051 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) { 4052 /* 4053 * The cv_wait_sig() was interrupted. We now do the 4054 * following: 4055 * 4056 * 1) If the endpoint was lingering, we allow this 4057 * to be interrupted by cancelling the linger timeout 4058 * and closing normally. 4059 * 4060 * 2) Revert to calling cv_wait() 4061 * 4062 * We revert to using cv_wait() to avoid an 4063 * infinite loop which can occur if the calling 4064 * thread is higher priority than the squeue worker 4065 * thread and is bound to the same cpu. 4066 */ 4067 if (tcp->tcp_linger && tcp->tcp_lingertime > 0) { 4068 mutex_exit(&tcp->tcp_closelock); 4069 /* Entering squeue, bump ref count. 
*/ 4070 CONN_INC_REF(connp); 4071 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL); 4072 squeue_enter(connp->conn_sqp, bp, 4073 tcp_linger_interrupted, connp, 4074 SQTAG_IP_TCP_CLOSE); 4075 mutex_enter(&tcp->tcp_closelock); 4076 } 4077 break; 4078 } 4079 } 4080 while (!tcp->tcp_closed) 4081 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock); 4082 mutex_exit(&tcp->tcp_closelock); 4083 4084 /* 4085 * In the case of listener streams that have eagers in the q or q0 4086 * we wait for the eagers to drop their reference to us. tcp_rq and 4087 * tcp_wq of the eagers point to our queues. By waiting for the 4088 * refcnt to drop to 1, we are sure that the eagers have cleaned 4089 * up their queue pointers and also dropped their references to us. 4090 */ 4091 if (tcp->tcp_wait_for_eagers) { 4092 mutex_enter(&connp->conn_lock); 4093 while (connp->conn_ref != 1) { 4094 cv_wait(&connp->conn_cv, &connp->conn_lock); 4095 } 4096 mutex_exit(&connp->conn_lock); 4097 } 4098 /* 4099 * ioctl cleanup. The mp is queued in the 4100 * ill_pending_mp or in the sq_pending_mp. 4101 */ 4102 if (conn_ioctl_cleanup_reqd) 4103 conn_ioctl_cleanup(connp); 4104 4105 qprocsoff(q); 4106 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 4107 4108 tcp->tcp_cpid = -1; 4109 4110 /* 4111 * Drop IP's reference on the conn. This is the last reference 4112 * on the connp if the state was less than established. If the 4113 * connection has gone into timewait state, then we will have 4114 * one ref for the TCP and one more ref (total of two) for the 4115 * classifier connected hash list (a timewait connections stays 4116 * in connected hash till closed). 4117 * 4118 * We can't assert the references because there might be other 4119 * transient reference places because of some walkers or queued 4120 * packets in squeue for the timewait state. 4121 */ 4122 CONN_DEC_REF(connp); 4123 q->q_ptr = WR(q)->q_ptr = NULL; 4124 return (0); 4125 } 4126 4127 static int 4128 tcpclose_accept(queue_t *q) 4129 { 4130 vmem_t *minor_arena; 4131 dev_t conn_dev; 4132 4133 ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit); 4134 4135 /* 4136 * We had opened an acceptor STREAM for sockfs which is 4137 * now being closed due to some error. 4138 */ 4139 qprocsoff(q); 4140 4141 minor_arena = (vmem_t *)WR(q)->q_ptr; 4142 conn_dev = (dev_t)RD(q)->q_ptr; 4143 ASSERT(minor_arena != NULL); 4144 ASSERT(conn_dev != 0); 4145 inet_minor_free(minor_arena, conn_dev); 4146 q->q_ptr = WR(q)->q_ptr = NULL; 4147 return (0); 4148 } 4149 4150 /* 4151 * Called by tcp_close() routine via squeue when lingering is 4152 * interrupted by a signal. 4153 */ 4154 4155 /* ARGSUSED */ 4156 static void 4157 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2) 4158 { 4159 conn_t *connp = (conn_t *)arg; 4160 tcp_t *tcp = connp->conn_tcp; 4161 4162 freeb(mp); 4163 if (tcp->tcp_linger_tid != 0 && 4164 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 4165 tcp_stop_lingering(tcp); 4166 tcp->tcp_client_errno = EINTR; 4167 } 4168 } 4169 4170 /* 4171 * Called by streams close routine via squeues when our client blows off her 4172 * descriptor, we take this to mean: "close the stream state NOW, close the tcp 4173 * connection politely" When SO_LINGER is set (with a non-zero linger time and 4174 * it is not a nonblocking socket) then this routine sleeps until the FIN is 4175 * acked. 4176 * 4177 * NOTE: tcp_close potentially returns error when lingering. 4178 * However, the stream head currently does not pass these errors 4179 * to the application. 
4.4BSD only returns EINTR and EWOULDBLOCK 4180 * errors to the application (from tsleep()) and not errors 4181 * like ECONNRESET caused by receiving a reset packet. 4182 */ 4183 4184 /* ARGSUSED */ 4185 static void 4186 tcp_close_output(void *arg, mblk_t *mp, void *arg2) 4187 { 4188 char *msg; 4189 conn_t *connp = (conn_t *)arg; 4190 tcp_t *tcp = connp->conn_tcp; 4191 clock_t delta = 0; 4192 tcp_stack_t *tcps = tcp->tcp_tcps; 4193 4194 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 4195 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 4196 4197 /* Cancel any pending timeout */ 4198 if (tcp->tcp_ordrelid != 0) { 4199 if (tcp->tcp_timeout) { 4200 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ordrelid); 4201 } 4202 tcp->tcp_ordrelid = 0; 4203 tcp->tcp_timeout = B_FALSE; 4204 } 4205 4206 mutex_enter(&tcp->tcp_eager_lock); 4207 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 4208 /* Cleanup for listener */ 4209 tcp_eager_cleanup(tcp, 0); 4210 tcp->tcp_wait_for_eagers = 1; 4211 } 4212 mutex_exit(&tcp->tcp_eager_lock); 4213 4214 connp->conn_mdt_ok = B_FALSE; 4215 tcp->tcp_mdt = B_FALSE; 4216 4217 connp->conn_lso_ok = B_FALSE; 4218 tcp->tcp_lso = B_FALSE; 4219 4220 msg = NULL; 4221 switch (tcp->tcp_state) { 4222 case TCPS_CLOSED: 4223 case TCPS_IDLE: 4224 case TCPS_BOUND: 4225 case TCPS_LISTEN: 4226 break; 4227 case TCPS_SYN_SENT: 4228 msg = "tcp_close, during connect"; 4229 break; 4230 case TCPS_SYN_RCVD: 4231 /* 4232 * Close during the connect 3-way handshake 4233 * but here there may or may not be pending data 4234 * already on queue. Process almost same as in 4235 * the ESTABLISHED state. 4236 */ 4237 /* FALLTHRU */ 4238 default: 4239 if (tcp->tcp_fused) 4240 tcp_unfuse(tcp); 4241 4242 /* 4243 * If SO_LINGER has set a zero linger time, abort the 4244 * connection with a reset. 4245 */ 4246 if (tcp->tcp_linger && tcp->tcp_lingertime == 0) { 4247 msg = "tcp_close, zero lingertime"; 4248 break; 4249 } 4250 4251 ASSERT(tcp->tcp_hard_bound || tcp->tcp_hard_binding); 4252 /* 4253 * Abort connection if there is unread data queued. 4254 */ 4255 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) { 4256 msg = "tcp_close, unread data"; 4257 break; 4258 } 4259 /* 4260 * tcp_hard_bound is now cleared thus all packets go through 4261 * tcp_lookup. This fact is used by tcp_detach below. 4262 * 4263 * We have done a qwait() above which could have possibly 4264 * drained more messages in turn causing transition to a 4265 * different state. Check whether we have to do the rest 4266 * of the processing or not. 4267 */ 4268 if (tcp->tcp_state <= TCPS_LISTEN) 4269 break; 4270 4271 /* 4272 * Transmit the FIN before detaching the tcp_t. 4273 * After tcp_detach returns this queue/perimeter 4274 * no longer owns the tcp_t thus others can modify it. 4275 */ 4276 (void) tcp_xmit_end(tcp); 4277 4278 /* 4279 * If lingering on close then wait until the fin is acked, 4280 * the SO_LINGER time passes, or a reset is sent/received. 
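 *
 * For reference, the application opts into this wait through the
 * usual socket interface (userland sketch, illustrative only):
 *
 *	struct linger ll;
 *
 *	ll.l_onoff = 1;
 *	ll.l_linger = 10;	(wait up to 10 seconds for the FIN ack)
 *	(void) setsockopt(fd, SOL_SOCKET, SO_LINGER, &ll, sizeof (ll));
 *	(void) close(fd);	(may now block in tcp_close())
 *
 * A nonblocking socket gets EWOULDBLOCK instead, and l_linger == 0 is
 * the abortive close handled earlier above.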
4281 */ 4282 if (tcp->tcp_linger && tcp->tcp_lingertime > 0 && 4283 !(tcp->tcp_fin_acked) && 4284 tcp->tcp_state >= TCPS_ESTABLISHED) { 4285 if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) { 4286 tcp->tcp_client_errno = EWOULDBLOCK; 4287 } else if (tcp->tcp_client_errno == 0) { 4288 4289 ASSERT(tcp->tcp_linger_tid == 0); 4290 4291 tcp->tcp_linger_tid = TCP_TIMER(tcp, 4292 tcp_close_linger_timeout, 4293 tcp->tcp_lingertime * hz); 4294 4295 /* tcp_close_linger_timeout will finish close */ 4296 if (tcp->tcp_linger_tid == 0) 4297 tcp->tcp_client_errno = ENOSR; 4298 else 4299 return; 4300 } 4301 4302 /* 4303 * Check if we need to detach or just close 4304 * the instance. 4305 */ 4306 if (tcp->tcp_state <= TCPS_LISTEN) 4307 break; 4308 } 4309 4310 /* 4311 * Make sure that no other thread will access the tcp_rq of 4312 * this instance (through lookups etc.) as tcp_rq will go 4313 * away shortly. 4314 */ 4315 tcp_acceptor_hash_remove(tcp); 4316 4317 mutex_enter(&tcp->tcp_non_sq_lock); 4318 if (tcp->tcp_flow_stopped) { 4319 tcp_clrqfull(tcp); 4320 } 4321 mutex_exit(&tcp->tcp_non_sq_lock); 4322 4323 if (tcp->tcp_timer_tid != 0) { 4324 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4325 tcp->tcp_timer_tid = 0; 4326 } 4327 /* 4328 * Need to cancel those timers which will not be used when 4329 * TCP is detached. This has to be done before the tcp_wq 4330 * is set to the global queue. 4331 */ 4332 tcp_timers_stop(tcp); 4333 4334 tcp->tcp_detached = B_TRUE; 4335 if (tcp->tcp_state == TCPS_TIME_WAIT) { 4336 tcp_time_wait_append(tcp); 4337 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 4338 ASSERT(connp->conn_ref >= 3); 4339 goto finish; 4340 } 4341 4342 /* 4343 * If delta is zero the timer event wasn't executed and was 4344 * successfully canceled. In this case we need to restart it 4345 * with the minimal delta possible. 4346 */ 4347 if (delta >= 0) 4348 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 4349 delta ? delta : 1); 4350 4351 ASSERT(connp->conn_ref >= 3); 4352 goto finish; 4353 } 4354 4355 /* Detach did not complete. Still need to remove q from stream. */ 4356 if (msg) { 4357 if (tcp->tcp_state == TCPS_ESTABLISHED || 4358 tcp->tcp_state == TCPS_CLOSE_WAIT) 4359 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 4360 if (tcp->tcp_state == TCPS_SYN_SENT || 4361 tcp->tcp_state == TCPS_SYN_RCVD) 4362 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 4363 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST); 4364 } 4365 4366 tcp_closei_local(tcp); 4367 CONN_DEC_REF(connp); 4368 ASSERT(connp->conn_ref >= 2); 4369 4370 finish: 4371 /* 4372 * Although packets are always processed on the correct 4373 * tcp's perimeter and access is serialized via squeue's, 4374 * IP still needs a queue when sending packets in time_wait 4375 * state so use WR(tcps_g_q) till ip_output() can be 4376 * changed to deal with just connp. For read side, we 4377 * could have set tcp_rq to NULL but there are some cases 4378 * in tcp_rput_data() from early days of this code which 4379 * do a putnext without checking if tcp is closed. Those 4380 * need to be identified before both tcp_rq and tcp_wq 4381 * can be set to NULL and tcps_g_q can disappear forever. 4382 */ 4383 mutex_enter(&tcp->tcp_closelock); 4384 /* 4385 * Don't change the queues in the case of a listener that has 4386 * eagers in its q or q0. It could surprise the eagers. 4387 * Instead wait for the eagers outside the squeue. 
4388 */ 4389 if (!tcp->tcp_wait_for_eagers) { 4390 tcp->tcp_detached = B_TRUE; 4391 /* 4392 * When default queue is closing we set tcps_g_q to NULL 4393 * after the close is done. 4394 */ 4395 ASSERT(tcps->tcps_g_q != NULL); 4396 tcp->tcp_rq = tcps->tcps_g_q; 4397 tcp->tcp_wq = WR(tcps->tcps_g_q); 4398 } 4399 4400 /* Signal tcp_close() to finish closing. */ 4401 tcp->tcp_closed = 1; 4402 cv_signal(&tcp->tcp_closecv); 4403 mutex_exit(&tcp->tcp_closelock); 4404 } 4405 4406 4407 /* 4408 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp. 4409 * Some stream heads get upset if they see these later on as anything but NULL. 4410 */ 4411 static void 4412 tcp_close_mpp(mblk_t **mpp) 4413 { 4414 mblk_t *mp; 4415 4416 if ((mp = *mpp) != NULL) { 4417 do { 4418 mp->b_next = NULL; 4419 mp->b_prev = NULL; 4420 } while ((mp = mp->b_cont) != NULL); 4421 4422 mp = *mpp; 4423 *mpp = NULL; 4424 freemsg(mp); 4425 } 4426 } 4427 4428 /* Do detached close. */ 4429 static void 4430 tcp_close_detached(tcp_t *tcp) 4431 { 4432 if (tcp->tcp_fused) 4433 tcp_unfuse(tcp); 4434 4435 /* 4436 * Clustering code serializes TCP disconnect callbacks and 4437 * cluster tcp list walks by blocking a TCP disconnect callback 4438 * if a cluster tcp list walk is in progress. This ensures 4439 * accurate accounting of TCPs in the cluster code even though 4440 * the TCP list walk itself is not atomic. 4441 */ 4442 tcp_closei_local(tcp); 4443 CONN_DEC_REF(tcp->tcp_connp); 4444 } 4445 4446 /* 4447 * Stop all TCP timers, and free the timer mblks if requested. 4448 */ 4449 void 4450 tcp_timers_stop(tcp_t *tcp) 4451 { 4452 if (tcp->tcp_timer_tid != 0) { 4453 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4454 tcp->tcp_timer_tid = 0; 4455 } 4456 if (tcp->tcp_ka_tid != 0) { 4457 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ka_tid); 4458 tcp->tcp_ka_tid = 0; 4459 } 4460 if (tcp->tcp_ack_tid != 0) { 4461 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4462 tcp->tcp_ack_tid = 0; 4463 } 4464 if (tcp->tcp_push_tid != 0) { 4465 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 4466 tcp->tcp_push_tid = 0; 4467 } 4468 } 4469 4470 /* 4471 * The tcp_t is going away. Remove it from all lists and set it 4472 * to TCPS_CLOSED. The freeing up of memory is deferred until 4473 * tcp_inactive. This is needed since a thread in tcp_rput might have 4474 * done a CONN_INC_REF on this structure before it was removed from the 4475 * hashes. 4476 */ 4477 static void 4478 tcp_closei_local(tcp_t *tcp) 4479 { 4480 ire_t *ire; 4481 conn_t *connp = tcp->tcp_connp; 4482 tcp_stack_t *tcps = tcp->tcp_tcps; 4483 4484 if (!TCP_IS_SOCKET(tcp)) 4485 tcp_acceptor_hash_remove(tcp); 4486 4487 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 4488 tcp->tcp_ibsegs = 0; 4489 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 4490 tcp->tcp_obsegs = 0; 4491 4492 /* 4493 * If we are an eager connection hanging off a listener that 4494 * hasn't formally accepted the connection yet, get off his 4495 * list and blow off any data that we have accumulated. 4496 */ 4497 if (tcp->tcp_listener != NULL) { 4498 tcp_t *listener = tcp->tcp_listener; 4499 mutex_enter(&listener->tcp_eager_lock); 4500 /* 4501 * tcp_tconnind_started == B_TRUE means that the 4502 * conn_ind has already gone to listener. At 4503 * this point, eager will be closed but we 4504 * leave it in listeners eager list so that 4505 * if listener decides to close without doing 4506 * accept, we can clean this up. In tcp_wput_accept 4507 * we take care of the case of accept on closed 4508 * eager. 
4509 */ 4510 if (!tcp->tcp_tconnind_started) { 4511 tcp_eager_unlink(tcp); 4512 mutex_exit(&listener->tcp_eager_lock); 4513 /* 4514 * We don't want to have any pointers to the 4515 * listener queue, after we have released our 4516 * reference on the listener 4517 */ 4518 ASSERT(tcps->tcps_g_q != NULL); 4519 tcp->tcp_rq = tcps->tcps_g_q; 4520 tcp->tcp_wq = WR(tcps->tcps_g_q); 4521 CONN_DEC_REF(listener->tcp_connp); 4522 } else { 4523 mutex_exit(&listener->tcp_eager_lock); 4524 } 4525 } 4526 4527 /* Stop all the timers */ 4528 tcp_timers_stop(tcp); 4529 4530 if (tcp->tcp_state == TCPS_LISTEN) { 4531 if (tcp->tcp_ip_addr_cache) { 4532 kmem_free((void *)tcp->tcp_ip_addr_cache, 4533 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 4534 tcp->tcp_ip_addr_cache = NULL; 4535 } 4536 } 4537 mutex_enter(&tcp->tcp_non_sq_lock); 4538 if (tcp->tcp_flow_stopped) 4539 tcp_clrqfull(tcp); 4540 mutex_exit(&tcp->tcp_non_sq_lock); 4541 4542 tcp_bind_hash_remove(tcp); 4543 /* 4544 * If the tcp_time_wait_collector (which runs outside the squeue) 4545 * is trying to remove this tcp from the time wait list, we will 4546 * block in tcp_time_wait_remove while trying to acquire the 4547 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also 4548 * requires the ipcl_hash_remove to be ordered after the 4549 * tcp_time_wait_remove for the refcnt checks to work correctly. 4550 */ 4551 if (tcp->tcp_state == TCPS_TIME_WAIT) 4552 (void) tcp_time_wait_remove(tcp, NULL); 4553 CL_INET_DISCONNECT(tcp); 4554 ipcl_hash_remove(connp); 4555 4556 /* 4557 * Delete the cached ire in conn_ire_cache and also mark 4558 * the conn as CONDEMNED 4559 */ 4560 mutex_enter(&connp->conn_lock); 4561 connp->conn_state_flags |= CONN_CONDEMNED; 4562 ire = connp->conn_ire_cache; 4563 connp->conn_ire_cache = NULL; 4564 mutex_exit(&connp->conn_lock); 4565 if (ire != NULL) 4566 IRE_REFRELE_NOTR(ire); 4567 4568 /* Need to cleanup any pending ioctls */ 4569 ASSERT(tcp->tcp_time_wait_next == NULL); 4570 ASSERT(tcp->tcp_time_wait_prev == NULL); 4571 ASSERT(tcp->tcp_time_wait_expire == 0); 4572 tcp->tcp_state = TCPS_CLOSED; 4573 4574 /* Release any SSL context */ 4575 if (tcp->tcp_kssl_ent != NULL) { 4576 kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY); 4577 tcp->tcp_kssl_ent = NULL; 4578 } 4579 if (tcp->tcp_kssl_ctx != NULL) { 4580 kssl_release_ctx(tcp->tcp_kssl_ctx); 4581 tcp->tcp_kssl_ctx = NULL; 4582 } 4583 tcp->tcp_kssl_pending = B_FALSE; 4584 4585 tcp_ipsec_cleanup(tcp); 4586 } 4587 4588 /* 4589 * tcp is dying (called from ipcl_conn_destroy and error cases). 4590 * Free the tcp_t in either case. 
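 *
 * A sketch of the caller's side of the contract (illustrative, not a
 * verbatim caller): the tcp_t must already be off every hash before
 * the call,
 *
 *	ASSERT(tcp->tcp_ptpahn == NULL &&
 *	    tcp->tcp_acceptor_hash == NULL);
 *	tcp_free(tcp);
 *
 * and tcp_free() releases only what hangs off the tcp_t (mblk chains,
 * sticky options, trace buffer); the conn_t/tcp_t memory itself is
 * freed by the caller.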
4591 */
4592 void
4593 tcp_free(tcp_t *tcp)
4594 {
4595 mblk_t *mp;
4596 ip6_pkt_t *ipp;
4597
4598 ASSERT(tcp != NULL);
4599 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);
4600
4601 tcp->tcp_rq = NULL;
4602 tcp->tcp_wq = NULL;
4603
4604 tcp_close_mpp(&tcp->tcp_xmit_head);
4605 tcp_close_mpp(&tcp->tcp_reass_head);
4606 if (tcp->tcp_rcv_list != NULL) {
4607 /* Free b_next chain */
4608 tcp_close_mpp(&tcp->tcp_rcv_list);
4609 }
4610 if ((mp = tcp->tcp_urp_mp) != NULL) {
4611 freemsg(mp);
4612 }
4613 if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
4614 freemsg(mp);
4615 }
4616
4617 if (tcp->tcp_fused_sigurg_mp != NULL) {
4618 freeb(tcp->tcp_fused_sigurg_mp);
4619 tcp->tcp_fused_sigurg_mp = NULL;
4620 }
4621
4622 if (tcp->tcp_sack_info != NULL) {
4623 if (tcp->tcp_notsack_list != NULL) {
4624 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list);
4625 }
4626 bzero(tcp->tcp_sack_info, sizeof (tcp_sack_info_t));
4627 }
4628
4629 if (tcp->tcp_hopopts != NULL) {
4630 mi_free(tcp->tcp_hopopts);
4631 tcp->tcp_hopopts = NULL;
4632 tcp->tcp_hopoptslen = 0;
4633 }
4634 ASSERT(tcp->tcp_hopoptslen == 0);
4635 if (tcp->tcp_dstopts != NULL) {
4636 mi_free(tcp->tcp_dstopts);
4637 tcp->tcp_dstopts = NULL;
4638 tcp->tcp_dstoptslen = 0;
4639 }
4640 ASSERT(tcp->tcp_dstoptslen == 0);
4641 if (tcp->tcp_rtdstopts != NULL) {
4642 mi_free(tcp->tcp_rtdstopts);
4643 tcp->tcp_rtdstopts = NULL;
4644 tcp->tcp_rtdstoptslen = 0;
4645 }
4646 ASSERT(tcp->tcp_rtdstoptslen == 0);
4647 if (tcp->tcp_rthdr != NULL) {
4648 mi_free(tcp->tcp_rthdr);
4649 tcp->tcp_rthdr = NULL;
4650 tcp->tcp_rthdrlen = 0;
4651 }
4652 ASSERT(tcp->tcp_rthdrlen == 0);
4653
4654 ipp = &tcp->tcp_sticky_ipp;
4655 if (ipp->ipp_fields & (IPPF_HOPOPTS | IPPF_RTDSTOPTS | IPPF_DSTOPTS |
4656 IPPF_RTHDR))
4657 ip6_pkt_free(ipp);
4658
4659 /*
4660 * Free memory associated with the tcp/ip header template.
4661 */
4662
4663 if (tcp->tcp_iphc != NULL)
4664 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len);
4665
4666 /*
4667 * The following really blows away a union. Since the union
4668 * happens to have exactly two members of identical size,
4669 * the following code is enough.
4670 */
4671 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);
4672
4673 if (tcp->tcp_tracebuf != NULL) {
4674 kmem_free(tcp->tcp_tracebuf, sizeof (tcptrch_t));
4675 tcp->tcp_tracebuf = NULL;
4676 }
4677 }
4678
4679
4680 /*
4681 * Put a connection confirmation message upstream built from the
4682 * address information within 'iph' and 'tcph'. Report our success or failure.
4683 */
4684 static boolean_t
4685 tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph, mblk_t *idmp,
4686 mblk_t **defermp)
4687 {
4688 sin_t sin;
4689 sin6_t sin6;
4690 mblk_t *mp;
4691 char *optp = NULL;
4692 int optlen = 0;
4693 cred_t *cr;
4694
4695 if (defermp != NULL)
4696 *defermp = NULL;
4697
4698 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) {
4699 /*
4700 * Return in T_CONN_CON results of option negotiation through
4701 * the T_CONN_REQ. Note: If there were real end-to-end option
4702 * negotiation, then what is received from the remote end would
4703 * need to be taken into account, but there is no such thing
4704 * (yet?) in our TCP/IP.
4705 * Note: We do not use mi_offset_param() here as
4706 * tcp_opts_conn_req contents do not directly come from
4707 * an application and are either generated in the kernel or
4708 * from user input that was already verified.
4709 */
4710 mp = tcp->tcp_conn.tcp_opts_conn_req;
4711 optp = (char *)(mp->b_rptr +
4712 ((struct T_conn_req *)mp->b_rptr)->OPT_offset);
4713 optlen = (int)
4714 ((struct T_conn_req *)mp->b_rptr)->OPT_length;
4715 }
4716
4717 if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) {
4718 ipha_t *ipha = (ipha_t *)iphdr;
4719
4720 /* packet is IPv4 */
4721 if (tcp->tcp_family == AF_INET) {
4722 sin = sin_null;
4723 sin.sin_addr.s_addr = ipha->ipha_src;
4724 sin.sin_port = *(uint16_t *)tcph->th_lport;
4725 sin.sin_family = AF_INET;
4726 mp = mi_tpi_conn_con(NULL, (char *)&sin,
4727 (int)sizeof (sin_t), optp, optlen);
4728 } else {
4729 sin6 = sin6_null;
4730 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr);
4731 sin6.sin6_port = *(uint16_t *)tcph->th_lport;
4732 sin6.sin6_family = AF_INET6;
4733 mp = mi_tpi_conn_con(NULL, (char *)&sin6,
4734 (int)sizeof (sin6_t), optp, optlen);
4735
4736 }
4737 } else {
4738 ip6_t *ip6h = (ip6_t *)iphdr;
4739
4740 ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION);
4741 ASSERT(tcp->tcp_family == AF_INET6);
4742 sin6 = sin6_null;
4743 sin6.sin6_addr = ip6h->ip6_src;
4744 sin6.sin6_port = *(uint16_t *)tcph->th_lport;
4745 sin6.sin6_family = AF_INET6;
4746 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
4747 mp = mi_tpi_conn_con(NULL, (char *)&sin6,
4748 (int)sizeof (sin6_t), optp, optlen);
4749 }
4750
4751 if (!mp)
4752 return (B_FALSE);
4753
4754 if ((cr = DB_CRED(idmp)) != NULL) {
4755 mblk_setcred(mp, cr);
4756 DB_CPID(mp) = DB_CPID(idmp);
4757 }
4758
4759 if (defermp == NULL)
4760 putnext(tcp->tcp_rq, mp);
4761 else
4762 *defermp = mp;
4763
4764 if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
4765 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
4766 return (B_TRUE);
4767 }
4768
4769 /*
4770 * Defense for the SYN attack -
4771 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest
4772 * one from the list of droppable eagers. This list is a subset of q0.
4773 * See the comments before the definition of MAKE_DROPPABLE().
4774 * 2. Don't drop a SYN request before its first timeout. This gives every
4775 * request at least until the first timeout to complete its 3-way handshake.
4776 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many
4777 * requests currently on the queue have timed out. This will be used
4778 * as an indicator of whether an attack is under way, so that appropriate
4779 * actions can be taken. (It's incremented in tcp_timer() and decremented
4780 * either when the eager goes into ESTABLISHED, or gets freed up.)
4781 * 4. The current thresholds are: # of timeouts > q0len/4 => SYN alert on;
4782 * # of timeouts drops back to <= q0len/32 => SYN alert off.
4783 */
4784 static boolean_t
4785 tcp_drop_q0(tcp_t *tcp)
4786 {
4787 tcp_t *eager;
4788 mblk_t *mp;
4789 tcp_stack_t *tcps = tcp->tcp_tcps;
4790
4791 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock));
4792 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0);
4793
4794 /* Pick the oldest eager from the list of droppable eagers */
4795 eager = tcp->tcp_eager_prev_drop_q0;
4796
4797 /* If the list is empty, return B_FALSE */
4798 if (eager == tcp) {
4799 return (B_FALSE);
4800 }
4801
4802 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */
4803 if ((mp = allocb(0, BPRI_HI)) == NULL)
4804 return (B_FALSE);
4805
4806 /*
4807 * Take this eager out from the list of droppable eagers since we are
4808 * going to drop it.
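 *
 * Tying this back to the thresholds described above: with the
 * (assumed) default tcps_conn_req_max_q0 of 1024, the SYN alert
 * latches on once more than 1024/4 = 256 half-open requests have
 * timed out, and clears once the count falls back to
 * 1024/32 = 32 or fewer.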
4809 */ 4810 MAKE_UNDROPPABLE(eager); 4811 4812 if (tcp->tcp_debug) { 4813 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 4814 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 4815 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0, 4816 tcp->tcp_conn_req_cnt_q0, 4817 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4818 } 4819 4820 BUMP_MIB(&tcps->tcps_mib, tcpHalfOpenDrop); 4821 4822 /* Put a reference on the conn as we are enqueueing it in the sqeue */ 4823 CONN_INC_REF(eager->tcp_connp); 4824 4825 /* Mark the IRE created for this SYN request temporary */ 4826 tcp_ip_ire_mark_advice(eager); 4827 squeue_fill(eager->tcp_connp->conn_sqp, mp, 4828 tcp_clean_death_wrapper, eager->tcp_connp, SQTAG_TCP_DROP_Q0); 4829 4830 return (B_TRUE); 4831 } 4832 4833 int 4834 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 4835 tcph_t *tcph, uint_t ipvers, mblk_t *idmp) 4836 { 4837 tcp_t *ltcp = lconnp->conn_tcp; 4838 tcp_t *tcp = connp->conn_tcp; 4839 mblk_t *tpi_mp; 4840 ipha_t *ipha; 4841 ip6_t *ip6h; 4842 sin6_t sin6; 4843 in6_addr_t v6dst; 4844 int err; 4845 int ifindex = 0; 4846 cred_t *cr; 4847 tcp_stack_t *tcps = tcp->tcp_tcps; 4848 4849 if (ipvers == IPV4_VERSION) { 4850 ipha = (ipha_t *)mp->b_rptr; 4851 4852 connp->conn_send = ip_output; 4853 connp->conn_recv = tcp_input; 4854 4855 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 4856 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 4857 4858 sin6 = sin6_null; 4859 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4860 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &v6dst); 4861 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4862 sin6.sin6_family = AF_INET6; 4863 sin6.__sin6_src_id = ip_srcid_find_addr(&v6dst, 4864 lconnp->conn_zoneid, tcps->tcps_netstack); 4865 if (tcp->tcp_recvdstaddr) { 4866 sin6_t sin6d; 4867 4868 sin6d = sin6_null; 4869 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, 4870 &sin6d.sin6_addr); 4871 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4872 sin6d.sin6_family = AF_INET; 4873 tpi_mp = mi_tpi_extconn_ind(NULL, 4874 (char *)&sin6d, sizeof (sin6_t), 4875 (char *)&tcp, 4876 (t_scalar_t)sizeof (intptr_t), 4877 (char *)&sin6d, sizeof (sin6_t), 4878 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4879 } else { 4880 tpi_mp = mi_tpi_conn_ind(NULL, 4881 (char *)&sin6, sizeof (sin6_t), 4882 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4883 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4884 } 4885 } else { 4886 ip6h = (ip6_t *)mp->b_rptr; 4887 4888 connp->conn_send = ip_output_v6; 4889 connp->conn_recv = tcp_input; 4890 4891 connp->conn_srcv6 = ip6h->ip6_dst; 4892 connp->conn_remv6 = ip6h->ip6_src; 4893 4894 /* db_cksumstuff is set at ip_fanout_tcp_v6 */ 4895 ifindex = (int)DB_CKSUMSTUFF(mp); 4896 DB_CKSUMSTUFF(mp) = 0; 4897 4898 sin6 = sin6_null; 4899 sin6.sin6_addr = ip6h->ip6_src; 4900 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4901 sin6.sin6_family = AF_INET6; 4902 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4903 sin6.__sin6_src_id = ip_srcid_find_addr(&ip6h->ip6_dst, 4904 lconnp->conn_zoneid, tcps->tcps_netstack); 4905 4906 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4907 /* Pass up the scope_id of remote addr */ 4908 sin6.sin6_scope_id = ifindex; 4909 } else { 4910 sin6.sin6_scope_id = 0; 4911 } 4912 if (tcp->tcp_recvdstaddr) { 4913 sin6_t sin6d; 4914 4915 sin6d = sin6_null; 4916 sin6.sin6_addr = ip6h->ip6_dst; 4917 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4918 sin6d.sin6_family = AF_INET; 4919 tpi_mp = mi_tpi_extconn_ind(NULL, 4920 (char *)&sin6d, sizeof (sin6_t), 4921 (char *)&tcp, 
(t_scalar_t)sizeof (intptr_t), 4922 (char *)&sin6d, sizeof (sin6_t), 4923 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4924 } else { 4925 tpi_mp = mi_tpi_conn_ind(NULL, 4926 (char *)&sin6, sizeof (sin6_t), 4927 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4928 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4929 } 4930 } 4931 4932 if (tpi_mp == NULL) 4933 return (ENOMEM); 4934 4935 connp->conn_fport = *(uint16_t *)tcph->th_lport; 4936 connp->conn_lport = *(uint16_t *)tcph->th_fport; 4937 connp->conn_flags |= (IPCL_TCP6|IPCL_EAGER); 4938 connp->conn_fully_bound = B_FALSE; 4939 4940 if (tcps->tcps_trace) 4941 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 4942 4943 /* Inherit information from the "parent" */ 4944 tcp->tcp_ipversion = ltcp->tcp_ipversion; 4945 tcp->tcp_family = ltcp->tcp_family; 4946 tcp->tcp_wq = ltcp->tcp_wq; 4947 tcp->tcp_rq = ltcp->tcp_rq; 4948 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 4949 tcp->tcp_detached = B_TRUE; 4950 if ((err = tcp_init_values(tcp)) != 0) { 4951 freemsg(tpi_mp); 4952 return (err); 4953 } 4954 4955 if (ipvers == IPV4_VERSION) { 4956 if ((err = tcp_header_init_ipv4(tcp)) != 0) { 4957 freemsg(tpi_mp); 4958 return (err); 4959 } 4960 ASSERT(tcp->tcp_ipha != NULL); 4961 } else { 4962 /* ifindex must be already set */ 4963 ASSERT(ifindex != 0); 4964 4965 if (ltcp->tcp_bound_if != 0) { 4966 /* 4967 * Set newtcp's bound_if equal to 4968 * listener's value. If ifindex is 4969 * not the same as ltcp->tcp_bound_if, 4970 * it must be a packet for the ipmp group 4971 * of interfaces 4972 */ 4973 tcp->tcp_bound_if = ltcp->tcp_bound_if; 4974 } else if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4975 tcp->tcp_bound_if = ifindex; 4976 } 4977 4978 tcp->tcp_ipv6_recvancillary = ltcp->tcp_ipv6_recvancillary; 4979 tcp->tcp_recvifindex = 0; 4980 tcp->tcp_recvhops = 0xffffffffU; 4981 ASSERT(tcp->tcp_ip6h != NULL); 4982 } 4983 4984 tcp->tcp_lport = ltcp->tcp_lport; 4985 4986 if (ltcp->tcp_ipversion == tcp->tcp_ipversion) { 4987 if (tcp->tcp_iphc_len != ltcp->tcp_iphc_len) { 4988 /* 4989 * Listener had options of some sort; eager inherits. 4990 * Free up the eager template and allocate one 4991 * of the right size. 
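 *
 * The invariant being restored here (a sketch, relying on hdr_len
 * never exceeding iphc_len): after this block
 *
 *	tcp->tcp_iphc_len == ltcp->tcp_iphc_len
 *
 * so the bcopy() of ltcp->tcp_hdr_len bytes below cannot overrun the
 * eager's template.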
4992 */ 4993 if (tcp->tcp_hdr_grown) { 4994 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 4995 } else { 4996 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4997 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 4998 } 4999 tcp->tcp_iphc = kmem_zalloc(ltcp->tcp_iphc_len, 5000 KM_NOSLEEP); 5001 if (tcp->tcp_iphc == NULL) { 5002 tcp->tcp_iphc_len = 0; 5003 freemsg(tpi_mp); 5004 return (ENOMEM); 5005 } 5006 tcp->tcp_iphc_len = ltcp->tcp_iphc_len; 5007 tcp->tcp_hdr_grown = B_TRUE; 5008 } 5009 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 5010 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 5011 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5012 tcp->tcp_ip6_hops = ltcp->tcp_ip6_hops; 5013 tcp->tcp_ip6_vcf = ltcp->tcp_ip6_vcf; 5014 5015 /* 5016 * Copy the IP+TCP header template from listener to eager 5017 */ 5018 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 5019 if (tcp->tcp_ipversion == IPV6_VERSION) { 5020 if (((ip6i_t *)(tcp->tcp_iphc))->ip6i_nxt == 5021 IPPROTO_RAW) { 5022 tcp->tcp_ip6h = 5023 (ip6_t *)(tcp->tcp_iphc + 5024 sizeof (ip6i_t)); 5025 } else { 5026 tcp->tcp_ip6h = 5027 (ip6_t *)(tcp->tcp_iphc); 5028 } 5029 tcp->tcp_ipha = NULL; 5030 } else { 5031 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5032 tcp->tcp_ip6h = NULL; 5033 } 5034 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5035 tcp->tcp_ip_hdr_len); 5036 } else { 5037 /* 5038 * only valid case when ipversion of listener and 5039 * eager differ is when listener is IPv6 and 5040 * eager is IPv4. 5041 * Eager header template has been initialized to the 5042 * maximum v4 header sizes, which includes space for 5043 * TCP and IP options. 5044 */ 5045 ASSERT((ltcp->tcp_ipversion == IPV6_VERSION) && 5046 (tcp->tcp_ipversion == IPV4_VERSION)); 5047 ASSERT(tcp->tcp_iphc_len >= 5048 TCP_MAX_COMBINED_HEADER_LENGTH); 5049 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5050 /* copy IP header fields individually */ 5051 tcp->tcp_ipha->ipha_ttl = 5052 ltcp->tcp_ip6h->ip6_hops; 5053 bcopy(ltcp->tcp_tcph->th_lport, 5054 tcp->tcp_tcph->th_lport, sizeof (ushort_t)); 5055 } 5056 5057 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5058 bcopy(tcp->tcp_tcph->th_fport, &tcp->tcp_fport, 5059 sizeof (in_port_t)); 5060 5061 if (ltcp->tcp_lport == 0) { 5062 tcp->tcp_lport = *(in_port_t *)tcph->th_fport; 5063 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, 5064 sizeof (in_port_t)); 5065 } 5066 5067 if (tcp->tcp_ipversion == IPV4_VERSION) { 5068 ASSERT(ipha != NULL); 5069 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5070 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5071 5072 /* Source routing option copyover (reverse it) */ 5073 if (tcps->tcps_rev_src_routes) 5074 tcp_opt_reverse(tcp, ipha); 5075 } else { 5076 ASSERT(ip6h != NULL); 5077 tcp->tcp_ip6h->ip6_dst = ip6h->ip6_src; 5078 tcp->tcp_ip6h->ip6_src = ip6h->ip6_dst; 5079 } 5080 5081 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5082 ASSERT(!tcp->tcp_tconnind_started); 5083 /* 5084 * If the SYN contains a credential, it's a loopback packet; attach 5085 * the credential to the TPI message. 
5086 */ 5087 if ((cr = DB_CRED(idmp)) != NULL) { 5088 mblk_setcred(tpi_mp, cr); 5089 DB_CPID(tpi_mp) = DB_CPID(idmp); 5090 } 5091 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 5092 5093 /* Inherit the listener's SSL protection state */ 5094 5095 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 5096 kssl_hold_ent(tcp->tcp_kssl_ent); 5097 tcp->tcp_kssl_pending = B_TRUE; 5098 } 5099 5100 return (0); 5101 } 5102 5103 5104 int 5105 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha, 5106 tcph_t *tcph, mblk_t *idmp) 5107 { 5108 tcp_t *ltcp = lconnp->conn_tcp; 5109 tcp_t *tcp = connp->conn_tcp; 5110 sin_t sin; 5111 mblk_t *tpi_mp = NULL; 5112 int err; 5113 cred_t *cr; 5114 tcp_stack_t *tcps = tcp->tcp_tcps; 5115 5116 sin = sin_null; 5117 sin.sin_addr.s_addr = ipha->ipha_src; 5118 sin.sin_port = *(uint16_t *)tcph->th_lport; 5119 sin.sin_family = AF_INET; 5120 if (ltcp->tcp_recvdstaddr) { 5121 sin_t sind; 5122 5123 sind = sin_null; 5124 sind.sin_addr.s_addr = ipha->ipha_dst; 5125 sind.sin_port = *(uint16_t *)tcph->th_fport; 5126 sind.sin_family = AF_INET; 5127 tpi_mp = mi_tpi_extconn_ind(NULL, 5128 (char *)&sind, sizeof (sin_t), (char *)&tcp, 5129 (t_scalar_t)sizeof (intptr_t), (char *)&sind, 5130 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum); 5131 } else { 5132 tpi_mp = mi_tpi_conn_ind(NULL, 5133 (char *)&sin, sizeof (sin_t), 5134 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 5135 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 5136 } 5137 5138 if (tpi_mp == NULL) { 5139 return (ENOMEM); 5140 } 5141 5142 connp->conn_flags |= (IPCL_TCP4|IPCL_EAGER); 5143 connp->conn_send = ip_output; 5144 connp->conn_recv = tcp_input; 5145 connp->conn_fully_bound = B_FALSE; 5146 5147 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 5148 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 5149 connp->conn_fport = *(uint16_t *)tcph->th_lport; 5150 connp->conn_lport = *(uint16_t *)tcph->th_fport; 5151 5152 if (tcps->tcps_trace) { 5153 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 5154 } 5155 5156 /* Inherit information from the "parent" */ 5157 tcp->tcp_ipversion = ltcp->tcp_ipversion; 5158 tcp->tcp_family = ltcp->tcp_family; 5159 tcp->tcp_wq = ltcp->tcp_wq; 5160 tcp->tcp_rq = ltcp->tcp_rq; 5161 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 5162 tcp->tcp_detached = B_TRUE; 5163 if ((err = tcp_init_values(tcp)) != 0) { 5164 freemsg(tpi_mp); 5165 return (err); 5166 } 5167 5168 /* 5169 * Let's make sure that eager tcp template has enough space to 5170 * copy IPv4 listener's tcp template. Since the conn_t structure is 5171 * preserved and tcp_iphc_len is also preserved, an eager conn_t may 5172 * have a tcp_template of total len TCP_MAX_COMBINED_HEADER_LENGTH or 5173 * more (in case of re-allocation of conn_t with tcp-IPv6 template with 5174 * extension headers or with ip6i_t struct). Note that bcopy() below 5175 * copies listener tcp's hdr_len which cannot be greater than TCP_MAX_ 5176 * COMBINED_HEADER_LENGTH as this listener must be a IPv4 listener. 
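 * (TCP_MAX_COMBINED_HEADER_LENGTH is sized for an IPv4 header and a
 * TCP header each carrying a full complement of options, which is
 * what makes the two ASSERTs below safe for any IPv4 listener.)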
5177 */ 5178 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 5179 ASSERT(ltcp->tcp_hdr_len <= TCP_MAX_COMBINED_HEADER_LENGTH); 5180 5181 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 5182 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 5183 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5184 tcp->tcp_ttl = ltcp->tcp_ttl; 5185 tcp->tcp_tos = ltcp->tcp_tos; 5186 5187 /* Copy the IP+TCP header template from listener to eager */ 5188 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 5189 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5190 tcp->tcp_ip6h = NULL; 5191 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5192 tcp->tcp_ip_hdr_len); 5193 5194 /* Initialize the IP addresses and Ports */ 5195 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5196 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5197 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5198 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, sizeof (in_port_t)); 5199 5200 /* Source routing option copyover (reverse it) */ 5201 if (tcps->tcps_rev_src_routes) 5202 tcp_opt_reverse(tcp, ipha); 5203 5204 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5205 ASSERT(!tcp->tcp_tconnind_started); 5206 5207 /* 5208 * If the SYN contains a credential, it's a loopback packet; attach 5209 * the credential to the TPI message. 5210 */ 5211 if ((cr = DB_CRED(idmp)) != NULL) { 5212 mblk_setcred(tpi_mp, cr); 5213 DB_CPID(tpi_mp) = DB_CPID(idmp); 5214 } 5215 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 5216 5217 /* Inherit the listener's SSL protection state */ 5218 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 5219 kssl_hold_ent(tcp->tcp_kssl_ent); 5220 tcp->tcp_kssl_pending = B_TRUE; 5221 } 5222 5223 return (0); 5224 } 5225 5226 /* 5227 * sets up conn for ipsec. 5228 * if the first mblk is M_CTL it is consumed and mpp is updated. 5229 * in case of error mpp is freed. 
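 *
 * Expected message layouts, matching the checks below:
 *
 *     M_CTL (ipsec_in_t) -> b_cont -> M_DATA with STRUIO_EAGER set, or
 *     M_DATA with STRUIO_POLICY set.
 *
 * In the first case the M_CTL is freed on success and *mpp is
 * advanced to point at the M_DATA before returning.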
5230  */
5231 conn_t *
5232 tcp_get_ipsec_conn(tcp_t *tcp, squeue_t *sqp, mblk_t **mpp)
5233 {
5234     conn_t *connp = tcp->tcp_connp;
5235     conn_t *econnp;
5236     squeue_t *new_sqp;
5237     mblk_t *first_mp = *mpp;
5238     mblk_t *mp = *mpp;
5239     boolean_t mctl_present = B_FALSE;
5240     uint_t ipvers;
5241 
5242     econnp = tcp_get_conn(sqp, tcp->tcp_tcps);
5243     if (econnp == NULL) {
5244         freemsg(first_mp);
5245         return (NULL);
5246     }
5247     if (DB_TYPE(mp) == M_CTL) {
5248         if (mp->b_cont == NULL ||
5249             mp->b_cont->b_datap->db_type != M_DATA) {
5250             freemsg(first_mp);
5251             return (NULL);
5252         }
5253         mp = mp->b_cont;
5254         if ((mp->b_datap->db_struioflag & STRUIO_EAGER) == 0) {
5255             freemsg(first_mp);
5256             return (NULL);
5257         }
5258 
5259         mp->b_datap->db_struioflag &= ~STRUIO_EAGER;
5260         first_mp->b_datap->db_struioflag &= ~STRUIO_POLICY;
5261         mctl_present = B_TRUE;
5262     } else {
5263         ASSERT(mp->b_datap->db_struioflag & STRUIO_POLICY);
5264         mp->b_datap->db_struioflag &= ~STRUIO_POLICY;
5265     }
5266 
5267     new_sqp = (squeue_t *)DB_CKSUMSTART(mp);
5268     DB_CKSUMSTART(mp) = 0;
5269 
5270     ASSERT(OK_32PTR(mp->b_rptr));
5271     ipvers = IPH_HDR_VERSION(mp->b_rptr);
5272     if (ipvers == IPV4_VERSION) {
5273         uint16_t *up;
5274         uint32_t ports;
5275         ipha_t *ipha;
5276 
5277         ipha = (ipha_t *)mp->b_rptr;
5278         up = (uint16_t *)((uchar_t *)ipha +
5279             IPH_HDR_LENGTH(ipha) + TCP_PORTS_OFFSET);
5280         ports = *(uint32_t *)up;
5281         IPCL_TCP_EAGER_INIT(econnp, IPPROTO_TCP,
5282             ipha->ipha_dst, ipha->ipha_src, ports);
5283     } else {
5284         uint16_t *up;
5285         uint32_t ports;
5286         uint16_t ip_hdr_len;
5287         uint8_t *nexthdrp;
5288         ip6_t *ip6h;
5289         tcph_t *tcph;
5290 
5291         ip6h = (ip6_t *)mp->b_rptr;
5292         if (ip6h->ip6_nxt == IPPROTO_TCP) {
5293             ip_hdr_len = IPV6_HDR_LEN;
5294         } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip_hdr_len,
5295             &nexthdrp) || *nexthdrp != IPPROTO_TCP) {
5296             CONN_DEC_REF(econnp);
5297             freemsg(first_mp);
5298             return (NULL);
5299         }
5300         tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len];
5301         up = (uint16_t *)tcph->th_lport;
5302         ports = *(uint32_t *)up;
5303         IPCL_TCP_EAGER_INIT_V6(econnp, IPPROTO_TCP,
5304             ip6h->ip6_dst, ip6h->ip6_src, ports);
5305     }
5306 
5307     /*
5308      * The caller already ensured that there is a sqp present.
5309      */
5310     econnp->conn_sqp = new_sqp;
5311 
5312     if (connp->conn_policy != NULL) {
5313         ipsec_in_t *ii;
5314         ii = (ipsec_in_t *)(first_mp->b_rptr);
5315         ASSERT(ii->ipsec_in_policy == NULL);
5316         IPPH_REFHOLD(connp->conn_policy);
5317         ii->ipsec_in_policy = connp->conn_policy;
5318 
5319         first_mp->b_datap->db_type = IPSEC_POLICY_SET;
5320         if (!ip_bind_ipsec_policy_set(econnp, first_mp)) {
5321             CONN_DEC_REF(econnp);
5322             freemsg(first_mp);
5323             return (NULL);
5324         }
5325     }
5326 
5327     if (ipsec_conn_cache_policy(econnp, ipvers == IPV4_VERSION) != 0) {
5328         CONN_DEC_REF(econnp);
5329         freemsg(first_mp);
5330         return (NULL);
5331     }
5332 
5333     /*
5334      * If we know we have some policy, pass the "IPSEC"
5335      * options size.  TCP uses this to adjust the MSS.
5336      */
5337     econnp->conn_tcp->tcp_ipsec_overhead = conn_ipsec_length(econnp);
5338     if (mctl_present) {
5339         freeb(first_mp);
5340         *mpp = mp;
5341     }
5342 
5343     return (econnp);
5344 }
5345 
5346 /*
5347  * tcp_get_conn/tcp_free_conn
5348  *
5349  * tcp_get_conn is used to get a clean tcp connection structure.
5350  * It tries to reuse the connections put on the freelist by the
5351  * time_wait_collector failing which it goes to kmem_cache. This
5352  * way has two benefits compared to just allocating from and
5353  * freeing to kmem_cache.
5354  * 1) The time_wait_collector can free (which includes the cleanup)
5355  * outside the squeue. So when the interrupt comes, we have a clean
5356  * connection sitting in the freelist. Obviously, this buys us
5357  * performance.
5358  *
5359  * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_conn_request
5360  * has multiple disadvantages - tying up the squeue during alloc, and the
5361  * fact that IPSec policy initialization has to happen here which
5362  * requires us sending an M_CTL and checking for it i.e. real ugliness.
5363  * But allocating the conn/tcp in IP land is also not the best since
5364  * we can't check the 'q' and 'q0' which are protected by squeue and
5365  * blindly allocate memory which might have to be freed here if we are
5366  * not allowed to accept the connection. By using the freelist and
5367  * putting the conn/tcp back in freelist, we don't pay a penalty for
5368  * allocating memory without checking 'q/q0' and freeing it if we can't
5369  * accept the connection.
5370  *
5371  * Care should be taken to put the conn back in the same squeue's freelist
5372  * from which it was allocated. Best results are obtained if conn is
5373  * allocated from listener's squeue and freed to the same. The time wait
5374  * collector will free up the freelist if the connection ends up sitting
5375  * there for too long.
5376  */
5377 void *
5378 tcp_get_conn(void *arg, tcp_stack_t *tcps)
5379 {
5380     tcp_t *tcp = NULL;
5381     conn_t *connp = NULL;
5382     squeue_t *sqp = (squeue_t *)arg;
5383     tcp_squeue_priv_t *tcp_time_wait;
5384     netstack_t *ns;
5385 
5386     tcp_time_wait =
5387         *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));
5388 
5389     mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
5390     tcp = tcp_time_wait->tcp_free_list;
5391     ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
5392     if (tcp != NULL) {
5393         tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
5394         tcp_time_wait->tcp_free_list_cnt--;
5395         mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
5396         tcp->tcp_time_wait_next = NULL;
5397         connp = tcp->tcp_connp;
5398         connp->conn_flags |= IPCL_REUSED;
5399 
5400         ASSERT(tcp->tcp_tcps == NULL);
5401         ASSERT(connp->conn_netstack == NULL);
5402         ns = tcps->tcps_netstack;
5403         netstack_hold(ns);
5404         connp->conn_netstack = ns;
5405         tcp->tcp_tcps = tcps;
5406         TCPS_REFHOLD(tcps);
5407         ipcl_globalhash_insert(connp);
5408         return ((void *)connp);
5409     }
5410     mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
5411     if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
5412         tcps->tcps_netstack)) == NULL)
5413         return (NULL);
5414     tcp = connp->conn_tcp;
5415     tcp->tcp_tcps = tcps;
5416     TCPS_REFHOLD(tcps);
5417     return ((void *)connp);
5418 }
5419 
5420 /*
5421  * Update the cached label for the given tcp_t. This should be called once per
5422  * connection, and before any packets are sent or tcp_process_options is
5423  * invoked. Returns B_FALSE if the correct label could not be constructed.
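 *
 * As an illustration of the flow below: on the IPv4 side,
 * tsol_remove_secopt() first strips any existing security option and
 * tsol_prepend_option() then inserts the newly computed one, with
 * tcp_hdr_len, tcp_ip_hdr_len and tcp_tcph adjusted after each step.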
5424  */
5425 static boolean_t
5426 tcp_update_label(tcp_t *tcp, const cred_t *cr)
5427 {
5428     conn_t *connp = tcp->tcp_connp;
5429 
5430     if (tcp->tcp_ipversion == IPV4_VERSION) {
5431         uchar_t optbuf[IP_MAX_OPT_LENGTH];
5432         int added;
5433 
5434         if (tsol_compute_label(cr, tcp->tcp_remote, optbuf,
5435             connp->conn_mac_exempt,
5436             tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0)
5437             return (B_FALSE);
5438 
5439         added = tsol_remove_secopt(tcp->tcp_ipha, tcp->tcp_hdr_len);
5440         if (added == -1)
5441             return (B_FALSE);
5442         tcp->tcp_hdr_len += added;
5443         tcp->tcp_tcph = (tcph_t *)((uchar_t *)tcp->tcp_tcph + added);
5444         tcp->tcp_ip_hdr_len += added;
5445         if ((tcp->tcp_label_len = optbuf[IPOPT_OLEN]) != 0) {
5446             tcp->tcp_label_len = (tcp->tcp_label_len + 3) & ~3;
5447             added = tsol_prepend_option(optbuf, tcp->tcp_ipha,
5448                 tcp->tcp_hdr_len);
5449             if (added == -1)
5450                 return (B_FALSE);
5451             tcp->tcp_hdr_len += added;
5452             tcp->tcp_tcph = (tcph_t *)
5453                 ((uchar_t *)tcp->tcp_tcph + added);
5454             tcp->tcp_ip_hdr_len += added;
5455         }
5456     } else {
5457         uchar_t optbuf[TSOL_MAX_IPV6_OPTION];
5458 
5459         if (tsol_compute_label_v6(cr, &tcp->tcp_remote_v6, optbuf,
5460             connp->conn_mac_exempt,
5461             tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0)
5462             return (B_FALSE);
5463         if (tsol_update_sticky(&tcp->tcp_sticky_ipp,
5464             &tcp->tcp_label_len, optbuf) != 0)
5465             return (B_FALSE);
5466         if (tcp_build_hdrs(tcp->tcp_rq, tcp) != 0)
5467             return (B_FALSE);
5468     }
5469 
5470     connp->conn_ulp_labeled = 1;
5471 
5472     return (B_TRUE);
5473 }
5474 
5475 /* BEGIN CSTYLED */
5476 /*
5477  *
5478  * The sockfs ACCEPT path:
5479  * =======================
5480  *
5481  * The eager is now established in its own perimeter as soon as SYN is
5482  * received in tcp_conn_request(). When sockfs receives conn_ind, it
5483  * completes the accept processing on the acceptor STREAM. Sending the
5484  * conn_ind is common to both a sockfs listener and a TLI/XTI listener,
5485  * but a TLI/XTI listener completes the accept processing
5486  * on the listener perimeter.
5487  *
5488  * Common control flow for 3 way handshake:
5489  * ----------------------------------------
5490  *
5491  * incoming SYN (listener perimeter) -> tcp_rput_data()
5492  *                                      -> tcp_conn_request()
5493  *
5494  * incoming SYN-ACK-ACK (eager perim) -> tcp_rput_data()
5495  * send T_CONN_IND (listener perim)   -> tcp_send_conn_ind()
5496  *
5497  * Sockfs ACCEPT Path:
5498  * -------------------
5499  *
5500  * open acceptor stream (tcp_open allocates tcp_wput_accept()
5501  * as STREAM entry point)
5502  *
5503  * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_wput_accept()
5504  *
5505  * tcp_wput_accept() extracts the eager and makes the q->q_ptr <-> eager
5506  * association (we are not behind eager's squeue but sockfs is protecting us
5507  * and no one knows about this stream yet). The STREAMS entry point q->q_info
5508  * is changed to point at tcp_wput().
5509  *
5510  * tcp_wput_accept() sends any deferred eagers via tcp_send_pending() to
5511  * the listener (done on the listener's perimeter).
5512  *
5513  * tcp_wput_accept() calls tcp_accept_finish() on the eager's perimeter to
5514  * finish the accept.
5515  *
5516  * TLI/XTI client ACCEPT path:
5517  * ---------------------------
5518  *
5519  * soaccept() sends T_CONN_RES on the listener STREAM.
5520  *
5521  * tcp_accept() -> tcp_accept_swap() complete the processing and send
5522  * the bind_mp to the eager perimeter to finish the accept (tcp_rput_other()).
5523  *
5524  * Locks:
5525  * ======
5526  *
5527  * listener->tcp_eager_lock protects the listener's tcp_eager_next_q0
5528  * and tcp_eager_next_q.
5529  *
5530  * Referencing:
5531  * ============
5532  *
5533  * 1) We start out in tcp_conn_request by eager placing a ref on
5534  * listener and listener adding eager to the listener's tcp_eager_next_q0.
5535  *
5536  * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to listener. Before
5537  * doing so we place a ref on the eager. This ref is finally dropped at the
5538  * end of tcp_accept_finish() while unwinding from the squeue, i.e. the
5539  * reference is dropped by the squeue framework.
5540  *
5541  * 3) The ref on listener placed in 1 above is dropped in tcp_accept_finish
5542  *
5543  * The reference must be released by the same entity that added the reference.
5544  * In the above scheme, the eager is the entity that adds and releases the
5545  * references. Note that tcp_accept_finish executes in the squeue of the eager
5546  * (albeit after it is attached to the acceptor stream). Though 1. executes
5547  * in the listener's squeue, the eager is nascent at this point and the
5548  * reference can be considered to have been added on behalf of the eager.
5549  *
5550  * Eager getting a Reset or listener closing:
5551  * ==========================================
5552  *
5553  * Once the listener and eager are linked, the listener never does the unlink.
5554  * If the listener needs to close, tcp_eager_cleanup() is called which queues
5555  * a message on all the eager perimeters. The eager then does the unlink,
5556  * clears any pointers to the listener's queue and drops the reference to the
5557  * listener. The listener waits in tcp_close outside the squeue until its
5558  * refcount has dropped to 1. This ensures that the listener has waited for
5559  * all eagers to clear their association with the listener.
5560  *
5561  * Similarly, if eager decides to go away, it can unlink itself and close.
5562  * When the T_CONN_RES comes down, we check if eager has closed. Note that
5563  * the reference to eager is still valid because of the extra ref we put
5564  * in tcp_send_conn_ind.
5565  *
5566  * Listener can always locate the eager under the protection
5567  * of the listener->tcp_eager_lock, and then do a refhold
5568  * on the eager during the accept processing.
5569  *
5570  * The acceptor stream accesses the eager in the accept processing
5571  * based on the ref placed on eager before sending T_conn_ind.
5572  * The only entity that can negate this refhold is a listener close
5573  * which is mutually exclusive with an active acceptor stream.
5574  *
5575  * Eager's reference on the listener
5576  * ===================================
5577  *
5578  * If the accept happens (even on a closed eager) the eager drops its
5579  * reference on the listener at the start of tcp_accept_finish. If the
5580  * eager is killed due to an incoming RST before the T_conn_ind is sent up,
5581  * the reference is dropped in tcp_closei_local. If the listener closes,
5582  * the reference is dropped in tcp_eager_kill. In all cases the reference
5583  * is dropped while executing in the eager's context (squeue).
5584  */
5585 /* END CSTYLED */
5586 
5587 /* Process the SYN packet, mp, directed at the listener 'tcp' */
5588 
5589 /*
5590  * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
5591  * tcp_rput_data will not see any SYN packets.
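 *
 * In outline, tcp_conn_request() below: checks the listener state and
 * the q/q0 limits, allocates an eager conn_t (via tcp_get_conn() or,
 * for IPsec, tcp_get_ipsec_conn()), creates the eager with
 * tcp_conn_create_v4()/tcp_conn_create_v6(), inherits the listener's
 * parameters, links the eager into tcp_eager_next_q0, inserts it in
 * the classifier and finally sends the SYN-ACK.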
5592 */ 5593 /* ARGSUSED */ 5594 void 5595 tcp_conn_request(void *arg, mblk_t *mp, void *arg2) 5596 { 5597 tcph_t *tcph; 5598 uint32_t seg_seq; 5599 tcp_t *eager; 5600 uint_t ipvers; 5601 ipha_t *ipha; 5602 ip6_t *ip6h; 5603 int err; 5604 conn_t *econnp = NULL; 5605 squeue_t *new_sqp; 5606 mblk_t *mp1; 5607 uint_t ip_hdr_len; 5608 conn_t *connp = (conn_t *)arg; 5609 tcp_t *tcp = connp->conn_tcp; 5610 cred_t *credp; 5611 tcp_stack_t *tcps = tcp->tcp_tcps; 5612 ip_stack_t *ipst; 5613 5614 if (tcp->tcp_state != TCPS_LISTEN) 5615 goto error2; 5616 5617 ASSERT((tcp->tcp_connp->conn_flags & IPCL_BOUND) != 0); 5618 5619 mutex_enter(&tcp->tcp_eager_lock); 5620 if (tcp->tcp_conn_req_cnt_q >= tcp->tcp_conn_req_max) { 5621 mutex_exit(&tcp->tcp_eager_lock); 5622 TCP_STAT(tcps, tcp_listendrop); 5623 BUMP_MIB(&tcps->tcps_mib, tcpListenDrop); 5624 if (tcp->tcp_debug) { 5625 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 5626 "tcp_conn_request: listen backlog (max=%d) " 5627 "overflow (%d pending) on %s", 5628 tcp->tcp_conn_req_max, tcp->tcp_conn_req_cnt_q, 5629 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 5630 } 5631 goto error2; 5632 } 5633 5634 if (tcp->tcp_conn_req_cnt_q0 >= 5635 tcp->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) { 5636 /* 5637 * Q0 is full. Drop a pending half-open req from the queue 5638 * to make room for the new SYN req. Also mark the time we 5639 * drop a SYN. 5640 * 5641 * A more aggressive defense against SYN attack will 5642 * be to set the "tcp_syn_defense" flag now. 5643 */ 5644 TCP_STAT(tcps, tcp_listendropq0); 5645 tcp->tcp_last_rcv_lbolt = lbolt64; 5646 if (!tcp_drop_q0(tcp)) { 5647 mutex_exit(&tcp->tcp_eager_lock); 5648 BUMP_MIB(&tcps->tcps_mib, tcpListenDropQ0); 5649 if (tcp->tcp_debug) { 5650 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 5651 "tcp_conn_request: listen half-open queue " 5652 "(max=%d) full (%d pending) on %s", 5653 tcps->tcps_conn_req_max_q0, 5654 tcp->tcp_conn_req_cnt_q0, 5655 tcp_display(tcp, NULL, 5656 DISP_PORT_ONLY)); 5657 } 5658 goto error2; 5659 } 5660 } 5661 mutex_exit(&tcp->tcp_eager_lock); 5662 5663 /* 5664 * IP adds STRUIO_EAGER and ensures that the received packet is 5665 * M_DATA even if conn_ipv6_recvpktinfo is enabled or for ip6 5666 * link local address. If IPSec is enabled, db_struioflag has 5667 * STRUIO_POLICY set (mutually exclusive from STRUIO_EAGER); 5668 * otherwise an error case if neither of them is set. 5669 */ 5670 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 5671 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5672 DB_CKSUMSTART(mp) = 0; 5673 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 5674 econnp = (conn_t *)tcp_get_conn(arg2, tcps); 5675 if (econnp == NULL) 5676 goto error2; 5677 ASSERT(econnp->conn_netstack == connp->conn_netstack); 5678 econnp->conn_sqp = new_sqp; 5679 } else if ((mp->b_datap->db_struioflag & STRUIO_POLICY) != 0) { 5680 /* 5681 * mp is updated in tcp_get_ipsec_conn(). 5682 */ 5683 econnp = tcp_get_ipsec_conn(tcp, arg2, &mp); 5684 if (econnp == NULL) { 5685 /* 5686 * mp freed by tcp_get_ipsec_conn. 
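             * (A NULL econnp means the message was consumed,
             * so mp must not be referenced again.)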
5687 */ 5688 return; 5689 } 5690 ASSERT(econnp->conn_netstack == connp->conn_netstack); 5691 } else { 5692 goto error2; 5693 } 5694 5695 ASSERT(DB_TYPE(mp) == M_DATA); 5696 5697 ipvers = IPH_HDR_VERSION(mp->b_rptr); 5698 ASSERT(ipvers == IPV6_VERSION || ipvers == IPV4_VERSION); 5699 ASSERT(OK_32PTR(mp->b_rptr)); 5700 if (ipvers == IPV4_VERSION) { 5701 ipha = (ipha_t *)mp->b_rptr; 5702 ip_hdr_len = IPH_HDR_LENGTH(ipha); 5703 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5704 } else { 5705 ip6h = (ip6_t *)mp->b_rptr; 5706 ip_hdr_len = ip_hdr_length_v6(mp, ip6h); 5707 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5708 } 5709 5710 if (tcp->tcp_family == AF_INET) { 5711 ASSERT(ipvers == IPV4_VERSION); 5712 err = tcp_conn_create_v4(connp, econnp, ipha, tcph, mp); 5713 } else { 5714 err = tcp_conn_create_v6(connp, econnp, mp, tcph, ipvers, mp); 5715 } 5716 5717 if (err) 5718 goto error3; 5719 5720 eager = econnp->conn_tcp; 5721 5722 /* Inherit various TCP parameters from the listener */ 5723 eager->tcp_naglim = tcp->tcp_naglim; 5724 eager->tcp_first_timer_threshold = 5725 tcp->tcp_first_timer_threshold; 5726 eager->tcp_second_timer_threshold = 5727 tcp->tcp_second_timer_threshold; 5728 5729 eager->tcp_first_ctimer_threshold = 5730 tcp->tcp_first_ctimer_threshold; 5731 eager->tcp_second_ctimer_threshold = 5732 tcp->tcp_second_ctimer_threshold; 5733 5734 /* 5735 * tcp_adapt_ire() may change tcp_rwnd according to the ire metrics. 5736 * If it does not, the eager's receive window will be set to the 5737 * listener's receive window later in this function. 5738 */ 5739 eager->tcp_rwnd = 0; 5740 5741 /* 5742 * Inherit listener's tcp_init_cwnd. Need to do this before 5743 * calling tcp_process_options() where tcp_mss_set() is called 5744 * to set the initial cwnd. 5745 */ 5746 eager->tcp_init_cwnd = tcp->tcp_init_cwnd; 5747 5748 /* 5749 * Zones: tcp_adapt_ire() and tcp_send_data() both need the 5750 * zone id before the accept is completed in tcp_wput_accept(). 5751 */ 5752 econnp->conn_zoneid = connp->conn_zoneid; 5753 econnp->conn_allzones = connp->conn_allzones; 5754 5755 /* Copy nexthop information from listener to eager */ 5756 if (connp->conn_nexthop_set) { 5757 econnp->conn_nexthop_set = connp->conn_nexthop_set; 5758 econnp->conn_nexthop_v4 = connp->conn_nexthop_v4; 5759 } 5760 5761 /* 5762 * TSOL: tsol_input_proc() needs the eager's cred before the 5763 * eager is accepted 5764 */ 5765 econnp->conn_cred = eager->tcp_cred = credp = connp->conn_cred; 5766 crhold(credp); 5767 5768 /* 5769 * If the caller has the process-wide flag set, then default to MAC 5770 * exempt mode. This allows read-down to unlabeled hosts. 
5771 */ 5772 if (getpflags(NET_MAC_AWARE, credp) != 0) 5773 econnp->conn_mac_exempt = B_TRUE; 5774 5775 if (is_system_labeled()) { 5776 cred_t *cr; 5777 5778 if (connp->conn_mlp_type != mlptSingle) { 5779 cr = econnp->conn_peercred = DB_CRED(mp); 5780 if (cr != NULL) 5781 crhold(cr); 5782 else 5783 cr = econnp->conn_cred; 5784 DTRACE_PROBE2(mlp_syn_accept, conn_t *, 5785 econnp, cred_t *, cr) 5786 } else { 5787 cr = econnp->conn_cred; 5788 DTRACE_PROBE2(syn_accept, conn_t *, 5789 econnp, cred_t *, cr) 5790 } 5791 5792 if (!tcp_update_label(eager, cr)) { 5793 DTRACE_PROBE3( 5794 tx__ip__log__error__connrequest__tcp, 5795 char *, "eager connp(1) label on SYN mp(2) failed", 5796 conn_t *, econnp, mblk_t *, mp); 5797 goto error3; 5798 } 5799 } 5800 5801 eager->tcp_hard_binding = B_TRUE; 5802 5803 tcp_bind_hash_insert(&tcps->tcps_bind_fanout[ 5804 TCP_BIND_HASH(eager->tcp_lport)], eager, 0); 5805 5806 CL_INET_CONNECT(eager); 5807 5808 /* 5809 * No need to check for multicast destination since ip will only pass 5810 * up multicasts to those that have expressed interest 5811 * TODO: what about rejecting broadcasts? 5812 * Also check that source is not a multicast or broadcast address. 5813 */ 5814 eager->tcp_state = TCPS_SYN_RCVD; 5815 5816 5817 /* 5818 * There should be no ire in the mp as we are being called after 5819 * receiving the SYN. 5820 */ 5821 ASSERT(tcp_ire_mp(mp) == NULL); 5822 5823 /* 5824 * Adapt our mss, ttl, ... according to information provided in IRE. 5825 */ 5826 5827 if (tcp_adapt_ire(eager, NULL) == 0) { 5828 /* Undo the bind_hash_insert */ 5829 tcp_bind_hash_remove(eager); 5830 goto error3; 5831 } 5832 5833 /* Process all TCP options. */ 5834 tcp_process_options(eager, tcph); 5835 5836 /* Is the other end ECN capable? */ 5837 if (tcps->tcps_ecn_permitted >= 1 && 5838 (tcph->th_flags[0] & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 5839 eager->tcp_ecn_ok = B_TRUE; 5840 } 5841 5842 /* 5843 * listener->tcp_rq->q_hiwat should be the default window size or a 5844 * window size changed via SO_RCVBUF option. First round up the 5845 * eager's tcp_rwnd to the nearest MSS. Then find out the window 5846 * scale option value if needed. Call tcp_rwnd_set() to finish the 5847 * setting. 5848 * 5849 * Note if there is a rpipe metric associated with the remote host, 5850 * we should not inherit receive window size from listener. 5851 */ 5852 eager->tcp_rwnd = MSS_ROUNDUP( 5853 (eager->tcp_rwnd == 0 ? tcp->tcp_rq->q_hiwat : 5854 eager->tcp_rwnd), eager->tcp_mss); 5855 if (eager->tcp_snd_ws_ok) 5856 tcp_set_ws_value(eager); 5857 /* 5858 * Note that this is the only place tcp_rwnd_set() is called for 5859 * accepting a connection. We need to call it here instead of 5860 * after the 3-way handshake because we need to tell the other 5861 * side our rwnd in the SYN-ACK segment. 5862 */ 5863 (void) tcp_rwnd_set(eager, eager->tcp_rwnd); 5864 5865 /* 5866 * We eliminate the need for sockfs to send down a T_SVR4_OPTMGMT_REQ 5867 * via soaccept()->soinheritoptions() which essentially applies 5868 * all the listener options to the new STREAM. The options that we 5869 * need to take care of are: 5870 * SO_DEBUG, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE, SO_BROADCAST, 5871 * SO_USELOOPBACK, SO_OOBINLINE, SO_DGRAM_ERRIND, SO_LINGER, 5872 * SO_SNDBUF, SO_RCVBUF. 5873 * 5874 * SO_RCVBUF: tcp_rwnd_set() above takes care of it. 5875 * SO_SNDBUF: Set the tcp_xmit_hiwater for the eager. When 5876 * tcp_maxpsz_set() gets called later from 5877 * tcp_accept_finish(), the option takes effect. 
5878  *
5879  */
5880     /* Set the TCP options */
5881     eager->tcp_xmit_hiwater = tcp->tcp_xmit_hiwater;
5882     eager->tcp_dgram_errind = tcp->tcp_dgram_errind;
5883     eager->tcp_oobinline = tcp->tcp_oobinline;
5884     eager->tcp_reuseaddr = tcp->tcp_reuseaddr;
5885     eager->tcp_broadcast = tcp->tcp_broadcast;
5886     eager->tcp_useloopback = tcp->tcp_useloopback;
5887     eager->tcp_dontroute = tcp->tcp_dontroute;
5888     eager->tcp_linger = tcp->tcp_linger;
5889     eager->tcp_lingertime = tcp->tcp_lingertime;
5890     if (tcp->tcp_ka_enabled)
5891         eager->tcp_ka_enabled = 1;
5892 
5893     /* Set the IP options */
5894     econnp->conn_broadcast = connp->conn_broadcast;
5895     econnp->conn_loopback = connp->conn_loopback;
5896     econnp->conn_dontroute = connp->conn_dontroute;
5897     econnp->conn_reuseaddr = connp->conn_reuseaddr;
5898 
5899     /* Put a ref on the listener for the eager. */
5900     CONN_INC_REF(connp);
5901     mutex_enter(&tcp->tcp_eager_lock);
5902     tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
5903     eager->tcp_eager_next_q0 = tcp->tcp_eager_next_q0;
5904     tcp->tcp_eager_next_q0 = eager;
5905     eager->tcp_eager_prev_q0 = tcp;
5906 
5907     /* Set tcp_listener before adding it to tcp_conn_fanout */
5908     eager->tcp_listener = tcp;
5909     eager->tcp_saved_listener = tcp;
5910 
5911     /*
5912      * Tag this detached tcp vector for later retrieval
5913      * by our listener client in tcp_accept().
5914      */
5915     eager->tcp_conn_req_seqnum = tcp->tcp_conn_req_seqnum;
5916     tcp->tcp_conn_req_cnt_q0++;
5917     if (++tcp->tcp_conn_req_seqnum == -1) {
5918         /*
5919          * -1 is "special" and defined in TPI as something
5920          * that should never be used in T_CONN_IND
5921          */
5922         ++tcp->tcp_conn_req_seqnum;
5923     }
5924     mutex_exit(&tcp->tcp_eager_lock);
5925 
5926     if (tcp->tcp_syn_defense) {
5927         /* Don't drop the SYN that comes from a good IP source */
5928         ipaddr_t *addr_cache = (ipaddr_t *)(tcp->tcp_ip_addr_cache);
5929         if (addr_cache != NULL && eager->tcp_remote ==
5930             addr_cache[IP_ADDR_CACHE_HASH(eager->tcp_remote)]) {
5931             eager->tcp_dontdrop = B_TRUE;
5932         }
5933     }
5934 
5935     /*
5936      * We need to insert the eager in its own perimeter but as soon
5937      * as we do that, we expose the eager to the classifier and
5938      * should not touch any field outside the eager's perimeter.
5939      * So do all the work necessary before inserting the eager
5940      * in its own perimeter. Be optimistic that ipcl_conn_insert()
5941      * will succeed but undo everything if it fails.
5942      */
5943     seg_seq = ABE32_TO_U32(tcph->th_seq);
5944     eager->tcp_irs = seg_seq;
5945     eager->tcp_rack = seg_seq;
5946     eager->tcp_rnxt = seg_seq + 1;
5947     U32_TO_ABE32(eager->tcp_rnxt, eager->tcp_tcph->th_ack);
5948     BUMP_MIB(&tcps->tcps_mib, tcpPassiveOpens);
5949     eager->tcp_state = TCPS_SYN_RCVD;
5950     mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
5951         NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
5952     if (mp1 == NULL) {
5953         /*
5954          * Increment the ref count as we are going to
5955          * enqueue an mp in the squeue
5956          */
5957         CONN_INC_REF(econnp);
5958         goto error;
5959     }
5960     DB_CPID(mp1) = tcp->tcp_cpid;
5961     eager->tcp_cpid = tcp->tcp_cpid;
5962     eager->tcp_open_time = lbolt64;
5963 
5964     /*
5965      * We need to start the rto timer. In normal case, we start
5966      * the timer after sending the packet on the wire (or at
5967      * least believing that packet was sent by waiting for
5968      * CALL_IP_WPUT() to return).  Since this is the first packet
5969      * being sent on the wire for the eager, our initial tcp_rto
5970      * is at least tcp_rexmit_interval_min which is a fairly
5971      * large value to allow the algorithm to adjust slowly to large
5972      * fluctuations of RTT during first few transmissions.
5973      *
5974      * Starting the timer first and then sending the packet in this
5975      * case shouldn't make much difference since tcp_rexmit_interval_min
5976      * is of the order of several 100ms and starting the timer
5977      * first and then sending the packet will result in a difference
5978      * of a few microseconds.
5979      *
5980      * Without this optimization, we are forced to hold the fanout
5981      * lock across the ipcl_bind_insert() and sending the packet
5982      * so that we don't race against an incoming packet (maybe RST)
5983      * for this eager.
5984      *
5985      * It is necessary to acquire an extra reference on the eager
5986      * at this point and hold it until after tcp_send_data() to
5987      * ensure against an eager close race.
5988      */
5989 
5990     CONN_INC_REF(eager->tcp_connp);
5991 
5992     TCP_RECORD_TRACE(eager, mp1, TCP_TRACE_SEND_PKT);
5993     TCP_TIMER_RESTART(eager, eager->tcp_rto);
5994 
5995 
5996     /*
5997      * Insert the eager in its own perimeter now. We are ready to deal
5998      * with any packets on eager.
5999      */
6000     if (eager->tcp_ipversion == IPV4_VERSION) {
6001         if (ipcl_conn_insert(econnp, IPPROTO_TCP, 0, 0, 0) != 0) {
6002             goto error;
6003         }
6004     } else {
6005         if (ipcl_conn_insert_v6(econnp, IPPROTO_TCP, 0, 0, 0, 0) != 0) {
6006             goto error;
6007         }
6008     }
6009 
6010     /* mark conn as fully-bound */
6011     econnp->conn_fully_bound = B_TRUE;
6012 
6013     /* Send the SYN-ACK */
6014     tcp_send_data(eager, eager->tcp_wq, mp1);
6015     CONN_DEC_REF(eager->tcp_connp);
6016     freemsg(mp);
6017 
6018     return;
6019 error:
6020     freemsg(mp1);
6021     eager->tcp_closemp_used = B_TRUE;
6022     TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
6023     squeue_fill(econnp->conn_sqp, &eager->tcp_closemp, tcp_eager_kill,
6024         econnp, SQTAG_TCP_CONN_REQ_2);
6025 
6026     /*
6027      * If a connection already exists, send the mp to that connection so
6028      * that it can be appropriately dealt with.
6029      */
6030     ipst = tcps->tcps_netstack->netstack_ip;
6031 
6032     if ((econnp = ipcl_classify(mp, connp->conn_zoneid, ipst)) != NULL) {
6033         if (!IPCL_IS_CONNECTED(econnp)) {
6034             /*
6035              * Something bad happened. ipcl_conn_insert()
6036              * failed because a connection already existed
6037              * in connected hash but we can't find it
6038              * anymore (someone blew it away). Just
6039              * free this message and hopefully remote
6040              * will retransmit at which time the SYN can be
6041              * treated as a new connection or dealt with by
6042              * a TH_RST if a connection already exists.
6043              */
6044             CONN_DEC_REF(econnp);
6045             freemsg(mp);
6046         } else {
6047             squeue_fill(econnp->conn_sqp, mp, tcp_input,
6048                 econnp, SQTAG_TCP_CONN_REQ_1);
6049         }
6050     } else {
6051         /* Nobody wants this packet */
6052         freemsg(mp);
6053     }
6054     return;
6055 error3:
6056     CONN_DEC_REF(econnp);
6057 error2:
6058     freemsg(mp);
6059 }
6060 
6061 /*
6062  * In an ideal case of vertical partition in NUMA architecture, it's
6063  * beneficial to have the listener and all the incoming connections
6064  * tied to the same squeue. The other constraint is that incoming
6065  * connections should be tied to the squeue attached to interrupted
6066  * CPU for obvious locality reason so this leaves the listener to
6067  * be tied to the same squeue.  Our only problem is that when the listener
6068  * is binding, the CPU that will get interrupted by the NIC whose
6069  * IP address the listener is binding to is not even known. So
6070  * the code below allows us to change that binding at the time the
6071  * CPU is interrupted by virtue of incoming connection's squeue.
6072  *
6073  * This is useful only in case of a listener bound to a specific IP
6074  * address. For other kinds of listeners, they get bound the
6075  * very first time and there is no attempt to rebind them.
6076  */
6077 void
6078 tcp_conn_request_unbound(void *arg, mblk_t *mp, void *arg2)
6079 {
6080     conn_t *connp = (conn_t *)arg;
6081     squeue_t *sqp = (squeue_t *)arg2;
6082     squeue_t *new_sqp;
6083     uint32_t conn_flags;
6084 
6085     if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) {
6086         new_sqp = (squeue_t *)DB_CKSUMSTART(mp);
6087     } else {
6088         goto done;
6089     }
6090 
6091     if (connp->conn_fanout == NULL)
6092         goto done;
6093 
6094     if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
6095         mutex_enter(&connp->conn_fanout->connf_lock);
6096         mutex_enter(&connp->conn_lock);
6097         /*
6098          * No one from read or write side can access us now
6099          * except for already queued packets on this squeue.
6100          * But since we haven't changed the squeue yet, they
6101          * can't execute. If they are processed after we have
6102          * changed the squeue, they are sent back to the
6103          * correct squeue down below.
6104          * But a listener close can race with processing of
6105          * incoming SYN. If incoming SYN processing changes
6106          * the squeue then the listener close which is waiting
6107          * to enter the squeue would operate on the wrong
6108          * squeue. Hence we don't change the squeue here unless
6109          * the refcount is exactly the minimum refcount. The
6110          * minimum refcount of 4 is counted as - 1 each for
6111          * TCP and IP, 1 for being in the classifier hash, and
6112          * 1 for the mblk being processed.
6113          */
6114 
6115         if (connp->conn_ref != 4 ||
6116             connp->conn_tcp->tcp_state != TCPS_LISTEN) {
6117             mutex_exit(&connp->conn_lock);
6118             mutex_exit(&connp->conn_fanout->connf_lock);
6119             goto done;
6120         }
6121         if (connp->conn_sqp != new_sqp) {
6122             while (connp->conn_sqp != new_sqp)
6123                 (void) casptr(&connp->conn_sqp, sqp, new_sqp);
6124         }
6125 
6126         do {
6127             conn_flags = connp->conn_flags;
6128             conn_flags |= IPCL_FULLY_BOUND;
6129             (void) cas32(&connp->conn_flags, connp->conn_flags,
6130                 conn_flags);
6131         } while (!(connp->conn_flags & IPCL_FULLY_BOUND));
6132 
6133         mutex_exit(&connp->conn_fanout->connf_lock);
6134         mutex_exit(&connp->conn_lock);
6135     }
6136 
6137 done:
6138     if (connp->conn_sqp != sqp) {
6139         CONN_INC_REF(connp);
6140         squeue_fill(connp->conn_sqp, mp,
6141             connp->conn_recv, connp, SQTAG_TCP_CONN_REQ_UNBOUND);
6142     } else {
6143         tcp_conn_request(connp, mp, sqp);
6144     }
6145 }
6146 
6147 /*
6148  * Successful connect request processing begins when our client passes
6149  * a T_CONN_REQ message into tcp_wput() and ends when tcp_rput() passes
6150  * our T_OK_ACK reply message upstream.  The control flow looks like this:
6151  *     upstream -> tcp_wput() -> tcp_wput_proto() -> tcp_connect() -> IP
6152  *     upstream <- tcp_rput() <- IP
6153  * After various error checks are completed, tcp_connect() lays
6154  * the target address and port into the composite header template,
6155  * preallocates the T_OK_ACK reply message, constructs a full 12 byte bind
6156  * request followed by an IRE request, and passes the three mblk message
6157  * down to IP looking like this:
6158  *     O_T_BIND_REQ for IP --> IRE req --> T_OK_ACK for our client
6159  * Processing continues in tcp_rput() when we receive the following message:
6160  *     T_BIND_ACK from IP --> IRE ack --> T_OK_ACK for our client
6161  * After consuming the first two mblks, tcp_rput() calls tcp_timer(),
6162  * to fire off the connection request, and then passes the T_OK_ACK mblk
6163  * upstream that we filled in below. There are, of course, numerous
6164  * error conditions along the way which truncate the processing described
6165  * above.
6166  */
6167 static void
6168 tcp_connect(tcp_t *tcp, mblk_t *mp)
6169 {
6170     sin_t *sin;
6171     sin6_t *sin6;
6172     queue_t *q = tcp->tcp_wq;
6173     struct T_conn_req *tcr;
6174     ipaddr_t *dstaddrp;
6175     in_port_t dstport;
6176     uint_t srcid;
6177 
6178     tcr = (struct T_conn_req *)mp->b_rptr;
6179 
6180     ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
6181     if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
6182         tcp_err_ack(tcp, mp, TPROTO, 0);
6183         return;
6184     }
6185 
6186     /*
6187      * Determine the packet type based on the type of address passed in;
6188      * the request should contain an IPv4 or IPv6 address.
6189      * Make sure that the address family matches the family of
6190      * the address passed down.
6191      */
6192     switch (tcr->DEST_length) {
6193     default:
6194         tcp_err_ack(tcp, mp, TBADADDR, 0);
6195         return;
6196 
6197     case (sizeof (sin_t) - sizeof (sin->sin_zero)): {
6198         /*
6199          * XXX: The check for valid DEST_length was not there
6200          * in earlier releases and some buggy
6201          * TLI apps (e.g. Sybase) got away with not feeding
6202          * in sin_zero part of address.
6203          * We allow that bug to keep those buggy apps humming.
6204          * Test suites require the check on DEST_length.
6205          * We construct a new mblk with valid DEST_length and
6206          * free the original so the rest of the code does
6207          * not have to keep track of this special shorter
6208          * length address case.
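         * (For reference: sizeof (sin_t) is 16 bytes and sin_zero
         * is 8, so the shorter form carries just the 8 bytes of
         * family, port and address.)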
6209 */ 6210 mblk_t *nmp; 6211 struct T_conn_req *ntcr; 6212 sin_t *nsin; 6213 6214 nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) + 6215 tcr->OPT_length, BPRI_HI); 6216 if (nmp == NULL) { 6217 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 6218 return; 6219 } 6220 ntcr = (struct T_conn_req *)nmp->b_rptr; 6221 bzero(ntcr, sizeof (struct T_conn_req)); /* zero fill */ 6222 ntcr->PRIM_type = T_CONN_REQ; 6223 ntcr->DEST_length = sizeof (sin_t); 6224 ntcr->DEST_offset = sizeof (struct T_conn_req); 6225 6226 nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset); 6227 *nsin = sin_null; 6228 /* Get pointer to shorter address to copy from original mp */ 6229 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6230 tcr->DEST_length); /* extract DEST_length worth of sin_t */ 6231 if (sin == NULL || !OK_32PTR((char *)sin)) { 6232 freemsg(nmp); 6233 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6234 return; 6235 } 6236 nsin->sin_family = sin->sin_family; 6237 nsin->sin_port = sin->sin_port; 6238 nsin->sin_addr = sin->sin_addr; 6239 /* Note:nsin->sin_zero zero-fill with sin_null assign above */ 6240 nmp->b_wptr = (uchar_t *)&nsin[1]; 6241 if (tcr->OPT_length != 0) { 6242 ntcr->OPT_length = tcr->OPT_length; 6243 ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr; 6244 bcopy((uchar_t *)tcr + tcr->OPT_offset, 6245 (uchar_t *)ntcr + ntcr->OPT_offset, 6246 tcr->OPT_length); 6247 nmp->b_wptr += tcr->OPT_length; 6248 } 6249 freemsg(mp); /* original mp freed */ 6250 mp = nmp; /* re-initialize original variables */ 6251 tcr = ntcr; 6252 } 6253 /* FALLTHRU */ 6254 6255 case sizeof (sin_t): 6256 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6257 sizeof (sin_t)); 6258 if (sin == NULL || !OK_32PTR((char *)sin)) { 6259 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6260 return; 6261 } 6262 if (tcp->tcp_family != AF_INET || 6263 sin->sin_family != AF_INET) { 6264 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6265 return; 6266 } 6267 if (sin->sin_port == 0) { 6268 tcp_err_ack(tcp, mp, TBADADDR, 0); 6269 return; 6270 } 6271 if (tcp->tcp_connp && tcp->tcp_connp->conn_ipv6_v6only) { 6272 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6273 return; 6274 } 6275 6276 break; 6277 6278 case sizeof (sin6_t): 6279 sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset, 6280 sizeof (sin6_t)); 6281 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 6282 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6283 return; 6284 } 6285 if (tcp->tcp_family != AF_INET6 || 6286 sin6->sin6_family != AF_INET6) { 6287 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6288 return; 6289 } 6290 if (sin6->sin6_port == 0) { 6291 tcp_err_ack(tcp, mp, TBADADDR, 0); 6292 return; 6293 } 6294 break; 6295 } 6296 /* 6297 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we 6298 * should key on their sequence number and cut them loose. 
6299  */
6300 
6301     /*
6302      * If options were passed in, feed them in for verification and handling.
6303      */
6304     if (tcr->OPT_length != 0) {
6305         mblk_t *ok_mp;
6306         mblk_t *discon_mp;
6307         mblk_t *conn_opts_mp;
6308         int t_error, sys_error, do_disconnect;
6309 
6310         conn_opts_mp = NULL;
6311 
6312         if (tcp_conprim_opt_process(tcp, mp,
6313             &do_disconnect, &t_error, &sys_error) < 0) {
6314             if (do_disconnect) {
6315                 ASSERT(t_error == 0 && sys_error == 0);
6316                 discon_mp = mi_tpi_discon_ind(NULL,
6317                     ECONNREFUSED, 0);
6318                 if (!discon_mp) {
6319                     tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
6320                         TSYSERR, ENOMEM);
6321                     return;
6322                 }
6323                 ok_mp = mi_tpi_ok_ack_alloc(mp);
6324                 if (!ok_mp) {
6325                     tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
6326                         TSYSERR, ENOMEM);
6327                     return;
6328                 }
6329                 qreply(q, ok_mp);
6330                 qreply(q, discon_mp); /* no flush! */
6331             } else {
6332                 ASSERT(t_error != 0);
6333                 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error,
6334                     sys_error);
6335             }
6336             return;
6337         }
6338         /*
6339          * Success in setting options: the mp option buffer represented
6340          * by OPT_length/offset has potentially been modified and
6341          * contains the results of option processing. We copy it into
6342          * another mp to save it, for potentially returning it
6343          * in T_CONN_CON.
6344          */
6345         if (tcr->OPT_length != 0) { /* there are resulting options */
6346             conn_opts_mp = copyb(mp);
6347             if (!conn_opts_mp) {
6348                 tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
6349                     TSYSERR, ENOMEM);
6350                 return;
6351             }
6352             ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL);
6353             tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp;
6354             /*
6355              * Note:
6356              * These resulting options can include any
6357              * end-to-end negotiation options, but there is no
6358              * such thing (yet?) in our TCP/IP.
6359              */
6360         }
6361     }
6362 
6363     /*
6364      * If we're connecting to an IPv4-mapped IPv6 address, we need to
6365      * make sure that the template IP header in the tcp structure is an
6366      * IPv4 header, and that the tcp_ipversion is IPV4_VERSION. We
6367      * need to do this before we call tcp_bindi() so that the port lookup
6368      * code will look for ports in the correct port space (IPv4 and
6369      * IPv6 have separate port spaces).
6370      */
6371     if (tcp->tcp_family == AF_INET6 && tcp->tcp_ipversion == IPV6_VERSION &&
6372         IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6373         int err = 0;
6374 
6375         err = tcp_header_init_ipv4(tcp);
6376         if (err != 0) {
6377             mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM);
6378             goto connect_failed;
6379         }
6380         if (tcp->tcp_lport != 0)
6381             *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport;
6382     }
6383 
6384     switch (tcp->tcp_state) {
6385     case TCPS_IDLE:
6386         /*
6387          * We support quick connect, refer to comments in
6388          * tcp_connect_*()
6389          */
6390         /* FALLTHRU */
6391     case TCPS_BOUND:
6392     case TCPS_LISTEN:
6393         if (tcp->tcp_family == AF_INET6) {
6394             if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6395                 tcp_connect_ipv6(tcp, mp,
6396                     &sin6->sin6_addr,
6397                     sin6->sin6_port, sin6->sin6_flowinfo,
6398                     sin6->__sin6_src_id, sin6->sin6_scope_id);
6399                 return;
6400             }
6401             /*
6402              * The destination address is an IPv4-mapped IPv6 address.
6403              * The source bound address should be unspecified or an
6404              * IPv4-mapped address as well.
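             * (For example, a socket bound to the hypothetical
             * address ::ffff:192.168.1.5 may proceed here, while
             * one bound to a native IPv6 address is rejected
             * with EADDRNOTAVAIL just below.)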
6405 */ 6406 if (!IN6_IS_ADDR_UNSPECIFIED( 6407 &tcp->tcp_bound_source_v6) && 6408 !IN6_IS_ADDR_V4MAPPED(&tcp->tcp_bound_source_v6)) { 6409 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, 6410 EADDRNOTAVAIL); 6411 break; 6412 } 6413 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr)); 6414 dstport = sin6->sin6_port; 6415 srcid = sin6->__sin6_src_id; 6416 } else { 6417 dstaddrp = &sin->sin_addr.s_addr; 6418 dstport = sin->sin_port; 6419 srcid = 0; 6420 } 6421 6422 tcp_connect_ipv4(tcp, mp, dstaddrp, dstport, srcid); 6423 return; 6424 default: 6425 mp = mi_tpi_err_ack_alloc(mp, TOUTSTATE, 0); 6426 break; 6427 } 6428 /* 6429 * Note: Code below is the "failure" case 6430 */ 6431 /* return error ack and blow away saved option results if any */ 6432 connect_failed: 6433 if (mp != NULL) 6434 putnext(tcp->tcp_rq, mp); 6435 else { 6436 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6437 TSYSERR, ENOMEM); 6438 } 6439 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6440 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6441 } 6442 6443 /* 6444 * Handle connect to IPv4 destinations, including connections for AF_INET6 6445 * sockets connecting to IPv4 mapped IPv6 destinations. 6446 */ 6447 static void 6448 tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp, in_port_t dstport, 6449 uint_t srcid) 6450 { 6451 tcph_t *tcph; 6452 mblk_t *mp1; 6453 ipaddr_t dstaddr = *dstaddrp; 6454 int32_t oldstate; 6455 uint16_t lport; 6456 tcp_stack_t *tcps = tcp->tcp_tcps; 6457 6458 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 6459 6460 /* Check for attempt to connect to INADDR_ANY */ 6461 if (dstaddr == INADDR_ANY) { 6462 /* 6463 * SunOS 4.x and 4.3 BSD allow an application 6464 * to connect a TCP socket to INADDR_ANY. 6465 * When they do this, the kernel picks the 6466 * address of one interface and uses it 6467 * instead. The kernel usually ends up 6468 * picking the address of the loopback 6469 * interface. This is an undocumented feature. 6470 * However, we provide the same thing here 6471 * in order to have source and binary 6472 * compatibility with SunOS 4.x. 6473 * Update the T_CONN_REQ (sin/sin6) since it is used to 6474 * generate the T_CONN_CON. 6475 */ 6476 dstaddr = htonl(INADDR_LOOPBACK); 6477 *dstaddrp = dstaddr; 6478 } 6479 6480 /* Handle __sin6_src_id if socket not bound to an IP address */ 6481 if (srcid != 0 && tcp->tcp_ipha->ipha_src == INADDR_ANY) { 6482 ip_srcid_find_id(srcid, &tcp->tcp_ip_src_v6, 6483 tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack); 6484 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_ip_src_v6, 6485 tcp->tcp_ipha->ipha_src); 6486 } 6487 6488 /* 6489 * Don't let an endpoint connect to itself. Note that 6490 * the test here does not catch the case where the 6491 * source IP addr was left unspecified by the user. In 6492 * this case, the source addr is set in tcp_adapt_ire() 6493 * using the reply to the T_BIND message that we send 6494 * down to IP here and the check is repeated in tcp_rput_other. 6495 */ 6496 if (dstaddr == tcp->tcp_ipha->ipha_src && 6497 dstport == tcp->tcp_lport) { 6498 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6499 goto failed; 6500 } 6501 6502 tcp->tcp_ipha->ipha_dst = dstaddr; 6503 IN6_IPADDR_TO_V4MAPPED(dstaddr, &tcp->tcp_remote_v6); 6504 6505 /* 6506 * Massage a source route if any putting the first hop 6507 * in iph_dst. Compute a starting value for the checksum which 6508 * takes into account that the original iph_dst should be 6509 * included in the checksum but that ip will include the 6510 * first hop in the source route in the tcp checksum. 
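 *
 * Roughly, the adjustment below: fold the 32-bit sum into 16 bits,
 * subtract the two 16-bit halves of ipha_dst (the first hop, which
 * ip will itself include in the pseudo-header checksum), decrement
 * once on underflow as the ones-complement borrow, then fold again
 * and store the result in host byte order.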
6511  */
6512     tcp->tcp_sum = ip_massage_options(tcp->tcp_ipha, tcps->tcps_netstack);
6513     tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16);
6514     tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) +
6515         (tcp->tcp_ipha->ipha_dst & 0xffff));
6516     if ((int)tcp->tcp_sum < 0)
6517         tcp->tcp_sum--;
6518     tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16);
6519     tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) +
6520         (tcp->tcp_sum >> 16));
6521     tcph = tcp->tcp_tcph;
6522     *(uint16_t *)tcph->th_fport = dstport;
6523     tcp->tcp_fport = dstport;
6524 
6525     oldstate = tcp->tcp_state;
6526     /*
6527      * At this point the remote destination address and remote port fields
6528      * in the tcp-four-tuple have been filled in the tcp structure. Now we
6529      * have to see which state tcp was in so we can take appropriate action.
6530      */
6531     if (oldstate == TCPS_IDLE) {
6532         /*
6533          * We support a quick connect capability here, allowing
6534          * clients to transition directly from IDLE to SYN_SENT.
6535          * tcp_bindi will pick an unused port, insert the connection
6536          * in the bind hash and transition to BOUND state.
6537          */
6538         lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
6539             tcp, B_TRUE);
6540         lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE,
6541             B_FALSE, B_FALSE);
6542         if (lport == 0) {
6543             mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0);
6544             goto failed;
6545         }
6546     }
6547     tcp->tcp_state = TCPS_SYN_SENT;
6548 
6549     /*
6550      * TODO: allow data with connect requests
6551      * by unlinking M_DATA trailers here and
6552      * linking them in behind the T_OK_ACK mblk.
6553      * The tcp_rput() bind ack handler would then
6554      * feed them to tcp_wput_data() rather than call
6555      * tcp_timer().
6556      */
6557     mp = mi_tpi_ok_ack_alloc(mp);
6558     if (!mp) {
6559         tcp->tcp_state = oldstate;
6560         goto failed;
6561     }
6562     if (tcp->tcp_family == AF_INET) {
6563         mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ,
6564             sizeof (ipa_conn_t));
6565     } else {
6566         mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ,
6567             sizeof (ipa6_conn_t));
6568     }
6569     if (mp1) {
6570         /*
6571          * We need to make sure that the conn_recv is set to a non-null
6572          * value before we insert the conn_t into the classifier table.
6573          * This is to avoid a race with an incoming packet which does
6574          * an ipcl_classify().
6575          */
6576         tcp->tcp_connp->conn_recv = tcp_input;
6577 
6578         /* Hang onto the T_OK_ACK for later. */
6579         linkb(mp1, mp);
6580         mblk_setcred(mp1, tcp->tcp_cred);
6581         if (tcp->tcp_family == AF_INET)
6582             mp1 = ip_bind_v4(tcp->tcp_wq, mp1, tcp->tcp_connp);
6583         else {
6584             mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp,
6585                 &tcp->tcp_sticky_ipp);
6586         }
6587         BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens);
6588         tcp->tcp_active_open = 1;
6589         /*
6590          * If the bind cannot complete immediately,
6591          * IP will arrange to call tcp_rput_other
6592          * when the bind completes.
6593          */
6594         if (mp1 != NULL)
6595             tcp_rput_other(tcp, mp1);
6596         return;
6597     }
6598     /* Error case */
6599     tcp->tcp_state = oldstate;
6600     mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM);
6601 
6602 failed:
6603     /* return error ack and blow away saved option results if any */
6604     if (mp != NULL)
6605         putnext(tcp->tcp_rq, mp);
6606     else {
6607         tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
6608             TSYSERR, ENOMEM);
6609     }
6610     if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
6611         tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
6612 
6613 }
6614 
6615 /*
6616  * Handle connect to IPv6 destinations.
6617 */ 6618 static void 6619 tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp, 6620 in_port_t dstport, uint32_t flowinfo, uint_t srcid, uint32_t scope_id) 6621 { 6622 tcph_t *tcph; 6623 mblk_t *mp1; 6624 ip6_rthdr_t *rth; 6625 int32_t oldstate; 6626 uint16_t lport; 6627 tcp_stack_t *tcps = tcp->tcp_tcps; 6628 6629 ASSERT(tcp->tcp_family == AF_INET6); 6630 6631 /* 6632 * If we're here, it means that the destination address is a native 6633 * IPv6 address. Return an error if tcp_ipversion is not IPv6. A 6634 * reason why it might not be IPv6 is if the socket was bound to an 6635 * IPv4-mapped IPv6 address. 6636 */ 6637 if (tcp->tcp_ipversion != IPV6_VERSION) { 6638 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6639 goto failed; 6640 } 6641 6642 /* 6643 * Interpret a zero destination to mean loopback. 6644 * Update the T_CONN_REQ (sin/sin6) since it is used to 6645 * generate the T_CONN_CON. 6646 */ 6647 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp)) { 6648 *dstaddrp = ipv6_loopback; 6649 } 6650 6651 /* Handle __sin6_src_id if socket not bound to an IP address */ 6652 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 6653 ip_srcid_find_id(srcid, &tcp->tcp_ip6h->ip6_src, 6654 tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack); 6655 tcp->tcp_ip_src_v6 = tcp->tcp_ip6h->ip6_src; 6656 } 6657 6658 /* 6659 * Take care of the scope_id now and add ip6i_t 6660 * if ip6i_t is not already allocated through TCP 6661 * sticky options. At this point tcp_ip6h does not 6662 * have dst info, thus use dstaddrp. 6663 */ 6664 if (scope_id != 0 && 6665 IN6_IS_ADDR_LINKSCOPE(dstaddrp)) { 6666 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 6667 ip6i_t *ip6i; 6668 6669 ipp->ipp_ifindex = scope_id; 6670 ip6i = (ip6i_t *)tcp->tcp_iphc; 6671 6672 if ((ipp->ipp_fields & IPPF_HAS_IP6I) && 6673 ip6i != NULL && (ip6i->ip6i_nxt == IPPROTO_RAW)) { 6674 /* Already allocated */ 6675 ip6i->ip6i_flags |= IP6I_IFINDEX; 6676 ip6i->ip6i_ifindex = ipp->ipp_ifindex; 6677 ipp->ipp_fields |= IPPF_SCOPE_ID; 6678 } else { 6679 int reterr; 6680 6681 ipp->ipp_fields |= IPPF_SCOPE_ID; 6682 if (ipp->ipp_fields & IPPF_HAS_IP6I) 6683 ip2dbg(("tcp_connect_v6: SCOPE_ID set\n")); 6684 reterr = tcp_build_hdrs(tcp->tcp_rq, tcp); 6685 if (reterr != 0) 6686 goto failed; 6687 ip1dbg(("tcp_connect_ipv6: tcp_bld_hdrs returned\n")); 6688 } 6689 } 6690 6691 /* 6692 * Don't let an endpoint connect to itself. Note that 6693 * the test here does not catch the case where the 6694 * source IP addr was left unspecified by the user. In 6695 * this case, the source addr is set in tcp_adapt_ire() 6696 * using the reply to the T_BIND message that we send 6697 * down to IP here and the check is repeated in tcp_rput_other. 6698 */ 6699 if (IN6_ARE_ADDR_EQUAL(dstaddrp, &tcp->tcp_ip6h->ip6_src) && 6700 (dstport == tcp->tcp_lport)) { 6701 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6702 goto failed; 6703 } 6704 6705 tcp->tcp_ip6h->ip6_dst = *dstaddrp; 6706 tcp->tcp_remote_v6 = *dstaddrp; 6707 tcp->tcp_ip6h->ip6_vcf = 6708 (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) | 6709 (flowinfo & ~IPV6_VERS_AND_FLOW_MASK); 6710 6711 6712 /* 6713 * Massage a routing header (if present) putting the first hop 6714 * in ip6_dst. Compute a starting value for the checksum which 6715 * takes into account that the original ip6_dst should be 6716 * included in the checksum but that ip will include the 6717 * first hop in the source route in the tcp checksum. 
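 *
 * (If no routing header is present there is nothing to adjust and
 * tcp_sum is simply cleared below.)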
6718 */ 6719 rth = ip_find_rthdr_v6(tcp->tcp_ip6h, (uint8_t *)tcp->tcp_tcph); 6720 if (rth != NULL) { 6721 tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, rth, 6722 tcps->tcps_netstack); 6723 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 6724 (tcp->tcp_sum >> 16)); 6725 } else { 6726 tcp->tcp_sum = 0; 6727 } 6728 6729 tcph = tcp->tcp_tcph; 6730 *(uint16_t *)tcph->th_fport = dstport; 6731 tcp->tcp_fport = dstport; 6732 6733 oldstate = tcp->tcp_state; 6734 /* 6735 * At this point the remote destination address and remote port fields 6736 * in the tcp-four-tuple have been filled in the tcp structure. Now we 6737 * have to see which state tcp was in so we can take appropriate action. 6738 */ 6739 if (oldstate == TCPS_IDLE) { 6740 /* 6741 * We support a quick connect capability here, allowing 6742 * clients to transition directly from IDLE to SYN_SENT. 6743 * tcp_bindi will pick an unused port, insert the connection 6744 * in the bind hash, and transition to BOUND state. 6745 */ 6746 lport = tcp_update_next_port(tcps->tcps_next_port_to_try, 6747 tcp, B_TRUE); 6748 lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE, 6749 B_FALSE, B_FALSE); 6750 if (lport == 0) { 6751 mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0); 6752 goto failed; 6753 } 6754 } 6755 tcp->tcp_state = TCPS_SYN_SENT; 6756 /* 6757 * TODO: allow data with connect requests 6758 * by unlinking M_DATA trailers here and 6759 * linking them in behind the T_OK_ACK mblk. 6760 * The tcp_rput() bind ack handler would then 6761 * feed them to tcp_wput_data() rather than call 6762 * tcp_timer(). 6763 */ 6764 mp = mi_tpi_ok_ack_alloc(mp); 6765 if (!mp) { 6766 tcp->tcp_state = oldstate; 6767 goto failed; 6768 } 6769 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, sizeof (ipa6_conn_t)); 6770 if (mp1) { 6771 /* 6772 * We need to make sure that the conn_recv is set to a non-null 6773 * value before we insert the conn_t into the classifier table. 6774 * This is to avoid a race with an incoming packet which does 6775 * an ipcl_classify(). 6776 */ 6777 tcp->tcp_connp->conn_recv = tcp_input; 6778 6779 /* Hang onto the T_OK_ACK for later. */ 6780 linkb(mp1, mp); 6781 mblk_setcred(mp1, tcp->tcp_cred); 6782 mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp, 6783 &tcp->tcp_sticky_ipp); 6784 BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens); 6785 tcp->tcp_active_open = 1; 6786 /* ip_bind_v6() may return ACK or ERROR */ 6787 if (mp1 != NULL) 6788 tcp_rput_other(tcp, mp1); 6789 return; 6790 } 6791 /* Error case */ 6792 tcp->tcp_state = oldstate; 6793 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6794 6795 failed: 6796 /* return error ack and blow away saved option results if any */ 6797 if (mp != NULL) 6798 putnext(tcp->tcp_rq, mp); 6799 else { 6800 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6801 TSYSERR, ENOMEM); 6802 } 6803 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6804 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6805 } 6806 6807 /* 6808 * We need a stream q for detached closing tcp connections 6809 * to use. Our client hereby indicates that this q is the 6810 * one to use.
6811 */ 6812 static void 6813 tcp_def_q_set(tcp_t *tcp, mblk_t *mp) 6814 { 6815 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 6816 queue_t *q = tcp->tcp_wq; 6817 tcp_stack_t *tcps = tcp->tcp_tcps; 6818 6819 #ifdef NS_DEBUG 6820 (void) printf("TCP_IOC_DEFAULT_Q for stack %d\n", 6821 tcps->tcps_netstack->netstack_stackid); 6822 #endif 6823 mp->b_datap->db_type = M_IOCACK; 6824 iocp->ioc_count = 0; 6825 mutex_enter(&tcps->tcps_g_q_lock); 6826 if (tcps->tcps_g_q != NULL) { 6827 mutex_exit(&tcps->tcps_g_q_lock); 6828 iocp->ioc_error = EALREADY; 6829 } else { 6830 mblk_t *mp1; 6831 6832 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 0); 6833 if (mp1 == NULL) { 6834 mutex_exit(&tcps->tcps_g_q_lock); 6835 iocp->ioc_error = ENOMEM; 6836 } else { 6837 tcps->tcps_g_q = tcp->tcp_rq; 6838 mutex_exit(&tcps->tcps_g_q_lock); 6839 iocp->ioc_error = 0; 6840 iocp->ioc_rval = 0; 6841 /* 6842 * We are passing tcp_sticky_ipp as NULL 6843 * as it is not useful for the tcp default queue. 6844 * 6845 * Set conn_recv just in case. 6846 */ 6847 tcp->tcp_connp->conn_recv = tcp_conn_request; 6848 6849 mp1 = ip_bind_v6(q, mp1, tcp->tcp_connp, NULL); 6850 if (mp1 != NULL) 6851 tcp_rput_other(tcp, mp1); 6852 } 6853 } 6854 qreply(q, mp); 6855 } 6856 6857 /* 6858 * Our client hereby directs us to reject the connection request 6859 * that tcp_conn_request() marked with 'seqnum'. Rejection consists 6860 * of sending the appropriate RST, not an ICMP error. 6861 */ 6862 static void 6863 tcp_disconnect(tcp_t *tcp, mblk_t *mp) 6864 { 6865 tcp_t *ltcp = NULL; 6866 t_scalar_t seqnum; 6867 conn_t *connp; 6868 tcp_stack_t *tcps = tcp->tcp_tcps; 6869 6870 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 6871 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) { 6872 tcp_err_ack(tcp, mp, TPROTO, 0); 6873 return; 6874 } 6875 6876 /* 6877 * Right now, upper modules pass down a T_DISCON_REQ to TCP 6878 * when the stream is in BOUND state. Do not send a reset, 6879 * since the destination IP address is not valid, and it can 6880 * be the initialized value of all zeros (broadcast address). 6881 * 6882 * If TCP has sent down a bind request to IP and has not 6883 * received the reply, reject the request. Otherwise, TCP 6884 * will be confused. 6885 */ 6886 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_hard_binding) { 6887 if (tcp->tcp_debug) { 6888 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 6889 "tcp_disconnect: bad state, %d", tcp->tcp_state); 6890 } 6891 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 6892 return; 6893 } 6894 6895 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number; 6896 6897 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) { 6898 6899 /* 6900 * According to TPI, for non-listeners, ignore the seqnum 6901 * and disconnect. 6902 * The following interpretation of a -1 seqnum is historical 6903 * and arguably implied by TPI (TPI only states that, for 6904 * T_CONN_IND, a valid seqnum should not be -1): 6905 * 6906 * -1 means disconnect everything, 6907 * regardless, even on a listener. 6908 */ 6909 6910 int old_state = tcp->tcp_state; 6911 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 6912 6913 /* 6914 * The connection can't be on the tcp_time_wait_head list 6915 * since it is not detached. 6916 */ 6917 ASSERT(tcp->tcp_time_wait_next == NULL); 6918 ASSERT(tcp->tcp_time_wait_prev == NULL); 6919 ASSERT(tcp->tcp_time_wait_expire == 0); 6920 ltcp = NULL; 6921 /* 6922 * If it used to be a listener, check to make sure no one else 6923 * has taken the port before switching back to LISTEN state.
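 *
 * Illustrative summary of the decision below (descriptive only, no new
 * behavior): if this endpoint had a backlog (tcp_conn_req_max != 0) and
 * no other listener now owns the port, it re-enters TCPS_LISTEN;
 * otherwise, if it had progressed past BOUND, it falls back to
 * TCPS_BOUND with the backlog cleared.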
6924 */ 6925 if (tcp->tcp_ipversion == IPV4_VERSION) { 6926 connp = ipcl_lookup_listener_v4(tcp->tcp_lport, 6927 tcp->tcp_ipha->ipha_src, 6928 tcp->tcp_connp->conn_zoneid, ipst); 6929 if (connp != NULL) 6930 ltcp = connp->conn_tcp; 6931 } else { 6932 /* Allow tcp_bound_if listeners? */ 6933 connp = ipcl_lookup_listener_v6(tcp->tcp_lport, 6934 &tcp->tcp_ip6h->ip6_src, 0, 6935 tcp->tcp_connp->conn_zoneid, ipst); 6936 if (connp != NULL) 6937 ltcp = connp->conn_tcp; 6938 } 6939 if (tcp->tcp_conn_req_max && ltcp == NULL) { 6940 tcp->tcp_state = TCPS_LISTEN; 6941 } else if (old_state > TCPS_BOUND) { 6942 tcp->tcp_conn_req_max = 0; 6943 tcp->tcp_state = TCPS_BOUND; 6944 } 6945 if (ltcp != NULL) 6946 CONN_DEC_REF(ltcp->tcp_connp); 6947 if (old_state == TCPS_SYN_SENT || old_state == TCPS_SYN_RCVD) { 6948 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 6949 } else if (old_state == TCPS_ESTABLISHED || 6950 old_state == TCPS_CLOSE_WAIT) { 6951 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 6952 } 6953 6954 if (tcp->tcp_fused) 6955 tcp_unfuse(tcp); 6956 6957 mutex_enter(&tcp->tcp_eager_lock); 6958 if ((tcp->tcp_conn_req_cnt_q0 != 0) || 6959 (tcp->tcp_conn_req_cnt_q != 0)) { 6960 tcp_eager_cleanup(tcp, 0); 6961 } 6962 mutex_exit(&tcp->tcp_eager_lock); 6963 6964 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt, 6965 tcp->tcp_rnxt, TH_RST | TH_ACK); 6966 6967 tcp_reinit(tcp); 6968 6969 if (old_state >= TCPS_ESTABLISHED) { 6970 /* Send M_FLUSH according to TPI */ 6971 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6972 } 6973 mp = mi_tpi_ok_ack_alloc(mp); 6974 if (mp) 6975 putnext(tcp->tcp_rq, mp); 6976 return; 6977 } else if (!tcp_eager_blowoff(tcp, seqnum)) { 6978 tcp_err_ack(tcp, mp, TBADSEQ, 0); 6979 return; 6980 } 6981 if (tcp->tcp_state >= TCPS_ESTABLISHED) { 6982 /* Send M_FLUSH according to TPI */ 6983 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6984 } 6985 mp = mi_tpi_ok_ack_alloc(mp); 6986 if (mp) 6987 putnext(tcp->tcp_rq, mp); 6988 } 6989 6990 /* 6991 * Diagnostic routine used to return a string associated with the tcp state. 6992 * Note that if the caller does not supply a buffer, it will use an internal 6993 * static string. This means that if multiple threads call this function at 6994 * the same time, output can be corrupted... Note also that this function 6995 * does not check the size of the supplied buffer. The caller has to make 6996 * sure that it is big enough. 
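 *
 * A hedged usage sketch (illustrative only); the buffer size mirrors
 * priv_buf below, which is assumed to be the worst case:
 *
 *	char dbuf[INET6_ADDRSTRLEN * 2 + 80];
 *
 *	(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, "%s",
 *	    tcp_display(tcp, dbuf, DISP_ADDR_AND_PORT));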
6997 */ 6998 static char * 6999 tcp_display(tcp_t *tcp, char *sup_buf, char format) 7000 { 7001 char buf1[30]; 7002 static char priv_buf[INET6_ADDRSTRLEN * 2 + 80]; 7003 char *buf; 7004 char *cp; 7005 in6_addr_t local, remote; 7006 char local_addrbuf[INET6_ADDRSTRLEN]; 7007 char remote_addrbuf[INET6_ADDRSTRLEN]; 7008 7009 if (sup_buf != NULL) 7010 buf = sup_buf; 7011 else 7012 buf = priv_buf; 7013 7014 if (tcp == NULL) 7015 return ("NULL_TCP"); 7016 switch (tcp->tcp_state) { 7017 case TCPS_CLOSED: 7018 cp = "TCP_CLOSED"; 7019 break; 7020 case TCPS_IDLE: 7021 cp = "TCP_IDLE"; 7022 break; 7023 case TCPS_BOUND: 7024 cp = "TCP_BOUND"; 7025 break; 7026 case TCPS_LISTEN: 7027 cp = "TCP_LISTEN"; 7028 break; 7029 case TCPS_SYN_SENT: 7030 cp = "TCP_SYN_SENT"; 7031 break; 7032 case TCPS_SYN_RCVD: 7033 cp = "TCP_SYN_RCVD"; 7034 break; 7035 case TCPS_ESTABLISHED: 7036 cp = "TCP_ESTABLISHED"; 7037 break; 7038 case TCPS_CLOSE_WAIT: 7039 cp = "TCP_CLOSE_WAIT"; 7040 break; 7041 case TCPS_FIN_WAIT_1: 7042 cp = "TCP_FIN_WAIT_1"; 7043 break; 7044 case TCPS_CLOSING: 7045 cp = "TCP_CLOSING"; 7046 break; 7047 case TCPS_LAST_ACK: 7048 cp = "TCP_LAST_ACK"; 7049 break; 7050 case TCPS_FIN_WAIT_2: 7051 cp = "TCP_FIN_WAIT_2"; 7052 break; 7053 case TCPS_TIME_WAIT: 7054 cp = "TCP_TIME_WAIT"; 7055 break; 7056 default: 7057 (void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state); 7058 cp = buf1; 7059 break; 7060 } 7061 switch (format) { 7062 case DISP_ADDR_AND_PORT: 7063 if (tcp->tcp_ipversion == IPV4_VERSION) { 7064 /* 7065 * Note that we use the remote address in the tcp_b 7066 * structure. This means that it will print out 7067 * the real destination address, not the next hop's 7068 * address if source routing is used. 7069 */ 7070 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ip_src, &local); 7071 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &remote); 7072 7073 } else { 7074 local = tcp->tcp_ip_src_v6; 7075 remote = tcp->tcp_remote_v6; 7076 } 7077 (void) inet_ntop(AF_INET6, &local, local_addrbuf, 7078 sizeof (local_addrbuf)); 7079 (void) inet_ntop(AF_INET6, &remote, remote_addrbuf, 7080 sizeof (remote_addrbuf)); 7081 (void) mi_sprintf(buf, "[%s.%u, %s.%u] %s", 7082 local_addrbuf, ntohs(tcp->tcp_lport), remote_addrbuf, 7083 ntohs(tcp->tcp_fport), cp); 7084 break; 7085 case DISP_PORT_ONLY: 7086 default: 7087 (void) mi_sprintf(buf, "[%u, %u] %s", 7088 ntohs(tcp->tcp_lport), ntohs(tcp->tcp_fport), cp); 7089 break; 7090 } 7091 7092 return (buf); 7093 } 7094 7095 /* 7096 * Called via squeue to get onto the eager's perimeter. It sends a 7097 * TH_RST if the eager is in the fanout table. The listener wants the 7098 * eager to disappear either by means of tcp_eager_blowoff() or 7099 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be 7100 * called (via squeue) if the eager cannot be inserted in the 7101 * fanout table in tcp_conn_request(). 7102 */ 7103 /* ARGSUSED */ 7104 void 7105 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2) 7106 { 7107 conn_t *econnp = (conn_t *)arg; 7108 tcp_t *eager = econnp->conn_tcp; 7109 tcp_t *listener = eager->tcp_listener; 7110 tcp_stack_t *tcps = eager->tcp_tcps; 7111 7112 /* 7113 * We could be called because the listener is closing. Since 7114 * the eager is using the listener's queues, it's not safe. 7115 * Better use the default queue just to send the TH_RST 7116 * out.
7117 */ 7118 ASSERT(tcps->tcps_g_q != NULL); 7119 eager->tcp_rq = tcps->tcps_g_q; 7120 eager->tcp_wq = WR(tcps->tcps_g_q); 7121 7122 /* 7123 * An eager's conn_fanout will be NULL if it's a duplicate 7124 * of an existing 4-tuple in the conn fanout table. 7125 * We don't want to send an RST out in such a case. 7126 */ 7127 if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) { 7128 tcp_xmit_ctl("tcp_eager_kill, can't wait", 7129 eager, eager->tcp_snxt, 0, TH_RST); 7130 } 7131 7132 /* We are here because the listener wants this eager gone */ 7133 if (listener != NULL) { 7134 mutex_enter(&listener->tcp_eager_lock); 7135 tcp_eager_unlink(eager); 7136 if (eager->tcp_tconnind_started) { 7137 /* 7138 * The eager has sent a conn_ind up to the 7139 * listener but the listener decides to close 7140 * instead. We need to drop the extra ref 7141 * placed on the eager in tcp_rput_data() before 7142 * sending the conn_ind to the listener. 7143 */ 7144 CONN_DEC_REF(econnp); 7145 } 7146 mutex_exit(&listener->tcp_eager_lock); 7147 CONN_DEC_REF(listener->tcp_connp); 7148 } 7149 7150 if (eager->tcp_state > TCPS_BOUND) 7151 tcp_close_detached(eager); 7152 } 7153 7154 /* 7155 * Reset any eager connection hanging off this listener marked 7156 * with 'seqnum' and then reclaim its resources. 7157 */ 7158 static boolean_t 7159 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum) 7160 { 7161 tcp_t *eager; 7162 mblk_t *mp; 7163 tcp_stack_t *tcps = listener->tcp_tcps; 7164 7165 TCP_STAT(tcps, tcp_eager_blowoff_calls); 7166 eager = listener; 7167 mutex_enter(&listener->tcp_eager_lock); 7168 do { 7169 eager = eager->tcp_eager_next_q; 7170 if (eager == NULL) { 7171 mutex_exit(&listener->tcp_eager_lock); 7172 return (B_FALSE); 7173 } 7174 } while (eager->tcp_conn_req_seqnum != seqnum); 7175 7176 if (eager->tcp_closemp_used) { 7177 mutex_exit(&listener->tcp_eager_lock); 7178 return (B_TRUE); 7179 } 7180 eager->tcp_closemp_used = B_TRUE; 7181 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7182 CONN_INC_REF(eager->tcp_connp); 7183 mutex_exit(&listener->tcp_eager_lock); 7184 mp = &eager->tcp_closemp; 7185 squeue_fill(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill, 7186 eager->tcp_connp, SQTAG_TCP_EAGER_BLOWOFF); 7187 return (B_TRUE); 7188 } 7189 7190 /* 7191 * Reset any eager connection hanging off this listener 7192 * and then reclaim its resources.
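 *
 * Note on list shapes (descriptive only, mirroring the walks below):
 * tcp_eager_next_q is a NULL-terminated singly linked list, while q0 is
 * circular with the listener itself as the sentinel, so the two
 * traversals terminate differently:
 *
 *	for (eager = listener->tcp_eager_next_q; eager != NULL;
 *	    eager = eager->tcp_eager_next_q)
 *		...
 *	for (eager = listener->tcp_eager_next_q0; eager != listener;
 *	    eager = eager->tcp_eager_next_q0)
 *		...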
7193 */ 7194 static void 7195 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 7196 { 7197 tcp_t *eager; 7198 mblk_t *mp; 7199 tcp_stack_t *tcps = listener->tcp_tcps; 7200 7201 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7202 7203 if (!q0_only) { 7204 /* First cleanup q */ 7205 TCP_STAT(tcps, tcp_eager_blowoff_q); 7206 eager = listener->tcp_eager_next_q; 7207 while (eager != NULL) { 7208 if (!eager->tcp_closemp_used) { 7209 eager->tcp_closemp_used = B_TRUE; 7210 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7211 CONN_INC_REF(eager->tcp_connp); 7212 mp = &eager->tcp_closemp; 7213 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7214 tcp_eager_kill, eager->tcp_connp, 7215 SQTAG_TCP_EAGER_CLEANUP); 7216 } 7217 eager = eager->tcp_eager_next_q; 7218 } 7219 } 7220 /* Then cleanup q0 */ 7221 TCP_STAT(tcps, tcp_eager_blowoff_q0); 7222 eager = listener->tcp_eager_next_q0; 7223 while (eager != listener) { 7224 if (!eager->tcp_closemp_used) { 7225 eager->tcp_closemp_used = B_TRUE; 7226 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7227 CONN_INC_REF(eager->tcp_connp); 7228 mp = &eager->tcp_closemp; 7229 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7230 tcp_eager_kill, eager->tcp_connp, 7231 SQTAG_TCP_EAGER_CLEANUP_Q0); 7232 } 7233 eager = eager->tcp_eager_next_q0; 7234 } 7235 } 7236 7237 /* 7238 * If we are an eager connection hanging off a listener that hasn't 7239 * formally accepted the connection yet, get off his list and blow off 7240 * any data that we have accumulated. 7241 */ 7242 static void 7243 tcp_eager_unlink(tcp_t *tcp) 7244 { 7245 tcp_t *listener = tcp->tcp_listener; 7246 7247 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7248 ASSERT(listener != NULL); 7249 if (tcp->tcp_eager_next_q0 != NULL) { 7250 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 7251 7252 /* Remove the eager tcp from q0 */ 7253 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 7254 tcp->tcp_eager_prev_q0; 7255 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 7256 tcp->tcp_eager_next_q0; 7257 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 7258 listener->tcp_conn_req_cnt_q0--; 7259 7260 tcp->tcp_eager_next_q0 = NULL; 7261 tcp->tcp_eager_prev_q0 = NULL; 7262 7263 /* 7264 * Take the eager out, if it is in the list of droppable 7265 * eagers. 7266 */ 7267 MAKE_UNDROPPABLE(tcp); 7268 7269 if (tcp->tcp_syn_rcvd_timeout != 0) { 7270 /* we have timed out before */ 7271 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 7272 listener->tcp_syn_rcvd_timeout--; 7273 } 7274 } else { 7275 tcp_t **tcpp = &listener->tcp_eager_next_q; 7276 tcp_t *prev = NULL; 7277 7278 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 7279 if (tcpp[0] == tcp) { 7280 if (listener->tcp_eager_last_q == tcp) { 7281 /* 7282 * If we are unlinking the last 7283 * element on the list, adjust 7284 * tail pointer. Set tail pointer 7285 * to nil when list is empty. 7286 */ 7287 ASSERT(tcp->tcp_eager_next_q == NULL); 7288 if (listener->tcp_eager_last_q == 7289 listener->tcp_eager_next_q) { 7290 listener->tcp_eager_last_q = 7291 NULL; 7292 } else { 7293 /* 7294 * We won't get here if there 7295 * is only one eager in the 7296 * list. 
7297 */ 7298 ASSERT(prev != NULL); 7299 listener->tcp_eager_last_q = 7300 prev; 7301 } 7302 } 7303 tcpp[0] = tcp->tcp_eager_next_q; 7304 tcp->tcp_eager_next_q = NULL; 7305 tcp->tcp_eager_last_q = NULL; 7306 ASSERT(listener->tcp_conn_req_cnt_q > 0); 7307 listener->tcp_conn_req_cnt_q--; 7308 break; 7309 } 7310 prev = tcpp[0]; 7311 } 7312 } 7313 tcp->tcp_listener = NULL; 7314 } 7315 7316 /* Shorthand to generate and send TPI error acks to our client */ 7317 static void 7318 tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error) 7319 { 7320 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 7321 putnext(tcp->tcp_rq, mp); 7322 } 7323 7324 /* Shorthand to generate and send TPI error acks to our client */ 7325 static void 7326 tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive, 7327 int t_error, int sys_error) 7328 { 7329 struct T_error_ack *teackp; 7330 7331 if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 7332 M_PCPROTO, T_ERROR_ACK)) != NULL) { 7333 teackp = (struct T_error_ack *)mp->b_rptr; 7334 teackp->ERROR_prim = primitive; 7335 teackp->TLI_error = t_error; 7336 teackp->UNIX_error = sys_error; 7337 putnext(tcp->tcp_rq, mp); 7338 } 7339 } 7340 7341 /* 7342 * Note: No locks are held when inspecting tcp_g_*epriv_ports 7343 * but instead the code relies on: 7344 * - the fact that the address of the array and its size never changes 7345 * - the atomic assignment of the elements of the array 7346 */ 7347 /* ARGSUSED */ 7348 static int 7349 tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 7350 { 7351 int i; 7352 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7353 7354 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7355 if (tcps->tcps_g_epriv_ports[i] != 0) 7356 (void) mi_mpprintf(mp, "%d ", 7357 tcps->tcps_g_epriv_ports[i]); 7358 } 7359 return (0); 7360 } 7361 7362 /* 7363 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7364 * threads from changing it at the same time. 7365 */ 7366 /* ARGSUSED */ 7367 static int 7368 tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7369 cred_t *cr) 7370 { 7371 long new_value; 7372 int i; 7373 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7374 7375 /* 7376 * Fail the request if the new value does not lie within the 7377 * port number limits. 7378 */ 7379 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 7380 new_value <= 0 || new_value >= 65536) { 7381 return (EINVAL); 7382 } 7383 7384 mutex_enter(&tcps->tcps_epriv_port_lock); 7385 /* Check if the value is already in the list */ 7386 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7387 if (new_value == tcps->tcps_g_epriv_ports[i]) { 7388 mutex_exit(&tcps->tcps_epriv_port_lock); 7389 return (EEXIST); 7390 } 7391 } 7392 /* Find an empty slot */ 7393 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7394 if (tcps->tcps_g_epriv_ports[i] == 0) 7395 break; 7396 } 7397 if (i == tcps->tcps_g_num_epriv_ports) { 7398 mutex_exit(&tcps->tcps_epriv_port_lock); 7399 return (EOVERFLOW); 7400 } 7401 /* Set the new value */ 7402 tcps->tcps_g_epriv_ports[i] = (uint16_t)new_value; 7403 mutex_exit(&tcps->tcps_epriv_port_lock); 7404 return (0); 7405 } 7406 7407 /* 7408 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7409 * threads from changing it at the same time. 
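 *
 * These handlers back the ndd(1M) tunables of the same names; a usage
 * sketch (assuming the usual /dev/tcp node), e.g. to make the NFS port
 * privileged and later undo it:
 *
 *	ndd -set /dev/tcp tcp_extra_priv_ports_add 2049
 *	ndd -set /dev/tcp tcp_extra_priv_ports_del 2049
 *	ndd /dev/tcp tcp_extra_priv_ports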
7410 */ 7411 /* ARGSUSED */ 7412 static int 7413 tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7414 cred_t *cr) 7415 { 7416 long new_value; 7417 int i; 7418 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7419 7420 /* 7421 * Fail the request if the new value does not lie within the 7422 * port number limits. 7423 */ 7424 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || new_value <= 0 || 7425 new_value >= 65536) { 7426 return (EINVAL); 7427 } 7428 7429 mutex_enter(&tcps->tcps_epriv_port_lock); 7430 /* Check that the value is already in the list */ 7431 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7432 if (tcps->tcps_g_epriv_ports[i] == new_value) 7433 break; 7434 } 7435 if (i == tcps->tcps_g_num_epriv_ports) { 7436 mutex_exit(&tcps->tcps_epriv_port_lock); 7437 return (ESRCH); 7438 } 7439 /* Clear the value */ 7440 tcps->tcps_g_epriv_ports[i] = 0; 7441 mutex_exit(&tcps->tcps_epriv_port_lock); 7442 return (0); 7443 } 7444 7445 /* Return the TPI/TLI equivalent of our current tcp_state */ 7446 static int 7447 tcp_tpistate(tcp_t *tcp) 7448 { 7449 switch (tcp->tcp_state) { 7450 case TCPS_IDLE: 7451 return (TS_UNBND); 7452 case TCPS_LISTEN: 7453 /* 7454 * Return whether there are outstanding T_CONN_IND waiting 7455 * for the matching T_CONN_RES. Therefore don't count q0. 7456 */ 7457 if (tcp->tcp_conn_req_cnt_q > 0) 7458 return (TS_WRES_CIND); 7459 else 7460 return (TS_IDLE); 7461 case TCPS_BOUND: 7462 return (TS_IDLE); 7463 case TCPS_SYN_SENT: 7464 return (TS_WCON_CREQ); 7465 case TCPS_SYN_RCVD: 7466 /* 7467 * Note: assumption: this has to be the active open SYN_RCVD. 7468 * The passive instance is detached in the SYN_RCVD stage of 7469 * incoming connection processing, so we cannot get a request 7470 * for T_info_ack on it. 7471 */ 7472 return (TS_WACK_CRES); 7473 case TCPS_ESTABLISHED: 7474 return (TS_DATA_XFER); 7475 case TCPS_CLOSE_WAIT: 7476 return (TS_WREQ_ORDREL); 7477 case TCPS_FIN_WAIT_1: 7478 return (TS_WIND_ORDREL); 7479 case TCPS_FIN_WAIT_2: 7480 return (TS_WIND_ORDREL); 7481 7482 case TCPS_CLOSING: 7483 case TCPS_LAST_ACK: 7484 case TCPS_TIME_WAIT: 7485 case TCPS_CLOSED: 7486 /* 7487 * The following TS_WACK_DREQ7 is a rendition of the "not 7488 * yet TS_IDLE" TPI state. There is no best match to any 7489 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT}, so we 7490 * choose a value that will map to the TLI/XTI-level 7491 * state TSTATECHNG (state is in the process of changing), 7492 * which captures what this dummy state represents. 7493 */ 7494 return (TS_WACK_DREQ7); 7495 default: 7496 cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s", 7497 tcp->tcp_state, tcp_display(tcp, NULL, 7498 DISP_PORT_ONLY)); 7499 return (TS_UNBND); 7500 } 7501 } 7502 7503 static void 7504 tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp) 7505 { 7506 tcp_stack_t *tcps = tcp->tcp_tcps; 7507 7508 if (tcp->tcp_family == AF_INET6) 7509 *tia = tcp_g_t_info_ack_v6; 7510 else 7511 *tia = tcp_g_t_info_ack; 7512 tia->CURRENT_state = tcp_tpistate(tcp); 7513 tia->OPT_size = tcp_max_optsize; 7514 if (tcp->tcp_mss == 0) { 7515 /* Not yet set - tcp_open does not set mss */ 7516 if (tcp->tcp_ipversion == IPV4_VERSION) 7517 tia->TIDU_size = tcps->tcps_mss_def_ipv4; 7518 else 7519 tia->TIDU_size = tcps->tcps_mss_def_ipv6; 7520 } else { 7521 tia->TIDU_size = tcp->tcp_mss; 7522 } 7523 /* TODO: Default ETSDU is 1. Is that correct for tcp? */ 7524 } 7525 7526 /* 7527 * This routine responds to T_CAPABILITY_REQ messages. It is called by 7528 * tcp_wput.
Much of the T_CAPABILITY_ACK information is copied from 7529 * tcp_g_t_info_ack. The current state of the stream is copied from 7530 * tcp_state. 7531 */ 7532 static void 7533 tcp_capability_req(tcp_t *tcp, mblk_t *mp) 7534 { 7535 t_uscalar_t cap_bits1; 7536 struct T_capability_ack *tcap; 7537 7538 if (MBLKL(mp) < sizeof (struct T_capability_req)) { 7539 freemsg(mp); 7540 return; 7541 } 7542 7543 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 7544 7545 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 7546 mp->b_datap->db_type, T_CAPABILITY_ACK); 7547 if (mp == NULL) 7548 return; 7549 7550 tcap = (struct T_capability_ack *)mp->b_rptr; 7551 tcap->CAP_bits1 = 0; 7552 7553 if (cap_bits1 & TC1_INFO) { 7554 tcp_copy_info(&tcap->INFO_ack, tcp); 7555 tcap->CAP_bits1 |= TC1_INFO; 7556 } 7557 7558 if (cap_bits1 & TC1_ACCEPTOR_ID) { 7559 tcap->ACCEPTOR_id = tcp->tcp_acceptor_id; 7560 tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 7561 } 7562 7563 putnext(tcp->tcp_rq, mp); 7564 } 7565 7566 /* 7567 * This routine responds to T_INFO_REQ messages. It is called by tcp_wput. 7568 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack. 7569 * The current state of the stream is copied from tcp_state. 7570 */ 7571 static void 7572 tcp_info_req(tcp_t *tcp, mblk_t *mp) 7573 { 7574 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 7575 T_INFO_ACK); 7576 if (!mp) { 7577 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7578 return; 7579 } 7580 tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp); 7581 putnext(tcp->tcp_rq, mp); 7582 } 7583 7584 /* Respond to the TPI addr request */ 7585 static void 7586 tcp_addr_req(tcp_t *tcp, mblk_t *mp) 7587 { 7588 sin_t *sin; 7589 mblk_t *ackmp; 7590 struct T_addr_ack *taa; 7591 7592 /* Make it large enough for worst case */ 7593 ackmp = reallocb(mp, sizeof (struct T_addr_ack) + 7594 2 * sizeof (sin6_t), 1); 7595 if (ackmp == NULL) { 7596 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7597 return; 7598 } 7599 7600 if (tcp->tcp_ipversion == IPV6_VERSION) { 7601 tcp_addr_req_ipv6(tcp, ackmp); 7602 return; 7603 } 7604 taa = (struct T_addr_ack *)ackmp->b_rptr; 7605 7606 bzero(taa, sizeof (struct T_addr_ack)); 7607 ackmp->b_wptr = (uchar_t *)&taa[1]; 7608 7609 taa->PRIM_type = T_ADDR_ACK; 7610 ackmp->b_datap->db_type = M_PCPROTO; 7611 7612 /* 7613 * Note: Following code assumes 32 bit alignment of basic 7614 * data structures like sin_t and struct T_addr_ack. 
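 *
 * Worked layout of the ack built below (offsets are illustrative and
 * assume a 20-byte struct T_addr_ack and a 16-byte sin_t):
 *
 *	offset  0: struct T_addr_ack	LOCADDR_offset = 20, length = 16
 *	offset 20: sin_t (local)	REMADDR_offset = ROUNDUP32(36) = 36
 *	offset 36: sin_t (remote)	b_wptr then ends at offset 52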
7615 */ 7616 if (tcp->tcp_state >= TCPS_BOUND) { 7617 /* 7618 * Fill in local address 7619 */ 7620 taa->LOCADDR_length = sizeof (sin_t); 7621 taa->LOCADDR_offset = sizeof (*taa); 7622 7623 sin = (sin_t *)&taa[1]; 7624 7625 /* Fill with zeroes and then initialize non-zero fields */ 7626 *sin = sin_null; 7627 7628 sin->sin_family = AF_INET; 7629 7630 sin->sin_addr.s_addr = tcp->tcp_ipha->ipha_src; 7631 sin->sin_port = *(uint16_t *)tcp->tcp_tcph->th_lport; 7632 7633 ackmp->b_wptr = (uchar_t *)&sin[1]; 7634 7635 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7636 /* 7637 * Fill in Remote address 7638 */ 7639 taa->REMADDR_length = sizeof (sin_t); 7640 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7641 taa->LOCADDR_length); 7642 7643 sin = (sin_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7644 *sin = sin_null; 7645 sin->sin_family = AF_INET; 7646 sin->sin_addr.s_addr = tcp->tcp_remote; 7647 sin->sin_port = tcp->tcp_fport; 7648 7649 ackmp->b_wptr = (uchar_t *)&sin[1]; 7650 } 7651 } 7652 putnext(tcp->tcp_rq, ackmp); 7653 } 7654 7655 /* Assumes that tcp_addr_req gets enough space and alignment */ 7656 static void 7657 tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *ackmp) 7658 { 7659 sin6_t *sin6; 7660 struct T_addr_ack *taa; 7661 7662 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 7663 ASSERT(OK_32PTR(ackmp->b_rptr)); 7664 ASSERT(ackmp->b_wptr - ackmp->b_rptr >= sizeof (struct T_addr_ack) + 7665 2 * sizeof (sin6_t)); 7666 7667 taa = (struct T_addr_ack *)ackmp->b_rptr; 7668 7669 bzero(taa, sizeof (struct T_addr_ack)); 7670 ackmp->b_wptr = (uchar_t *)&taa[1]; 7671 7672 taa->PRIM_type = T_ADDR_ACK; 7673 ackmp->b_datap->db_type = M_PCPROTO; 7674 7675 /* 7676 * Note: Following code assumes 32 bit alignment of basic 7677 * data structures like sin6_t and struct T_addr_ack. 7678 */ 7679 if (tcp->tcp_state >= TCPS_BOUND) { 7680 /* 7681 * Fill in local address 7682 */ 7683 taa->LOCADDR_length = sizeof (sin6_t); 7684 taa->LOCADDR_offset = sizeof (*taa); 7685 7686 sin6 = (sin6_t *)&taa[1]; 7687 *sin6 = sin6_null; 7688 7689 sin6->sin6_family = AF_INET6; 7690 sin6->sin6_addr = tcp->tcp_ip6h->ip6_src; 7691 sin6->sin6_port = tcp->tcp_lport; 7692 7693 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7694 7695 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7696 /* 7697 * Fill in Remote address 7698 */ 7699 taa->REMADDR_length = sizeof (sin6_t); 7700 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7701 taa->LOCADDR_length); 7702 7703 sin6 = (sin6_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7704 *sin6 = sin6_null; 7705 sin6->sin6_family = AF_INET6; 7706 sin6->sin6_flowinfo = 7707 tcp->tcp_ip6h->ip6_vcf & 7708 ~IPV6_VERS_AND_FLOW_MASK; 7709 sin6->sin6_addr = tcp->tcp_remote_v6; 7710 sin6->sin6_port = tcp->tcp_fport; 7711 7712 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7713 } 7714 } 7715 putnext(tcp->tcp_rq, ackmp); 7716 } 7717 7718 /* 7719 * Handle reinitialization of a tcp structure. 7720 * Maintain "binding state", resetting the state to BOUND, LISTEN, or IDLE.
7721 */ 7722 static void 7723 tcp_reinit(tcp_t *tcp) 7724 { 7725 mblk_t *mp; 7726 int err; 7727 tcp_stack_t *tcps = tcp->tcp_tcps; 7728 7729 TCP_STAT(tcps, tcp_reinit_calls); 7730 7731 /* tcp_reinit should never be called for detached tcp_t's */ 7732 ASSERT(tcp->tcp_listener == NULL); 7733 ASSERT((tcp->tcp_family == AF_INET && 7734 tcp->tcp_ipversion == IPV4_VERSION) || 7735 (tcp->tcp_family == AF_INET6 && 7736 (tcp->tcp_ipversion == IPV4_VERSION || 7737 tcp->tcp_ipversion == IPV6_VERSION))); 7738 7739 /* Cancel outstanding timers */ 7740 tcp_timers_stop(tcp); 7741 7742 /* 7743 * Reset everything in the state vector, after updating global 7744 * MIB data from instance counters. 7745 */ 7746 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 7747 tcp->tcp_ibsegs = 0; 7748 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 7749 tcp->tcp_obsegs = 0; 7750 7751 tcp_close_mpp(&tcp->tcp_xmit_head); 7752 if (tcp->tcp_snd_zcopy_aware) 7753 tcp_zcopy_notify(tcp); 7754 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL; 7755 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0; 7756 mutex_enter(&tcp->tcp_non_sq_lock); 7757 if (tcp->tcp_flow_stopped && 7758 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 7759 tcp_clrqfull(tcp); 7760 } 7761 mutex_exit(&tcp->tcp_non_sq_lock); 7762 tcp_close_mpp(&tcp->tcp_reass_head); 7763 tcp->tcp_reass_tail = NULL; 7764 if (tcp->tcp_rcv_list != NULL) { 7765 /* Free b_next chain */ 7766 tcp_close_mpp(&tcp->tcp_rcv_list); 7767 tcp->tcp_rcv_last_head = NULL; 7768 tcp->tcp_rcv_last_tail = NULL; 7769 tcp->tcp_rcv_cnt = 0; 7770 } 7771 tcp->tcp_rcv_last_tail = NULL; 7772 7773 if ((mp = tcp->tcp_urp_mp) != NULL) { 7774 freemsg(mp); 7775 tcp->tcp_urp_mp = NULL; 7776 } 7777 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 7778 freemsg(mp); 7779 tcp->tcp_urp_mark_mp = NULL; 7780 } 7781 if (tcp->tcp_fused_sigurg_mp != NULL) { 7782 freeb(tcp->tcp_fused_sigurg_mp); 7783 tcp->tcp_fused_sigurg_mp = NULL; 7784 } 7785 7786 /* 7787 * The following is a union with two members of identical 7788 * type and size, so cleaning up one member as done here 7789 * is enough. 7790 */ 7791 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 7792 7793 CL_INET_DISCONNECT(tcp); 7794 7795 /* 7796 * The connection can't be on the tcp_time_wait_head list 7797 * since it is not detached. 7798 */ 7799 ASSERT(tcp->tcp_time_wait_next == NULL); 7800 ASSERT(tcp->tcp_time_wait_prev == NULL); 7801 ASSERT(tcp->tcp_time_wait_expire == 0); 7802 7803 if (tcp->tcp_kssl_pending) { 7804 tcp->tcp_kssl_pending = B_FALSE; 7805 7806 /* Don't reset if it was initialized by bind. */ 7807 if (tcp->tcp_kssl_ent != NULL) { 7808 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 7809 KSSL_NO_PROXY); 7810 } 7811 } 7812 if (tcp->tcp_kssl_ctx != NULL) { 7813 kssl_release_ctx(tcp->tcp_kssl_ctx); 7814 tcp->tcp_kssl_ctx = NULL; 7815 } 7816 7817 /* 7818 * Reset/preserve other values 7819 */ 7820 tcp_reinit_values(tcp); 7821 ipcl_hash_remove(tcp->tcp_connp); 7822 conn_delete_ire(tcp->tcp_connp, NULL); 7823 tcp_ipsec_cleanup(tcp); 7824 7825 if (tcp->tcp_conn_req_max != 0) { 7826 /* 7827 * This is the case when a TLI program uses the same 7828 * transport end point to accept a connection. This 7829 * makes the TCP both a listener and acceptor. When 7830 * this connection is closed, we need to set the state 7831 * back to TCPS_LISTEN. Make sure that the eager list 7832 * is reinitialized. 7833 * 7834 * Note that this stream is still bound to the four- 7835 * tuple of the previous connection in IP.
If a new 7836 * SYN with a different foreign address comes in, IP will 7837 * not find it and will send it to the global queue. In 7838 * the global queue, TCP will do a tcp_lookup_listener() 7839 * to find this stream. This works because this stream 7840 * is only removed from the connected hash. 7841 * 7842 */ 7843 tcp->tcp_state = TCPS_LISTEN; 7844 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 7845 tcp->tcp_eager_next_drop_q0 = tcp; 7846 tcp->tcp_eager_prev_drop_q0 = tcp; 7847 tcp->tcp_connp->conn_recv = tcp_conn_request; 7848 if (tcp->tcp_family == AF_INET6) { 7849 ASSERT(tcp->tcp_connp->conn_af_isv6); 7850 (void) ipcl_bind_insert_v6(tcp->tcp_connp, IPPROTO_TCP, 7851 &tcp->tcp_ip6h->ip6_src, tcp->tcp_lport); 7852 } else { 7853 ASSERT(!tcp->tcp_connp->conn_af_isv6); 7854 (void) ipcl_bind_insert(tcp->tcp_connp, IPPROTO_TCP, 7855 tcp->tcp_ipha->ipha_src, tcp->tcp_lport); 7856 } 7857 } else { 7858 tcp->tcp_state = TCPS_BOUND; 7859 } 7860 7861 /* 7862 * Initialize to default values 7863 * Can't fail since enough header template space already allocated 7864 * at open(). 7865 */ 7866 err = tcp_init_values(tcp); 7867 ASSERT(err == 0); 7868 /* Restore state in tcp_tcph */ 7869 bcopy(&tcp->tcp_lport, tcp->tcp_tcph->th_lport, TCP_PORT_LEN); 7870 if (tcp->tcp_ipversion == IPV4_VERSION) 7871 tcp->tcp_ipha->ipha_src = tcp->tcp_bound_source; 7872 else 7873 tcp->tcp_ip6h->ip6_src = tcp->tcp_bound_source_v6; 7874 /* 7875 * A copy of the src addr is needed in tcp_t 7876 * since the lookup funcs can only look up on tcp_t. 7877 */ 7878 tcp->tcp_ip_src_v6 = tcp->tcp_bound_source_v6; 7879 7880 ASSERT(tcp->tcp_ptpbhn != NULL); 7881 tcp->tcp_rq->q_hiwat = tcps->tcps_recv_hiwat; 7882 tcp->tcp_rwnd = tcps->tcps_recv_hiwat; 7883 tcp->tcp_mss = tcp->tcp_ipversion != IPV4_VERSION ? 7884 tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4; 7885 } 7886 7887 /* 7888 * Force values to zero that need to be zero. 7889 * Do not touch values associated with the BOUND or LISTEN state 7890 * since the connection will end up in that state after the reinit. 7891 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t 7892 * structure! 7893 */ 7894 static void 7895 tcp_reinit_values(tcp) 7896 tcp_t *tcp; 7897 { 7898 tcp_stack_t *tcps = tcp->tcp_tcps; 7899 7900 #ifndef lint 7901 #define DONTCARE(x) 7902 #define PRESERVE(x) 7903 #else 7904 #define DONTCARE(x) ((x) = (x)) 7905 #define PRESERVE(x) ((x) = (x)) 7906 #endif /* lint */ 7907 7908 PRESERVE(tcp->tcp_bind_hash); 7909 PRESERVE(tcp->tcp_ptpbhn); 7910 PRESERVE(tcp->tcp_acceptor_hash); 7911 PRESERVE(tcp->tcp_ptpahn); 7912 7913 /* Should be ASSERT NULL on these with new code!
*/ 7914 ASSERT(tcp->tcp_time_wait_next == NULL); 7915 ASSERT(tcp->tcp_time_wait_prev == NULL); 7916 ASSERT(tcp->tcp_time_wait_expire == 0); 7917 PRESERVE(tcp->tcp_state); 7918 PRESERVE(tcp->tcp_rq); 7919 PRESERVE(tcp->tcp_wq); 7920 7921 ASSERT(tcp->tcp_xmit_head == NULL); 7922 ASSERT(tcp->tcp_xmit_last == NULL); 7923 ASSERT(tcp->tcp_unsent == 0); 7924 ASSERT(tcp->tcp_xmit_tail == NULL); 7925 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 7926 7927 tcp->tcp_snxt = 0; /* Displayed in mib */ 7928 tcp->tcp_suna = 0; /* Displayed in mib */ 7929 tcp->tcp_swnd = 0; 7930 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_mss_set */ 7931 7932 ASSERT(tcp->tcp_ibsegs == 0); 7933 ASSERT(tcp->tcp_obsegs == 0); 7934 7935 if (tcp->tcp_iphc != NULL) { 7936 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 7937 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 7938 } 7939 7940 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */ 7941 DONTCARE(tcp->tcp_hdr_len); /* Init in tcp_init_values */ 7942 DONTCARE(tcp->tcp_ipha); 7943 DONTCARE(tcp->tcp_ip6h); 7944 DONTCARE(tcp->tcp_ip_hdr_len); 7945 DONTCARE(tcp->tcp_tcph); 7946 DONTCARE(tcp->tcp_tcp_hdr_len); /* Init in tcp_init_values */ 7947 tcp->tcp_valid_bits = 0; 7948 7949 DONTCARE(tcp->tcp_xmit_hiwater); /* Init in tcp_init_values */ 7950 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */ 7951 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */ 7952 tcp->tcp_last_rcv_lbolt = 0; 7953 7954 tcp->tcp_init_cwnd = 0; 7955 7956 tcp->tcp_urp_last_valid = 0; 7957 tcp->tcp_hard_binding = 0; 7958 tcp->tcp_hard_bound = 0; 7959 PRESERVE(tcp->tcp_cred); 7960 PRESERVE(tcp->tcp_cpid); 7961 PRESERVE(tcp->tcp_open_time); 7962 PRESERVE(tcp->tcp_exclbind); 7963 7964 tcp->tcp_fin_acked = 0; 7965 tcp->tcp_fin_rcvd = 0; 7966 tcp->tcp_fin_sent = 0; 7967 tcp->tcp_ordrel_done = 0; 7968 7969 tcp->tcp_debug = 0; 7970 tcp->tcp_dontroute = 0; 7971 tcp->tcp_broadcast = 0; 7972 7973 tcp->tcp_useloopback = 0; 7974 tcp->tcp_reuseaddr = 0; 7975 tcp->tcp_oobinline = 0; 7976 tcp->tcp_dgram_errind = 0; 7977 7978 tcp->tcp_detached = 0; 7979 tcp->tcp_bind_pending = 0; 7980 tcp->tcp_unbind_pending = 0; 7981 tcp->tcp_deferred_clean_death = 0; 7982 7983 tcp->tcp_snd_ws_ok = B_FALSE; 7984 tcp->tcp_snd_ts_ok = B_FALSE; 7985 tcp->tcp_linger = 0; 7986 tcp->tcp_ka_enabled = 0; 7987 tcp->tcp_zero_win_probe = 0; 7988 7989 tcp->tcp_loopback = 0; 7990 tcp->tcp_localnet = 0; 7991 tcp->tcp_syn_defense = 0; 7992 tcp->tcp_set_timer = 0; 7993 7994 tcp->tcp_active_open = 0; 7995 ASSERT(tcp->tcp_timeout == B_FALSE); 7996 tcp->tcp_rexmit = B_FALSE; 7997 tcp->tcp_xmit_zc_clean = B_FALSE; 7998 7999 tcp->tcp_snd_sack_ok = B_FALSE; 8000 PRESERVE(tcp->tcp_recvdstaddr); 8001 tcp->tcp_hwcksum = B_FALSE; 8002 8003 tcp->tcp_ire_ill_check_done = B_FALSE; 8004 DONTCARE(tcp->tcp_maxpsz); /* Init in tcp_init_values */ 8005 8006 tcp->tcp_mdt = B_FALSE; 8007 tcp->tcp_mdt_hdr_head = 0; 8008 tcp->tcp_mdt_hdr_tail = 0; 8009 8010 tcp->tcp_conn_def_q0 = 0; 8011 tcp->tcp_ip_forward_progress = B_FALSE; 8012 tcp->tcp_anon_priv_bind = 0; 8013 tcp->tcp_ecn_ok = B_FALSE; 8014 8015 tcp->tcp_cwr = B_FALSE; 8016 tcp->tcp_ecn_echo_on = B_FALSE; 8017 8018 if (tcp->tcp_sack_info != NULL) { 8019 if (tcp->tcp_notsack_list != NULL) { 8020 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 8021 } 8022 kmem_cache_free(tcp_sack_info_cache, tcp->tcp_sack_info); 8023 tcp->tcp_sack_info = NULL; 8024 } 8025 8026 tcp->tcp_rcv_ws = 0; 8027 tcp->tcp_snd_ws = 0; 8028 tcp->tcp_ts_recent = 0; 8029 tcp->tcp_rnxt = 0; /* Displayed in mib */ 8030 
DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */ 8031 tcp->tcp_if_mtu = 0; 8032 8033 ASSERT(tcp->tcp_reass_head == NULL); 8034 ASSERT(tcp->tcp_reass_tail == NULL); 8035 8036 tcp->tcp_cwnd_cnt = 0; 8037 8038 ASSERT(tcp->tcp_rcv_list == NULL); 8039 ASSERT(tcp->tcp_rcv_last_head == NULL); 8040 ASSERT(tcp->tcp_rcv_last_tail == NULL); 8041 ASSERT(tcp->tcp_rcv_cnt == 0); 8042 8043 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_adapt_ire */ 8044 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */ 8045 tcp->tcp_csuna = 0; 8046 8047 tcp->tcp_rto = 0; /* Displayed in MIB */ 8048 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */ 8049 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */ 8050 tcp->tcp_rtt_update = 0; 8051 8052 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8053 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8054 8055 tcp->tcp_rack = 0; /* Displayed in mib */ 8056 tcp->tcp_rack_cnt = 0; 8057 tcp->tcp_rack_cur_max = 0; 8058 tcp->tcp_rack_abs_max = 0; 8059 8060 tcp->tcp_max_swnd = 0; 8061 8062 ASSERT(tcp->tcp_listener == NULL); 8063 8064 DONTCARE(tcp->tcp_xmit_lowater); /* Init in tcp_init_values */ 8065 8066 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */ 8067 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */ 8068 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */ 8069 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */ 8070 8071 ASSERT(tcp->tcp_conn_req_cnt_q == 0); 8072 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0); 8073 PRESERVE(tcp->tcp_conn_req_max); 8074 PRESERVE(tcp->tcp_conn_req_seqnum); 8075 8076 DONTCARE(tcp->tcp_ip_hdr_len); /* Init in tcp_init_values */ 8077 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */ 8078 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */ 8079 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */ 8080 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */ 8081 8082 tcp->tcp_lingertime = 0; 8083 8084 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */ 8085 ASSERT(tcp->tcp_urp_mp == NULL); 8086 ASSERT(tcp->tcp_urp_mark_mp == NULL); 8087 ASSERT(tcp->tcp_fused_sigurg_mp == NULL); 8088 8089 ASSERT(tcp->tcp_eager_next_q == NULL); 8090 ASSERT(tcp->tcp_eager_last_q == NULL); 8091 ASSERT((tcp->tcp_eager_next_q0 == NULL && 8092 tcp->tcp_eager_prev_q0 == NULL) || 8093 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0); 8094 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 8095 8096 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL && 8097 tcp->tcp_eager_prev_drop_q0 == NULL) || 8098 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0); 8099 8100 tcp->tcp_client_errno = 0; 8101 8102 DONTCARE(tcp->tcp_sum); /* Init in tcp_init_values */ 8103 8104 tcp->tcp_remote_v6 = ipv6_all_zeros; /* Displayed in MIB */ 8105 8106 PRESERVE(tcp->tcp_bound_source_v6); 8107 tcp->tcp_last_sent_len = 0; 8108 tcp->tcp_dupack_cnt = 0; 8109 8110 tcp->tcp_fport = 0; /* Displayed in MIB */ 8111 PRESERVE(tcp->tcp_lport); 8112 8113 PRESERVE(tcp->tcp_acceptor_lockp); 8114 8115 ASSERT(tcp->tcp_ordrelid == 0); 8116 PRESERVE(tcp->tcp_acceptor_id); 8117 DONTCARE(tcp->tcp_ipsec_overhead); 8118 8119 /* 8120 * If tcp_tracing flag is ON (i.e. We have a trace buffer 8121 * in tcp structure and now tracing), Re-initialize all 8122 * members of tcp_traceinfo. 
8123 */ 8124 if (tcp->tcp_tracebuf != NULL) { 8125 bzero(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 8126 } 8127 8128 PRESERVE(tcp->tcp_family); 8129 if (tcp->tcp_family == AF_INET6) { 8130 tcp->tcp_ipversion = IPV6_VERSION; 8131 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 8132 } else { 8133 tcp->tcp_ipversion = IPV4_VERSION; 8134 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 8135 } 8136 8137 tcp->tcp_bound_if = 0; 8138 tcp->tcp_ipv6_recvancillary = 0; 8139 tcp->tcp_recvifindex = 0; 8140 tcp->tcp_recvhops = 0; 8141 tcp->tcp_closed = 0; 8142 tcp->tcp_cleandeathtag = 0; 8143 if (tcp->tcp_hopopts != NULL) { 8144 mi_free(tcp->tcp_hopopts); 8145 tcp->tcp_hopopts = NULL; 8146 tcp->tcp_hopoptslen = 0; 8147 } 8148 ASSERT(tcp->tcp_hopoptslen == 0); 8149 if (tcp->tcp_dstopts != NULL) { 8150 mi_free(tcp->tcp_dstopts); 8151 tcp->tcp_dstopts = NULL; 8152 tcp->tcp_dstoptslen = 0; 8153 } 8154 ASSERT(tcp->tcp_dstoptslen == 0); 8155 if (tcp->tcp_rtdstopts != NULL) { 8156 mi_free(tcp->tcp_rtdstopts); 8157 tcp->tcp_rtdstopts = NULL; 8158 tcp->tcp_rtdstoptslen = 0; 8159 } 8160 ASSERT(tcp->tcp_rtdstoptslen == 0); 8161 if (tcp->tcp_rthdr != NULL) { 8162 mi_free(tcp->tcp_rthdr); 8163 tcp->tcp_rthdr = NULL; 8164 tcp->tcp_rthdrlen = 0; 8165 } 8166 ASSERT(tcp->tcp_rthdrlen == 0); 8167 PRESERVE(tcp->tcp_drop_opt_ack_cnt); 8168 8169 /* Reset fusion-related fields */ 8170 tcp->tcp_fused = B_FALSE; 8171 tcp->tcp_unfusable = B_FALSE; 8172 tcp->tcp_fused_sigurg = B_FALSE; 8173 tcp->tcp_direct_sockfs = B_FALSE; 8174 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8175 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8176 tcp->tcp_loopback_peer = NULL; 8177 tcp->tcp_fuse_rcv_hiwater = 0; 8178 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8179 tcp->tcp_fuse_rcv_unread_cnt = 0; 8180 8181 tcp->tcp_lso = B_FALSE; 8182 8183 tcp->tcp_in_ack_unsent = 0; 8184 tcp->tcp_cork = B_FALSE; 8185 tcp->tcp_tconnind_started = B_FALSE; 8186 8187 PRESERVE(tcp->tcp_squeue_bytes); 8188 8189 ASSERT(tcp->tcp_kssl_ctx == NULL); 8190 ASSERT(!tcp->tcp_kssl_pending); 8191 PRESERVE(tcp->tcp_kssl_ent); 8192 8193 tcp->tcp_closemp_used = B_FALSE; 8194 8195 #ifdef DEBUG 8196 DONTCARE(tcp->tcmp_stk[0]); 8197 #endif 8198 8199 8200 #undef DONTCARE 8201 #undef PRESERVE 8202 } 8203 8204 /* 8205 * Allocate necessary resources and initialize state vector. 8206 * Guaranteed not to fail so that when an error is returned, 8207 * the caller doesn't need to do any additional cleanup. 8208 */ 8209 int 8210 tcp_init(tcp_t *tcp, queue_t *q) 8211 { 8212 int err; 8213 8214 tcp->tcp_rq = q; 8215 tcp->tcp_wq = WR(q); 8216 tcp->tcp_state = TCPS_IDLE; 8217 if ((err = tcp_init_values(tcp)) != 0) 8218 tcp_timers_stop(tcp); 8219 return (err); 8220 } 8221 8222 static int 8223 tcp_init_values(tcp_t *tcp) 8224 { 8225 int err; 8226 tcp_stack_t *tcps = tcp->tcp_tcps; 8227 8228 ASSERT((tcp->tcp_family == AF_INET && 8229 tcp->tcp_ipversion == IPV4_VERSION) || 8230 (tcp->tcp_family == AF_INET6 && 8231 (tcp->tcp_ipversion == IPV4_VERSION || 8232 tcp->tcp_ipversion == IPV6_VERSION))); 8233 8234 /* 8235 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO 8236 * will be close to tcp_rexmit_interval_initial. By doing this, we 8237 * allow the algorithm to adjust slowly to large fluctuations of RTT 8238 * during first few transmissions of a connection as seen in slow 8239 * links. 
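 *
 * Worked example (arithmetic only, no new tunables): with sa = 4 * I
 * and sd = I / 2, where I is tcps_rexmit_interval_initial, the RTO
 * computed below is
 *
 *	(sa >> 3) + sd + extra + (sa >> 5) + grace
 *	    = I/2 + I/2 + I/8 + extra + grace
 *	    = 1.125 * I + tcps_rexmit_interval_extra +
 *	      tcps_conn_grace_period
 *
 * so the first RTO tracks the configured initial interval, while the
 * inflated sa/sd leave room for large early RTT fluctuations.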
8240 */ 8241 tcp->tcp_rtt_sa = tcps->tcps_rexmit_interval_initial << 2; 8242 tcp->tcp_rtt_sd = tcps->tcps_rexmit_interval_initial >> 1; 8243 tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 8244 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) + 8245 tcps->tcps_conn_grace_period; 8246 if (tcp->tcp_rto < tcps->tcps_rexmit_interval_min) 8247 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 8248 tcp->tcp_timer_backoff = 0; 8249 tcp->tcp_ms_we_have_waited = 0; 8250 tcp->tcp_last_recv_time = lbolt; 8251 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_; 8252 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 8253 tcp->tcp_snd_burst = TCP_CWND_INFINITE; 8254 8255 tcp->tcp_maxpsz = tcps->tcps_maxpsz_multiplier; 8256 8257 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval; 8258 tcp->tcp_first_ctimer_threshold = tcps->tcps_ip_notify_cinterval; 8259 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval; 8260 /* 8261 * Fix it to tcp_ip_abort_linterval later if it turns out to be a 8262 * passive open. 8263 */ 8264 tcp->tcp_second_ctimer_threshold = tcps->tcps_ip_abort_cinterval; 8265 8266 tcp->tcp_naglim = tcps->tcps_naglim_def; 8267 8268 /* NOTE: ISS is now set in tcp_adapt_ire(). */ 8269 8270 tcp->tcp_mdt_hdr_head = 0; 8271 tcp->tcp_mdt_hdr_tail = 0; 8272 8273 /* Reset fusion-related fields */ 8274 tcp->tcp_fused = B_FALSE; 8275 tcp->tcp_unfusable = B_FALSE; 8276 tcp->tcp_fused_sigurg = B_FALSE; 8277 tcp->tcp_direct_sockfs = B_FALSE; 8278 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8279 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8280 tcp->tcp_loopback_peer = NULL; 8281 tcp->tcp_fuse_rcv_hiwater = 0; 8282 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8283 tcp->tcp_fuse_rcv_unread_cnt = 0; 8284 8285 /* Initialize the header template */ 8286 if (tcp->tcp_ipversion == IPV4_VERSION) { 8287 err = tcp_header_init_ipv4(tcp); 8288 } else { 8289 err = tcp_header_init_ipv6(tcp); 8290 } 8291 if (err) 8292 return (err); 8293 8294 /* 8295 * Init the window scale to the max so tcp_rwnd_set() won't pare 8296 * down tcp_rwnd. tcp_adapt_ire() will set the right value later. 8297 */ 8298 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT; 8299 tcp->tcp_xmit_lowater = tcps->tcps_xmit_lowat; 8300 tcp->tcp_xmit_hiwater = tcps->tcps_xmit_hiwat; 8301 8302 tcp->tcp_cork = B_FALSE; 8303 /* 8304 * Init the tcp_debug option. This value determines whether TCP 8305 * calls strlog() to print out debug messages. Doing this 8306 * initialization here means that this value is not inherited thru 8307 * tcp_reinit(). 8308 */ 8309 tcp->tcp_debug = tcps->tcps_dbg; 8310 8311 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval; 8312 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval; 8313 8314 return (0); 8315 } 8316 8317 /* 8318 * Initialize the IPv4 header. Loses any record of any IP options. 8319 */ 8320 static int 8321 tcp_header_init_ipv4(tcp_t *tcp) 8322 { 8323 tcph_t *tcph; 8324 uint32_t sum; 8325 conn_t *connp; 8326 tcp_stack_t *tcps = tcp->tcp_tcps; 8327 8328 /* 8329 * This is a simple initialization. If there's 8330 * already a template, it should never be too small, 8331 * so reuse it. Otherwise, allocate space for the new one. 
8332 */ 8333 if (tcp->tcp_iphc == NULL) { 8334 ASSERT(tcp->tcp_iphc_len == 0); 8335 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8336 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8337 if (tcp->tcp_iphc == NULL) { 8338 tcp->tcp_iphc_len = 0; 8339 return (ENOMEM); 8340 } 8341 } 8342 8343 /* options are gone; may need a new label */ 8344 connp = tcp->tcp_connp; 8345 connp->conn_mlp_type = mlptSingle; 8346 connp->conn_ulp_labeled = !is_system_labeled(); 8347 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8348 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 8349 tcp->tcp_ip6h = NULL; 8350 tcp->tcp_ipversion = IPV4_VERSION; 8351 tcp->tcp_hdr_len = sizeof (ipha_t) + sizeof (tcph_t); 8352 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8353 tcp->tcp_ip_hdr_len = sizeof (ipha_t); 8354 tcp->tcp_ipha->ipha_length = htons(sizeof (ipha_t) + sizeof (tcph_t)); 8355 tcp->tcp_ipha->ipha_version_and_hdr_length 8356 = (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS; 8357 tcp->tcp_ipha->ipha_ident = 0; 8358 8359 tcp->tcp_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8360 tcp->tcp_tos = 0; 8361 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0; 8362 tcp->tcp_ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8363 tcp->tcp_ipha->ipha_protocol = IPPROTO_TCP; 8364 8365 tcph = (tcph_t *)(tcp->tcp_iphc + sizeof (ipha_t)); 8366 tcp->tcp_tcph = tcph; 8367 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8368 /* 8369 * IP wants our header length in the checksum field to 8370 * allow it to perform a single pseudo-header+checksum 8371 * calculation on behalf of TCP. 8372 * Include the adjustment for a source route once IP_OPTIONS is set. 8373 */ 8374 sum = sizeof (tcph_t) + tcp->tcp_sum; 8375 sum = (sum >> 16) + (sum & 0xFFFF); 8376 U16_TO_ABE16(sum, tcph->th_sum); 8377 return (0); 8378 } 8379 8380 /* 8381 * Initialize the IPv6 header. Loses any record of any IPv6 extension headers. 8382 */ 8383 static int 8384 tcp_header_init_ipv6(tcp_t *tcp) 8385 { 8386 tcph_t *tcph; 8387 uint32_t sum; 8388 conn_t *connp; 8389 tcp_stack_t *tcps = tcp->tcp_tcps; 8390 8391 /* 8392 * This is a simple initialization. If there's 8393 * already a template, it should never be too small, 8394 * so reuse it. Otherwise, allocate space for the new one. 8395 * Ensure that there is enough space to "downgrade" the tcp_t 8396 * to an IPv4 tcp_t. This requires having space for a full load 8397 * of IPv4 options, as well as a full load of TCP options 8398 * (TCP_MAX_COMBINED_HEADER_LENGTH, 120 bytes); this is more space 8399 * than a v6 header and a TCP header with a full load of TCP options 8400 * (IPV6_HDR_LEN is 40 bytes; TCP_MAX_HDR_LENGTH is 60 bytes). 8401 * We want to avoid reallocation in the "downgraded" case when 8402 * processing outbound IPv4 options. 
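 *
 * Illustrative arithmetic using the constants cited above:
 *
 *	IPV6_HDR_LEN + TCP_MAX_HDR_LENGTH = 40 + 60 = 100 bytes
 *	TCP_MAX_COMBINED_HEADER_LENGTH    = 120 bytes
 *
 * so the 120-byte template always covers the largest v6 header we
 * build, and a later v6 -> v4 "downgrade" never needs to grow tcp_iphc.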
8403 */ 8404 if (tcp->tcp_iphc == NULL) { 8405 ASSERT(tcp->tcp_iphc_len == 0); 8406 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8407 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8408 if (tcp->tcp_iphc == NULL) { 8409 tcp->tcp_iphc_len = 0; 8410 return (ENOMEM); 8411 } 8412 } 8413 8414 /* options are gone; may need a new label */ 8415 connp = tcp->tcp_connp; 8416 connp->conn_mlp_type = mlptSingle; 8417 connp->conn_ulp_labeled = !is_system_labeled(); 8418 8419 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8420 tcp->tcp_ipversion = IPV6_VERSION; 8421 tcp->tcp_hdr_len = IPV6_HDR_LEN + sizeof (tcph_t); 8422 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8423 tcp->tcp_ip_hdr_len = IPV6_HDR_LEN; 8424 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 8425 tcp->tcp_ipha = NULL; 8426 8427 /* Initialize the header template */ 8428 8429 tcp->tcp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW; 8430 tcp->tcp_ip6h->ip6_plen = ntohs(sizeof (tcph_t)); 8431 tcp->tcp_ip6h->ip6_nxt = IPPROTO_TCP; 8432 tcp->tcp_ip6h->ip6_hops = (uint8_t)tcps->tcps_ipv6_hoplimit; 8433 8434 tcph = (tcph_t *)(tcp->tcp_iphc + IPV6_HDR_LEN); 8435 tcp->tcp_tcph = tcph; 8436 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8437 /* 8438 * IP wants our header length in the checksum field to 8439 * allow it to perform a single pseudo-header+checksum 8440 * calculation on behalf of TCP. 8441 * Include the adjustment for a source route when IPV6_RTHDR is set. 8442 */ 8443 sum = sizeof (tcph_t) + tcp->tcp_sum; 8444 sum = (sum >> 16) + (sum & 0xFFFF); 8445 U16_TO_ABE16(sum, tcph->th_sum); 8446 return (0); 8447 } 8448 8449 /* At minimum we need 8 bytes in the TCP header for the lookup */ 8450 #define ICMP_MIN_TCP_HDR 8 8451 8452 /* 8453 * tcp_icmp_error is called by tcp_rput_other to process ICMP error messages 8454 * passed up by IP. The message is always received on the correct tcp_t. 8455 * Assumes that IP has pulled up everything up to and including the ICMP header. 8456 */ 8457 void 8458 tcp_icmp_error(tcp_t *tcp, mblk_t *mp) 8459 { 8460 icmph_t *icmph; 8461 ipha_t *ipha; 8462 int iph_hdr_length; 8463 tcph_t *tcph; 8464 boolean_t ipsec_mctl = B_FALSE; 8465 boolean_t secure; 8466 mblk_t *first_mp = mp; 8467 uint32_t new_mss; 8468 uint32_t ratio; 8469 size_t mp_size = MBLKL(mp); 8470 uint32_t seg_seq; 8471 tcp_stack_t *tcps = tcp->tcp_tcps; 8472 8473 /* Assume IP provides aligned packets - otherwise toss */ 8474 if (!OK_32PTR(mp->b_rptr)) { 8475 freemsg(mp); 8476 return; 8477 } 8478 8479 /* 8480 * Since ICMP errors are normal data marked with M_CTL when sent 8481 * to TCP or UDP, we have to look for an IPSEC_IN value to identify 8482 * packets starting with an ipsec_info_t, see ipsec_info.h. 8483 */ 8484 if ((mp_size == sizeof (ipsec_info_t)) && 8485 (((ipsec_info_t *)mp->b_rptr)->ipsec_info_type == IPSEC_IN)) { 8486 ASSERT(mp->b_cont != NULL); 8487 mp = mp->b_cont; 8488 /* IP should have done this */ 8489 ASSERT(OK_32PTR(mp->b_rptr)); 8490 mp_size = MBLKL(mp); 8491 ipsec_mctl = B_TRUE; 8492 } 8493 8494 /* 8495 * Verify that we have a complete outer IP header. If not, drop it. 8496 */ 8497 if (mp_size < sizeof (ipha_t)) { 8498 noticmpv4: 8499 freemsg(first_mp); 8500 return; 8501 } 8502 8503 ipha = (ipha_t *)mp->b_rptr; 8504 /* 8505 * Verify the IP version. Anything other than an IPv4 or IPv6 packet is 8506 * sent upstream. ICMPv6 is handled in tcp_icmp_error_ipv6.
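 *
 * For reference, the parsing below walks this layout; an ICMP error
 * quotes the offending datagram's IP header plus at least the first
 * 8 bytes (ICMP_MIN_TCP_HDR) of its payload, which for TCP covers the
 * ports and the sequence number:
 *
 *	[ outer ipha_t ][ icmph_t ][ inner ipha_t ][ >= 8 bytes of tcph_t ]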
8507 */ 8508 switch (IPH_HDR_VERSION(ipha)) { 8509 case IPV6_VERSION: 8510 tcp_icmp_error_ipv6(tcp, first_mp, ipsec_mctl); 8511 return; 8512 case IPV4_VERSION: 8513 break; 8514 default: 8515 goto noticmpv4; 8516 } 8517 8518 /* Skip past the outer IP and ICMP headers */ 8519 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8520 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 8521 /* 8522 * If we don't have the correct outer IP header length or if the ULP 8523 * is not IPPROTO_ICMP or if we don't have a complete inner IP header, 8524 * send it upstream. 8525 */ 8526 if (iph_hdr_length < sizeof (ipha_t) || 8527 ipha->ipha_protocol != IPPROTO_ICMP || 8528 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 8529 goto noticmpv4; 8530 } 8531 ipha = (ipha_t *)&icmph[1]; 8532 8533 /* Skip past the inner IP and find the ULP header */ 8534 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8535 tcph = (tcph_t *)((char *)ipha + iph_hdr_length); 8536 /* 8537 * If we don't have the correct inner IP header length or if the ULP 8538 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 8539 * bytes of TCP header, drop it. 8540 */ 8541 if (iph_hdr_length < sizeof (ipha_t) || 8542 ipha->ipha_protocol != IPPROTO_TCP || 8543 (uchar_t *)tcph + ICMP_MIN_TCP_HDR > mp->b_wptr) { 8544 goto noticmpv4; 8545 } 8546 8547 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 8548 if (ipsec_mctl) { 8549 secure = ipsec_in_is_secure(first_mp); 8550 } else { 8551 secure = B_FALSE; 8552 } 8553 if (secure) { 8554 /* 8555 * If we are willing to accept this in clear 8556 * we don't have to verify policy. 8557 */ 8558 if (!ipsec_inbound_accept_clear(mp, ipha, NULL)) { 8559 if (!tcp_check_policy(tcp, first_mp, 8560 ipha, NULL, secure, ipsec_mctl)) { 8561 /* 8562 * tcp_check_policy called 8563 * ip_drop_packet() on failure. 8564 */ 8565 return; 8566 } 8567 } 8568 } 8569 } else if (ipsec_mctl) { 8570 /* 8571 * This is a hard_bound connection. IP has already 8572 * verified policy. We don't have to do it again. 8573 */ 8574 freeb(first_mp); 8575 first_mp = mp; 8576 ipsec_mctl = B_FALSE; 8577 } 8578 8579 seg_seq = ABE32_TO_U32(tcph->th_seq); 8580 /* 8581 * TCP SHOULD check that the TCP sequence number contained in the 8582 * payload of the ICMP error message is within the range 8583 * SND.UNA <= SEG.SEQ < SND.NXT. 8584 */ 8585 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) { 8586 /* 8587 * If the ICMP message is bogus, should we kill the 8588 * connection, or should we just drop the bogus ICMP 8589 * message? It would probably make more sense to just 8590 * drop the message so that if this one managed to get 8591 * in, the real connection should not suffer. 8592 */ 8593 goto noticmpv4; 8594 } 8595 8596 switch (icmph->icmph_type) { 8597 case ICMP_DEST_UNREACHABLE: 8598 switch (icmph->icmph_code) { 8599 case ICMP_FRAGMENTATION_NEEDED: 8600 /* 8601 * Reduce the MSS based on the new MTU. This will 8602 * eliminate any fragmentation locally. 8603 * N.B. There may well be some funny side-effects on 8604 * the local send policy and the remote receive policy. 8605 * Pending further research, we provide 8606 * tcp_ignore_path_mtu just in case this proves 8607 * disastrous somewhere. 8608 * 8609 * After updating the MSS, retransmit part of the 8610 * dropped segment using the new mss by calling 8611 * tcp_wput_data(). Need to adjust all those 8612 * params to make sure tcp_wput_data() works properly. 8613 */ 8614 if (tcps->tcps_ignore_path_mtu) 8615 break; 8616 8617 /* 8618 * Decrease the MSS by time stamp options,
tcp_hdr_len 8620 * includes time stamp option and IP option 8621 * length. 8622 */ 8623 8624 new_mss = ntohs(icmph->icmph_du_mtu) - 8625 tcp->tcp_hdr_len - tcp->tcp_ipsec_overhead; 8626 8627 /* 8628 * Only update the MSS if the new one is 8629 * smaller than the previous one. This is 8630 * to avoid problems when getting multiple 8631 * ICMP errors for the same MTU. 8632 */ 8633 if (new_mss >= tcp->tcp_mss) 8634 break; 8635 8636 /* 8637 * Stop doing PMTU if new_mss is less than 68 8638 * or less than tcp_mss_min. 8639 * The value 68 comes from rfc 1191. 8640 */ 8641 if (new_mss < MAX(68, tcps->tcps_mss_min)) 8642 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 8643 0; 8644 8645 ratio = tcp->tcp_cwnd / tcp->tcp_mss; 8646 ASSERT(ratio >= 1); 8647 tcp_mss_set(tcp, new_mss, B_TRUE); 8648 8649 /* 8650 * Make sure we have something to 8651 * send. 8652 */ 8653 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) && 8654 (tcp->tcp_xmit_head != NULL)) { 8655 /* 8656 * Shrink tcp_cwnd in 8657 * proportion to the old MSS/new MSS. 8658 */ 8659 tcp->tcp_cwnd = ratio * tcp->tcp_mss; 8660 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 8661 (tcp->tcp_unsent == 0)) { 8662 tcp->tcp_rexmit_max = tcp->tcp_fss; 8663 } else { 8664 tcp->tcp_rexmit_max = tcp->tcp_snxt; 8665 } 8666 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 8667 tcp->tcp_rexmit = B_TRUE; 8668 tcp->tcp_dupack_cnt = 0; 8669 tcp->tcp_snd_burst = TCP_CWND_SS; 8670 tcp_ss_rexmit(tcp); 8671 } 8672 break; 8673 case ICMP_PORT_UNREACHABLE: 8674 case ICMP_PROTOCOL_UNREACHABLE: 8675 switch (tcp->tcp_state) { 8676 case TCPS_SYN_SENT: 8677 case TCPS_SYN_RCVD: 8678 /* 8679 * ICMP can snipe away incipient 8680 * TCP connections as long as 8681 * seq number is same as initial 8682 * send seq number. 8683 */ 8684 if (seg_seq == tcp->tcp_iss) { 8685 (void) tcp_clean_death(tcp, 8686 ECONNREFUSED, 6); 8687 } 8688 break; 8689 } 8690 break; 8691 case ICMP_HOST_UNREACHABLE: 8692 case ICMP_NET_UNREACHABLE: 8693 /* Record the error in case we finally time out. */ 8694 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE) 8695 tcp->tcp_client_errno = EHOSTUNREACH; 8696 else 8697 tcp->tcp_client_errno = ENETUNREACH; 8698 if (tcp->tcp_state == TCPS_SYN_RCVD) { 8699 if (tcp->tcp_listener != NULL && 8700 tcp->tcp_listener->tcp_syn_defense) { 8701 /* 8702 * Ditch the half-open connection if we 8703 * suspect a SYN attack is under way. 8704 */ 8705 tcp_ip_ire_mark_advice(tcp); 8706 (void) tcp_clean_death(tcp, 8707 tcp->tcp_client_errno, 7); 8708 } 8709 } 8710 break; 8711 default: 8712 break; 8713 } 8714 break; 8715 case ICMP_SOURCE_QUENCH: { 8716 /* 8717 * use a global boolean to control 8718 * whether TCP should respond to ICMP_SOURCE_QUENCH. 8719 * The default is false. 8720 */ 8721 if (tcp_icmp_source_quench) { 8722 /* 8723 * Reduce the sending rate as if we got a 8724 * retransmit timeout 8725 */ 8726 uint32_t npkt; 8727 8728 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / 8729 tcp->tcp_mss; 8730 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss; 8731 tcp->tcp_cwnd = tcp->tcp_mss; 8732 tcp->tcp_cwnd_cnt = 0; 8733 } 8734 break; 8735 } 8736 } 8737 freemsg(first_mp); 8738 } 8739 8740 /* 8741 * tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6 8742 * error messages passed up by IP. 8743 * Assumes that IP has pulled up all the extension headers as well 8744 * as the ICMPv6 header. 
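 *
 * The ICMP6_PACKET_TOO_BIG case below reacts to a path MTU error with
 * the same arithmetic as the v4 ICMP_FRAGMENTATION_NEEDED case above; a
 * minimal sketch, with the helper name tcp_pmtu_new_mss() invented here
 * purely for illustration:
 *
 *	uint32_t
 *	tcp_pmtu_new_mss(uint32_t mtu, uint32_t hdr_len,
 *	    uint32_t ipsec_overhead, uint32_t cur_mss)
 *	{
 *		uint32_t new_mss = mtu - hdr_len - ipsec_overhead;
 *
 *		return (new_mss >= cur_mss ? cur_mss : new_mss);
 *	}
 *
 * The caller computes ratio = tcp_cwnd / old mss before switching to
 * the new mss and then sets tcp_cwnd = ratio * new mss, so the
 * congestion window still covers the same number of segments.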
8745 */ 8746 static void 8747 tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, boolean_t ipsec_mctl) 8748 { 8749 icmp6_t *icmp6; 8750 ip6_t *ip6h; 8751 uint16_t iph_hdr_length; 8752 tcpha_t *tcpha; 8753 uint8_t *nexthdrp; 8754 uint32_t new_mss; 8755 uint32_t ratio; 8756 boolean_t secure; 8757 mblk_t *first_mp = mp; 8758 size_t mp_size; 8759 uint32_t seg_seq; 8760 tcp_stack_t *tcps = tcp->tcp_tcps; 8761 8762 /* 8763 * The caller has determined if this is an IPSEC_IN packet and 8764 * set ipsec_mctl appropriately (see tcp_icmp_error). 8765 */ 8766 if (ipsec_mctl) 8767 mp = mp->b_cont; 8768 8769 mp_size = MBLKL(mp); 8770 8771 /* 8772 * Verify that we have a complete IP header. If not, send it upstream. 8773 */ 8774 if (mp_size < sizeof (ip6_t)) { 8775 noticmpv6: 8776 freemsg(first_mp); 8777 return; 8778 } 8779 8780 /* 8781 * Verify this is an ICMPV6 packet, else send it upstream. 8782 */ 8783 ip6h = (ip6_t *)mp->b_rptr; 8784 if (ip6h->ip6_nxt == IPPROTO_ICMPV6) { 8785 iph_hdr_length = IPV6_HDR_LEN; 8786 } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, 8787 &nexthdrp) || 8788 *nexthdrp != IPPROTO_ICMPV6) { 8789 goto noticmpv6; 8790 } 8791 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length]; 8792 ip6h = (ip6_t *)&icmp6[1]; 8793 /* 8794 * Verify if we have a complete ICMP and inner IP header. 8795 */ 8796 if ((uchar_t *)&ip6h[1] > mp->b_wptr) 8797 goto noticmpv6; 8798 8799 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) 8800 goto noticmpv6; 8801 tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length); 8802 /* 8803 * Validate inner header. If the ULP is not IPPROTO_TCP or if we don't 8804 * have at least ICMP_MIN_TCP_HDR bytes of TCP header drop the 8805 * packet. 8806 */ 8807 if ((*nexthdrp != IPPROTO_TCP) || 8808 ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) { 8809 goto noticmpv6; 8810 } 8811 8812 /* 8813 * ICMP errors come on the right queue or come on 8814 * listener/global queue for detached connections and 8815 * get switched to the right queue. If it comes on the 8816 * right queue, policy check has already been done by IP 8817 * and thus free the first_mp without verifying the policy. 8818 * If it has come for a non-hard bound connection, we need 8819 * to verify policy as IP may not have done it. 8820 */ 8821 if (!tcp->tcp_hard_bound) { 8822 if (ipsec_mctl) { 8823 secure = ipsec_in_is_secure(first_mp); 8824 } else { 8825 secure = B_FALSE; 8826 } 8827 if (secure) { 8828 /* 8829 * If we are willing to accept this in clear 8830 * we don't have to verify policy. 8831 */ 8832 if (!ipsec_inbound_accept_clear(mp, NULL, ip6h)) { 8833 if (!tcp_check_policy(tcp, first_mp, 8834 NULL, ip6h, secure, ipsec_mctl)) { 8835 /* 8836 * tcp_check_policy called 8837 * ip_drop_packet() on failure. 8838 */ 8839 return; 8840 } 8841 } 8842 } 8843 } else if (ipsec_mctl) { 8844 /* 8845 * This is a hard_bound connection. IP has already 8846 * verified policy. We don't have to do it again. 8847 */ 8848 freeb(first_mp); 8849 first_mp = mp; 8850 ipsec_mctl = B_FALSE; 8851 } 8852 8853 seg_seq = ntohl(tcpha->tha_seq); 8854 /* 8855 * TCP SHOULD check that the TCP sequence number contained in 8856 * payload of the ICMP error message is within the range 8857 * SND.UNA <= SEG.SEQ < SND.NXT. 8858 */ 8859 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) { 8860 /* 8861 * If the ICMP message is bogus, should we kill the 8862 * connection, or should we just drop the bogus ICMP 8863 * message? 
It would probably make more sense to just 8864 * drop the message so that if this one managed to get 8865 * in, the real connection should not suffer. 8866 */ 8867 goto noticmpv6; 8868 } 8869 8870 switch (icmp6->icmp6_type) { 8871 case ICMP6_PACKET_TOO_BIG: 8872 /* 8873 * Reduce the MSS based on the new MTU. This will 8874 * eliminate any fragmentation locally. 8875 * N.B. There may well be some funny side-effects on 8876 * the local send policy and the remote receive policy. 8877 * Pending further research, we provide 8878 * tcp_ignore_path_mtu just in case this proves 8879 * disastrous somewhere. 8880 * 8881 * After updating the MSS, retransmit part of the 8882 * dropped segment using the new mss by calling 8883 * tcp_wput_data(). Need to adjust all those 8884 * params to make sure tcp_wput_data() work properly. 8885 */ 8886 if (tcps->tcps_ignore_path_mtu) 8887 break; 8888 8889 /* 8890 * Decrease the MSS by time stamp options 8891 * IP options and IPSEC options. tcp_hdr_len 8892 * includes time stamp option and IP option 8893 * length. 8894 */ 8895 new_mss = ntohs(icmp6->icmp6_mtu) - tcp->tcp_hdr_len - 8896 tcp->tcp_ipsec_overhead; 8897 8898 /* 8899 * Only update the MSS if the new one is 8900 * smaller than the previous one. This is 8901 * to avoid problems when getting multiple 8902 * ICMP errors for the same MTU. 8903 */ 8904 if (new_mss >= tcp->tcp_mss) 8905 break; 8906 8907 ratio = tcp->tcp_cwnd / tcp->tcp_mss; 8908 ASSERT(ratio >= 1); 8909 tcp_mss_set(tcp, new_mss, B_TRUE); 8910 8911 /* 8912 * Make sure we have something to 8913 * send. 8914 */ 8915 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) && 8916 (tcp->tcp_xmit_head != NULL)) { 8917 /* 8918 * Shrink tcp_cwnd in 8919 * proportion to the old MSS/new MSS. 8920 */ 8921 tcp->tcp_cwnd = ratio * tcp->tcp_mss; 8922 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 8923 (tcp->tcp_unsent == 0)) { 8924 tcp->tcp_rexmit_max = tcp->tcp_fss; 8925 } else { 8926 tcp->tcp_rexmit_max = tcp->tcp_snxt; 8927 } 8928 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 8929 tcp->tcp_rexmit = B_TRUE; 8930 tcp->tcp_dupack_cnt = 0; 8931 tcp->tcp_snd_burst = TCP_CWND_SS; 8932 tcp_ss_rexmit(tcp); 8933 } 8934 break; 8935 8936 case ICMP6_DST_UNREACH: 8937 switch (icmp6->icmp6_code) { 8938 case ICMP6_DST_UNREACH_NOPORT: 8939 if (((tcp->tcp_state == TCPS_SYN_SENT) || 8940 (tcp->tcp_state == TCPS_SYN_RCVD)) && 8941 (seg_seq == tcp->tcp_iss)) { 8942 (void) tcp_clean_death(tcp, 8943 ECONNREFUSED, 8); 8944 } 8945 break; 8946 8947 case ICMP6_DST_UNREACH_ADMIN: 8948 case ICMP6_DST_UNREACH_NOROUTE: 8949 case ICMP6_DST_UNREACH_BEYONDSCOPE: 8950 case ICMP6_DST_UNREACH_ADDR: 8951 /* Record the error in case we finally time out. */ 8952 tcp->tcp_client_errno = EHOSTUNREACH; 8953 if (((tcp->tcp_state == TCPS_SYN_SENT) || 8954 (tcp->tcp_state == TCPS_SYN_RCVD)) && 8955 (seg_seq == tcp->tcp_iss)) { 8956 if (tcp->tcp_listener != NULL && 8957 tcp->tcp_listener->tcp_syn_defense) { 8958 /* 8959 * Ditch the half-open connection if we 8960 * suspect a SYN attack is under way. 
8961                  */
8962                 tcp_ip_ire_mark_advice(tcp);
8963                 (void) tcp_clean_death(tcp,
8964                     tcp->tcp_client_errno, 9);
8965             }
8966         }
8967
8968
8969         break;
8970     default:
8971         break;
8972     }
8973     break;
8974
8975     case ICMP6_PARAM_PROB:
8976         /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
8977         if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
8978             (uchar_t *)ip6h + icmp6->icmp6_pptr ==
8979             (uchar_t *)nexthdrp) {
8980             if (tcp->tcp_state == TCPS_SYN_SENT ||
8981                 tcp->tcp_state == TCPS_SYN_RCVD) {
8982                 (void) tcp_clean_death(tcp,
8983                     ECONNREFUSED, 10);
8984             }
8985             break;
8986         }
8987         break;
8988
8989     case ICMP6_TIME_EXCEEDED:
8990     default:
8991         break;
8992     }
8993     freemsg(first_mp);
8994 }
8995
8996 /*
8997  * IP recognizes seven kinds of bind requests:
8998  *
8999  * - A zero-length address binds only to the protocol number.
9000  *
9001  * - A 4-byte address is treated as a request to
9002  *   validate that the address is a valid local IPv4
9003  *   address, appropriate for an application to bind to.
9004  *   IP does the verification, but does not make any note
9005  *   of the address at this time.
9006  *
9007  * - A 16-byte address is treated as a request
9008  *   to validate a local IPv6 address, as in the 4-byte
9009  *   address case above.
9010  *
9011  * - A 16-byte sockaddr_in is used to validate the local IPv4 address
9012  *   and also use it for the inbound fanout of packets.
9013  *
9014  * - A 24-byte sockaddr_in6 is used to validate the local IPv6 address
9015  *   and also use it for the inbound fanout of packets.
9016  *
9017  * - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
9018  *   information consisting of local and remote addresses
9019  *   and ports. In this case, the addresses are both
9020  *   validated as appropriate for this operation, and, if
9021  *   so, the information is retained for use in the
9022  *   inbound fanout.
9023  *
9024  * - A 36-byte address (ipa6_conn_t) containing complete IPv6
9025  *   fanout information, like the 12-byte case above.
9026  *
9027  * IP will also fill in the IRE request mblk with information
9028  * regarding our peer. In all cases, we notify IP of our protocol
9029  * type by appending a single protocol byte to the bind request.
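 *
 * As an illustration of the ipa_conn_t case built by tcp_ip_bind_mp()
 * below, the resulting M_PROTO message is laid out as:
 *
 *	struct T_bind_req	PRIM_type = T_BIND_REQ or O_T_BIND_REQ,
 *				ADDR_offset = sizeof (struct T_bind_req),
 *				ADDR_length = sizeof (ipa_conn_t)
 *	ipa_conn_t		ac_laddr, ac_faddr, ac_fport, ac_lport
 *	uchar_t			IPPROTO_TCP (the trailing protocol byte)
 *
 * with an IRE_DB_REQ_TYPE mblk chained on b_cont so that IP can hand
 * back the peer's IRE.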
9030 */ 9031 static mblk_t * 9032 tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim, t_scalar_t addr_length) 9033 { 9034 char *cp; 9035 mblk_t *mp; 9036 struct T_bind_req *tbr; 9037 ipa_conn_t *ac; 9038 ipa6_conn_t *ac6; 9039 sin_t *sin; 9040 sin6_t *sin6; 9041 9042 ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ); 9043 ASSERT((tcp->tcp_family == AF_INET && 9044 tcp->tcp_ipversion == IPV4_VERSION) || 9045 (tcp->tcp_family == AF_INET6 && 9046 (tcp->tcp_ipversion == IPV4_VERSION || 9047 tcp->tcp_ipversion == IPV6_VERSION))); 9048 9049 mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI); 9050 if (!mp) 9051 return (mp); 9052 mp->b_datap->db_type = M_PROTO; 9053 tbr = (struct T_bind_req *)mp->b_rptr; 9054 tbr->PRIM_type = bind_prim; 9055 tbr->ADDR_offset = sizeof (*tbr); 9056 tbr->CONIND_number = 0; 9057 tbr->ADDR_length = addr_length; 9058 cp = (char *)&tbr[1]; 9059 switch (addr_length) { 9060 case sizeof (ipa_conn_t): 9061 ASSERT(tcp->tcp_family == AF_INET); 9062 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 9063 9064 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 9065 if (mp->b_cont == NULL) { 9066 freemsg(mp); 9067 return (NULL); 9068 } 9069 mp->b_cont->b_wptr += sizeof (ire_t); 9070 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 9071 9072 /* cp known to be 32 bit aligned */ 9073 ac = (ipa_conn_t *)cp; 9074 ac->ac_laddr = tcp->tcp_ipha->ipha_src; 9075 ac->ac_faddr = tcp->tcp_remote; 9076 ac->ac_fport = tcp->tcp_fport; 9077 ac->ac_lport = tcp->tcp_lport; 9078 tcp->tcp_hard_binding = 1; 9079 break; 9080 9081 case sizeof (ipa6_conn_t): 9082 ASSERT(tcp->tcp_family == AF_INET6); 9083 9084 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 9085 if (mp->b_cont == NULL) { 9086 freemsg(mp); 9087 return (NULL); 9088 } 9089 mp->b_cont->b_wptr += sizeof (ire_t); 9090 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 9091 9092 /* cp known to be 32 bit aligned */ 9093 ac6 = (ipa6_conn_t *)cp; 9094 if (tcp->tcp_ipversion == IPV4_VERSION) { 9095 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 9096 &ac6->ac6_laddr); 9097 } else { 9098 ac6->ac6_laddr = tcp->tcp_ip6h->ip6_src; 9099 } 9100 ac6->ac6_faddr = tcp->tcp_remote_v6; 9101 ac6->ac6_fport = tcp->tcp_fport; 9102 ac6->ac6_lport = tcp->tcp_lport; 9103 tcp->tcp_hard_binding = 1; 9104 break; 9105 9106 case sizeof (sin_t): 9107 /* 9108 * NOTE: IPV6_ADDR_LEN also has same size. 9109 * Use family to discriminate. 9110 */ 9111 if (tcp->tcp_family == AF_INET) { 9112 sin = (sin_t *)cp; 9113 9114 *sin = sin_null; 9115 sin->sin_family = AF_INET; 9116 sin->sin_addr.s_addr = tcp->tcp_bound_source; 9117 sin->sin_port = tcp->tcp_lport; 9118 break; 9119 } else { 9120 *(in6_addr_t *)cp = tcp->tcp_bound_source_v6; 9121 } 9122 break; 9123 9124 case sizeof (sin6_t): 9125 ASSERT(tcp->tcp_family == AF_INET6); 9126 sin6 = (sin6_t *)cp; 9127 9128 *sin6 = sin6_null; 9129 sin6->sin6_family = AF_INET6; 9130 sin6->sin6_addr = tcp->tcp_bound_source_v6; 9131 sin6->sin6_port = tcp->tcp_lport; 9132 break; 9133 9134 case IP_ADDR_LEN: 9135 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 9136 *(uint32_t *)cp = tcp->tcp_ipha->ipha_src; 9137 break; 9138 9139 } 9140 /* Add protocol number to end */ 9141 cp[addr_length] = (char)IPPROTO_TCP; 9142 mp->b_wptr = (uchar_t *)&cp[addr_length + 1]; 9143 return (mp); 9144 } 9145 9146 /* 9147 * Notify IP that we are having trouble with this connection. IP should 9148 * blow the IRE away and start over. 
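 *
 * The notification built below is an M_IOCTL whose payload is an ipid_t
 * followed by the destination address; roughly (a sketch of the layout
 * only):
 *
 *	struct iocblk	ioc_cmd = IP_IOCTL
 *	ipid_t		ipid_cmd = IP_IOC_IRE_DELETE_NO_REPLY,
 *			ipid_ire_type = IRE_CACHE,
 *			ipid_addr_offset = sizeof (ipid_t)
 *	ipaddr_t	the destination whose IRE_CACHE entry to delete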
9149 */ 9150 static void 9151 tcp_ip_notify(tcp_t *tcp) 9152 { 9153 struct iocblk *iocp; 9154 ipid_t *ipid; 9155 mblk_t *mp; 9156 9157 /* IPv6 has NUD thus notification to delete the IRE is not needed */ 9158 if (tcp->tcp_ipversion == IPV6_VERSION) 9159 return; 9160 9161 mp = mkiocb(IP_IOCTL); 9162 if (mp == NULL) 9163 return; 9164 9165 iocp = (struct iocblk *)mp->b_rptr; 9166 iocp->ioc_count = sizeof (ipid_t) + sizeof (tcp->tcp_ipha->ipha_dst); 9167 9168 mp->b_cont = allocb(iocp->ioc_count, BPRI_HI); 9169 if (!mp->b_cont) { 9170 freeb(mp); 9171 return; 9172 } 9173 9174 ipid = (ipid_t *)mp->b_cont->b_rptr; 9175 mp->b_cont->b_wptr += iocp->ioc_count; 9176 bzero(ipid, sizeof (*ipid)); 9177 ipid->ipid_cmd = IP_IOC_IRE_DELETE_NO_REPLY; 9178 ipid->ipid_ire_type = IRE_CACHE; 9179 ipid->ipid_addr_offset = sizeof (ipid_t); 9180 ipid->ipid_addr_length = sizeof (tcp->tcp_ipha->ipha_dst); 9181 /* 9182 * Note: in the case of source routing we want to blow away the 9183 * route to the first source route hop. 9184 */ 9185 bcopy(&tcp->tcp_ipha->ipha_dst, &ipid[1], 9186 sizeof (tcp->tcp_ipha->ipha_dst)); 9187 9188 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 9189 } 9190 9191 /* Unlink and return any mblk that looks like it contains an ire */ 9192 static mblk_t * 9193 tcp_ire_mp(mblk_t *mp) 9194 { 9195 mblk_t *prev_mp; 9196 9197 for (;;) { 9198 prev_mp = mp; 9199 mp = mp->b_cont; 9200 if (mp == NULL) 9201 break; 9202 switch (DB_TYPE(mp)) { 9203 case IRE_DB_TYPE: 9204 case IRE_DB_REQ_TYPE: 9205 if (prev_mp != NULL) 9206 prev_mp->b_cont = mp->b_cont; 9207 mp->b_cont = NULL; 9208 return (mp); 9209 default: 9210 break; 9211 } 9212 } 9213 return (mp); 9214 } 9215 9216 /* 9217 * Timer callback routine for keepalive probe. We do a fake resend of 9218 * last ACKed byte. Then set a timer using RTO. When the timer expires, 9219 * check to see if we have heard anything from the other end for the last 9220 * RTO period. If we have, set the timer to expire for another 9221 * tcp_keepalive_intrvl and check again. If we have not, set a timer using 9222 * RTO << 1 and check again when it expires. Keep exponentially increasing 9223 * the timeout if we have not heard from the other side. If for more than 9224 * (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything, 9225 * kill the connection unless the keepalive abort threshold is 0. In 9226 * that case, we will probe "forever." 9227 */ 9228 static void 9229 tcp_keepalive_killer(void *arg) 9230 { 9231 mblk_t *mp; 9232 conn_t *connp = (conn_t *)arg; 9233 tcp_t *tcp = connp->conn_tcp; 9234 int32_t firetime; 9235 int32_t idletime; 9236 int32_t ka_intrvl; 9237 tcp_stack_t *tcps = tcp->tcp_tcps; 9238 9239 tcp->tcp_ka_tid = 0; 9240 9241 if (tcp->tcp_fused) 9242 return; 9243 9244 BUMP_MIB(&tcps->tcps_mib, tcpTimKeepalive); 9245 ka_intrvl = tcp->tcp_ka_interval; 9246 9247 /* 9248 * Keepalive probe should only be sent if the application has not 9249 * done a close on the connection. 9250 */ 9251 if (tcp->tcp_state > TCPS_CLOSE_WAIT) { 9252 return; 9253 } 9254 /* Timer fired too early, restart it. */ 9255 if (tcp->tcp_state < TCPS_ESTABLISHED) { 9256 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 9257 MSEC_TO_TICK(ka_intrvl)); 9258 return; 9259 } 9260 9261 idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time); 9262 /* 9263 * If we have not heard from the other side for a long 9264 * time, kill the connection unless the keepalive abort 9265 * threshold is 0. In that case, we will probe "forever." 
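 *
 * As a worked example of the probe schedule implemented below, assume
 * tcp_rto is 400 ms, ka_intrvl is 7200000 ms and
 * tcps_rexmit_interval_max is 60000 ms (illustrative values only): the
 * first probe is followed by a 400 ms wait, then 800, 1600, 3200, ...,
 * doubling per silent interval until capped by
 * MIN(ka_intrvl - 1, tcps_rexmit_interval_max), here 60000 ms.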
9266 */ 9267 if (tcp->tcp_ka_abort_thres != 0 && 9268 idletime > (ka_intrvl + tcp->tcp_ka_abort_thres)) { 9269 BUMP_MIB(&tcps->tcps_mib, tcpTimKeepaliveDrop); 9270 (void) tcp_clean_death(tcp, tcp->tcp_client_errno ? 9271 tcp->tcp_client_errno : ETIMEDOUT, 11); 9272 return; 9273 } 9274 9275 if (tcp->tcp_snxt == tcp->tcp_suna && 9276 idletime >= ka_intrvl) { 9277 /* Fake resend of last ACKed byte. */ 9278 mblk_t *mp1 = allocb(1, BPRI_LO); 9279 9280 if (mp1 != NULL) { 9281 *mp1->b_wptr++ = '\0'; 9282 mp = tcp_xmit_mp(tcp, mp1, 1, NULL, NULL, 9283 tcp->tcp_suna - 1, B_FALSE, NULL, B_TRUE); 9284 freeb(mp1); 9285 /* 9286 * if allocation failed, fall through to start the 9287 * timer back. 9288 */ 9289 if (mp != NULL) { 9290 TCP_RECORD_TRACE(tcp, mp, 9291 TCP_TRACE_SEND_PKT); 9292 tcp_send_data(tcp, tcp->tcp_wq, mp); 9293 BUMP_MIB(&tcps->tcps_mib, 9294 tcpTimKeepaliveProbe); 9295 if (tcp->tcp_ka_last_intrvl != 0) { 9296 int max; 9297 /* 9298 * We should probe again at least 9299 * in ka_intrvl, but not more than 9300 * tcp_rexmit_interval_max. 9301 */ 9302 max = tcps->tcps_rexmit_interval_max; 9303 firetime = MIN(ka_intrvl - 1, 9304 tcp->tcp_ka_last_intrvl << 1); 9305 if (firetime > max) 9306 firetime = max; 9307 } else { 9308 firetime = tcp->tcp_rto; 9309 } 9310 tcp->tcp_ka_tid = TCP_TIMER(tcp, 9311 tcp_keepalive_killer, 9312 MSEC_TO_TICK(firetime)); 9313 tcp->tcp_ka_last_intrvl = firetime; 9314 return; 9315 } 9316 } 9317 } else { 9318 tcp->tcp_ka_last_intrvl = 0; 9319 } 9320 9321 /* firetime can be negative if (mp1 == NULL || mp == NULL) */ 9322 if ((firetime = ka_intrvl - idletime) < 0) { 9323 firetime = ka_intrvl; 9324 } 9325 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 9326 MSEC_TO_TICK(firetime)); 9327 } 9328 9329 int 9330 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk) 9331 { 9332 queue_t *q = tcp->tcp_rq; 9333 int32_t mss = tcp->tcp_mss; 9334 int maxpsz; 9335 9336 if (TCP_IS_DETACHED(tcp)) 9337 return (mss); 9338 9339 if (tcp->tcp_fused) { 9340 maxpsz = tcp_fuse_maxpsz_set(tcp); 9341 mss = INFPSZ; 9342 } else if (tcp->tcp_mdt || tcp->tcp_lso || tcp->tcp_maxpsz == 0) { 9343 /* 9344 * Set the sd_qn_maxpsz according to the socket send buffer 9345 * size, and sd_maxblk to INFPSZ (-1). This will essentially 9346 * instruct the stream head to copyin user data into contiguous 9347 * kernel-allocated buffers without breaking it up into smaller 9348 * chunks. We round up the buffer size to the nearest SMSS. 9349 */ 9350 maxpsz = MSS_ROUNDUP(tcp->tcp_xmit_hiwater, mss); 9351 if (tcp->tcp_kssl_ctx == NULL) 9352 mss = INFPSZ; 9353 else 9354 mss = SSL3_MAX_RECORD_LEN; 9355 } else { 9356 /* 9357 * Set sd_qn_maxpsz to approx half the (receivers) buffer 9358 * (and a multiple of the mss). This instructs the stream 9359 * head to break down larger than SMSS writes into SMSS- 9360 * size mblks, up to tcp_maxpsz_multiplier mblks at a time. 9361 */ 9362 maxpsz = tcp->tcp_maxpsz * mss; 9363 if (maxpsz > tcp->tcp_xmit_hiwater/2) { 9364 maxpsz = tcp->tcp_xmit_hiwater/2; 9365 /* Round up to nearest mss */ 9366 maxpsz = MSS_ROUNDUP(maxpsz, mss); 9367 } 9368 } 9369 (void) setmaxps(q, maxpsz); 9370 tcp->tcp_wq->q_maxpsz = maxpsz; 9371 9372 if (set_maxblk) 9373 (void) mi_set_sth_maxblk(q, mss); 9374 9375 return (mss); 9376 } 9377 9378 /* 9379 * Extract option values from a tcp header. We put any found values into the 9380 * tcpopt struct and return a bitmask saying which options were found. 
9381 */ 9382 static int 9383 tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt) 9384 { 9385 uchar_t *endp; 9386 int len; 9387 uint32_t mss; 9388 uchar_t *up = (uchar_t *)tcph; 9389 int found = 0; 9390 int32_t sack_len; 9391 tcp_seq sack_begin, sack_end; 9392 tcp_t *tcp; 9393 9394 endp = up + TCP_HDR_LENGTH(tcph); 9395 up += TCP_MIN_HEADER_LENGTH; 9396 while (up < endp) { 9397 len = endp - up; 9398 switch (*up) { 9399 case TCPOPT_EOL: 9400 break; 9401 9402 case TCPOPT_NOP: 9403 up++; 9404 continue; 9405 9406 case TCPOPT_MAXSEG: 9407 if (len < TCPOPT_MAXSEG_LEN || 9408 up[1] != TCPOPT_MAXSEG_LEN) 9409 break; 9410 9411 mss = BE16_TO_U16(up+2); 9412 /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 9413 tcpopt->tcp_opt_mss = mss; 9414 found |= TCP_OPT_MSS_PRESENT; 9415 9416 up += TCPOPT_MAXSEG_LEN; 9417 continue; 9418 9419 case TCPOPT_WSCALE: 9420 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 9421 break; 9422 9423 if (up[2] > TCP_MAX_WINSHIFT) 9424 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 9425 else 9426 tcpopt->tcp_opt_wscale = up[2]; 9427 found |= TCP_OPT_WSCALE_PRESENT; 9428 9429 up += TCPOPT_WS_LEN; 9430 continue; 9431 9432 case TCPOPT_SACK_PERMITTED: 9433 if (len < TCPOPT_SACK_OK_LEN || 9434 up[1] != TCPOPT_SACK_OK_LEN) 9435 break; 9436 found |= TCP_OPT_SACK_OK_PRESENT; 9437 up += TCPOPT_SACK_OK_LEN; 9438 continue; 9439 9440 case TCPOPT_SACK: 9441 if (len <= 2 || up[1] <= 2 || len < up[1]) 9442 break; 9443 9444 /* If TCP is not interested in SACK blks... */ 9445 if ((tcp = tcpopt->tcp) == NULL) { 9446 up += up[1]; 9447 continue; 9448 } 9449 sack_len = up[1] - TCPOPT_HEADER_LEN; 9450 up += TCPOPT_HEADER_LEN; 9451 9452 /* 9453 * If the list is empty, allocate one and assume 9454 * nothing is sack'ed. 9455 */ 9456 ASSERT(tcp->tcp_sack_info != NULL); 9457 if (tcp->tcp_notsack_list == NULL) { 9458 tcp_notsack_update(&(tcp->tcp_notsack_list), 9459 tcp->tcp_suna, tcp->tcp_snxt, 9460 &(tcp->tcp_num_notsack_blk), 9461 &(tcp->tcp_cnt_notsack_list)); 9462 9463 /* 9464 * Make sure tcp_notsack_list is not NULL. 9465 * This happens when kmem_alloc(KM_NOSLEEP) 9466 * returns NULL. 9467 */ 9468 if (tcp->tcp_notsack_list == NULL) { 9469 up += sack_len; 9470 continue; 9471 } 9472 tcp->tcp_fack = tcp->tcp_suna; 9473 } 9474 9475 while (sack_len > 0) { 9476 if (up + 8 > endp) { 9477 up = endp; 9478 break; 9479 } 9480 sack_begin = BE32_TO_U32(up); 9481 up += 4; 9482 sack_end = BE32_TO_U32(up); 9483 up += 4; 9484 sack_len -= 8; 9485 /* 9486 * Bounds checking. Make sure the SACK 9487 * info is within tcp_suna and tcp_snxt. 9488 * If this SACK blk is out of bound, ignore 9489 * it but continue to parse the following 9490 * blks. 
9491 */ 9492 if (SEQ_LEQ(sack_end, sack_begin) || 9493 SEQ_LT(sack_begin, tcp->tcp_suna) || 9494 SEQ_GT(sack_end, tcp->tcp_snxt)) { 9495 continue; 9496 } 9497 tcp_notsack_insert(&(tcp->tcp_notsack_list), 9498 sack_begin, sack_end, 9499 &(tcp->tcp_num_notsack_blk), 9500 &(tcp->tcp_cnt_notsack_list)); 9501 if (SEQ_GT(sack_end, tcp->tcp_fack)) { 9502 tcp->tcp_fack = sack_end; 9503 } 9504 } 9505 found |= TCP_OPT_SACK_PRESENT; 9506 continue; 9507 9508 case TCPOPT_TSTAMP: 9509 if (len < TCPOPT_TSTAMP_LEN || 9510 up[1] != TCPOPT_TSTAMP_LEN) 9511 break; 9512 9513 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2); 9514 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6); 9515 9516 found |= TCP_OPT_TSTAMP_PRESENT; 9517 9518 up += TCPOPT_TSTAMP_LEN; 9519 continue; 9520 9521 default: 9522 if (len <= 1 || len < (int)up[1] || up[1] == 0) 9523 break; 9524 up += up[1]; 9525 continue; 9526 } 9527 break; 9528 } 9529 return (found); 9530 } 9531 9532 /* 9533 * Set the mss associated with a particular tcp based on its current value, 9534 * and a new one passed in. Observe minimums and maximums, and reset 9535 * other state variables that we want to view as multiples of mss. 9536 * 9537 * This function is called in various places mainly because 9538 * 1) Various stuffs, tcp_mss, tcp_cwnd, ... need to be adjusted when the 9539 * other side's SYN/SYN-ACK packet arrives. 9540 * 2) PMTUd may get us a new MSS. 9541 * 3) If the other side stops sending us timestamp option, we need to 9542 * increase the MSS size to use the extra bytes available. 9543 * 9544 * do_ss is used to control whether we will be doing slow start or 9545 * not if there is a change in the mss. Note that for some events like 9546 * tcp_paws_check() we allow the tcp_cwnd to adjust to the new mss but 9547 * do not perform a slow start specifically. 9548 */ 9549 static void 9550 tcp_mss_set(tcp_t *tcp, uint32_t mss, boolean_t do_ss) 9551 { 9552 uint32_t mss_max; 9553 tcp_stack_t *tcps = tcp->tcp_tcps; 9554 9555 if (tcp->tcp_ipversion == IPV4_VERSION) 9556 mss_max = tcps->tcps_mss_max_ipv4; 9557 else 9558 mss_max = tcps->tcps_mss_max_ipv6; 9559 9560 if (mss < tcps->tcps_mss_min) 9561 mss = tcps->tcps_mss_min; 9562 if (mss > mss_max) 9563 mss = mss_max; 9564 /* 9565 * Unless naglim has been set by our client to 9566 * a non-mss value, force naglim to track mss. 9567 * This can help to aggregate small writes. 9568 */ 9569 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim) 9570 tcp->tcp_naglim = mss; 9571 /* 9572 * TCP should be able to buffer at least 4 MSS data for obvious 9573 * performance reason. 9574 */ 9575 if ((mss << 2) > tcp->tcp_xmit_hiwater) 9576 tcp->tcp_xmit_hiwater = mss << 2; 9577 9578 /* 9579 * Check if we need to apply the tcp_init_cwnd here. If 9580 * it is set and the MSS gets bigger (should not happen 9581 * normally), we need to adjust the resulting tcp_cwnd properly. 9582 * The new tcp_cwnd should not get bigger. 9583 */ 9584 /* 9585 * We need to avoid setting tcp_cwnd to its slow start value 9586 * unnecessarily. However we have to let the tcp_cwnd adjust 9587 * to the modified mss. 
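 *
 * The slow start branch below uses an RFC 3390 style clamp,
 * min(tcps_slow_start_initial * mss, min(4 * mss, max(2 * mss,
 * 4380 / mss * mss))). Worked example, assuming mss = 1460 and
 * tcps_slow_start_initial = 4:
 *
 *	4380 / 1460 * 1460	= 4380
 *	max(2 * 1460, 4380)	= 4380
 *	min(4 * 1460, 4380)	= 4380
 *	min(4 * 1460, 4380)	= 4380, i.e. three full segments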
9588 */ 9589 if (tcp->tcp_init_cwnd == 0 && do_ss) { 9590 tcp->tcp_cwnd = MIN(tcps->tcps_slow_start_initial * 9591 mss, MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss))); 9592 } else { 9593 if (tcp->tcp_mss < mss) { 9594 tcp->tcp_cwnd = MAX(1, 9595 (tcp->tcp_init_cwnd * tcp->tcp_mss / 9596 mss)) * mss; 9597 } else { 9598 tcp->tcp_cwnd = tcp->tcp_init_cwnd * mss; 9599 } 9600 } 9601 tcp->tcp_mss = mss; 9602 tcp->tcp_cwnd_cnt = 0; 9603 (void) tcp_maxpsz_set(tcp, B_TRUE); 9604 } 9605 9606 /* For /dev/tcp aka AF_INET open */ 9607 static int 9608 tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 9609 { 9610 return (tcp_open(q, devp, flag, sflag, credp, B_FALSE)); 9611 } 9612 9613 /* For /dev/tcp6 aka AF_INET6 open */ 9614 static int 9615 tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 9616 { 9617 return (tcp_open(q, devp, flag, sflag, credp, B_TRUE)); 9618 } 9619 9620 static int 9621 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp, 9622 boolean_t isv6) 9623 { 9624 tcp_t *tcp = NULL; 9625 conn_t *connp; 9626 int err; 9627 vmem_t *minor_arena = NULL; 9628 dev_t conn_dev; 9629 zoneid_t zoneid; 9630 tcp_stack_t *tcps = NULL; 9631 9632 if (q->q_ptr != NULL) 9633 return (0); 9634 9635 if (sflag == MODOPEN) 9636 return (EINVAL); 9637 9638 if (!(flag & SO_ACCEPTOR)) { 9639 /* 9640 * Special case for install: miniroot needs to be able to 9641 * access files via NFS as though it were always in the 9642 * global zone. 9643 */ 9644 if (credp == kcred && nfs_global_client_only != 0) { 9645 zoneid = GLOBAL_ZONEID; 9646 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)-> 9647 netstack_tcp; 9648 ASSERT(tcps != NULL); 9649 } else { 9650 netstack_t *ns; 9651 9652 ns = netstack_find_by_cred(credp); 9653 ASSERT(ns != NULL); 9654 tcps = ns->netstack_tcp; 9655 ASSERT(tcps != NULL); 9656 9657 /* 9658 * For exclusive stacks we set the zoneid to zero 9659 * to make TCP operate as if in the global zone. 9660 */ 9661 if (tcps->tcps_netstack->netstack_stackid != 9662 GLOBAL_NETSTACKID) 9663 zoneid = GLOBAL_ZONEID; 9664 else 9665 zoneid = crgetzoneid(credp); 9666 } 9667 /* 9668 * For stackid zero this is done from strplumb.c, but 9669 * non-zero stackids are handled here. 9670 */ 9671 if (tcps->tcps_g_q == NULL && 9672 tcps->tcps_netstack->netstack_stackid != 9673 GLOBAL_NETSTACKID) { 9674 tcp_g_q_setup(tcps); 9675 } 9676 } 9677 9678 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) && 9679 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) { 9680 minor_arena = ip_minor_arena_la; 9681 } else { 9682 /* 9683 * Either minor numbers in the large arena were exhausted 9684 * or a non socket application is doing the open. 9685 * Try to allocate from the small arena. 9686 */ 9687 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) { 9688 if (tcps != NULL) 9689 netstack_rele(tcps->tcps_netstack); 9690 return (EBUSY); 9691 } 9692 minor_arena = ip_minor_arena_sa; 9693 } 9694 ASSERT(minor_arena != NULL); 9695 9696 *devp = makedevice(getemajor(*devp), (minor_t)conn_dev); 9697 9698 if (flag & SO_ACCEPTOR) { 9699 /* No netstack_find_by_cred, hence no netstack_rele needed */ 9700 ASSERT(tcps == NULL); 9701 q->q_qinfo = &tcp_acceptor_rinit; 9702 /* 9703 * the conn_dev and minor_arena will be subsequently used by 9704 * tcp_wput_accept() and tcpclose_accept() to figure out the 9705 * minor device number for this connection from the q_ptr. 
9706 */ 9707 RD(q)->q_ptr = (void *)conn_dev; 9708 WR(q)->q_qinfo = &tcp_acceptor_winit; 9709 WR(q)->q_ptr = (void *)minor_arena; 9710 qprocson(q); 9711 return (0); 9712 } 9713 9714 connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt), tcps); 9715 /* 9716 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt, 9717 * so we drop it by one. 9718 */ 9719 netstack_rele(tcps->tcps_netstack); 9720 if (connp == NULL) { 9721 inet_minor_free(minor_arena, conn_dev); 9722 q->q_ptr = NULL; 9723 return (ENOSR); 9724 } 9725 connp->conn_sqp = IP_SQUEUE_GET(lbolt); 9726 tcp = connp->conn_tcp; 9727 9728 q->q_ptr = WR(q)->q_ptr = connp; 9729 if (isv6) { 9730 connp->conn_flags |= (IPCL_TCP6|IPCL_ISV6); 9731 connp->conn_send = ip_output_v6; 9732 connp->conn_af_isv6 = B_TRUE; 9733 connp->conn_pkt_isv6 = B_TRUE; 9734 connp->conn_src_preferences = IPV6_PREFER_SRC_DEFAULT; 9735 tcp->tcp_ipversion = IPV6_VERSION; 9736 tcp->tcp_family = AF_INET6; 9737 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 9738 } else { 9739 connp->conn_flags |= IPCL_TCP4; 9740 connp->conn_send = ip_output; 9741 connp->conn_af_isv6 = B_FALSE; 9742 connp->conn_pkt_isv6 = B_FALSE; 9743 tcp->tcp_ipversion = IPV4_VERSION; 9744 tcp->tcp_family = AF_INET; 9745 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 9746 } 9747 9748 /* 9749 * TCP keeps a copy of cred for cache locality reasons but 9750 * we put a reference only once. If connp->conn_cred 9751 * becomes invalid, tcp_cred should also be set to NULL. 9752 */ 9753 tcp->tcp_cred = connp->conn_cred = credp; 9754 crhold(connp->conn_cred); 9755 tcp->tcp_cpid = curproc->p_pid; 9756 tcp->tcp_open_time = lbolt64; 9757 connp->conn_zoneid = zoneid; 9758 connp->conn_mlp_type = mlptSingle; 9759 connp->conn_ulp_labeled = !is_system_labeled(); 9760 ASSERT(connp->conn_netstack == tcps->tcps_netstack); 9761 ASSERT(tcp->tcp_tcps == tcps); 9762 9763 /* 9764 * If the caller has the process-wide flag set, then default to MAC 9765 * exempt mode. This allows read-down to unlabeled hosts. 9766 */ 9767 if (getpflags(NET_MAC_AWARE, credp) != 0) 9768 connp->conn_mac_exempt = B_TRUE; 9769 9770 connp->conn_dev = conn_dev; 9771 connp->conn_minor_arena = minor_arena; 9772 9773 ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6); 9774 ASSERT(WR(q)->q_qinfo == &tcp_winit); 9775 9776 if (flag & SO_SOCKSTR) { 9777 /* 9778 * No need to insert a socket in tcp acceptor hash. 9779 * If it was a socket acceptor stream, we dealt with 9780 * it above. A socket listener can never accept a 9781 * connection and doesn't need acceptor_id. 9782 */ 9783 connp->conn_flags |= IPCL_SOCKET; 9784 tcp->tcp_issocket = 1; 9785 WR(q)->q_qinfo = &tcp_sock_winit; 9786 } else { 9787 #ifdef _ILP32 9788 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 9789 #else 9790 tcp->tcp_acceptor_id = conn_dev; 9791 #endif /* _ILP32 */ 9792 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 9793 } 9794 9795 if (tcps->tcps_trace) 9796 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_SLEEP); 9797 9798 err = tcp_init(tcp, q); 9799 if (err != 0) { 9800 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 9801 tcp_acceptor_hash_remove(tcp); 9802 CONN_DEC_REF(connp); 9803 q->q_ptr = WR(q)->q_ptr = NULL; 9804 return (err); 9805 } 9806 9807 RD(q)->q_hiwat = tcps->tcps_recv_hiwat; 9808 tcp->tcp_rwnd = tcps->tcps_recv_hiwat; 9809 9810 /* Non-zero default values */ 9811 connp->conn_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; 9812 /* 9813 * Put the ref for TCP. Ref for IP was already put 9814 * by ipcl_conn_create. 
Also Make the conn_t globally 9815 * visible to walkers 9816 */ 9817 mutex_enter(&connp->conn_lock); 9818 CONN_INC_REF_LOCKED(connp); 9819 ASSERT(connp->conn_ref == 2); 9820 connp->conn_state_flags &= ~CONN_INCIPIENT; 9821 mutex_exit(&connp->conn_lock); 9822 9823 qprocson(q); 9824 return (0); 9825 } 9826 9827 /* 9828 * Some TCP options can be "set" by requesting them in the option 9829 * buffer. This is needed for XTI feature test though we do not 9830 * allow it in general. We interpret that this mechanism is more 9831 * applicable to OSI protocols and need not be allowed in general. 9832 * This routine filters out options for which it is not allowed (most) 9833 * and lets through those (few) for which it is. [ The XTI interface 9834 * test suite specifics will imply that any XTI_GENERIC level XTI_* if 9835 * ever implemented will have to be allowed here ]. 9836 */ 9837 static boolean_t 9838 tcp_allow_connopt_set(int level, int name) 9839 { 9840 9841 switch (level) { 9842 case IPPROTO_TCP: 9843 switch (name) { 9844 case TCP_NODELAY: 9845 return (B_TRUE); 9846 default: 9847 return (B_FALSE); 9848 } 9849 /*NOTREACHED*/ 9850 default: 9851 return (B_FALSE); 9852 } 9853 /*NOTREACHED*/ 9854 } 9855 9856 /* 9857 * This routine gets default values of certain options whose default 9858 * values are maintained by protocol specific code 9859 */ 9860 /* ARGSUSED */ 9861 int 9862 tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr) 9863 { 9864 int32_t *i1 = (int32_t *)ptr; 9865 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 9866 9867 switch (level) { 9868 case IPPROTO_TCP: 9869 switch (name) { 9870 case TCP_NOTIFY_THRESHOLD: 9871 *i1 = tcps->tcps_ip_notify_interval; 9872 break; 9873 case TCP_ABORT_THRESHOLD: 9874 *i1 = tcps->tcps_ip_abort_interval; 9875 break; 9876 case TCP_CONN_NOTIFY_THRESHOLD: 9877 *i1 = tcps->tcps_ip_notify_cinterval; 9878 break; 9879 case TCP_CONN_ABORT_THRESHOLD: 9880 *i1 = tcps->tcps_ip_abort_cinterval; 9881 break; 9882 default: 9883 return (-1); 9884 } 9885 break; 9886 case IPPROTO_IP: 9887 switch (name) { 9888 case IP_TTL: 9889 *i1 = tcps->tcps_ipv4_ttl; 9890 break; 9891 default: 9892 return (-1); 9893 } 9894 break; 9895 case IPPROTO_IPV6: 9896 switch (name) { 9897 case IPV6_UNICAST_HOPS: 9898 *i1 = tcps->tcps_ipv6_hoplimit; 9899 break; 9900 default: 9901 return (-1); 9902 } 9903 break; 9904 default: 9905 return (-1); 9906 } 9907 return (sizeof (int)); 9908 } 9909 9910 9911 /* 9912 * TCP routine to get the values of options. 9913 */ 9914 int 9915 tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr) 9916 { 9917 int *i1 = (int *)ptr; 9918 conn_t *connp = Q_TO_CONN(q); 9919 tcp_t *tcp = connp->conn_tcp; 9920 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 9921 9922 switch (level) { 9923 case SOL_SOCKET: 9924 switch (name) { 9925 case SO_LINGER: { 9926 struct linger *lgr = (struct linger *)ptr; 9927 9928 lgr->l_onoff = tcp->tcp_linger ? SO_LINGER : 0; 9929 lgr->l_linger = tcp->tcp_lingertime; 9930 } 9931 return (sizeof (struct linger)); 9932 case SO_DEBUG: 9933 *i1 = tcp->tcp_debug ? SO_DEBUG : 0; 9934 break; 9935 case SO_KEEPALIVE: 9936 *i1 = tcp->tcp_ka_enabled ? SO_KEEPALIVE : 0; 9937 break; 9938 case SO_DONTROUTE: 9939 *i1 = tcp->tcp_dontroute ? SO_DONTROUTE : 0; 9940 break; 9941 case SO_USELOOPBACK: 9942 *i1 = tcp->tcp_useloopback ? SO_USELOOPBACK : 0; 9943 break; 9944 case SO_BROADCAST: 9945 *i1 = tcp->tcp_broadcast ? SO_BROADCAST : 0; 9946 break; 9947 case SO_REUSEADDR: 9948 *i1 = tcp->tcp_reuseaddr ? 
SO_REUSEADDR : 0; 9949 break; 9950 case SO_OOBINLINE: 9951 *i1 = tcp->tcp_oobinline ? SO_OOBINLINE : 0; 9952 break; 9953 case SO_DGRAM_ERRIND: 9954 *i1 = tcp->tcp_dgram_errind ? SO_DGRAM_ERRIND : 0; 9955 break; 9956 case SO_TYPE: 9957 *i1 = SOCK_STREAM; 9958 break; 9959 case SO_SNDBUF: 9960 *i1 = tcp->tcp_xmit_hiwater; 9961 break; 9962 case SO_RCVBUF: 9963 *i1 = RD(q)->q_hiwat; 9964 break; 9965 case SO_SND_COPYAVOID: 9966 *i1 = tcp->tcp_snd_zcopy_on ? 9967 SO_SND_COPYAVOID : 0; 9968 break; 9969 case SO_ALLZONES: 9970 *i1 = connp->conn_allzones ? 1 : 0; 9971 break; 9972 case SO_ANON_MLP: 9973 *i1 = connp->conn_anon_mlp; 9974 break; 9975 case SO_MAC_EXEMPT: 9976 *i1 = connp->conn_mac_exempt; 9977 break; 9978 case SO_EXCLBIND: 9979 *i1 = tcp->tcp_exclbind ? SO_EXCLBIND : 0; 9980 break; 9981 case SO_PROTOTYPE: 9982 *i1 = IPPROTO_TCP; 9983 break; 9984 case SO_DOMAIN: 9985 *i1 = tcp->tcp_family; 9986 break; 9987 default: 9988 return (-1); 9989 } 9990 break; 9991 case IPPROTO_TCP: 9992 switch (name) { 9993 case TCP_NODELAY: 9994 *i1 = (tcp->tcp_naglim == 1) ? TCP_NODELAY : 0; 9995 break; 9996 case TCP_MAXSEG: 9997 *i1 = tcp->tcp_mss; 9998 break; 9999 case TCP_NOTIFY_THRESHOLD: 10000 *i1 = (int)tcp->tcp_first_timer_threshold; 10001 break; 10002 case TCP_ABORT_THRESHOLD: 10003 *i1 = tcp->tcp_second_timer_threshold; 10004 break; 10005 case TCP_CONN_NOTIFY_THRESHOLD: 10006 *i1 = tcp->tcp_first_ctimer_threshold; 10007 break; 10008 case TCP_CONN_ABORT_THRESHOLD: 10009 *i1 = tcp->tcp_second_ctimer_threshold; 10010 break; 10011 case TCP_RECVDSTADDR: 10012 *i1 = tcp->tcp_recvdstaddr; 10013 break; 10014 case TCP_ANONPRIVBIND: 10015 *i1 = tcp->tcp_anon_priv_bind; 10016 break; 10017 case TCP_EXCLBIND: 10018 *i1 = tcp->tcp_exclbind ? TCP_EXCLBIND : 0; 10019 break; 10020 case TCP_INIT_CWND: 10021 *i1 = tcp->tcp_init_cwnd; 10022 break; 10023 case TCP_KEEPALIVE_THRESHOLD: 10024 *i1 = tcp->tcp_ka_interval; 10025 break; 10026 case TCP_KEEPALIVE_ABORT_THRESHOLD: 10027 *i1 = tcp->tcp_ka_abort_thres; 10028 break; 10029 case TCP_CORK: 10030 *i1 = tcp->tcp_cork; 10031 break; 10032 default: 10033 return (-1); 10034 } 10035 break; 10036 case IPPROTO_IP: 10037 if (tcp->tcp_family != AF_INET) 10038 return (-1); 10039 switch (name) { 10040 case IP_OPTIONS: 10041 case T_IP_OPTIONS: { 10042 /* 10043 * This is compatible with BSD in that in only return 10044 * the reverse source route with the final destination 10045 * as the last entry. The first 4 bytes of the option 10046 * will contain the final destination. 10047 */ 10048 int opt_len; 10049 10050 opt_len = (char *)tcp->tcp_tcph - (char *)tcp->tcp_ipha; 10051 opt_len -= tcp->tcp_label_len + IP_SIMPLE_HDR_LENGTH; 10052 ASSERT(opt_len >= 0); 10053 /* Caller ensures enough space */ 10054 if (opt_len > 0) { 10055 /* 10056 * TODO: Do we have to handle getsockopt on an 10057 * initiator as well? 10058 */ 10059 return (ip_opt_get_user(tcp->tcp_ipha, ptr)); 10060 } 10061 return (0); 10062 } 10063 case IP_TOS: 10064 case T_IP_TOS: 10065 *i1 = (int)tcp->tcp_ipha->ipha_type_of_service; 10066 break; 10067 case IP_TTL: 10068 *i1 = (int)tcp->tcp_ipha->ipha_ttl; 10069 break; 10070 case IP_NEXTHOP: 10071 /* Handled at IP level */ 10072 return (-EINVAL); 10073 default: 10074 return (-1); 10075 } 10076 break; 10077 case IPPROTO_IPV6: 10078 /* 10079 * IPPROTO_IPV6 options are only supported for sockets 10080 * that are using IPv6 on the wire. 
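 *
 * In user-level terms, an IPPROTO_IPV6 getsockopt() only succeeds on a
 * socket whose traffic is actually IPv6 on the wire (not, for example,
 * an AF_INET6 socket connected to a v4-mapped address); a sketch, with
 * error handling elided:
 *
 *	int hops;
 *	socklen_t len = sizeof (hops);
 *	(void) getsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS,
 *	    &hops, &len);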
10081 */ 10082 if (tcp->tcp_ipversion != IPV6_VERSION) { 10083 return (-1); 10084 } 10085 switch (name) { 10086 case IPV6_UNICAST_HOPS: 10087 *i1 = (unsigned int) tcp->tcp_ip6h->ip6_hops; 10088 break; /* goto sizeof (int) option return */ 10089 case IPV6_BOUND_IF: 10090 /* Zero if not set */ 10091 *i1 = tcp->tcp_bound_if; 10092 break; /* goto sizeof (int) option return */ 10093 case IPV6_RECVPKTINFO: 10094 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) 10095 *i1 = 1; 10096 else 10097 *i1 = 0; 10098 break; /* goto sizeof (int) option return */ 10099 case IPV6_RECVTCLASS: 10100 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS) 10101 *i1 = 1; 10102 else 10103 *i1 = 0; 10104 break; /* goto sizeof (int) option return */ 10105 case IPV6_RECVHOPLIMIT: 10106 if (tcp->tcp_ipv6_recvancillary & 10107 TCP_IPV6_RECVHOPLIMIT) 10108 *i1 = 1; 10109 else 10110 *i1 = 0; 10111 break; /* goto sizeof (int) option return */ 10112 case IPV6_RECVHOPOPTS: 10113 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) 10114 *i1 = 1; 10115 else 10116 *i1 = 0; 10117 break; /* goto sizeof (int) option return */ 10118 case IPV6_RECVDSTOPTS: 10119 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVDSTOPTS) 10120 *i1 = 1; 10121 else 10122 *i1 = 0; 10123 break; /* goto sizeof (int) option return */ 10124 case _OLD_IPV6_RECVDSTOPTS: 10125 if (tcp->tcp_ipv6_recvancillary & 10126 TCP_OLD_IPV6_RECVDSTOPTS) 10127 *i1 = 1; 10128 else 10129 *i1 = 0; 10130 break; /* goto sizeof (int) option return */ 10131 case IPV6_RECVRTHDR: 10132 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) 10133 *i1 = 1; 10134 else 10135 *i1 = 0; 10136 break; /* goto sizeof (int) option return */ 10137 case IPV6_RECVRTHDRDSTOPTS: 10138 if (tcp->tcp_ipv6_recvancillary & 10139 TCP_IPV6_RECVRTDSTOPTS) 10140 *i1 = 1; 10141 else 10142 *i1 = 0; 10143 break; /* goto sizeof (int) option return */ 10144 case IPV6_PKTINFO: { 10145 /* XXX assumes that caller has room for max size! 
*/ 10146 struct in6_pktinfo *pkti; 10147 10148 pkti = (struct in6_pktinfo *)ptr; 10149 if (ipp->ipp_fields & IPPF_IFINDEX) 10150 pkti->ipi6_ifindex = ipp->ipp_ifindex; 10151 else 10152 pkti->ipi6_ifindex = 0; 10153 if (ipp->ipp_fields & IPPF_ADDR) 10154 pkti->ipi6_addr = ipp->ipp_addr; 10155 else 10156 pkti->ipi6_addr = ipv6_all_zeros; 10157 return (sizeof (struct in6_pktinfo)); 10158 } 10159 case IPV6_TCLASS: 10160 if (ipp->ipp_fields & IPPF_TCLASS) 10161 *i1 = ipp->ipp_tclass; 10162 else 10163 *i1 = IPV6_FLOW_TCLASS( 10164 IPV6_DEFAULT_VERS_AND_FLOW); 10165 break; /* goto sizeof (int) option return */ 10166 case IPV6_NEXTHOP: { 10167 sin6_t *sin6 = (sin6_t *)ptr; 10168 10169 if (!(ipp->ipp_fields & IPPF_NEXTHOP)) 10170 return (0); 10171 *sin6 = sin6_null; 10172 sin6->sin6_family = AF_INET6; 10173 sin6->sin6_addr = ipp->ipp_nexthop; 10174 return (sizeof (sin6_t)); 10175 } 10176 case IPV6_HOPOPTS: 10177 if (!(ipp->ipp_fields & IPPF_HOPOPTS)) 10178 return (0); 10179 if (ipp->ipp_hopoptslen <= tcp->tcp_label_len) 10180 return (0); 10181 bcopy((char *)ipp->ipp_hopopts + tcp->tcp_label_len, 10182 ptr, ipp->ipp_hopoptslen - tcp->tcp_label_len); 10183 if (tcp->tcp_label_len > 0) { 10184 ptr[0] = ((char *)ipp->ipp_hopopts)[0]; 10185 ptr[1] = (ipp->ipp_hopoptslen - 10186 tcp->tcp_label_len + 7) / 8 - 1; 10187 } 10188 return (ipp->ipp_hopoptslen - tcp->tcp_label_len); 10189 case IPV6_RTHDRDSTOPTS: 10190 if (!(ipp->ipp_fields & IPPF_RTDSTOPTS)) 10191 return (0); 10192 bcopy(ipp->ipp_rtdstopts, ptr, ipp->ipp_rtdstoptslen); 10193 return (ipp->ipp_rtdstoptslen); 10194 case IPV6_RTHDR: 10195 if (!(ipp->ipp_fields & IPPF_RTHDR)) 10196 return (0); 10197 bcopy(ipp->ipp_rthdr, ptr, ipp->ipp_rthdrlen); 10198 return (ipp->ipp_rthdrlen); 10199 case IPV6_DSTOPTS: 10200 if (!(ipp->ipp_fields & IPPF_DSTOPTS)) 10201 return (0); 10202 bcopy(ipp->ipp_dstopts, ptr, ipp->ipp_dstoptslen); 10203 return (ipp->ipp_dstoptslen); 10204 case IPV6_SRC_PREFERENCES: 10205 return (ip6_get_src_preferences(connp, 10206 (uint32_t *)ptr)); 10207 case IPV6_PATHMTU: { 10208 struct ip6_mtuinfo *mtuinfo = (struct ip6_mtuinfo *)ptr; 10209 10210 if (tcp->tcp_state < TCPS_ESTABLISHED) 10211 return (-1); 10212 10213 return (ip_fill_mtuinfo(&connp->conn_remv6, 10214 connp->conn_fport, mtuinfo, 10215 connp->conn_netstack)); 10216 } 10217 default: 10218 return (-1); 10219 } 10220 break; 10221 default: 10222 return (-1); 10223 } 10224 return (sizeof (int)); 10225 } 10226 10227 /* 10228 * We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements. 10229 * Parameters are assumed to be verified by the caller. 10230 */ 10231 /* ARGSUSED */ 10232 int 10233 tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name, 10234 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 10235 void *thisdg_attrs, cred_t *cr, mblk_t *mblk) 10236 { 10237 conn_t *connp = Q_TO_CONN(q); 10238 tcp_t *tcp = connp->conn_tcp; 10239 int *i1 = (int *)invalp; 10240 boolean_t onoff = (*i1 == 0) ? 0 : 1; 10241 boolean_t checkonly; 10242 int reterr; 10243 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 10244 10245 switch (optset_context) { 10246 case SETFN_OPTCOM_CHECKONLY: 10247 checkonly = B_TRUE; 10248 /* 10249 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ 10250 * inlen != 0 implies value supplied and 10251 * we have to "pretend" to set it. 10252 * inlen == 0 implies that there is no 10253 * value part in T_CHECK request and just validation 10254 * done elsewhere should be enough, we just return here. 
10255 */ 10256 if (inlen == 0) { 10257 *outlenp = 0; 10258 return (0); 10259 } 10260 break; 10261 case SETFN_OPTCOM_NEGOTIATE: 10262 checkonly = B_FALSE; 10263 break; 10264 case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */ 10265 case SETFN_CONN_NEGOTIATE: 10266 checkonly = B_FALSE; 10267 /* 10268 * Negotiating local and "association-related" options 10269 * from other (T_CONN_REQ, T_CONN_RES,T_UNITDATA_REQ) 10270 * primitives is allowed by XTI, but we choose 10271 * to not implement this style negotiation for Internet 10272 * protocols (We interpret it is a must for OSI world but 10273 * optional for Internet protocols) for all options. 10274 * [ Will do only for the few options that enable test 10275 * suites that our XTI implementation of this feature 10276 * works for transports that do allow it ] 10277 */ 10278 if (!tcp_allow_connopt_set(level, name)) { 10279 *outlenp = 0; 10280 return (EINVAL); 10281 } 10282 break; 10283 default: 10284 /* 10285 * We should never get here 10286 */ 10287 *outlenp = 0; 10288 return (EINVAL); 10289 } 10290 10291 ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) || 10292 (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0)); 10293 10294 /* 10295 * For TCP, we should have no ancillary data sent down 10296 * (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs 10297 * has to be zero. 10298 */ 10299 ASSERT(thisdg_attrs == NULL); 10300 10301 /* 10302 * For fixed length options, no sanity check 10303 * of passed in length is done. It is assumed *_optcom_req() 10304 * routines do the right thing. 10305 */ 10306 10307 switch (level) { 10308 case SOL_SOCKET: 10309 switch (name) { 10310 case SO_LINGER: { 10311 struct linger *lgr = (struct linger *)invalp; 10312 10313 if (!checkonly) { 10314 if (lgr->l_onoff) { 10315 tcp->tcp_linger = 1; 10316 tcp->tcp_lingertime = lgr->l_linger; 10317 } else { 10318 tcp->tcp_linger = 0; 10319 tcp->tcp_lingertime = 0; 10320 } 10321 /* struct copy */ 10322 *(struct linger *)outvalp = *lgr; 10323 } else { 10324 if (!lgr->l_onoff) { 10325 ((struct linger *) 10326 outvalp)->l_onoff = 0; 10327 ((struct linger *) 10328 outvalp)->l_linger = 0; 10329 } else { 10330 /* struct copy */ 10331 *(struct linger *)outvalp = *lgr; 10332 } 10333 } 10334 *outlenp = sizeof (struct linger); 10335 return (0); 10336 } 10337 case SO_DEBUG: 10338 if (!checkonly) 10339 tcp->tcp_debug = onoff; 10340 break; 10341 case SO_KEEPALIVE: 10342 if (checkonly) { 10343 /* T_CHECK case */ 10344 break; 10345 } 10346 10347 if (!onoff) { 10348 if (tcp->tcp_ka_enabled) { 10349 if (tcp->tcp_ka_tid != 0) { 10350 (void) TCP_TIMER_CANCEL(tcp, 10351 tcp->tcp_ka_tid); 10352 tcp->tcp_ka_tid = 0; 10353 } 10354 tcp->tcp_ka_enabled = 0; 10355 } 10356 break; 10357 } 10358 if (!tcp->tcp_ka_enabled) { 10359 /* Crank up the keepalive timer */ 10360 tcp->tcp_ka_last_intrvl = 0; 10361 tcp->tcp_ka_tid = TCP_TIMER(tcp, 10362 tcp_keepalive_killer, 10363 MSEC_TO_TICK(tcp->tcp_ka_interval)); 10364 tcp->tcp_ka_enabled = 1; 10365 } 10366 break; 10367 case SO_DONTROUTE: 10368 /* 10369 * SO_DONTROUTE, SO_USELOOPBACK, and SO_BROADCAST are 10370 * only of interest to IP. We track them here only so 10371 * that we can report their current value. 
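 *
 * For example, an application that does the following gets the same
 * value echoed back by tcp_opt_get(), while the actual routing
 * behaviour is applied by IP (sketch, error handling elided):
 *
 *	int on = 1;
 *	(void) setsockopt(fd, SOL_SOCKET, SO_DONTROUTE, &on,
 *	    sizeof (on));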
10372 */ 10373 if (!checkonly) { 10374 tcp->tcp_dontroute = onoff; 10375 tcp->tcp_connp->conn_dontroute = onoff; 10376 } 10377 break; 10378 case SO_USELOOPBACK: 10379 if (!checkonly) { 10380 tcp->tcp_useloopback = onoff; 10381 tcp->tcp_connp->conn_loopback = onoff; 10382 } 10383 break; 10384 case SO_BROADCAST: 10385 if (!checkonly) { 10386 tcp->tcp_broadcast = onoff; 10387 tcp->tcp_connp->conn_broadcast = onoff; 10388 } 10389 break; 10390 case SO_REUSEADDR: 10391 if (!checkonly) { 10392 tcp->tcp_reuseaddr = onoff; 10393 tcp->tcp_connp->conn_reuseaddr = onoff; 10394 } 10395 break; 10396 case SO_OOBINLINE: 10397 if (!checkonly) 10398 tcp->tcp_oobinline = onoff; 10399 break; 10400 case SO_DGRAM_ERRIND: 10401 if (!checkonly) 10402 tcp->tcp_dgram_errind = onoff; 10403 break; 10404 case SO_SNDBUF: { 10405 if (*i1 > tcps->tcps_max_buf) { 10406 *outlenp = 0; 10407 return (ENOBUFS); 10408 } 10409 if (checkonly) 10410 break; 10411 10412 tcp->tcp_xmit_hiwater = *i1; 10413 if (tcps->tcps_snd_lowat_fraction != 0) 10414 tcp->tcp_xmit_lowater = 10415 tcp->tcp_xmit_hiwater / 10416 tcps->tcps_snd_lowat_fraction; 10417 (void) tcp_maxpsz_set(tcp, B_TRUE); 10418 /* 10419 * If we are flow-controlled, recheck the condition. 10420 * There are apps that increase SO_SNDBUF size when 10421 * flow-controlled (EWOULDBLOCK), and expect the flow 10422 * control condition to be lifted right away. 10423 */ 10424 mutex_enter(&tcp->tcp_non_sq_lock); 10425 if (tcp->tcp_flow_stopped && 10426 TCP_UNSENT_BYTES(tcp) < tcp->tcp_xmit_hiwater) { 10427 tcp_clrqfull(tcp); 10428 } 10429 mutex_exit(&tcp->tcp_non_sq_lock); 10430 break; 10431 } 10432 case SO_RCVBUF: 10433 if (*i1 > tcps->tcps_max_buf) { 10434 *outlenp = 0; 10435 return (ENOBUFS); 10436 } 10437 /* Silently ignore zero */ 10438 if (!checkonly && *i1 != 0) { 10439 *i1 = MSS_ROUNDUP(*i1, tcp->tcp_mss); 10440 (void) tcp_rwnd_set(tcp, *i1); 10441 } 10442 /* 10443 * XXX should we return the rwnd here 10444 * and tcp_opt_get ? 10445 */ 10446 break; 10447 case SO_SND_COPYAVOID: 10448 if (!checkonly) { 10449 /* we only allow enable at most once for now */ 10450 if (tcp->tcp_loopback || 10451 (!tcp->tcp_snd_zcopy_aware && 10452 (onoff != 1 || !tcp_zcopy_check(tcp)))) { 10453 *outlenp = 0; 10454 return (EOPNOTSUPP); 10455 } 10456 tcp->tcp_snd_zcopy_aware = 1; 10457 } 10458 break; 10459 case SO_ALLZONES: 10460 /* Handled at the IP level */ 10461 return (-EINVAL); 10462 case SO_ANON_MLP: 10463 if (!checkonly) { 10464 mutex_enter(&connp->conn_lock); 10465 connp->conn_anon_mlp = onoff; 10466 mutex_exit(&connp->conn_lock); 10467 } 10468 break; 10469 case SO_MAC_EXEMPT: 10470 if (secpolicy_net_mac_aware(cr) != 0 || 10471 IPCL_IS_BOUND(connp)) 10472 return (EACCES); 10473 if (!checkonly) { 10474 mutex_enter(&connp->conn_lock); 10475 connp->conn_mac_exempt = onoff; 10476 mutex_exit(&connp->conn_lock); 10477 } 10478 break; 10479 case SO_EXCLBIND: 10480 if (!checkonly) 10481 tcp->tcp_exclbind = onoff; 10482 break; 10483 default: 10484 *outlenp = 0; 10485 return (EINVAL); 10486 } 10487 break; 10488 case IPPROTO_TCP: 10489 switch (name) { 10490 case TCP_NODELAY: 10491 if (!checkonly) 10492 tcp->tcp_naglim = *i1 ? 
1 : tcp->tcp_mss; 10493 break; 10494 case TCP_NOTIFY_THRESHOLD: 10495 if (!checkonly) 10496 tcp->tcp_first_timer_threshold = *i1; 10497 break; 10498 case TCP_ABORT_THRESHOLD: 10499 if (!checkonly) 10500 tcp->tcp_second_timer_threshold = *i1; 10501 break; 10502 case TCP_CONN_NOTIFY_THRESHOLD: 10503 if (!checkonly) 10504 tcp->tcp_first_ctimer_threshold = *i1; 10505 break; 10506 case TCP_CONN_ABORT_THRESHOLD: 10507 if (!checkonly) 10508 tcp->tcp_second_ctimer_threshold = *i1; 10509 break; 10510 case TCP_RECVDSTADDR: 10511 if (tcp->tcp_state > TCPS_LISTEN) 10512 return (EOPNOTSUPP); 10513 if (!checkonly) 10514 tcp->tcp_recvdstaddr = onoff; 10515 break; 10516 case TCP_ANONPRIVBIND: 10517 if ((reterr = secpolicy_net_privaddr(cr, 0, 10518 IPPROTO_TCP)) != 0) { 10519 *outlenp = 0; 10520 return (reterr); 10521 } 10522 if (!checkonly) { 10523 tcp->tcp_anon_priv_bind = onoff; 10524 } 10525 break; 10526 case TCP_EXCLBIND: 10527 if (!checkonly) 10528 tcp->tcp_exclbind = onoff; 10529 break; /* goto sizeof (int) option return */ 10530 case TCP_INIT_CWND: { 10531 uint32_t init_cwnd = *((uint32_t *)invalp); 10532 10533 if (checkonly) 10534 break; 10535 10536 /* 10537 * Only allow socket with network configuration 10538 * privilege to set the initial cwnd to be larger 10539 * than allowed by RFC 3390. 10540 */ 10541 if (init_cwnd <= MIN(4, MAX(2, 4380 / tcp->tcp_mss))) { 10542 tcp->tcp_init_cwnd = init_cwnd; 10543 break; 10544 } 10545 if ((reterr = secpolicy_ip_config(cr, B_TRUE)) != 0) { 10546 *outlenp = 0; 10547 return (reterr); 10548 } 10549 if (init_cwnd > TCP_MAX_INIT_CWND) { 10550 *outlenp = 0; 10551 return (EINVAL); 10552 } 10553 tcp->tcp_init_cwnd = init_cwnd; 10554 break; 10555 } 10556 case TCP_KEEPALIVE_THRESHOLD: 10557 if (checkonly) 10558 break; 10559 10560 if (*i1 < tcps->tcps_keepalive_interval_low || 10561 *i1 > tcps->tcps_keepalive_interval_high) { 10562 *outlenp = 0; 10563 return (EINVAL); 10564 } 10565 if (*i1 != tcp->tcp_ka_interval) { 10566 tcp->tcp_ka_interval = *i1; 10567 /* 10568 * Check if we need to restart the 10569 * keepalive timer. 10570 */ 10571 if (tcp->tcp_ka_tid != 0) { 10572 ASSERT(tcp->tcp_ka_enabled); 10573 (void) TCP_TIMER_CANCEL(tcp, 10574 tcp->tcp_ka_tid); 10575 tcp->tcp_ka_last_intrvl = 0; 10576 tcp->tcp_ka_tid = TCP_TIMER(tcp, 10577 tcp_keepalive_killer, 10578 MSEC_TO_TICK(tcp->tcp_ka_interval)); 10579 } 10580 } 10581 break; 10582 case TCP_KEEPALIVE_ABORT_THRESHOLD: 10583 if (!checkonly) { 10584 if (*i1 < 10585 tcps->tcps_keepalive_abort_interval_low || 10586 *i1 > 10587 tcps->tcps_keepalive_abort_interval_high) { 10588 *outlenp = 0; 10589 return (EINVAL); 10590 } 10591 tcp->tcp_ka_abort_thres = *i1; 10592 } 10593 break; 10594 case TCP_CORK: 10595 if (!checkonly) { 10596 /* 10597 * if tcp->tcp_cork was set and is now 10598 * being unset, we have to make sure that 10599 * the remaining data gets sent out. 
Also 10600 * unset tcp->tcp_cork so that tcp_wput_data() 10601 * can send data even if it is less than mss 10602 */ 10603 if (tcp->tcp_cork && onoff == 0 && 10604 tcp->tcp_unsent > 0) { 10605 tcp->tcp_cork = B_FALSE; 10606 tcp_wput_data(tcp, NULL, B_FALSE); 10607 } 10608 tcp->tcp_cork = onoff; 10609 } 10610 break; 10611 default: 10612 *outlenp = 0; 10613 return (EINVAL); 10614 } 10615 break; 10616 case IPPROTO_IP: 10617 if (tcp->tcp_family != AF_INET) { 10618 *outlenp = 0; 10619 return (ENOPROTOOPT); 10620 } 10621 switch (name) { 10622 case IP_OPTIONS: 10623 case T_IP_OPTIONS: 10624 reterr = tcp_opt_set_header(tcp, checkonly, 10625 invalp, inlen); 10626 if (reterr) { 10627 *outlenp = 0; 10628 return (reterr); 10629 } 10630 /* OK return - copy input buffer into output buffer */ 10631 if (invalp != outvalp) { 10632 /* don't trust bcopy for identical src/dst */ 10633 bcopy(invalp, outvalp, inlen); 10634 } 10635 *outlenp = inlen; 10636 return (0); 10637 case IP_TOS: 10638 case T_IP_TOS: 10639 if (!checkonly) { 10640 tcp->tcp_ipha->ipha_type_of_service = 10641 (uchar_t)*i1; 10642 tcp->tcp_tos = (uchar_t)*i1; 10643 } 10644 break; 10645 case IP_TTL: 10646 if (!checkonly) { 10647 tcp->tcp_ipha->ipha_ttl = (uchar_t)*i1; 10648 tcp->tcp_ttl = (uchar_t)*i1; 10649 } 10650 break; 10651 case IP_BOUND_IF: 10652 case IP_NEXTHOP: 10653 /* Handled at the IP level */ 10654 return (-EINVAL); 10655 case IP_SEC_OPT: 10656 /* 10657 * We should not allow policy setting after 10658 * we start listening for connections. 10659 */ 10660 if (tcp->tcp_state == TCPS_LISTEN) { 10661 return (EINVAL); 10662 } else { 10663 /* Handled at the IP level */ 10664 return (-EINVAL); 10665 } 10666 default: 10667 *outlenp = 0; 10668 return (EINVAL); 10669 } 10670 break; 10671 case IPPROTO_IPV6: { 10672 ip6_pkt_t *ipp; 10673 10674 /* 10675 * IPPROTO_IPV6 options are only supported for sockets 10676 * that are using IPv6 on the wire. 10677 */ 10678 if (tcp->tcp_ipversion != IPV6_VERSION) { 10679 *outlenp = 0; 10680 return (ENOPROTOOPT); 10681 } 10682 /* 10683 * Only sticky options; no ancillary data 10684 */ 10685 ASSERT(thisdg_attrs == NULL); 10686 ipp = &tcp->tcp_sticky_ipp; 10687 10688 switch (name) { 10689 case IPV6_UNICAST_HOPS: 10690 /* -1 means use default */ 10691 if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) { 10692 *outlenp = 0; 10693 return (EINVAL); 10694 } 10695 if (!checkonly) { 10696 if (*i1 == -1) { 10697 tcp->tcp_ip6h->ip6_hops = 10698 ipp->ipp_unicast_hops = 10699 (uint8_t)tcps->tcps_ipv6_hoplimit; 10700 ipp->ipp_fields &= ~IPPF_UNICAST_HOPS; 10701 /* Pass modified value to IP. 
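 *
 * A value of -1 selects the stack default; the assignment below
 * rewrites *i1 so that IP sees the resolved hop limit rather than -1.
 * User-level sketch:
 *
 *	int hops = -1;	(use tcps_ipv6_hoplimit)
 *	(void) setsockopt(fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS,
 *	    &hops, sizeof (hops));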
*/ 10702 *i1 = tcp->tcp_ip6h->ip6_hops; 10703 } else { 10704 tcp->tcp_ip6h->ip6_hops = 10705 ipp->ipp_unicast_hops = 10706 (uint8_t)*i1; 10707 ipp->ipp_fields |= IPPF_UNICAST_HOPS; 10708 } 10709 reterr = tcp_build_hdrs(q, tcp); 10710 if (reterr != 0) 10711 return (reterr); 10712 } 10713 break; 10714 case IPV6_BOUND_IF: 10715 if (!checkonly) { 10716 int error = 0; 10717 10718 tcp->tcp_bound_if = *i1; 10719 error = ip_opt_set_ill(tcp->tcp_connp, *i1, 10720 B_TRUE, checkonly, level, name, mblk); 10721 if (error != 0) { 10722 *outlenp = 0; 10723 return (error); 10724 } 10725 } 10726 break; 10727 /* 10728 * Set boolean switches for ancillary data delivery 10729 */ 10730 case IPV6_RECVPKTINFO: 10731 if (!checkonly) { 10732 if (onoff) 10733 tcp->tcp_ipv6_recvancillary |= 10734 TCP_IPV6_RECVPKTINFO; 10735 else 10736 tcp->tcp_ipv6_recvancillary &= 10737 ~TCP_IPV6_RECVPKTINFO; 10738 /* Force it to be sent up with the next msg */ 10739 tcp->tcp_recvifindex = 0; 10740 } 10741 break; 10742 case IPV6_RECVTCLASS: 10743 if (!checkonly) { 10744 if (onoff) 10745 tcp->tcp_ipv6_recvancillary |= 10746 TCP_IPV6_RECVTCLASS; 10747 else 10748 tcp->tcp_ipv6_recvancillary &= 10749 ~TCP_IPV6_RECVTCLASS; 10750 } 10751 break; 10752 case IPV6_RECVHOPLIMIT: 10753 if (!checkonly) { 10754 if (onoff) 10755 tcp->tcp_ipv6_recvancillary |= 10756 TCP_IPV6_RECVHOPLIMIT; 10757 else 10758 tcp->tcp_ipv6_recvancillary &= 10759 ~TCP_IPV6_RECVHOPLIMIT; 10760 /* Force it to be sent up with the next msg */ 10761 tcp->tcp_recvhops = 0xffffffffU; 10762 } 10763 break; 10764 case IPV6_RECVHOPOPTS: 10765 if (!checkonly) { 10766 if (onoff) 10767 tcp->tcp_ipv6_recvancillary |= 10768 TCP_IPV6_RECVHOPOPTS; 10769 else 10770 tcp->tcp_ipv6_recvancillary &= 10771 ~TCP_IPV6_RECVHOPOPTS; 10772 } 10773 break; 10774 case IPV6_RECVDSTOPTS: 10775 if (!checkonly) { 10776 if (onoff) 10777 tcp->tcp_ipv6_recvancillary |= 10778 TCP_IPV6_RECVDSTOPTS; 10779 else 10780 tcp->tcp_ipv6_recvancillary &= 10781 ~TCP_IPV6_RECVDSTOPTS; 10782 } 10783 break; 10784 case _OLD_IPV6_RECVDSTOPTS: 10785 if (!checkonly) { 10786 if (onoff) 10787 tcp->tcp_ipv6_recvancillary |= 10788 TCP_OLD_IPV6_RECVDSTOPTS; 10789 else 10790 tcp->tcp_ipv6_recvancillary &= 10791 ~TCP_OLD_IPV6_RECVDSTOPTS; 10792 } 10793 break; 10794 case IPV6_RECVRTHDR: 10795 if (!checkonly) { 10796 if (onoff) 10797 tcp->tcp_ipv6_recvancillary |= 10798 TCP_IPV6_RECVRTHDR; 10799 else 10800 tcp->tcp_ipv6_recvancillary &= 10801 ~TCP_IPV6_RECVRTHDR; 10802 } 10803 break; 10804 case IPV6_RECVRTHDRDSTOPTS: 10805 if (!checkonly) { 10806 if (onoff) 10807 tcp->tcp_ipv6_recvancillary |= 10808 TCP_IPV6_RECVRTDSTOPTS; 10809 else 10810 tcp->tcp_ipv6_recvancillary &= 10811 ~TCP_IPV6_RECVRTDSTOPTS; 10812 } 10813 break; 10814 case IPV6_PKTINFO: 10815 if (inlen != 0 && inlen != sizeof (struct in6_pktinfo)) 10816 return (EINVAL); 10817 if (checkonly) 10818 break; 10819 10820 if (inlen == 0) { 10821 ipp->ipp_fields &= ~(IPPF_IFINDEX|IPPF_ADDR); 10822 } else { 10823 struct in6_pktinfo *pkti; 10824 10825 pkti = (struct in6_pktinfo *)invalp; 10826 /* 10827 * RFC 3542 states that ipi6_addr must be 10828 * the unspecified address when setting the 10829 * IPV6_PKTINFO sticky socket option on a 10830 * TCP socket. 10831 */ 10832 if (!IN6_IS_ADDR_UNSPECIFIED(&pkti->ipi6_addr)) 10833 return (EINVAL); 10834 /* 10835 * ip6_set_pktinfo() validates the source 10836 * address and interface index. 
10837 */ 10838 reterr = ip6_set_pktinfo(cr, tcp->tcp_connp, 10839 pkti, mblk); 10840 if (reterr != 0) 10841 return (reterr); 10842 ipp->ipp_ifindex = pkti->ipi6_ifindex; 10843 ipp->ipp_addr = pkti->ipi6_addr; 10844 if (ipp->ipp_ifindex != 0) 10845 ipp->ipp_fields |= IPPF_IFINDEX; 10846 else 10847 ipp->ipp_fields &= ~IPPF_IFINDEX; 10848 if (!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)) 10849 ipp->ipp_fields |= IPPF_ADDR; 10850 else 10851 ipp->ipp_fields &= ~IPPF_ADDR; 10852 } 10853 reterr = tcp_build_hdrs(q, tcp); 10854 if (reterr != 0) 10855 return (reterr); 10856 break; 10857 case IPV6_TCLASS: 10858 if (inlen != 0 && inlen != sizeof (int)) 10859 return (EINVAL); 10860 if (checkonly) 10861 break; 10862 10863 if (inlen == 0) { 10864 ipp->ipp_fields &= ~IPPF_TCLASS; 10865 } else { 10866 if (*i1 > 255 || *i1 < -1) 10867 return (EINVAL); 10868 if (*i1 == -1) { 10869 ipp->ipp_tclass = 0; 10870 *i1 = 0; 10871 } else { 10872 ipp->ipp_tclass = *i1; 10873 } 10874 ipp->ipp_fields |= IPPF_TCLASS; 10875 } 10876 reterr = tcp_build_hdrs(q, tcp); 10877 if (reterr != 0) 10878 return (reterr); 10879 break; 10880 case IPV6_NEXTHOP: 10881 /* 10882 * IP will verify that the nexthop is reachable 10883 * and fail for sticky options. 10884 */ 10885 if (inlen != 0 && inlen != sizeof (sin6_t)) 10886 return (EINVAL); 10887 if (checkonly) 10888 break; 10889 10890 if (inlen == 0) { 10891 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10892 } else { 10893 sin6_t *sin6 = (sin6_t *)invalp; 10894 10895 if (sin6->sin6_family != AF_INET6) 10896 return (EAFNOSUPPORT); 10897 if (IN6_IS_ADDR_V4MAPPED( 10898 &sin6->sin6_addr)) 10899 return (EADDRNOTAVAIL); 10900 ipp->ipp_nexthop = sin6->sin6_addr; 10901 if (!IN6_IS_ADDR_UNSPECIFIED( 10902 &ipp->ipp_nexthop)) 10903 ipp->ipp_fields |= IPPF_NEXTHOP; 10904 else 10905 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10906 } 10907 reterr = tcp_build_hdrs(q, tcp); 10908 if (reterr != 0) 10909 return (reterr); 10910 break; 10911 case IPV6_HOPOPTS: { 10912 ip6_hbh_t *hopts = (ip6_hbh_t *)invalp; 10913 10914 /* 10915 * Sanity checks - minimum size, size a multiple of 10916 * eight bytes, and matching size passed in. 10917 */ 10918 if (inlen != 0 && 10919 inlen != (8 * (hopts->ip6h_len + 1))) 10920 return (EINVAL); 10921 10922 if (checkonly) 10923 break; 10924 10925 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10926 (uchar_t **)&ipp->ipp_hopopts, 10927 &ipp->ipp_hopoptslen, tcp->tcp_label_len); 10928 if (reterr != 0) 10929 return (reterr); 10930 if (ipp->ipp_hopoptslen == 0) 10931 ipp->ipp_fields &= ~IPPF_HOPOPTS; 10932 else 10933 ipp->ipp_fields |= IPPF_HOPOPTS; 10934 reterr = tcp_build_hdrs(q, tcp); 10935 if (reterr != 0) 10936 return (reterr); 10937 break; 10938 } 10939 case IPV6_RTHDRDSTOPTS: { 10940 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10941 10942 /* 10943 * Sanity checks - minimum size, size a multiple of 10944 * eight bytes, and matching size passed in. 
10945 */ 10946 if (inlen != 0 && 10947 inlen != (8 * (dopts->ip6d_len + 1))) 10948 return (EINVAL); 10949 10950 if (checkonly) 10951 break; 10952 10953 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10954 (uchar_t **)&ipp->ipp_rtdstopts, 10955 &ipp->ipp_rtdstoptslen, 0); 10956 if (reterr != 0) 10957 return (reterr); 10958 if (ipp->ipp_rtdstoptslen == 0) 10959 ipp->ipp_fields &= ~IPPF_RTDSTOPTS; 10960 else 10961 ipp->ipp_fields |= IPPF_RTDSTOPTS; 10962 reterr = tcp_build_hdrs(q, tcp); 10963 if (reterr != 0) 10964 return (reterr); 10965 break; 10966 } 10967 case IPV6_DSTOPTS: { 10968 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10969 10970 /* 10971 * Sanity checks - minimum size, size a multiple of 10972 * eight bytes, and matching size passed in. 10973 */ 10974 if (inlen != 0 && 10975 inlen != (8 * (dopts->ip6d_len + 1))) 10976 return (EINVAL); 10977 10978 if (checkonly) 10979 break; 10980 10981 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10982 (uchar_t **)&ipp->ipp_dstopts, 10983 &ipp->ipp_dstoptslen, 0); 10984 if (reterr != 0) 10985 return (reterr); 10986 if (ipp->ipp_dstoptslen == 0) 10987 ipp->ipp_fields &= ~IPPF_DSTOPTS; 10988 else 10989 ipp->ipp_fields |= IPPF_DSTOPTS; 10990 reterr = tcp_build_hdrs(q, tcp); 10991 if (reterr != 0) 10992 return (reterr); 10993 break; 10994 } 10995 case IPV6_RTHDR: { 10996 ip6_rthdr_t *rt = (ip6_rthdr_t *)invalp; 10997 10998 /* 10999 * Sanity checks - minimum size, size a multiple of 11000 * eight bytes, and matching size passed in. 11001 */ 11002 if (inlen != 0 && 11003 inlen != (8 * (rt->ip6r_len + 1))) 11004 return (EINVAL); 11005 11006 if (checkonly) 11007 break; 11008 11009 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 11010 (uchar_t **)&ipp->ipp_rthdr, 11011 &ipp->ipp_rthdrlen, 0); 11012 if (reterr != 0) 11013 return (reterr); 11014 if (ipp->ipp_rthdrlen == 0) 11015 ipp->ipp_fields &= ~IPPF_RTHDR; 11016 else 11017 ipp->ipp_fields |= IPPF_RTHDR; 11018 reterr = tcp_build_hdrs(q, tcp); 11019 if (reterr != 0) 11020 return (reterr); 11021 break; 11022 } 11023 case IPV6_V6ONLY: 11024 if (!checkonly) 11025 tcp->tcp_connp->conn_ipv6_v6only = onoff; 11026 break; 11027 case IPV6_USE_MIN_MTU: 11028 if (inlen != sizeof (int)) 11029 return (EINVAL); 11030 11031 if (*i1 < -1 || *i1 > 1) 11032 return (EINVAL); 11033 11034 if (checkonly) 11035 break; 11036 11037 ipp->ipp_fields |= IPPF_USE_MIN_MTU; 11038 ipp->ipp_use_min_mtu = *i1; 11039 break; 11040 case IPV6_BOUND_PIF: 11041 /* Handled at the IP level */ 11042 return (-EINVAL); 11043 case IPV6_SEC_OPT: 11044 /* 11045 * We should not allow policy setting after 11046 * we start listening for connections. 
11047 */ 11048 if (tcp->tcp_state == TCPS_LISTEN) { 11049 return (EINVAL); 11050 } else { 11051 /* Handled at the IP level */ 11052 return (-EINVAL); 11053 } 11054 case IPV6_SRC_PREFERENCES: 11055 if (inlen != sizeof (uint32_t)) 11056 return (EINVAL); 11057 reterr = ip6_set_src_preferences(tcp->tcp_connp, 11058 *(uint32_t *)invalp); 11059 if (reterr != 0) { 11060 *outlenp = 0; 11061 return (reterr); 11062 } 11063 break; 11064 default: 11065 *outlenp = 0; 11066 return (EINVAL); 11067 } 11068 break; 11069 } /* end IPPROTO_IPV6 */ 11070 default: 11071 *outlenp = 0; 11072 return (EINVAL); 11073 } 11074 /* 11075 * Common case of OK return with outval same as inval 11076 */ 11077 if (invalp != outvalp) { 11078 /* don't trust bcopy for identical src/dst */ 11079 (void) bcopy(invalp, outvalp, inlen); 11080 } 11081 *outlenp = inlen; 11082 return (0); 11083 } 11084 11085 /* 11086 * Update tcp_sticky_hdrs based on tcp_sticky_ipp. 11087 * The headers include ip6i_t (if needed), ip6_t, any sticky extension 11088 * headers, and the maximum size tcp header (to avoid reallocation 11089 * on the fly for additional tcp options). 11090 * Returns failure if can't allocate memory. 11091 */ 11092 static int 11093 tcp_build_hdrs(queue_t *q, tcp_t *tcp) 11094 { 11095 char *hdrs; 11096 uint_t hdrs_len; 11097 ip6i_t *ip6i; 11098 char buf[TCP_MAX_HDR_LENGTH]; 11099 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 11100 in6_addr_t src, dst; 11101 tcp_stack_t *tcps = tcp->tcp_tcps; 11102 11103 /* 11104 * save the existing tcp header and source/dest IP addresses 11105 */ 11106 bcopy(tcp->tcp_tcph, buf, tcp->tcp_tcp_hdr_len); 11107 src = tcp->tcp_ip6h->ip6_src; 11108 dst = tcp->tcp_ip6h->ip6_dst; 11109 hdrs_len = ip_total_hdrs_len_v6(ipp) + TCP_MAX_HDR_LENGTH; 11110 ASSERT(hdrs_len != 0); 11111 if (hdrs_len > tcp->tcp_iphc_len) { 11112 /* Need to reallocate */ 11113 hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP); 11114 if (hdrs == NULL) 11115 return (ENOMEM); 11116 if (tcp->tcp_iphc != NULL) { 11117 if (tcp->tcp_hdr_grown) { 11118 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 11119 } else { 11120 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 11121 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 11122 } 11123 tcp->tcp_iphc_len = 0; 11124 } 11125 ASSERT(tcp->tcp_iphc_len == 0); 11126 tcp->tcp_iphc = hdrs; 11127 tcp->tcp_iphc_len = hdrs_len; 11128 tcp->tcp_hdr_grown = B_TRUE; 11129 } 11130 ip_build_hdrs_v6((uchar_t *)tcp->tcp_iphc, 11131 hdrs_len - TCP_MAX_HDR_LENGTH, ipp, IPPROTO_TCP); 11132 11133 /* Set header fields not in ipp */ 11134 if (ipp->ipp_fields & IPPF_HAS_IP6I) { 11135 ip6i = (ip6i_t *)tcp->tcp_iphc; 11136 tcp->tcp_ip6h = (ip6_t *)&ip6i[1]; 11137 } else { 11138 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 11139 } 11140 /* 11141 * tcp->tcp_ip_hdr_len will include ip6i_t if there is one. 11142 * 11143 * tcp->tcp_tcp_hdr_len doesn't change here. 11144 */ 11145 tcp->tcp_ip_hdr_len = hdrs_len - TCP_MAX_HDR_LENGTH; 11146 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + tcp->tcp_ip_hdr_len); 11147 tcp->tcp_hdr_len = tcp->tcp_ip_hdr_len + tcp->tcp_tcp_hdr_len; 11148 11149 bcopy(buf, tcp->tcp_tcph, tcp->tcp_tcp_hdr_len); 11150 11151 tcp->tcp_ip6h->ip6_src = src; 11152 tcp->tcp_ip6h->ip6_dst = dst; 11153 11154 /* 11155 * If the hop limit was not set by ip_build_hdrs_v6(), set it to 11156 * the default value for TCP. 
 */
    if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS))
        tcp->tcp_ip6h->ip6_hops = tcps->tcps_ipv6_hoplimit;

    /*
     * If we're setting extension headers after a connection
     * has been established, and if we have a routing header
     * among the extension headers, call ip_massage_options_v6 to
     * manipulate the routing header/ip6_dst and set the checksum
     * difference in the tcp header template.
     * (This happens in tcp_connect_ipv6 if the routing header
     * is set prior to the connect.)
     * Set the tcp_sum to zero first in case we've cleared a
     * routing header or don't have one at all.
     */
    tcp->tcp_sum = 0;
    if ((tcp->tcp_state >= TCPS_SYN_SENT) &&
        (tcp->tcp_ipp_fields & IPPF_RTHDR)) {
        ip6_rthdr_t *rth = ip_find_rthdr_v6(tcp->tcp_ip6h,
            (uint8_t *)tcp->tcp_tcph);
        if (rth != NULL) {
            tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h,
                rth, tcps->tcps_netstack);
            tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) +
                (tcp->tcp_sum >> 16));
        }
    }

    /* Try to get everything in a single mblk */
    (void) mi_set_sth_wroff(RD(q), hdrs_len + tcps->tcps_wroff_xtra);
    return (0);
}

/*
 * Transfer any source route option from ipha to buf/dst in reversed form.
 */
static int
tcp_opt_rev_src_route(ipha_t *ipha, char *buf, uchar_t *dst)
{
    ipoptp_t opts;
    uchar_t *opt;
    uint8_t optval;
    uint8_t optlen;
    uint32_t len = 0;

    for (optval = ipoptp_first(&opts, ipha);
        optval != IPOPT_EOL;
        optval = ipoptp_next(&opts)) {
        opt = opts.ipoptp_cur;
        optlen = opts.ipoptp_len;
        switch (optval) {
            int off1, off2;
        case IPOPT_SSRR:
        case IPOPT_LSRR:

            /* Reverse source route */
            /*
             * First entry should be the next to last one in the
             * current source route (the last entry is our
             * address).
             * The last entry should be the final destination.
             */
            buf[IPOPT_OPTVAL] = (uint8_t)optval;
            buf[IPOPT_OLEN] = (uint8_t)optlen;
            off1 = IPOPT_MINOFF_SR - 1;
            off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
            if (off2 < 0) {
                /* No entries in source route */
                break;
            }
            bcopy(opt + off2, dst, IP_ADDR_LEN);
            /*
             * Note: use src since ipha has not had its src
             * and dst reversed (it is in the state in which
             * it was received).
             */
            bcopy(&ipha->ipha_src, buf + off2,
                IP_ADDR_LEN);
            off2 -= IP_ADDR_LEN;

            while (off2 > 0) {
                bcopy(opt + off2, buf + off1,
                    IP_ADDR_LEN);
                off1 += IP_ADDR_LEN;
                off2 -= IP_ADDR_LEN;
            }
            buf[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
            buf += optlen;
            len += optlen;
            break;
        }
    }
done:
    /* Pad the resulting options */
    while (len & 0x3) {
        *buf++ = IPOPT_EOL;
        len++;
    }
    return (len);
}


/*
 * Extract and reverse a source route from ipha (if any)
 * and then update the relevant fields in both tcp_t and the standard header.
11262 */ 11263 static void 11264 tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha) 11265 { 11266 char buf[TCP_MAX_HDR_LENGTH]; 11267 uint_t tcph_len; 11268 int len; 11269 11270 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 11271 len = IPH_HDR_LENGTH(ipha); 11272 if (len == IP_SIMPLE_HDR_LENGTH) 11273 /* Nothing to do */ 11274 return; 11275 if (len > IP_SIMPLE_HDR_LENGTH + TCP_MAX_IP_OPTIONS_LENGTH || 11276 (len & 0x3)) 11277 return; 11278 11279 tcph_len = tcp->tcp_tcp_hdr_len; 11280 bcopy(tcp->tcp_tcph, buf, tcph_len); 11281 tcp->tcp_sum = (tcp->tcp_ipha->ipha_dst >> 16) + 11282 (tcp->tcp_ipha->ipha_dst & 0xffff); 11283 len = tcp_opt_rev_src_route(ipha, (char *)tcp->tcp_ipha + 11284 IP_SIMPLE_HDR_LENGTH, (uchar_t *)&tcp->tcp_ipha->ipha_dst); 11285 len += IP_SIMPLE_HDR_LENGTH; 11286 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 11287 (tcp->tcp_ipha->ipha_dst & 0xffff)); 11288 if ((int)tcp->tcp_sum < 0) 11289 tcp->tcp_sum--; 11290 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 11291 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16)); 11292 tcp->tcp_tcph = (tcph_t *)((char *)tcp->tcp_ipha + len); 11293 bcopy(buf, tcp->tcp_tcph, tcph_len); 11294 tcp->tcp_ip_hdr_len = len; 11295 tcp->tcp_ipha->ipha_version_and_hdr_length = 11296 (IP_VERSION << 4) | (len >> 2); 11297 len += tcph_len; 11298 tcp->tcp_hdr_len = len; 11299 } 11300 11301 /* 11302 * Copy the standard header into its new location, 11303 * lay in the new options and then update the relevant 11304 * fields in both tcp_t and the standard header. 11305 */ 11306 static int 11307 tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly, uchar_t *ptr, uint_t len) 11308 { 11309 uint_t tcph_len; 11310 uint8_t *ip_optp; 11311 tcph_t *new_tcph; 11312 tcp_stack_t *tcps = tcp->tcp_tcps; 11313 11314 if ((len > TCP_MAX_IP_OPTIONS_LENGTH) || (len & 0x3)) 11315 return (EINVAL); 11316 11317 if (len > IP_MAX_OPT_LENGTH - tcp->tcp_label_len) 11318 return (EINVAL); 11319 11320 if (checkonly) { 11321 /* 11322 * do not really set, just pretend to - T_CHECK 11323 */ 11324 return (0); 11325 } 11326 11327 ip_optp = (uint8_t *)tcp->tcp_ipha + IP_SIMPLE_HDR_LENGTH; 11328 if (tcp->tcp_label_len > 0) { 11329 int padlen; 11330 uint8_t opt; 11331 11332 /* convert list termination to no-ops */ 11333 padlen = tcp->tcp_label_len - ip_optp[IPOPT_OLEN]; 11334 ip_optp += ip_optp[IPOPT_OLEN]; 11335 opt = len > 0 ? IPOPT_NOP : IPOPT_EOL; 11336 while (--padlen >= 0) 11337 *ip_optp++ = opt; 11338 } 11339 tcph_len = tcp->tcp_tcp_hdr_len; 11340 new_tcph = (tcph_t *)(ip_optp + len); 11341 ovbcopy(tcp->tcp_tcph, new_tcph, tcph_len); 11342 tcp->tcp_tcph = new_tcph; 11343 bcopy(ptr, ip_optp, len); 11344 11345 len += IP_SIMPLE_HDR_LENGTH + tcp->tcp_label_len; 11346 11347 tcp->tcp_ip_hdr_len = len; 11348 tcp->tcp_ipha->ipha_version_and_hdr_length = 11349 (IP_VERSION << 4) | (len >> 2); 11350 tcp->tcp_hdr_len = len + tcph_len; 11351 if (!TCP_IS_DETACHED(tcp)) { 11352 /* Always allocate room for all options. 
*/ 11353 (void) mi_set_sth_wroff(tcp->tcp_rq, 11354 TCP_MAX_COMBINED_HEADER_LENGTH + tcps->tcps_wroff_xtra); 11355 } 11356 return (0); 11357 } 11358 11359 /* Get callback routine passed to nd_load by tcp_param_register */ 11360 /* ARGSUSED */ 11361 static int 11362 tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 11363 { 11364 tcpparam_t *tcppa = (tcpparam_t *)cp; 11365 11366 (void) mi_mpprintf(mp, "%u", tcppa->tcp_param_val); 11367 return (0); 11368 } 11369 11370 /* 11371 * Walk through the param array specified registering each element with the 11372 * named dispatch handler. 11373 */ 11374 static boolean_t 11375 tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt, tcp_stack_t *tcps) 11376 { 11377 for (; cnt-- > 0; tcppa++) { 11378 if (tcppa->tcp_param_name && tcppa->tcp_param_name[0]) { 11379 if (!nd_load(ndp, tcppa->tcp_param_name, 11380 tcp_param_get, tcp_param_set, 11381 (caddr_t)tcppa)) { 11382 nd_free(ndp); 11383 return (B_FALSE); 11384 } 11385 } 11386 } 11387 tcps->tcps_wroff_xtra_param = kmem_zalloc(sizeof (tcpparam_t), 11388 KM_SLEEP); 11389 bcopy(&lcl_tcp_wroff_xtra_param, tcps->tcps_wroff_xtra_param, 11390 sizeof (tcpparam_t)); 11391 if (!nd_load(ndp, tcps->tcps_wroff_xtra_param->tcp_param_name, 11392 tcp_param_get, tcp_param_set_aligned, 11393 (caddr_t)tcps->tcps_wroff_xtra_param)) { 11394 nd_free(ndp); 11395 return (B_FALSE); 11396 } 11397 tcps->tcps_mdt_head_param = kmem_zalloc(sizeof (tcpparam_t), 11398 KM_SLEEP); 11399 bcopy(&lcl_tcp_mdt_head_param, tcps->tcps_mdt_head_param, 11400 sizeof (tcpparam_t)); 11401 if (!nd_load(ndp, tcps->tcps_mdt_head_param->tcp_param_name, 11402 tcp_param_get, tcp_param_set_aligned, 11403 (caddr_t)tcps->tcps_mdt_head_param)) { 11404 nd_free(ndp); 11405 return (B_FALSE); 11406 } 11407 tcps->tcps_mdt_tail_param = kmem_zalloc(sizeof (tcpparam_t), 11408 KM_SLEEP); 11409 bcopy(&lcl_tcp_mdt_tail_param, tcps->tcps_mdt_tail_param, 11410 sizeof (tcpparam_t)); 11411 if (!nd_load(ndp, tcps->tcps_mdt_tail_param->tcp_param_name, 11412 tcp_param_get, tcp_param_set_aligned, 11413 (caddr_t)tcps->tcps_mdt_tail_param)) { 11414 nd_free(ndp); 11415 return (B_FALSE); 11416 } 11417 tcps->tcps_mdt_max_pbufs_param = kmem_zalloc(sizeof (tcpparam_t), 11418 KM_SLEEP); 11419 bcopy(&lcl_tcp_mdt_max_pbufs_param, tcps->tcps_mdt_max_pbufs_param, 11420 sizeof (tcpparam_t)); 11421 if (!nd_load(ndp, tcps->tcps_mdt_max_pbufs_param->tcp_param_name, 11422 tcp_param_get, tcp_param_set_aligned, 11423 (caddr_t)tcps->tcps_mdt_max_pbufs_param)) { 11424 nd_free(ndp); 11425 return (B_FALSE); 11426 } 11427 if (!nd_load(ndp, "tcp_extra_priv_ports", 11428 tcp_extra_priv_ports_get, NULL, NULL)) { 11429 nd_free(ndp); 11430 return (B_FALSE); 11431 } 11432 if (!nd_load(ndp, "tcp_extra_priv_ports_add", 11433 NULL, tcp_extra_priv_ports_add, NULL)) { 11434 nd_free(ndp); 11435 return (B_FALSE); 11436 } 11437 if (!nd_load(ndp, "tcp_extra_priv_ports_del", 11438 NULL, tcp_extra_priv_ports_del, NULL)) { 11439 nd_free(ndp); 11440 return (B_FALSE); 11441 } 11442 if (!nd_load(ndp, "tcp_status", tcp_status_report, NULL, 11443 NULL)) { 11444 nd_free(ndp); 11445 return (B_FALSE); 11446 } 11447 if (!nd_load(ndp, "tcp_bind_hash", tcp_bind_hash_report, 11448 NULL, NULL)) { 11449 nd_free(ndp); 11450 return (B_FALSE); 11451 } 11452 if (!nd_load(ndp, "tcp_listen_hash", 11453 tcp_listen_hash_report, NULL, NULL)) { 11454 nd_free(ndp); 11455 return (B_FALSE); 11456 } 11457 if (!nd_load(ndp, "tcp_conn_hash", tcp_conn_hash_report, 11458 NULL, NULL)) { 11459 nd_free(ndp); 11460 return (B_FALSE); 11461 
} 11462 if (!nd_load(ndp, "tcp_acceptor_hash", 11463 tcp_acceptor_hash_report, NULL, NULL)) { 11464 nd_free(ndp); 11465 return (B_FALSE); 11466 } 11467 if (!nd_load(ndp, "tcp_host_param", tcp_host_param_report, 11468 tcp_host_param_set, NULL)) { 11469 nd_free(ndp); 11470 return (B_FALSE); 11471 } 11472 if (!nd_load(ndp, "tcp_host_param_ipv6", 11473 tcp_host_param_report, tcp_host_param_set_ipv6, NULL)) { 11474 nd_free(ndp); 11475 return (B_FALSE); 11476 } 11477 if (!nd_load(ndp, "tcp_1948_phrase", NULL, 11478 tcp_1948_phrase_set, NULL)) { 11479 nd_free(ndp); 11480 return (B_FALSE); 11481 } 11482 if (!nd_load(ndp, "tcp_reserved_port_list", 11483 tcp_reserved_port_list, NULL, NULL)) { 11484 nd_free(ndp); 11485 return (B_FALSE); 11486 } 11487 /* 11488 * Dummy ndd variables - only to convey obsolescence information 11489 * through printing of their name (no get or set routines) 11490 * XXX Remove in future releases ? 11491 */ 11492 if (!nd_load(ndp, 11493 "tcp_close_wait_interval(obsoleted - " 11494 "use tcp_time_wait_interval)", NULL, NULL, NULL)) { 11495 nd_free(ndp); 11496 return (B_FALSE); 11497 } 11498 return (B_TRUE); 11499 } 11500 11501 /* ndd set routine for tcp_wroff_xtra, tcp_mdt_hdr_{head,tail}_min. */ 11502 /* ARGSUSED */ 11503 static int 11504 tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 11505 cred_t *cr) 11506 { 11507 long new_value; 11508 tcpparam_t *tcppa = (tcpparam_t *)cp; 11509 11510 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11511 new_value < tcppa->tcp_param_min || 11512 new_value > tcppa->tcp_param_max) { 11513 return (EINVAL); 11514 } 11515 /* 11516 * Need to make sure new_value is a multiple of 4. If it is not, 11517 * round it up. For future 64 bit requirement, we actually make it 11518 * a multiple of 8. 11519 */ 11520 if (new_value & 0x7) { 11521 new_value = (new_value & ~0x7) + 0x8; 11522 } 11523 tcppa->tcp_param_val = new_value; 11524 return (0); 11525 } 11526 11527 /* Set callback routine passed to nd_load by tcp_param_register */ 11528 /* ARGSUSED */ 11529 static int 11530 tcp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 11531 { 11532 long new_value; 11533 tcpparam_t *tcppa = (tcpparam_t *)cp; 11534 11535 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11536 new_value < tcppa->tcp_param_min || 11537 new_value > tcppa->tcp_param_max) { 11538 return (EINVAL); 11539 } 11540 tcppa->tcp_param_val = new_value; 11541 return (0); 11542 } 11543 11544 /* 11545 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 11546 * is filled, return as much as we can. The message passed in may be 11547 * multi-part, chained using b_cont. "start" is the starting sequence 11548 * number for this piece. 11549 */ 11550 static mblk_t * 11551 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 11552 { 11553 uint32_t end; 11554 mblk_t *mp1; 11555 mblk_t *mp2; 11556 mblk_t *next_mp; 11557 uint32_t u1; 11558 tcp_stack_t *tcps = tcp->tcp_tcps; 11559 11560 /* Walk through all the new pieces. */ 11561 do { 11562 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 11563 (uintptr_t)INT_MAX); 11564 end = start + (int)(mp->b_wptr - mp->b_rptr); 11565 next_mp = mp->b_cont; 11566 if (start == end) { 11567 /* Empty. Blast it. 
*/ 11568 freeb(mp); 11569 continue; 11570 } 11571 mp->b_cont = NULL; 11572 TCP_REASS_SET_SEQ(mp, start); 11573 TCP_REASS_SET_END(mp, end); 11574 mp1 = tcp->tcp_reass_tail; 11575 if (!mp1) { 11576 tcp->tcp_reass_tail = mp; 11577 tcp->tcp_reass_head = mp; 11578 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11579 UPDATE_MIB(&tcps->tcps_mib, 11580 tcpInDataUnorderBytes, end - start); 11581 continue; 11582 } 11583 /* New stuff completely beyond tail? */ 11584 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 11585 /* Link it on end. */ 11586 mp1->b_cont = mp; 11587 tcp->tcp_reass_tail = mp; 11588 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11589 UPDATE_MIB(&tcps->tcps_mib, 11590 tcpInDataUnorderBytes, end - start); 11591 continue; 11592 } 11593 mp1 = tcp->tcp_reass_head; 11594 u1 = TCP_REASS_SEQ(mp1); 11595 /* New stuff at the front? */ 11596 if (SEQ_LT(start, u1)) { 11597 /* Yes... Check for overlap. */ 11598 mp->b_cont = mp1; 11599 tcp->tcp_reass_head = mp; 11600 tcp_reass_elim_overlap(tcp, mp); 11601 continue; 11602 } 11603 /* 11604 * The new piece fits somewhere between the head and tail. 11605 * We find our slot, where mp1 precedes us and mp2 trails. 11606 */ 11607 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 11608 u1 = TCP_REASS_SEQ(mp2); 11609 if (SEQ_LEQ(start, u1)) 11610 break; 11611 } 11612 /* Link ourselves in */ 11613 mp->b_cont = mp2; 11614 mp1->b_cont = mp; 11615 11616 /* Trim overlap with following mblk(s) first */ 11617 tcp_reass_elim_overlap(tcp, mp); 11618 11619 /* Trim overlap with preceding mblk */ 11620 tcp_reass_elim_overlap(tcp, mp1); 11621 11622 } while (start = end, mp = next_mp); 11623 mp1 = tcp->tcp_reass_head; 11624 /* Anything ready to go? */ 11625 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 11626 return (NULL); 11627 /* Eat what we can off the queue */ 11628 for (;;) { 11629 mp = mp1->b_cont; 11630 end = TCP_REASS_END(mp1); 11631 TCP_REASS_SET_SEQ(mp1, 0); 11632 TCP_REASS_SET_END(mp1, 0); 11633 if (!mp) { 11634 tcp->tcp_reass_tail = NULL; 11635 break; 11636 } 11637 if (end != TCP_REASS_SEQ(mp)) { 11638 mp1->b_cont = NULL; 11639 break; 11640 } 11641 mp1 = mp; 11642 } 11643 mp1 = tcp->tcp_reass_head; 11644 tcp->tcp_reass_head = mp; 11645 return (mp1); 11646 } 11647 11648 /* Eliminate any overlap that mp may have over later mblks */ 11649 static void 11650 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 11651 { 11652 uint32_t end; 11653 mblk_t *mp1; 11654 uint32_t u1; 11655 tcp_stack_t *tcps = tcp->tcp_tcps; 11656 11657 end = TCP_REASS_END(mp); 11658 while ((mp1 = mp->b_cont) != NULL) { 11659 u1 = TCP_REASS_SEQ(mp1); 11660 if (!SEQ_GT(end, u1)) 11661 break; 11662 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 11663 mp->b_wptr -= end - u1; 11664 TCP_REASS_SET_END(mp, u1); 11665 BUMP_MIB(&tcps->tcps_mib, tcpInDataPartDupSegs); 11666 UPDATE_MIB(&tcps->tcps_mib, 11667 tcpInDataPartDupBytes, end - u1); 11668 break; 11669 } 11670 mp->b_cont = mp1->b_cont; 11671 TCP_REASS_SET_SEQ(mp1, 0); 11672 TCP_REASS_SET_END(mp1, 0); 11673 freeb(mp1); 11674 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs); 11675 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, end - u1); 11676 } 11677 if (!mp1) 11678 tcp->tcp_reass_tail = mp; 11679 } 11680 11681 /* 11682 * Send up all messages queued on tcp_rcv_list. 
 */
static uint_t
tcp_rcv_drain(queue_t *q, tcp_t *tcp)
{
    mblk_t *mp;
    uint_t ret = 0;
    uint_t thwin;
#ifdef DEBUG
    uint_t cnt = 0;
#endif
    tcp_stack_t *tcps = tcp->tcp_tcps;

    /* Can't drain on an eager connection */
    if (tcp->tcp_listener != NULL)
        return (ret);

    /*
     * Handle two cases here: we are currently fused or we were
     * previously fused and have some urgent data to be delivered
     * upstream. The latter happens because we either ran out of
     * memory or were detached and therefore sending the SIGURG was
     * deferred until this point. In either case we pass control
     * over to tcp_fuse_rcv_drain() since it may need to complete
     * some work.
     */
    if (tcp->tcp_fused || tcp->tcp_fused_sigurg) {
        ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
        if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL :
            &tcp->tcp_fused_sigurg_mp))
            return (ret);
    }

    while ((mp = tcp->tcp_rcv_list) != NULL) {
        tcp->tcp_rcv_list = mp->b_next;
        mp->b_next = NULL;
#ifdef DEBUG
        cnt += msgdsize(mp);
#endif
        /* Does this need SSL processing first? */
        if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) {
            DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain,
                mblk_t *, mp);
            tcp_kssl_input(tcp, mp);
            continue;
        }
        putnext(q, mp);
    }
    ASSERT(cnt == tcp->tcp_rcv_cnt);
    tcp->tcp_rcv_last_head = NULL;
    tcp->tcp_rcv_last_tail = NULL;
    tcp->tcp_rcv_cnt = 0;

    /* Learn the latest rwnd information that we sent to the other side. */
    thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win))
        << tcp->tcp_rcv_ws;
    /* This is peer's calculated send window (our receive window). */
    thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
    /*
     * Increase the receive window to max. But we need to do receiver
     * SWS avoidance. This means that we need to check that the
     * increase of the receive window is at least 1 MSS.
     */
    if (canputnext(q) && (q->q_hiwat - thwin >= tcp->tcp_mss)) {
        /*
         * If the window that the other side knows is less than the
         * maximum number of deferred-ACK segments, send an update
         * immediately.
         */
        if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
            BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
            ret = TH_ACK_NEEDED;
        }
        tcp->tcp_rwnd = q->q_hiwat;
    }
    /* No need for the push timer now. */
    if (tcp->tcp_push_tid != 0) {
        (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
        tcp->tcp_push_tid = 0;
    }
    return (ret);
}
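/*
 * Illustrative sketch (not part of the kernel build): the window-update
 * arithmetic used in tcp_rcv_drain() above, in isolation.  thwin
 * reconstructs the window the peer currently believes it has: the
 * advertised value scaled by the receive window-scale shift, minus the
 * data (rnxt - rack) received but not yet ACKed.  Reopening the window
 * to the stream high-water mark is only done -- receiver-side SWS
 * avoidance -- if it grows the window by at least one MSS.  All ex_*
 * names are hypothetical stand-ins for the tcp_t fields used above.
 */
static unsigned int
ex_window_update_needed(unsigned int th_win, unsigned int rcv_ws,
    unsigned int rnxt, unsigned int rack, unsigned int q_hiwat,
    unsigned int mss, unsigned int rack_cur_max)
{
    unsigned int thwin;

    /* Window the peer last learned, scaled and net of unacked data. */
    thwin = (th_win << rcv_ws) - (rnxt - rack);

    /* Receiver SWS avoidance: only reopen by a full MSS or more. */
    if (q_hiwat - thwin < mss)
        return (0);

    /* Immediate update if the peer's view is below the deferred-ACK cap. */
    return (thwin < rack_cur_max * mss);
}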
/*
 * Queue data on tcp_rcv_list, which is a b_next chain.
 * tcp_rcv_last_head points to the last message on that chain and
 * tcp_rcv_last_tail to that message's last b_cont block.
 * Each element of the chain is a b_cont chain.
 *
 * M_DATA messages are added to the current element.
 * Other messages are added as new (b_next) elements.
 */
void
tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len)
{
    ASSERT(seg_len == msgdsize(mp));
    ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL);

    if (tcp->tcp_rcv_list == NULL) {
        ASSERT(tcp->tcp_rcv_last_head == NULL);
        tcp->tcp_rcv_list = mp;
        tcp->tcp_rcv_last_head = mp;
    } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) {
        tcp->tcp_rcv_last_tail->b_cont = mp;
    } else {
        tcp->tcp_rcv_last_head->b_next = mp;
        tcp->tcp_rcv_last_head = mp;
    }

    while (mp->b_cont)
        mp = mp->b_cont;

    tcp->tcp_rcv_last_tail = mp;
    tcp->tcp_rcv_cnt += seg_len;
    tcp->tcp_rwnd -= seg_len;
}
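/*
 * Illustrative sketch (not part of the kernel build): the shape of the
 * receive list that tcp_rcv_enqueue() maintains.  The list is
 * two-dimensional: b_next links whole messages, b_cont links the blocks
 * within one message, and the enqueue path appends to the tail of the
 * current message when the db_type matches.  struct ex_blk and the
 * helper below are hypothetical simplifications of mblk_t.
 */
struct ex_blk {
    struct ex_blk *b_next;  /* next message on the list */
    struct ex_blk *b_cont;  /* next block of the same message */
    int db_type;            /* analogous to DB_TYPE(mp) */
};

static void
ex_rcv_enqueue(struct ex_blk **list, struct ex_blk **last_head,
    struct ex_blk **last_tail, struct ex_blk *mp)
{
    if (*list == NULL) {
        *list = *last_head = mp;            /* first message */
    } else if (mp->db_type == (*last_head)->db_type) {
        (*last_tail)->b_cont = mp;          /* extend current message */
    } else {
        (*last_head)->b_next = mp;          /* start a new message */
        *last_head = mp;
    }
    while (mp->b_cont != NULL)              /* track the new tail block */
        mp = mp->b_cont;
    *last_tail = mp;
}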
/*
 * DEFAULT TCP ENTRY POINT via squeue on READ side.
 *
 * This is the default entry function into TCP on the read side. TCP is
 * always entered via squeue, i.e. using squeues for mutual exclusion.
 * When the classifier does a lookup to find the tcp, it also puts a
 * reference on the associated conn structure, so the tcp is guaranteed
 * to exist when we come here. We still need to check the state because
 * it might have been closed in the meantime. The squeue processing
 * function, i.e. squeue_enter, squeue_enter_nodrain, or squeue_drain,
 * is responsible for doing the CONN_DEC_REF.
 *
 * Apart from the default entry point, IP also sends packets directly to
 * tcp_rput_data for the AF_INET fast path and to tcp_conn_request for
 * incoming connections.
 */
void
tcp_input(void *arg, mblk_t *mp, void *arg2)
{
    conn_t *connp = (conn_t *)arg;
    tcp_t *tcp = (tcp_t *)connp->conn_tcp;

    /* arg2 is the sqp */
    ASSERT(arg2 != NULL);
    ASSERT(mp != NULL);

    /*
     * Don't accept any input on a closed tcp as this TCP logically does
     * not exist on the system. Don't proceed further with this TCP.
     * For example, this packet could trigger another close of this tcp,
     * which would be disastrous for tcp_refcnt. tcp_close_detached /
     * tcp_clean_death / tcp_closei_local must be called at most once
     * on a TCP. In this case we need to refeed the packet into the
     * classifier and figure out where the packet should go. Need to
     * preserve the recv_ill somehow. Until we figure that out, for
     * now just drop the packet if we can't classify the packet.
     */
    if (tcp->tcp_state == TCPS_CLOSED ||
        tcp->tcp_state == TCPS_BOUND) {
        conn_t *new_connp;
        ip_stack_t *ipst = tcp->tcp_tcps->tcps_netstack->netstack_ip;

        new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst);
        if (new_connp != NULL) {
            tcp_reinput(new_connp, mp, arg2);
            return;
        }
        /* We failed to classify. For now just drop the packet */
        freemsg(mp);
        return;
    }

    if (DB_TYPE(mp) == M_DATA)
        tcp_rput_data(connp, mp, arg2);
    else
        tcp_rput_common(tcp, mp);
}

/*
 * The read side put procedure.
 * The packets passed up by ip are assumed to be aligned according to
 * OK_32PTR, with the IP+TCP headers fitting in the first mblk.
 */
static void
tcp_rput_common(tcp_t *tcp, mblk_t *mp)
{
    /*
     * tcp_rput_data() does not expect M_CTL except for the case
     * where tcp_ipv6_recvancillary is set and we get an IN_PKTINFO
     * type. Need to make sure that any other M_CTLs don't make
     * it to tcp_rput_data since it is not expecting any and doesn't
     * check for it.
     */
    if (DB_TYPE(mp) == M_CTL) {
        switch (*(uint32_t *)(mp->b_rptr)) {
        case TCP_IOC_ABORT_CONN:
            /*
             * Handle connection abort request.
             */
            tcp_ioctl_abort_handler(tcp, mp);
            return;
        case IPSEC_IN:
            /*
             * Only secure ICMP messages arrive in TCP and they
             * don't go through the data path.
             */
            tcp_icmp_error(tcp, mp);
            return;
        case IN_PKTINFO:
            /*
             * Handle IPV6_RECVPKTINFO socket option on AF_INET6
             * sockets that are receiving IPv4 traffic.
             */
            ASSERT(tcp->tcp_family == AF_INET6);
            ASSERT(tcp->tcp_ipv6_recvancillary &
                TCP_IPV6_RECVPKTINFO);
            tcp_rput_data(tcp->tcp_connp, mp,
                tcp->tcp_connp->conn_sqp);
            return;
        case MDT_IOC_INFO_UPDATE:
            /*
             * Handle Multidata information update; the
             * following routine will free the message.
             */
            if (tcp->tcp_connp->conn_mdt_ok) {
                tcp_mdt_update(tcp,
                    &((ip_mdt_info_t *)mp->b_rptr)->mdt_capab,
                    B_FALSE);
            }
            freemsg(mp);
            return;
        case LSO_IOC_INFO_UPDATE:
            /*
             * Handle LSO information update; the following
             * routine will free the message.
             */
            if (tcp->tcp_connp->conn_lso_ok) {
                tcp_lso_update(tcp,
                    &((ip_lso_info_t *)mp->b_rptr)->lso_capab);
            }
            freemsg(mp);
            return;
        default:
            /*
             * tcp_icmp_error() will process the M_CTL packets.
             * Non-ICMP packets, if any, will be discarded in
             * tcp_icmp_error(). We will process the ICMP packet
             * even if we are TCP_IS_DETACHED_NONEAGER as the
             * incoming ICMP packet may result in changing
             * the tcp_mss, which we would need if we have
             * packets to retransmit.
             */
            tcp_icmp_error(tcp, mp);
            return;
        }
    }

    /* No point processing the message if tcp is already closed */
    if (TCP_IS_DETACHED_NONEAGER(tcp)) {
        freemsg(mp);
        return;
    }

    tcp_rput_other(tcp, mp);
}


/* The minimum of smoothed mean deviation in RTO calculation. */
#define TCP_SD_MIN  400
/*
 * Set RTO for this connection. The formula is from Jacobson and Karels'
 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names
 * are the same as those in Appendix A.2 of that paper.
 *
 * m = new measurement
 * sa = smoothed RTT average (8 * average estimates).
 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
 */
static void
tcp_set_rto(tcp_t *tcp, clock_t rtt)
{
    long m = TICK_TO_MSEC(rtt);
    clock_t sa = tcp->tcp_rtt_sa;
    clock_t sv = tcp->tcp_rtt_sd;
    clock_t rto;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    BUMP_MIB(&tcps->tcps_mib, tcpRttUpdate);
    tcp->tcp_rtt_update++;

    /* A nonzero tcp_rtt_sa means we already have an estimate to update. */
    if (sa != 0) {
        /*
         * Update average estimator:
         *	new rtt = 7/8 old rtt + 1/8 Error
         */

        /* m is now Error in estimate. */
        m -= sa >> 3;
        if ((sa += m) <= 0) {
            /*
             * Don't allow the smoothed average to be negative.
             * We use 0 to denote reinitialization of the
             * variables.
             */
            sa = 1;
        }

        /*
         * Update deviation estimator:
         *	new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev)
         */
        if (m < 0)
            m = -m;
        m -= sv >> 2;
        sv += m;
    } else {
        /*
         * This follows BSD's implementation. So the reinitialized
         * RTO is 3 * m. We cannot go less than 2 because if the
         * link is bandwidth dominated, doubling the window size
         * during slow start means doubling the RTT. We want to be
         * more conservative when we reinitialize our estimates. 3
         * is just a convenient number.
         */
        sa = m << 3;
        sv = m << 1;
    }
    if (sv < TCP_SD_MIN) {
        /*
         * We do not know whether sa captures the delayed-ACK
         * effect, since in a long train of segments a receiver
         * does not delay its ACKs. So set the minimum of sv
         * to be TCP_SD_MIN, which defaults to 400 ms, twice
         * BSD's DATO. That means the minimum of the mean
         * deviation is 100 ms.
         */
        sv = TCP_SD_MIN;
    }
    tcp->tcp_rtt_sa = sa;
    tcp->tcp_rtt_sd = sv;
    /*
     * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv)
     *
     * Add tcp_rexmit_interval_extra in case of an extreme environment
     * where the algorithm fails to work. The default value of
     * tcp_rexmit_interval_extra should be 0.
     *
     * As we use a finer grained clock than BSD and update
     * RTO for every ACK, add in another .25 of RTT to the
     * deviation of RTO to accommodate burstiness of 1/4 of
     * window size.
     */
    rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5);

    if (rto > tcps->tcps_rexmit_interval_max) {
        tcp->tcp_rto = tcps->tcps_rexmit_interval_max;
    } else if (rto < tcps->tcps_rexmit_interval_min) {
        tcp->tcp_rto = tcps->tcps_rexmit_interval_min;
    } else {
        tcp->tcp_rto = rto;
    }

    /* Now, we can reset tcp_timer_backoff to use the new RTO... */
    tcp->tcp_timer_backoff = 0;
}
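/*
 * Illustrative sketch (not part of the kernel build): the fixed-point
 * Jacobson/Karels update above, restated on plain integers.  sa holds
 * 8 * srtt and sv holds 4 * mdev, so the shifts implement the 7/8 and
 * 3/4 decay factors without division.  Worked example: with sa = 800
 * (srtt 100 ms), sv = 80 and a new sample m = 120 ms, the error is
 * 120 - (800 >> 3) = 20, giving sa = 820, sv = 80 + (20 - (80 >> 2)) = 80,
 * and RTO = 102 + 80 + 25 = 207 ms before clamping (the kernel also adds
 * tcps_rexmit_interval_extra, default 0).  ex_* names are hypothetical.
 */
static long
ex_rto_update(long *sa, long *sv, long m)
{
    if (*sa != 0) {
        m -= *sa >> 3;      /* error against the scaled average */
        *sa += m;           /* sa = 8 * new srtt */
        if (*sa <= 0)
            *sa = 1;        /* 0 is reserved for "reinitialize" */
        if (m < 0)
            m = -m;
        m -= *sv >> 2;
        *sv += m;           /* sv = 4 * new mdev */
    } else {
        *sa = m << 3;       /* first sample: srtt = m */
        *sv = m << 1;       /* mdev = m/2, i.e. rto ~ 3m */
    }
    return ((*sa >> 3) + *sv + (*sa >> 5));
}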
/*
 * tcp_get_seg_mp() is called to get the pointer to a segment in the
 * send queue which starts at the given seq. no.
 *
 * Parameters:
 *	tcp_t *tcp: the tcp instance pointer.
 *	uint32_t seq: the starting seq. no of the requested segment.
 *	int32_t *off: after the execution, *off will be the offset to
 *		the returned mblk which points to the requested seq no.
 *	It is the caller's responsibility to send in a non-null off.
 *
 * Return:
 *	An mblk_t pointer pointing to the requested segment in send queue.
 */
static mblk_t *
tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off)
{
    int32_t cnt;
    mblk_t *mp;

    /* Defensive coding. Make sure we don't send incorrect data. */
    if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt))
        return (NULL);

    cnt = seq - tcp->tcp_suna;
    mp = tcp->tcp_xmit_head;
    while (cnt > 0 && mp != NULL) {
        cnt -= mp->b_wptr - mp->b_rptr;
        if (cnt < 0) {
            cnt += mp->b_wptr - mp->b_rptr;
            break;
        }
        mp = mp->b_cont;
    }
    ASSERT(mp != NULL);
    *off = cnt;
    return (mp);
}
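/*
 * Illustrative sketch (not part of the kernel build): the offset walk
 * performed by tcp_get_seg_mp() above.  Given per-block lengths, it
 * finds the block containing the byte at distance 'cnt' from tcp_suna
 * and the offset within that block.  For block lengths {5, 5, 5} and
 * cnt = 7, it returns block index 1 with offset 2.  ex_* names are
 * hypothetical.
 */
static int
ex_find_seg(const int *blk_len, int nblks, int cnt, int *off)
{
    int i;

    for (i = 0; i < nblks && cnt > 0; i++) {
        if (cnt < blk_len[i])
            break;              /* the byte falls inside this block */
        cnt -= blk_len[i];
    }
    *off = cnt;                     /* offset within the found block */
    return (i);                     /* index of the block holding it */
}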
/*
 * This function handles all retransmissions if SACK is enabled for this
 * connection. First it calculates how many segments can be retransmitted
 * based on tcp_pipe. Then it goes through the notsack list to find eligible
 * segments. A segment is eligible if sack_cnt for that segment is greater
 * than or equal to tcp_dupack_fast_retransmit. After it has retransmitted
 * all eligible segments, it checks to see if TCP can send some new segments
 * (fast recovery). If it can, it sets the appropriate flag for
 * tcp_rput_data().
 *
 * Parameters:
 *	tcp_t *tcp: the tcp structure of the connection.
 *	uint_t *flags: in return, appropriate value will be set for
 *	tcp_rput_data().
 */
static void
tcp_sack_rxmit(tcp_t *tcp, uint_t *flags)
{
    notsack_blk_t *notsack_blk;
    int32_t usable_swnd;
    int32_t mss;
    uint32_t seg_len;
    mblk_t *xmit_mp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    ASSERT(tcp->tcp_sack_info != NULL);
    ASSERT(tcp->tcp_notsack_list != NULL);
    ASSERT(tcp->tcp_rexmit == B_FALSE);

    /* Defensive coding in case there is a bug... */
    if (tcp->tcp_notsack_list == NULL) {
        return;
    }
    notsack_blk = tcp->tcp_notsack_list;
    mss = tcp->tcp_mss;

    /*
     * Limit the amount of outstanding data in the network to
     * tcp_cwnd_ssthresh, which is half of the original congestion wnd.
     */
    usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;

    /* At least retransmit 1 MSS of data. */
    if (usable_swnd <= 0) {
        usable_swnd = mss;
    }

    /* Make sure no new RTT samples will be taken. */
    tcp->tcp_csuna = tcp->tcp_snxt;

    notsack_blk = tcp->tcp_notsack_list;
    while (usable_swnd > 0) {
        mblk_t *snxt_mp, *tmp_mp;
        tcp_seq begin = tcp->tcp_sack_snxt;
        tcp_seq end;
        int32_t off;

        for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) {
            if (SEQ_GT(notsack_blk->end, begin) &&
                (notsack_blk->sack_cnt >=
                tcps->tcps_dupack_fast_retransmit)) {
                end = notsack_blk->end;
                if (SEQ_LT(begin, notsack_blk->begin)) {
                    begin = notsack_blk->begin;
                }
                break;
            }
        }
        /*
         * All holes are filled. Manipulate tcp_cwnd to send more
         * if we can. Note that after the SACK recovery, tcp_cwnd is
         * set to tcp_cwnd_ssthresh.
         */
        if (notsack_blk == NULL) {
            usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe;
            if (usable_swnd <= 0 || tcp->tcp_unsent == 0) {
                tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna;
                ASSERT(tcp->tcp_cwnd > 0);
                return;
            } else {
                usable_swnd = usable_swnd / mss;
                tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna +
                    MAX(usable_swnd * mss, mss);
                *flags |= TH_XMIT_NEEDED;
                return;
            }
        }

        /*
         * Note that we may send more than usable_swnd allows here
         * because of round off, but no more than 1 MSS of data.
         */
        seg_len = end - begin;
        if (seg_len > mss)
            seg_len = mss;
        snxt_mp = tcp_get_seg_mp(tcp, begin, &off);
        ASSERT(snxt_mp != NULL);
        /* This should not happen. Defensive coding again... */
        if (snxt_mp == NULL) {
            return;
        }

        xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off,
            &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE);
        if (xmit_mp == NULL)
            return;

        usable_swnd -= seg_len;
        tcp->tcp_pipe += seg_len;
        tcp->tcp_sack_snxt = begin + seg_len;
        TCP_RECORD_TRACE(tcp, xmit_mp, TCP_TRACE_SEND_PKT);
        tcp_send_data(tcp, tcp->tcp_wq, xmit_mp);

        /*
         * Update the send timestamp to avoid false retransmission.
         */
        snxt_mp->b_prev = (mblk_t *)lbolt;

        BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
        UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, seg_len);
        BUMP_MIB(&tcps->tcps_mib, tcpOutSackRetransSegs);
        /*
         * Update tcp_rexmit_max to extend this SACK recovery phase.
         * This happens when new data sent during fast recovery is
         * also lost. If TCP retransmits that new data, it needs
         * to extend the SACK recovery phase to avoid starting another
         * fast retransmit/recovery unnecessarily.
         */
        if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) {
            tcp->tcp_rexmit_max = tcp->tcp_sack_snxt;
        }
    }
}
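/*
 * Illustrative sketch (not part of the kernel build): the hole-selection
 * rule inside the retransmit loop above.  A "notsack" block describes a
 * hole between SACKed data; it becomes eligible for retransmission once
 * enough SACK reports above it have accumulated (sack_cnt reaching the
 * dup-ACK threshold, 3 by default).  struct ex_hole and the helper are
 * hypothetical simplifications of notsack_blk_t; the wraparound-safe
 * SEQ_* comparisons are replaced by plain compares for brevity.
 */
struct ex_hole {
    unsigned int begin;     /* first missing sequence number */
    unsigned int end;       /* first sequence number past the hole */
    int sack_cnt;           /* times reported missing by SACK */
    struct ex_hole *next;
};

static struct ex_hole *
ex_next_eligible_hole(struct ex_hole *list, unsigned int snxt, int thresh)
{
    struct ex_hole *h;

    for (h = list; h != NULL; h = h->next) {
        /* Skip holes already fully retransmitted this pass. */
        if (h->end <= snxt)
            continue;
        if (h->sack_cnt >= thresh)
            return (h);
    }
    return (NULL);  /* all eligible holes filled; try new data instead */
}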
/*
 * This function handles policy checking at TCP level for non-hard_bound/
 * detached connections.
 */
static boolean_t
tcp_check_policy(tcp_t *tcp, mblk_t *first_mp, ipha_t *ipha, ip6_t *ip6h,
    boolean_t secure, boolean_t mctl_present)
{
    ipsec_latch_t *ipl = NULL;
    ipsec_action_t *act = NULL;
    mblk_t *data_mp;
    ipsec_in_t *ii;
    const char *reason;
    kstat_named_t *counter;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    ipsec_stack_t *ipss;
    ip_stack_t *ipst;

    ASSERT(mctl_present || !secure);

    ASSERT((ipha == NULL && ip6h != NULL) ||
        (ip6h == NULL && ipha != NULL));

    /*
     * We don't necessarily have an ipsec_in_act action to verify
     * policy because of asymmetrical policy where we have only
     * outbound policy and no inbound policy (possible with global
     * policy).
     */
    if (!secure) {
        if (act == NULL || act->ipa_act.ipa_type == IPSEC_ACT_BYPASS ||
            act->ipa_act.ipa_type == IPSEC_ACT_CLEAR)
            return (B_TRUE);
        ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
            "tcp_check_policy", ipha, ip6h, secure,
            tcps->tcps_netstack);
        ipss = tcps->tcps_netstack->netstack_ipsec;

        ip_drop_packet(first_mp, B_TRUE, NULL, NULL,
            DROPPER(ipss, ipds_tcp_clear),
            &tcps->tcps_dropper);
        return (B_FALSE);
    }

    /*
     * We have a secure packet.
     */
    if (act == NULL) {
        ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED,
            "tcp_check_policy", ipha, ip6h, secure,
            tcps->tcps_netstack);
        ipss = tcps->tcps_netstack->netstack_ipsec;

        ip_drop_packet(first_mp, B_TRUE, NULL, NULL,
            DROPPER(ipss, ipds_tcp_secure),
            &tcps->tcps_dropper);
        return (B_FALSE);
    }

    /*
     * XXX This whole routine is currently incorrect. ipl should
     * be set to the latch pointer, but is currently not set, so
     * we initialize it to NULL to avoid picking up random garbage.
     */
    if (ipl == NULL)
        return (B_TRUE);

    data_mp = first_mp->b_cont;

    ii = (ipsec_in_t *)first_mp->b_rptr;

    ipst = tcps->tcps_netstack->netstack_ip;

    if (ipsec_check_ipsecin_latch(ii, data_mp, ipl, ipha, ip6h, &reason,
        &counter, tcp->tcp_connp)) {
        BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
        return (B_TRUE);
    }
    (void) strlog(TCP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
        "tcp inbound policy mismatch: %s, packet dropped\n",
        reason);
    BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);

    ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter,
        &tcps->tcps_dropper);
    return (B_FALSE);
}

/*
 * tcp_ss_rexmit() is called in tcp_rput_data() to do slow start
 * retransmission after a timeout.
 *
 * To limit the number of duplicate segments, we limit the number of
 * segments sent in one burst to tcp_snd_burst, the burst variable.
 */
static void
tcp_ss_rexmit(tcp_t *tcp)
{
    uint32_t snxt;
    uint32_t smax;
    int32_t win;
    int32_t mss;
    int32_t off;
    int32_t burst = tcp->tcp_snd_burst;
    mblk_t *snxt_mp;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    /*
     * Note that tcp_rexmit can be set even though TCP has retransmitted
     * all unack'ed segments.
     */
    if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) {
        smax = tcp->tcp_rexmit_max;
        snxt = tcp->tcp_rexmit_nxt;
        if (SEQ_LT(snxt, tcp->tcp_suna)) {
            snxt = tcp->tcp_suna;
        }
        win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd);
        win -= snxt - tcp->tcp_suna;
        mss = tcp->tcp_mss;
        snxt_mp = tcp_get_seg_mp(tcp, snxt, &off);

        while (SEQ_LT(snxt, smax) && (win > 0) &&
            (burst > 0) && (snxt_mp != NULL)) {
            mblk_t *xmit_mp;
            mblk_t *old_snxt_mp = snxt_mp;
            uint32_t cnt = mss;

            if (win < cnt) {
                cnt = win;
            }
            if (SEQ_GT(snxt + cnt, smax)) {
                cnt = smax - snxt;
            }
            xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off,
                &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE);
            if (xmit_mp == NULL)
                return;

            tcp_send_data(tcp, tcp->tcp_wq, xmit_mp);

            snxt += cnt;
            win -= cnt;
            /*
             * Update the send timestamp to avoid false
             * retransmission.
             */
            old_snxt_mp->b_prev = (mblk_t *)lbolt;
            BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
            UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, cnt);

            tcp->tcp_rexmit_nxt = snxt;
            burst--;
        }
        /*
         * If we have transmitted all we have at the time
         * we started the retransmission, we can leave
         * the rest of the job to tcp_wput_data(). But we
         * need to check the send window first. If the
         * win is not 0, go on with tcp_wput_data().
         */
        if (SEQ_LT(snxt, smax) || win == 0) {
            return;
        }
    }
    /* Only call tcp_wput_data() if there is data to be sent. */
    if (tcp->tcp_unsent) {
        tcp_wput_data(tcp, NULL, B_FALSE);
    }
}
/*
 * Process all TCP options in the SYN segment. Note that this function
 * should be called after tcp_adapt_ire() is called so that the necessary
 * info from IRE is already set in the tcp structure.
 *
 * This function sets up the correct tcp_mss value according to the
 * MSS option value and our header size. It also sets up the window scale
 * and timestamp values, and initializes the SACK info blocks. But it does
 * not change the receive window size after setting the tcp_mss value. The
 * caller should do the appropriate change.
 */
void
tcp_process_options(tcp_t *tcp, tcph_t *tcph)
{
    int options;
    tcp_opt_t tcpopt;
    uint32_t mss_max;
    char *tmp_tcph;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    tcpopt.tcp = NULL;
    options = tcp_parse_options(tcph, &tcpopt);

    /*
     * Process MSS option. Note that MSS option value does not account
     * for IP or TCP options. This means that it is equal to MTU - minimum
     * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
     * IPv6.
     */
    if (!(options & TCP_OPT_MSS_PRESENT)) {
        if (tcp->tcp_ipversion == IPV4_VERSION)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
        else
            tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
    } else {
        if (tcp->tcp_ipversion == IPV4_VERSION)
            mss_max = tcps->tcps_mss_max_ipv4;
        else
            mss_max = tcps->tcps_mss_max_ipv6;
        if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
            tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
        else if (tcpopt.tcp_opt_mss > mss_max)
            tcpopt.tcp_opt_mss = mss_max;
    }

    /* Process Window Scale option. */
    if (options & TCP_OPT_WSCALE_PRESENT) {
        tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
        tcp->tcp_snd_ws_ok = B_TRUE;
    } else {
        tcp->tcp_snd_ws = B_FALSE;
        tcp->tcp_snd_ws_ok = B_FALSE;
        tcp->tcp_rcv_ws = B_FALSE;
    }

    /* Process Timestamp option. */
    if ((options & TCP_OPT_TSTAMP_PRESENT) &&
        (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
        tmp_tcph = (char *)tcp->tcp_tcph;

        tcp->tcp_snd_ts_ok = B_TRUE;
        tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
        tcp->tcp_last_rcv_lbolt = lbolt64;
        ASSERT(OK_32PTR(tmp_tcph));
        ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH);

        /* Fill in our template header with basic timestamp option. */
        tmp_tcph += tcp->tcp_tcp_hdr_len;
        tmp_tcph[0] = TCPOPT_NOP;
        tmp_tcph[1] = TCPOPT_NOP;
        tmp_tcph[2] = TCPOPT_TSTAMP;
        tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
        tcp->tcp_hdr_len += TCPOPT_REAL_TS_LEN;
        tcp->tcp_tcp_hdr_len += TCPOPT_REAL_TS_LEN;
        tcp->tcp_tcph->th_offset_and_rsrvd[0] += (3 << 4);
    } else {
        tcp->tcp_snd_ts_ok = B_FALSE;
    }

    /*
     * Process SACK options. If SACK is enabled for this connection,
     * then allocate the SACK info structure. Note the following ways
     * in which tcp_snd_sack_ok is set to true:
     *
     * For active connection: in tcp_adapt_ire() called in
     * tcp_rput_other(), or in tcp_rput_other() when tcp_sack_permitted
     * is checked.
     *
     * For passive connection: in tcp_adapt_ire() called in
     * tcp_accept_comm().
     *
     * That's the reason why the extra TCP_IS_DETACHED() check is there.
     * That check makes sure that if we did not send a SACK OK option,
     * we will not enable SACK for this connection even though the other
     * side sends us the SACK OK option. For active connection, the SACK
     * info structure has already been allocated. So we need to free
     * it if SACK is disabled.
     */
    if ((options & TCP_OPT_SACK_OK_PRESENT) &&
        (tcp->tcp_snd_sack_ok ||
        (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) {
        /* This should be true only in the passive case. */
        if (tcp->tcp_sack_info == NULL) {
            ASSERT(TCP_IS_DETACHED(tcp));
            tcp->tcp_sack_info =
                kmem_cache_alloc(tcp_sack_info_cache, KM_NOSLEEP);
        }
        if (tcp->tcp_sack_info == NULL) {
            tcp->tcp_snd_sack_ok = B_FALSE;
        } else {
            tcp->tcp_snd_sack_ok = B_TRUE;
            if (tcp->tcp_snd_ts_ok) {
                tcp->tcp_max_sack_blk = 3;
            } else {
                tcp->tcp_max_sack_blk = 4;
            }
        }
    } else {
        /*
         * Resetting tcp_snd_sack_ok to B_FALSE so that
         * no SACK info will be used for this
         * connection. This assumes that SACK usage
         * permission is negotiated. This may need
         * to be changed once this is clarified.
         */
        if (tcp->tcp_sack_info != NULL) {
            ASSERT(tcp->tcp_notsack_list == NULL);
            kmem_cache_free(tcp_sack_info_cache,
                tcp->tcp_sack_info);
            tcp->tcp_sack_info = NULL;
        }
        tcp->tcp_snd_sack_ok = B_FALSE;
    }

    /*
     * Now we know the exact TCP/IP header length, subtract
     * that from tcp_mss to get our side's MSS.
     */
    tcp->tcp_mss -= tcp->tcp_hdr_len;
    /*
     * Here we assume that the other side's header size will be equal to
     * our header size. We calculate the real MSS accordingly. Need to
     * take into account the additional stuff IPsec puts in.
     *
     * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header)
     */
    tcpopt.tcp_opt_mss -= tcp->tcp_hdr_len + tcp->tcp_ipsec_overhead -
        ((tcp->tcp_ipversion == IPV4_VERSION ?
        IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH);

    /*
     * Set MSS to the smaller one of both ends of the connection.
     * We should not have called tcp_mss_set() before, but our
     * side of the MSS should have been set to a proper value
     * by tcp_adapt_ire(). tcp_mss_set() will also set up the
     * STREAM head parameters properly.
     *
     * If we have a larger-than-16-bit window but the other side
     * didn't want to do window scale, tcp_rwnd_set() will take
     * care of that.
     */
    tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss), B_TRUE);
}
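/*
 * Illustrative sketch (not part of the kernel build): the MSS arithmetic
 * at the end of tcp_process_options(), in isolation.  Worked IPv4 example,
 * assuming no IPsec overhead: with a peer MSS option of 1460 and a local
 * header of 20 + 20 + 12 = 52 bytes (timestamps enabled), the peer's value
 * is reduced by 52 + 0 - 40 = 12 to 1448, our own payload budget is
 * tcp_mss - 52, and the connection uses the smaller of the two.  ex_*
 * names are hypothetical; sane header sizes are assumed.
 */
static unsigned int
ex_effective_mss(unsigned int peer_mss_opt, unsigned int our_hdr_len,
    unsigned int ipsec_overhead, unsigned int min_hdr_len,
    unsigned int our_mss_after_hdr)
{
    /* Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) */
    unsigned int peer = peer_mss_opt -
        (our_hdr_len + ipsec_overhead - min_hdr_len);

    /* The connection uses the smaller of the two ends' values. */
    return (peer < our_mss_after_hdr ? peer : our_mss_after_hdr);
}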
/*
 * Sends the T_CONN_IND to the listener. The caller calls this
 * function via squeue to get inside the listener's perimeter;
 * once the 3-way handshake is done, a T_CONN_IND needs to be
 * sent. As an optimization, the caller can call this directly
 * if the listener's perimeter is the same as the eager's.
 */
/* ARGSUSED */
void
tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2)
{
    conn_t *lconnp = (conn_t *)arg;
    tcp_t *listener = lconnp->conn_tcp;
    tcp_t *tcp;
    struct T_conn_ind *conn_ind;
    ipaddr_t *addr_cache;
    boolean_t need_send_conn_ind = B_FALSE;
    tcp_stack_t *tcps = listener->tcp_tcps;

    /* retrieve the eager */
    conn_ind = (struct T_conn_ind *)mp->b_rptr;
    ASSERT(conn_ind->OPT_offset != 0 &&
        conn_ind->OPT_length == sizeof (intptr_t));
    bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
        conn_ind->OPT_length);

    /*
     * TLI/XTI applications will get confused by
     * sending the eager as an option since it violates
     * the option semantics. So remove the eager as
     * an option since a TLI/XTI app doesn't need it anyway.
     */
    if (!TCP_IS_SOCKET(listener)) {
        conn_ind->OPT_length = 0;
        conn_ind->OPT_offset = 0;
    }
    if (listener->tcp_state == TCPS_CLOSED ||
        TCP_IS_DETACHED(listener)) {
        /*
         * If the listener has closed, it would have caused a
         * cleanup/blowoff to happen for the eager. We
         * just need to return.
         */
        freemsg(mp);
        return;
    }


    /*
     * If the conn_req_q is full, defer passing up the
     * T_CONN_IND until space is available after t_accept()
     * processing.
     */
    mutex_enter(&listener->tcp_eager_lock);

    /*
     * Take the eager out, if it is in the list of droppable eagers,
     * as we are here because the 3W handshake is over.
     */
    MAKE_UNDROPPABLE(tcp);
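    /*
     * Note (added for clarity): the listener keeps two eager lists.
     * q0, linked via tcp_eager_next_q0/tcp_eager_prev_q0 and circular
     * through the listener itself, holds connections still completing
     * or just out of SYN_RCVD and is counted by tcp_conn_req_cnt_q0;
     * q, linked via tcp_eager_next_q with tcp_eager_last_q as its tail
     * pointer and counted by tcp_conn_req_cnt_q, holds established
     * connections whose T_CONN_IND is being passed up.  The branch
     * below either promotes this eager from q0 to q, or, if q is full,
     * parks it at the end of q0 with tcp_conn_def_q0 set.
     */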
     * So remove the eager as an
12583      * option since the TLI/XTI app doesn't need it anyway.
12584      */
12585     if (!TCP_IS_SOCKET(listener)) {
12586         conn_ind->OPT_length = 0;
12587         conn_ind->OPT_offset = 0;
12588     }
12589     if (listener->tcp_state == TCPS_CLOSED ||
12590         TCP_IS_DETACHED(listener)) {
12591         /*
12592          * If the listener has closed, it would have caused a
12593          * cleanup/blowoff to happen for the eager.  We
12594          * just need to return.
12595          */
12596         freemsg(mp);
12597         return;
12598     }
12599 
12600 
12601     /*
12602      * If the conn_req_q is full, defer passing up the
12603      * T_CONN_IND until space is available after t_accept()
12604      * processing.
12605      */
12606     mutex_enter(&listener->tcp_eager_lock);
12607 
12608     /*
12609      * Take the eager out of the list of droppable eagers, if it
12610      * is in it, as we are here because the 3-way handshake is over.
12611      */
12612     MAKE_UNDROPPABLE(tcp);
12613 
12614     if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) {
12615         tcp_t *tail;
12616 
12617         /*
12618          * The eager already has an extra ref put in tcp_rput_data
12619          * so that it stays till accept comes back even though it
12620          * might get into TCPS_CLOSED as a result of a TH_RST etc.
12621          */
12622         ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
12623         listener->tcp_conn_req_cnt_q0--;
12624         listener->tcp_conn_req_cnt_q++;
12625 
12626         /* Move from SYN_RCVD to ESTABLISHED list */
12627         tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
12628             tcp->tcp_eager_prev_q0;
12629         tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
12630             tcp->tcp_eager_next_q0;
12631         tcp->tcp_eager_prev_q0 = NULL;
12632         tcp->tcp_eager_next_q0 = NULL;
12633 
12634         /*
12635          * Insert at end of the queue because sockfs
12636          * sends down T_CONN_RES in chronological
12637          * order.  Leaving the older conn indications
12638          * at the front of the queue helps reduce search
12639          * time.
12640          */
12641         tail = listener->tcp_eager_last_q;
12642         if (tail != NULL)
12643             tail->tcp_eager_next_q = tcp;
12644         else
12645             listener->tcp_eager_next_q = tcp;
12646         listener->tcp_eager_last_q = tcp;
12647         tcp->tcp_eager_next_q = NULL;
12648         /*
12649          * Delay sending up the T_conn_ind until we are
12650          * done with the eager.  Once we have sent up
12651          * the T_conn_ind, the accept can potentially complete
12652          * any time and release the refhold we have on the eager.
12653          */
12654         need_send_conn_ind = B_TRUE;
12655     } else {
12656         /*
12657          * Defer the connection on q0 and set the deferred
12658          * connection bit to true.
12659          */
12660         tcp->tcp_conn_def_q0 = B_TRUE;
12661 
12662         /* take tcp out of q0 ... */
12663         tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
12664             tcp->tcp_eager_next_q0;
12665         tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
12666             tcp->tcp_eager_prev_q0;
12667 
12668         /* ... and place it at the end of q0 */
12669         tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
12670         tcp->tcp_eager_next_q0 = listener;
12671         listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
12672         listener->tcp_eager_prev_q0 = tcp;
12673         tcp->tcp_conn.tcp_eager_conn_ind = mp;
12674     }
12675 
12676     /* we have timed out before */
12677     if (tcp->tcp_syn_rcvd_timeout != 0) {
12678         tcp->tcp_syn_rcvd_timeout = 0;
12679         listener->tcp_syn_rcvd_timeout--;
12680         if (listener->tcp_syn_defense &&
12681             listener->tcp_syn_rcvd_timeout <=
12682             (tcps->tcps_conn_req_max_q0 >> 5) &&
12683             10*MINUTES < TICK_TO_MSEC(lbolt64 -
12684             listener->tcp_last_rcv_lbolt)) {
12685             /*
12686              * Turn off the defense mode if we
12687              * believe the SYN attack is over.
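             *
             * "Over" is judged by the test above: at most 1/32 of
             * the q0 limit (tcps_conn_req_max_q0 >> 5) SYN_RCVD
             * timeouts are still outstanding, and the listener's
             * tcp_last_rcv_lbolt is more than ten minutes old.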
12688 */ 12689 listener->tcp_syn_defense = B_FALSE; 12690 if (listener->tcp_ip_addr_cache) { 12691 kmem_free((void *)listener->tcp_ip_addr_cache, 12692 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 12693 listener->tcp_ip_addr_cache = NULL; 12694 } 12695 } 12696 } 12697 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache); 12698 if (addr_cache != NULL) { 12699 /* 12700 * We have finished a 3-way handshake with this 12701 * remote host. This proves the IP addr is good. 12702 * Cache it! 12703 */ 12704 addr_cache[IP_ADDR_CACHE_HASH( 12705 tcp->tcp_remote)] = tcp->tcp_remote; 12706 } 12707 mutex_exit(&listener->tcp_eager_lock); 12708 if (need_send_conn_ind) 12709 putnext(listener->tcp_rq, mp); 12710 } 12711 12712 mblk_t * 12713 tcp_find_pktinfo(tcp_t *tcp, mblk_t *mp, uint_t *ipversp, uint_t *ip_hdr_lenp, 12714 uint_t *ifindexp, ip6_pkt_t *ippp) 12715 { 12716 ip_pktinfo_t *pinfo; 12717 ip6_t *ip6h; 12718 uchar_t *rptr; 12719 mblk_t *first_mp = mp; 12720 boolean_t mctl_present = B_FALSE; 12721 uint_t ifindex = 0; 12722 ip6_pkt_t ipp; 12723 uint_t ipvers; 12724 uint_t ip_hdr_len; 12725 tcp_stack_t *tcps = tcp->tcp_tcps; 12726 12727 rptr = mp->b_rptr; 12728 ASSERT(OK_32PTR(rptr)); 12729 ASSERT(tcp != NULL); 12730 ipp.ipp_fields = 0; 12731 12732 switch DB_TYPE(mp) { 12733 case M_CTL: 12734 mp = mp->b_cont; 12735 if (mp == NULL) { 12736 freemsg(first_mp); 12737 return (NULL); 12738 } 12739 if (DB_TYPE(mp) != M_DATA) { 12740 freemsg(first_mp); 12741 return (NULL); 12742 } 12743 mctl_present = B_TRUE; 12744 break; 12745 case M_DATA: 12746 break; 12747 default: 12748 cmn_err(CE_NOTE, "tcp_find_pktinfo: unknown db_type"); 12749 freemsg(mp); 12750 return (NULL); 12751 } 12752 ipvers = IPH_HDR_VERSION(rptr); 12753 if (ipvers == IPV4_VERSION) { 12754 if (tcp == NULL) { 12755 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12756 goto done; 12757 } 12758 12759 ipp.ipp_fields |= IPPF_HOPLIMIT; 12760 ipp.ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl; 12761 12762 /* 12763 * If we have IN_PKTINFO in an M_CTL and tcp_ipv6_recvancillary 12764 * has TCP_IPV6_RECVPKTINFO set, pass I/F index along in ipp. 
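         * (IP hands us the ip_pktinfo_t as a separate M_CTL block
         * ahead of the data; once the ifindex is copied out below,
         * that control block is freed so only M_DATA continues on.)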
12765          */
12766         if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) &&
12767             mctl_present) {
12768             pinfo = (ip_pktinfo_t *)first_mp->b_rptr;
12769             if ((MBLKL(first_mp) == sizeof (ip_pktinfo_t)) &&
12770                 (pinfo->ip_pkt_ulp_type == IN_PKTINFO) &&
12771                 (pinfo->ip_pkt_flags & IPF_RECVIF)) {
12772                 ipp.ipp_fields |= IPPF_IFINDEX;
12773                 ipp.ipp_ifindex = pinfo->ip_pkt_ifindex;
12774                 ifindex = pinfo->ip_pkt_ifindex;
12775             }
12776             freeb(first_mp);
12777             mctl_present = B_FALSE;
12778         }
12779         ip_hdr_len = IPH_HDR_LENGTH(rptr);
12780     } else {
12781         ip6h = (ip6_t *)rptr;
12782 
12783         ASSERT(ipvers == IPV6_VERSION);
12784         ipp.ipp_fields = IPPF_HOPLIMIT | IPPF_TCLASS;
12785         ipp.ipp_tclass = (ip6h->ip6_flow & 0x0FF00000) >> 20;
12786         ipp.ipp_hoplimit = ip6h->ip6_hops;
12787 
12788         if (ip6h->ip6_nxt != IPPROTO_TCP) {
12789             uint8_t nexthdrp;
12790             ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
12791 
12792             /* Look for ifindex information */
12793             if (ip6h->ip6_nxt == IPPROTO_RAW) {
12794                 ip6i_t *ip6i = (ip6i_t *)ip6h;
12795                 if ((uchar_t *)&ip6i[1] > mp->b_wptr) {
12796                     BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12797                     freemsg(first_mp);
12798                     return (NULL);
12799                 }
12800 
12801                 if (ip6i->ip6i_flags & IP6I_IFINDEX) {
12802                     ASSERT(ip6i->ip6i_ifindex != 0);
12803                     ipp.ipp_fields |= IPPF_IFINDEX;
12804                     ipp.ipp_ifindex = ip6i->ip6i_ifindex;
12805                     ifindex = ip6i->ip6i_ifindex;
12806                 }
12807                 rptr = (uchar_t *)&ip6i[1];
12808                 mp->b_rptr = rptr;
12809                 if (rptr == mp->b_wptr) {
12810                     mblk_t *mp1;
12811                     mp1 = mp->b_cont;
12812                     freeb(mp);
12813                     mp = mp1;
12814                     rptr = mp->b_rptr;
12815                 }
12816                 if (MBLKL(mp) < IPV6_HDR_LEN +
12817                     sizeof (tcph_t)) {
12818                     BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12819                     freemsg(first_mp);
12820                     return (NULL);
12821                 }
12822                 ip6h = (ip6_t *)rptr;
12823             }
12824 
12825             /*
12826              * Find any potentially interesting extension headers
12827              * as well as the length of the IPv6 + extension
12828              * headers.
12829              */
12830             ip_hdr_len = ip_find_hdr_v6(mp, ip6h, &ipp, &nexthdrp);
12831             /* Verify if this is a TCP packet */
12832             if (nexthdrp != IPPROTO_TCP) {
12833                 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12834                 freemsg(first_mp);
12835                 return (NULL);
12836             }
12837         } else {
12838             ip_hdr_len = IPV6_HDR_LEN;
12839         }
12840     }
12841 
12842 done:
12843     if (ipversp != NULL)
12844         *ipversp = ipvers;
12845     if (ip_hdr_lenp != NULL)
12846         *ip_hdr_lenp = ip_hdr_len;
12847     if (ippp != NULL)
12848         *ippp = ipp;
12849     if (ifindexp != NULL)
12850         *ifindexp = ifindex;
12851     if (mctl_present) {
12852         freeb(first_mp);
12853     }
12854     return (mp);
12855 }
12856 
12857 /*
12858  * Handle M_DATA messages from IP.  It's called directly from IP via
12859  * squeue for the AF_INET type sockets fast path.  No M_CTL is expected
12860  * in this path.
12861  *
12862  * For everything else (including AF_INET6 sockets with 'tcp_ipversion'
12863  * v4 and v6), we are called through tcp_input() and an M_CTL can
12864  * be present for options but tcp_find_pktinfo() deals with it.  We
12865  * only expect M_DATA packets after tcp_find_pktinfo() is done.
12866  *
12867  * The first argument is always the connp/tcp to which the mp belongs.
12868  * There are no exceptions to this rule.  The caller has already put
12869  * a reference on this connp/tcp and once tcp_rput_data() returns,
12870  * the squeue will do the refrele.
12871  *
12872  * The TH_SYN for the listener goes directly to tcp_conn_request via
12873  * squeue.
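 *
 * (The recursive case arises when an urgent byte has to be carved
 * out of a larger segment: tcp_rput_data() below re-enters itself
 * with sqp == NULL on the pieces, since the caller is already
 * inside the squeue perimeter.)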
12874 * 12875 * sqp: NULL = recursive, sqp != NULL means called from squeue 12876 */ 12877 void 12878 tcp_rput_data(void *arg, mblk_t *mp, void *arg2) 12879 { 12880 int32_t bytes_acked; 12881 int32_t gap; 12882 mblk_t *mp1; 12883 uint_t flags; 12884 uint32_t new_swnd = 0; 12885 uchar_t *iphdr; 12886 uchar_t *rptr; 12887 int32_t rgap; 12888 uint32_t seg_ack; 12889 int seg_len; 12890 uint_t ip_hdr_len; 12891 uint32_t seg_seq; 12892 tcph_t *tcph; 12893 int urp; 12894 tcp_opt_t tcpopt; 12895 uint_t ipvers; 12896 ip6_pkt_t ipp; 12897 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 12898 uint32_t cwnd; 12899 uint32_t add; 12900 int npkt; 12901 int mss; 12902 conn_t *connp = (conn_t *)arg; 12903 squeue_t *sqp = (squeue_t *)arg2; 12904 tcp_t *tcp = connp->conn_tcp; 12905 tcp_stack_t *tcps = tcp->tcp_tcps; 12906 12907 /* 12908 * RST from fused tcp loopback peer should trigger an unfuse. 12909 */ 12910 if (tcp->tcp_fused) { 12911 TCP_STAT(tcps, tcp_fusion_aborted); 12912 tcp_unfuse(tcp); 12913 } 12914 12915 iphdr = mp->b_rptr; 12916 rptr = mp->b_rptr; 12917 ASSERT(OK_32PTR(rptr)); 12918 12919 /* 12920 * An AF_INET socket is not capable of receiving any pktinfo. Do inline 12921 * processing here. For rest call tcp_find_pktinfo to fill up the 12922 * necessary information. 12923 */ 12924 if (IPCL_IS_TCP4(connp)) { 12925 ipvers = IPV4_VERSION; 12926 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12927 } else { 12928 mp = tcp_find_pktinfo(tcp, mp, &ipvers, &ip_hdr_len, 12929 NULL, &ipp); 12930 if (mp == NULL) { 12931 TCP_STAT(tcps, tcp_rput_v6_error); 12932 return; 12933 } 12934 iphdr = mp->b_rptr; 12935 rptr = mp->b_rptr; 12936 } 12937 ASSERT(DB_TYPE(mp) == M_DATA); 12938 12939 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12940 seg_seq = ABE32_TO_U32(tcph->th_seq); 12941 seg_ack = ABE32_TO_U32(tcph->th_ack); 12942 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 12943 seg_len = (int)(mp->b_wptr - rptr) - 12944 (ip_hdr_len + TCP_HDR_LENGTH(tcph)); 12945 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 12946 do { 12947 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 12948 (uintptr_t)INT_MAX); 12949 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 12950 } while ((mp1 = mp1->b_cont) != NULL && 12951 mp1->b_datap->db_type == M_DATA); 12952 } 12953 12954 if (tcp->tcp_state == TCPS_TIME_WAIT) { 12955 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 12956 seg_len, tcph); 12957 return; 12958 } 12959 12960 if (sqp != NULL) { 12961 /* 12962 * This is the correct place to update tcp_last_recv_time. Note 12963 * that it is also updated for tcp structure that belongs to 12964 * global and listener queues which do not really need updating. 12965 * But that should not cause any harm. And it is updated for 12966 * all kinds of incoming segments, not only for data segments. 12967 */ 12968 tcp->tcp_last_recv_time = lbolt; 12969 } 12970 12971 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 12972 12973 BUMP_LOCAL(tcp->tcp_ibsegs); 12974 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 12975 12976 if ((flags & TH_URG) && sqp != NULL) { 12977 /* 12978 * TCP can't handle urgent pointers that arrive before 12979 * the connection has been accept()ed since it can't 12980 * buffer OOB data. Discard segment if this happens. 12981 * 12982 * We can't just rely on a non-null tcp_listener to indicate 12983 * that the accept() has completed since unlinking of the 12984 * eager and completion of the accept are not atomic. 
12985 * tcp_detached, when it is not set (B_FALSE) indicates 12986 * that the accept() has completed. 12987 * 12988 * Nor can it reassemble urgent pointers, so discard 12989 * if it's not the next segment expected. 12990 * 12991 * Otherwise, collapse chain into one mblk (discard if 12992 * that fails). This makes sure the headers, retransmitted 12993 * data, and new data all are in the same mblk. 12994 */ 12995 ASSERT(mp != NULL); 12996 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 12997 freemsg(mp); 12998 return; 12999 } 13000 /* Update pointers into message */ 13001 iphdr = rptr = mp->b_rptr; 13002 tcph = (tcph_t *)&rptr[ip_hdr_len]; 13003 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 13004 /* 13005 * Since we can't handle any data with this urgent 13006 * pointer that is out of sequence, we expunge 13007 * the data. This allows us to still register 13008 * the urgent mark and generate the M_PCSIG, 13009 * which we can do. 13010 */ 13011 mp->b_wptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 13012 seg_len = 0; 13013 } 13014 } 13015 13016 switch (tcp->tcp_state) { 13017 case TCPS_SYN_SENT: 13018 if (flags & TH_ACK) { 13019 /* 13020 * Note that our stack cannot send data before a 13021 * connection is established, therefore the 13022 * following check is valid. Otherwise, it has 13023 * to be changed. 13024 */ 13025 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 13026 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 13027 freemsg(mp); 13028 if (flags & TH_RST) 13029 return; 13030 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 13031 tcp, seg_ack, 0, TH_RST); 13032 return; 13033 } 13034 ASSERT(tcp->tcp_suna + 1 == seg_ack); 13035 } 13036 if (flags & TH_RST) { 13037 freemsg(mp); 13038 if (flags & TH_ACK) 13039 (void) tcp_clean_death(tcp, 13040 ECONNREFUSED, 13); 13041 return; 13042 } 13043 if (!(flags & TH_SYN)) { 13044 freemsg(mp); 13045 return; 13046 } 13047 13048 /* Process all TCP options. */ 13049 tcp_process_options(tcp, tcph); 13050 /* 13051 * The following changes our rwnd to be a multiple of the 13052 * MIN(peer MSS, our MSS) for performance reason. 13053 */ 13054 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rq->q_hiwat, 13055 tcp->tcp_mss)); 13056 13057 /* Is the other end ECN capable? */ 13058 if (tcp->tcp_ecn_ok) { 13059 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 13060 tcp->tcp_ecn_ok = B_FALSE; 13061 } 13062 } 13063 /* 13064 * Clear ECN flags because it may interfere with later 13065 * processing. 13066 */ 13067 flags &= ~(TH_ECE|TH_CWR); 13068 13069 tcp->tcp_irs = seg_seq; 13070 tcp->tcp_rack = seg_seq; 13071 tcp->tcp_rnxt = seg_seq + 1; 13072 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 13073 if (!TCP_IS_DETACHED(tcp)) { 13074 /* Allocate room for SACK options if needed. */ 13075 if (tcp->tcp_snd_sack_ok) { 13076 (void) mi_set_sth_wroff(tcp->tcp_rq, 13077 tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 13078 (tcp->tcp_loopback ? 0 : 13079 tcps->tcps_wroff_xtra)); 13080 } else { 13081 (void) mi_set_sth_wroff(tcp->tcp_rq, 13082 tcp->tcp_hdr_len + 13083 (tcp->tcp_loopback ? 0 : 13084 tcps->tcps_wroff_xtra)); 13085 } 13086 } 13087 if (flags & TH_ACK) { 13088 /* 13089 * If we can't get the confirmation upstream, pretend 13090 * we didn't even see this one. 13091 * 13092 * XXX: how can we pretend we didn't see it if we 13093 * have updated rnxt et. al. 13094 * 13095 * For loopback we defer sending up the T_CONN_CON 13096 * until after some checks below. 13097 */ 13098 mp1 = NULL; 13099 if (!tcp_conn_con(tcp, iphdr, tcph, mp, 13100 tcp->tcp_loopback ? 
&mp1 : NULL)) { 13101 freemsg(mp); 13102 return; 13103 } 13104 /* SYN was acked - making progress */ 13105 if (tcp->tcp_ipversion == IPV6_VERSION) 13106 tcp->tcp_ip_forward_progress = B_TRUE; 13107 13108 /* One for the SYN */ 13109 tcp->tcp_suna = tcp->tcp_iss + 1; 13110 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 13111 tcp->tcp_state = TCPS_ESTABLISHED; 13112 13113 /* 13114 * If SYN was retransmitted, need to reset all 13115 * retransmission info. This is because this 13116 * segment will be treated as a dup ACK. 13117 */ 13118 if (tcp->tcp_rexmit) { 13119 tcp->tcp_rexmit = B_FALSE; 13120 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 13121 tcp->tcp_rexmit_max = tcp->tcp_snxt; 13122 tcp->tcp_snd_burst = tcp->tcp_localnet ? 13123 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 13124 tcp->tcp_ms_we_have_waited = 0; 13125 13126 /* 13127 * Set tcp_cwnd back to 1 MSS, per 13128 * recommendation from 13129 * draft-floyd-incr-init-win-01.txt, 13130 * Increasing TCP's Initial Window. 13131 */ 13132 tcp->tcp_cwnd = tcp->tcp_mss; 13133 } 13134 13135 tcp->tcp_swl1 = seg_seq; 13136 tcp->tcp_swl2 = seg_ack; 13137 13138 new_swnd = BE16_TO_U16(tcph->th_win); 13139 tcp->tcp_swnd = new_swnd; 13140 if (new_swnd > tcp->tcp_max_swnd) 13141 tcp->tcp_max_swnd = new_swnd; 13142 13143 /* 13144 * Always send the three-way handshake ack immediately 13145 * in order to make the connection complete as soon as 13146 * possible on the accepting host. 13147 */ 13148 flags |= TH_ACK_NEEDED; 13149 13150 /* 13151 * Special case for loopback. At this point we have 13152 * received SYN-ACK from the remote endpoint. In 13153 * order to ensure that both endpoints reach the 13154 * fused state prior to any data exchange, the final 13155 * ACK needs to be sent before we indicate T_CONN_CON 13156 * to the module upstream. 13157 */ 13158 if (tcp->tcp_loopback) { 13159 mblk_t *ack_mp; 13160 13161 ASSERT(!tcp->tcp_unfusable); 13162 ASSERT(mp1 != NULL); 13163 /* 13164 * For loopback, we always get a pure SYN-ACK 13165 * and only need to send back the final ACK 13166 * with no data (this is because the other 13167 * tcp is ours and we don't do T/TCP). This 13168 * final ACK triggers the passive side to 13169 * perform fusion in ESTABLISHED state. 13170 */ 13171 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 13172 if (tcp->tcp_ack_tid != 0) { 13173 (void) TCP_TIMER_CANCEL(tcp, 13174 tcp->tcp_ack_tid); 13175 tcp->tcp_ack_tid = 0; 13176 } 13177 TCP_RECORD_TRACE(tcp, ack_mp, 13178 TCP_TRACE_SEND_PKT); 13179 tcp_send_data(tcp, tcp->tcp_wq, ack_mp); 13180 BUMP_LOCAL(tcp->tcp_obsegs); 13181 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 13182 13183 /* Send up T_CONN_CON */ 13184 putnext(tcp->tcp_rq, mp1); 13185 13186 freemsg(mp); 13187 return; 13188 } 13189 /* 13190 * Forget fusion; we need to handle more 13191 * complex cases below. Send the deferred 13192 * T_CONN_CON message upstream and proceed 13193 * as usual. Mark this tcp as not capable 13194 * of fusion. 13195 */ 13196 TCP_STAT(tcps, tcp_fusion_unfusable); 13197 tcp->tcp_unfusable = B_TRUE; 13198 putnext(tcp->tcp_rq, mp1); 13199 } 13200 13201 /* 13202 * Check to see if there is data to be sent. If 13203 * yes, set the transmit flag. Then check to see 13204 * if received data processing needs to be done. 13205 * If not, go straight to xmit_check. This short 13206 * cut is OK as we don't support T/TCP. 
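             * (T/TCP, i.e. RFC 1644, would have allowed data to
             * ride on the SYN itself; since we never negotiate it,
             * a SYN-ACK carrying nothing but the handshake is the
             * normal case here.)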
13207              */
13208             if (tcp->tcp_unsent)
13209                 flags |= TH_XMIT_NEEDED;
13210 
13211             if (seg_len == 0 && !(flags & TH_URG)) {
13212                 freemsg(mp);
13213                 goto xmit_check;
13214             }
13215 
13216             flags &= ~TH_SYN;
13217             seg_seq++;
13218             break;
13219         }
13220         tcp->tcp_state = TCPS_SYN_RCVD;
13221         mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
13222             NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
13223         if (mp1) {
13224             DB_CPID(mp1) = tcp->tcp_cpid;
13225             TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT);
13226             tcp_send_data(tcp, tcp->tcp_wq, mp1);
13227             TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
13228         }
13229         freemsg(mp);
13230         return;
13231     case TCPS_SYN_RCVD:
13232         if (flags & TH_ACK) {
13233             /*
13234              * In this state, a SYN|ACK packet is either bogus
13235              * because the other side must be ACKing our SYN which
13236              * indicates it has seen the ACK for their SYN and
13237              * shouldn't retransmit it or we're crossing SYNs
13238              * on active open.
13239              */
13240             if ((flags & TH_SYN) && !tcp->tcp_active_open) {
13241                 freemsg(mp);
13242                 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
13243                     tcp, seg_ack, 0, TH_RST);
13244                 return;
13245             }
13246             /*
13247              * NOTE: RFC 793 pg. 72 says this should be
13248              * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
13249              * but that would mean we have an ack that ignored
13250              * our SYN.
13251              */
13252             if (SEQ_LEQ(seg_ack, tcp->tcp_suna) ||
13253                 SEQ_GT(seg_ack, tcp->tcp_snxt)) {
13254                 freemsg(mp);
13255                 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
13256                     tcp, seg_ack, 0, TH_RST);
13257                 return;
13258             }
13259         }
13260         break;
13261     case TCPS_LISTEN:
13262         /*
13263          * Only a TLI listener can come through this path when an
13264          * acceptor is going back to being a listener and a packet
13265          * for the acceptor hits the classifier.  For a socket
13266          * listener, this can never happen because a listener
13267          * can never accept a connection on itself and hence a
13268          * socket acceptor cannot go back to being a listener.
13269          */
13270         ASSERT(!TCP_IS_SOCKET(tcp));
13271         /*FALLTHRU*/
13272     case TCPS_CLOSED:
13273     case TCPS_BOUND: {
13274         conn_t *new_connp;
13275         ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
13276 
13277         new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst);
13278         if (new_connp != NULL) {
13279             tcp_reinput(new_connp, mp, connp->conn_sqp);
13280             return;
13281         }
13282         /* We failed to classify.  For now just drop the packet */
13283         freemsg(mp);
13284         return;
13285     }
13286     case TCPS_IDLE:
13287         /*
13288          * Handle the case where tcp_clean_death() has happened
13289          * on a connection (application hasn't closed yet) but a packet
13290          * was already queued on the squeue before tcp_clean_death()
13291          * was processed.  Calling tcp_clean_death() twice on the same
13292          * connection can result in weird behaviour.
13293          */
13294         freemsg(mp);
13295         return;
13296     default:
13297         break;
13298     }
13299 
13300     /*
13301      * Already on the correct queue/perimeter.
13302      * If this is a detached connection and not an eager
13303      * connection hanging off a listener then new data
13304      * (past the FIN) will cause a reset.
13305      * We do a special check here where it
13306      * is out of the main line, rather than check
13307      * if we are detached every time we see new
13308      * data down below.
13309      */
13310     if (TCP_IS_DETACHED_NONEAGER(tcp) &&
13311         (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) {
13312         BUMP_MIB(&tcps->tcps_mib, tcpInClosed);
13313         TCP_RECORD_TRACE(tcp,
13314             mp, TCP_TRACE_RECV_PKT);
13315 
13316         freemsg(mp);
13317         /*
13318          * This could be an SSL closure alert.  We're detached so just
13319          * acknowledge it this last time.
13320          */
13321         if (tcp->tcp_kssl_ctx != NULL) {
13322             kssl_release_ctx(tcp->tcp_kssl_ctx);
13323             tcp->tcp_kssl_ctx = NULL;
13324 
13325             tcp->tcp_rnxt += seg_len;
13326             U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
13327             flags |= TH_ACK_NEEDED;
13328             goto ack_check;
13329         }
13330 
13331         tcp_xmit_ctl("new data when detached", tcp,
13332             tcp->tcp_snxt, 0, TH_RST);
13333         (void) tcp_clean_death(tcp, EPROTO, 12);
13334         return;
13335     }
13336 
13337     mp->b_rptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph);
13338     urp = BE16_TO_U16(tcph->th_urp) - TCP_OLD_URP_INTERPRETATION;
13339     new_swnd = BE16_TO_U16(tcph->th_win) <<
13340         ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws);
13341 
13342     if (tcp->tcp_snd_ts_ok) {
13343         if (!tcp_paws_check(tcp, tcph, &tcpopt)) {
13344             /*
13345              * This segment is not acceptable.
13346              * Drop it and send back an ACK.
13347              */
13348             freemsg(mp);
13349             flags |= TH_ACK_NEEDED;
13350             goto ack_check;
13351         }
13352     } else if (tcp->tcp_snd_sack_ok) {
13353         ASSERT(tcp->tcp_sack_info != NULL);
13354         tcpopt.tcp = tcp;
13355         /*
13356          * SACK info is already updated in tcp_parse_options.  Ignore
13357          * all other TCP options...
13358          */
13359         (void) tcp_parse_options(tcph, &tcpopt);
13360     }
13361 try_again:;
13362     mss = tcp->tcp_mss;
13363     gap = seg_seq - tcp->tcp_rnxt;
13364     rgap = tcp->tcp_rwnd - (gap + seg_len);
13365     /*
13366      * gap is the amount of sequence space between what we expect to see
13367      * and what we got for seg_seq.  A positive value for gap means
13368      * something got lost.  A negative value means we got some old stuff.
13369      */
13370     if (gap < 0) {
13371         /* Old stuff present.  Is the SYN in there? */
13372         if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) &&
13373             (seg_len != 0)) {
13374             flags &= ~TH_SYN;
13375             seg_seq++;
13376             urp--;
13377             /* Recompute the gaps after noting the SYN. */
13378             goto try_again;
13379         }
13380         BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
13381         UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
13382             (seg_len > -gap ? -gap : seg_len));
13383         /* Remove the old stuff from seg_len. */
13384         seg_len += gap;
13385         /*
13386          * Anything left?
13387          * Make sure to check for unack'd FIN when rest of data
13388          * has been previously ack'd.
13389          */
13390         if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
13391             /*
13392              * Resets are only valid if they lie within our offered
13393              * window.  If the RST bit is set, we just ignore this
13394              * segment.
13395              */
13396             if (flags & TH_RST) {
13397                 freemsg(mp);
13398                 return;
13399             }
13400 
13401             /*
13402              * The arrival of dup data packets indicates that we
13403              * may have postponed an ack for too long, or the other
13404              * side's RTT estimate is out of shape.  Start acking
13405              * more often.
13406              */
13407             if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) &&
13408                 tcp->tcp_rack_cnt >= 1 &&
13409                 tcp->tcp_rack_abs_max > 2) {
13410                 tcp->tcp_rack_abs_max--;
13411             }
13412             tcp->tcp_rack_cur_max = 1;
13413 
13414             /*
13415              * This segment is "unacceptable".  None of its
13416              * sequence space lies within our advertised window.
13417              *
13418              * Adjust seg_len to the original value for tracing.
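             *
             * (Worked example: with tcp_rnxt = 1000 and an
             * arriving segment of seg_seq = 900, seg_len = 50,
             * gap is -100, so seg_len collapses to -50 above
             * and we land here with nothing new to deliver.)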
13419 */ 13420 seg_len -= gap; 13421 if (tcp->tcp_debug) { 13422 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13423 "tcp_rput: unacceptable, gap %d, rgap %d, " 13424 "flags 0x%x, seg_seq %u, seg_ack %u, " 13425 "seg_len %d, rnxt %u, snxt %u, %s", 13426 gap, rgap, flags, seg_seq, seg_ack, 13427 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 13428 tcp_display(tcp, NULL, 13429 DISP_ADDR_AND_PORT)); 13430 } 13431 13432 /* 13433 * Arrange to send an ACK in response to the 13434 * unacceptable segment per RFC 793 page 69. There 13435 * is only one small difference between ours and the 13436 * acceptability test in the RFC - we accept ACK-only 13437 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 13438 * will be generated. 13439 * 13440 * Note that we have to ACK an ACK-only packet at least 13441 * for stacks that send 0-length keep-alives with 13442 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 13443 * section 4.2.3.6. As long as we don't ever generate 13444 * an unacceptable packet in response to an incoming 13445 * packet that is unacceptable, it should not cause 13446 * "ACK wars". 13447 */ 13448 flags |= TH_ACK_NEEDED; 13449 13450 /* 13451 * Continue processing this segment in order to use the 13452 * ACK information it contains, but skip all other 13453 * sequence-number processing. Processing the ACK 13454 * information is necessary in order to 13455 * re-synchronize connections that may have lost 13456 * synchronization. 13457 * 13458 * We clear seg_len and flag fields related to 13459 * sequence number processing as they are not 13460 * to be trusted for an unacceptable segment. 13461 */ 13462 seg_len = 0; 13463 flags &= ~(TH_SYN | TH_FIN | TH_URG); 13464 goto process_ack; 13465 } 13466 13467 /* Fix seg_seq, and chew the gap off the front. */ 13468 seg_seq = tcp->tcp_rnxt; 13469 urp += gap; 13470 do { 13471 mblk_t *mp2; 13472 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13473 (uintptr_t)UINT_MAX); 13474 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 13475 if (gap > 0) { 13476 mp->b_rptr = mp->b_wptr - gap; 13477 break; 13478 } 13479 mp2 = mp; 13480 mp = mp->b_cont; 13481 freeb(mp2); 13482 } while (gap < 0); 13483 /* 13484 * If the urgent data has already been acknowledged, we 13485 * should ignore TH_URG below 13486 */ 13487 if (urp < 0) 13488 flags &= ~TH_URG; 13489 } 13490 /* 13491 * rgap is the amount of stuff received out of window. A negative 13492 * value is the amount out of window. 13493 */ 13494 if (rgap < 0) { 13495 mblk_t *mp2; 13496 13497 if (tcp->tcp_rwnd == 0) { 13498 BUMP_MIB(&tcps->tcps_mib, tcpInWinProbe); 13499 } else { 13500 BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs); 13501 UPDATE_MIB(&tcps->tcps_mib, 13502 tcpInDataPastWinBytes, -rgap); 13503 } 13504 13505 /* 13506 * seg_len does not include the FIN, so if more than 13507 * just the FIN is out of window, we act like we don't 13508 * see it. (If just the FIN is out of window, rgap 13509 * will be zero and we will go ahead and acknowledge 13510 * the FIN.) 13511 */ 13512 flags &= ~TH_FIN; 13513 13514 /* Fix seg_len and make sure there is something left. */ 13515 seg_len += rgap; 13516 if (seg_len <= 0) { 13517 /* 13518 * Resets are only valid if they lie within our offered 13519 * window. If the RST bit is set, we just ignore this 13520 * segment. 13521 */ 13522 if (flags & TH_RST) { 13523 freemsg(mp); 13524 return; 13525 } 13526 13527 /* Per RFC 793, we need to send back an ACK. */ 13528 flags |= TH_ACK_NEEDED; 13529 13530 /* 13531 * Send SIGURG as soon as possible i.e. 
even 13532 * if the TH_URG was delivered in a window probe 13533 * packet (which will be unacceptable). 13534 * 13535 * We generate a signal if none has been generated 13536 * for this connection or if this is a new urgent 13537 * byte. Also send a zero-length "unmarked" message 13538 * to inform SIOCATMARK that this is not the mark. 13539 * 13540 * tcp_urp_last_valid is cleared when the T_exdata_ind 13541 * is sent up. This plus the check for old data 13542 * (gap >= 0) handles the wraparound of the sequence 13543 * number space without having to always track the 13544 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 13545 * this max in its rcv_up variable). 13546 * 13547 * This prevents duplicate SIGURGS due to a "late" 13548 * zero-window probe when the T_EXDATA_IND has already 13549 * been sent up. 13550 */ 13551 if ((flags & TH_URG) && 13552 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 13553 tcp->tcp_urp_last))) { 13554 mp1 = allocb(0, BPRI_MED); 13555 if (mp1 == NULL) { 13556 freemsg(mp); 13557 return; 13558 } 13559 if (!TCP_IS_DETACHED(tcp) && 13560 !putnextctl1(tcp->tcp_rq, M_PCSIG, 13561 SIGURG)) { 13562 /* Try again on the rexmit. */ 13563 freemsg(mp1); 13564 freemsg(mp); 13565 return; 13566 } 13567 /* 13568 * If the next byte would be the mark 13569 * then mark with MARKNEXT else mark 13570 * with NOTMARKNEXT. 13571 */ 13572 if (gap == 0 && urp == 0) 13573 mp1->b_flag |= MSGMARKNEXT; 13574 else 13575 mp1->b_flag |= MSGNOTMARKNEXT; 13576 freemsg(tcp->tcp_urp_mark_mp); 13577 tcp->tcp_urp_mark_mp = mp1; 13578 flags |= TH_SEND_URP_MARK; 13579 tcp->tcp_urp_last_valid = B_TRUE; 13580 tcp->tcp_urp_last = urp + seg_seq; 13581 } 13582 /* 13583 * If this is a zero window probe, continue to 13584 * process the ACK part. But we need to set seg_len 13585 * to 0 to avoid data processing. Otherwise just 13586 * drop the segment and send back an ACK. 13587 */ 13588 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 13589 flags &= ~(TH_SYN | TH_URG); 13590 seg_len = 0; 13591 goto process_ack; 13592 } else { 13593 freemsg(mp); 13594 goto ack_check; 13595 } 13596 } 13597 /* Pitch out of window stuff off the end. */ 13598 rgap = seg_len; 13599 mp2 = mp; 13600 do { 13601 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 13602 (uintptr_t)INT_MAX); 13603 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 13604 if (rgap < 0) { 13605 mp2->b_wptr += rgap; 13606 if ((mp1 = mp2->b_cont) != NULL) { 13607 mp2->b_cont = NULL; 13608 freemsg(mp1); 13609 } 13610 break; 13611 } 13612 } while ((mp2 = mp2->b_cont) != NULL); 13613 } 13614 ok:; 13615 /* 13616 * TCP should check ECN info for segments inside the window only. 13617 * Therefore the check should be done here. 13618 */ 13619 if (tcp->tcp_ecn_ok) { 13620 if (flags & TH_CWR) { 13621 tcp->tcp_ecn_echo_on = B_FALSE; 13622 } 13623 /* 13624 * Note that both ECN_CE and CWR can be set in the 13625 * same segment. In this case, we once again turn 13626 * on ECN_ECHO. 13627 */ 13628 if (tcp->tcp_ipversion == IPV4_VERSION) { 13629 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 13630 13631 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 13632 tcp->tcp_ecn_echo_on = B_TRUE; 13633 } 13634 } else { 13635 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 13636 13637 if ((vcf & htonl(IPH_ECN_CE << 20)) == 13638 htonl(IPH_ECN_CE << 20)) { 13639 tcp->tcp_ecn_echo_on = B_TRUE; 13640 } 13641 } 13642 } 13643 13644 /* 13645 * Check whether we can update tcp_ts_recent. This test is 13646 * NOT the one in RFC 1323 3.4. 
It is from Braden, 1993, "TCP
13647      * Extensions for High Performance: An Update", Internet Draft.
13648      */
13649     if (tcp->tcp_snd_ts_ok &&
13650         TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
13651         SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
13652         tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
13653         tcp->tcp_last_rcv_lbolt = lbolt64;
13654     }
13655 
13656     if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
13657         /*
13658          * FIN in an out of order segment.  We record this in
13659          * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq.
13660          * Clear the FIN so that any check on FIN flag will fail.
13661          * Remember that FIN also counts in the sequence number
13662          * space.  So we need to ack out of order FIN only segments.
13663          */
13664         if (flags & TH_FIN) {
13665             tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID;
13666             tcp->tcp_ofo_fin_seq = seg_seq + seg_len;
13667             flags &= ~TH_FIN;
13668             flags |= TH_ACK_NEEDED;
13669         }
13670         if (seg_len > 0) {
13671             /* Fill in the SACK blk list. */
13672             if (tcp->tcp_snd_sack_ok) {
13673                 ASSERT(tcp->tcp_sack_info != NULL);
13674                 tcp_sack_insert(tcp->tcp_sack_list,
13675                     seg_seq, seg_seq + seg_len,
13676                     &(tcp->tcp_num_sack_blk));
13677             }
13678 
13679             /*
13680              * Attempt reassembly and see if we have something
13681              * ready to go.
13682              */
13683             mp = tcp_reass(tcp, mp, seg_seq);
13684             /* Always ack out of order packets */
13685             flags |= TH_ACK_NEEDED | TH_PUSH;
13686             if (mp) {
13687                 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
13688                     (uintptr_t)INT_MAX);
13689                 seg_len = mp->b_cont ? msgdsize(mp) :
13690                     (int)(mp->b_wptr - mp->b_rptr);
13691                 seg_seq = tcp->tcp_rnxt;
13692                 /*
13693                  * If a gap is filled and the seq num and len
13694                  * of the gap match that of a previously
13695                  * received FIN, put the FIN flag back in.
13696                  */
13697                 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
13698                     seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
13699                     flags |= TH_FIN;
13700                     tcp->tcp_valid_bits &=
13701                         ~TCP_OFO_FIN_VALID;
13702                 }
13703             } else {
13704                 /*
13705                  * Keep going even with NULL mp.
13706                  * There may be a useful ACK or something else
13707                  * we don't want to miss.
13708                  *
13709                  * But TCP should not perform fast retransmit
13710                  * because of the ack number.  TCP uses
13711                  * seg_len == 0 to determine if it is a pure
13712                  * ACK.  And this is not a pure ACK.
13713                  */
13714                 seg_len = 0;
13715                 ofo_seg = B_TRUE;
13716             }
13717         }
13718     } else if (seg_len > 0) {
13719         BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
13720         UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len);
13721         /*
13722          * If an out of order FIN was received before, and the seq
13723          * num and len of the new segment match that of the FIN,
13724          * put the FIN flag back in.
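         *
         * (The FIN occupies one unit of sequence space, so the
         * seg_seq + seg_len == tcp_ofo_fin_seq comparison lines up
         * exactly once every byte up to the recorded FIN has been
         * delivered in order.)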
13725 */ 13726 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13727 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13728 flags |= TH_FIN; 13729 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 13730 } 13731 } 13732 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 13733 if (flags & TH_RST) { 13734 freemsg(mp); 13735 switch (tcp->tcp_state) { 13736 case TCPS_SYN_RCVD: 13737 (void) tcp_clean_death(tcp, ECONNREFUSED, 14); 13738 break; 13739 case TCPS_ESTABLISHED: 13740 case TCPS_FIN_WAIT_1: 13741 case TCPS_FIN_WAIT_2: 13742 case TCPS_CLOSE_WAIT: 13743 (void) tcp_clean_death(tcp, ECONNRESET, 15); 13744 break; 13745 case TCPS_CLOSING: 13746 case TCPS_LAST_ACK: 13747 (void) tcp_clean_death(tcp, 0, 16); 13748 break; 13749 default: 13750 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13751 (void) tcp_clean_death(tcp, ENXIO, 17); 13752 break; 13753 } 13754 return; 13755 } 13756 if (flags & TH_SYN) { 13757 /* 13758 * See RFC 793, Page 71 13759 * 13760 * The seq number must be in the window as it should 13761 * be "fixed" above. If it is outside window, it should 13762 * be already rejected. Note that we allow seg_seq to be 13763 * rnxt + rwnd because we want to accept 0 window probe. 13764 */ 13765 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 13766 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 13767 freemsg(mp); 13768 /* 13769 * If the ACK flag is not set, just use our snxt as the 13770 * seq number of the RST segment. 13771 */ 13772 if (!(flags & TH_ACK)) { 13773 seg_ack = tcp->tcp_snxt; 13774 } 13775 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 13776 TH_RST|TH_ACK); 13777 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13778 (void) tcp_clean_death(tcp, ECONNRESET, 18); 13779 return; 13780 } 13781 /* 13782 * urp could be -1 when the urp field in the packet is 0 13783 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 13784 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 13785 */ 13786 if (flags & TH_URG && urp >= 0) { 13787 if (!tcp->tcp_urp_last_valid || 13788 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 13789 /* 13790 * If we haven't generated the signal yet for this 13791 * urgent pointer value, do it now. Also, send up a 13792 * zero-length M_DATA indicating whether or not this is 13793 * the mark. The latter is not needed when a 13794 * T_EXDATA_IND is sent up. However, if there are 13795 * allocation failures this code relies on the sender 13796 * retransmitting and the socket code for determining 13797 * the mark should not block waiting for the peer to 13798 * transmit. Thus, for simplicity we always send up the 13799 * mark indication. 13800 */ 13801 mp1 = allocb(0, BPRI_MED); 13802 if (mp1 == NULL) { 13803 freemsg(mp); 13804 return; 13805 } 13806 if (!TCP_IS_DETACHED(tcp) && 13807 !putnextctl1(tcp->tcp_rq, M_PCSIG, SIGURG)) { 13808 /* Try again on the rexmit. */ 13809 freemsg(mp1); 13810 freemsg(mp); 13811 return; 13812 } 13813 /* 13814 * Mark with NOTMARKNEXT for now. 13815 * The code below will change this to MARKNEXT 13816 * if we are at the mark. 13817 * 13818 * If there are allocation failures (e.g. in dupmsg 13819 * below) the next time tcp_rput_data sees the urgent 13820 * segment it will send up the MSG*MARKNEXT message. 
13821 */ 13822 mp1->b_flag |= MSGNOTMARKNEXT; 13823 freemsg(tcp->tcp_urp_mark_mp); 13824 tcp->tcp_urp_mark_mp = mp1; 13825 flags |= TH_SEND_URP_MARK; 13826 #ifdef DEBUG 13827 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13828 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 13829 "last %x, %s", 13830 seg_seq, urp, tcp->tcp_urp_last, 13831 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13832 #endif /* DEBUG */ 13833 tcp->tcp_urp_last_valid = B_TRUE; 13834 tcp->tcp_urp_last = urp + seg_seq; 13835 } else if (tcp->tcp_urp_mark_mp != NULL) { 13836 /* 13837 * An allocation failure prevented the previous 13838 * tcp_rput_data from sending up the allocated 13839 * MSG*MARKNEXT message - send it up this time 13840 * around. 13841 */ 13842 flags |= TH_SEND_URP_MARK; 13843 } 13844 13845 /* 13846 * If the urgent byte is in this segment, make sure that it is 13847 * all by itself. This makes it much easier to deal with the 13848 * possibility of an allocation failure on the T_exdata_ind. 13849 * Note that seg_len is the number of bytes in the segment, and 13850 * urp is the offset into the segment of the urgent byte. 13851 * urp < seg_len means that the urgent byte is in this segment. 13852 */ 13853 if (urp < seg_len) { 13854 if (seg_len != 1) { 13855 uint32_t tmp_rnxt; 13856 /* 13857 * Break it up and feed it back in. 13858 * Re-attach the IP header. 13859 */ 13860 mp->b_rptr = iphdr; 13861 if (urp > 0) { 13862 /* 13863 * There is stuff before the urgent 13864 * byte. 13865 */ 13866 mp1 = dupmsg(mp); 13867 if (!mp1) { 13868 /* 13869 * Trim from urgent byte on. 13870 * The rest will come back. 13871 */ 13872 (void) adjmsg(mp, 13873 urp - seg_len); 13874 tcp_rput_data(connp, 13875 mp, NULL); 13876 return; 13877 } 13878 (void) adjmsg(mp1, urp - seg_len); 13879 /* Feed this piece back in. */ 13880 tmp_rnxt = tcp->tcp_rnxt; 13881 tcp_rput_data(connp, mp1, NULL); 13882 /* 13883 * If the data passed back in was not 13884 * processed (ie: bad ACK) sending 13885 * the remainder back in will cause a 13886 * loop. In this case, drop the 13887 * packet and let the sender try 13888 * sending a good packet. 13889 */ 13890 if (tmp_rnxt == tcp->tcp_rnxt) { 13891 freemsg(mp); 13892 return; 13893 } 13894 } 13895 if (urp != seg_len - 1) { 13896 uint32_t tmp_rnxt; 13897 /* 13898 * There is stuff after the urgent 13899 * byte. 13900 */ 13901 mp1 = dupmsg(mp); 13902 if (!mp1) { 13903 /* 13904 * Trim everything beyond the 13905 * urgent byte. The rest will 13906 * come back. 13907 */ 13908 (void) adjmsg(mp, 13909 urp + 1 - seg_len); 13910 tcp_rput_data(connp, 13911 mp, NULL); 13912 return; 13913 } 13914 (void) adjmsg(mp1, urp + 1 - seg_len); 13915 tmp_rnxt = tcp->tcp_rnxt; 13916 tcp_rput_data(connp, mp1, NULL); 13917 /* 13918 * If the data passed back in was not 13919 * processed (ie: bad ACK) sending 13920 * the remainder back in will cause a 13921 * loop. In this case, drop the 13922 * packet and let the sender try 13923 * sending a good packet. 13924 */ 13925 if (tmp_rnxt == tcp->tcp_rnxt) { 13926 freemsg(mp); 13927 return; 13928 } 13929 } 13930 tcp_rput_data(connp, mp, NULL); 13931 return; 13932 } 13933 /* 13934 * This segment contains only the urgent byte. We 13935 * have to allocate the T_exdata_ind, if we can. 13936 */ 13937 if (!tcp->tcp_urp_mp) { 13938 struct T_exdata_ind *tei; 13939 mp1 = allocb(sizeof (struct T_exdata_ind), 13940 BPRI_MED); 13941 if (!mp1) { 13942 /* 13943 * Sigh... It'll be back. 13944 * Generate any MSG*MARK message now. 
13945 */ 13946 freemsg(mp); 13947 seg_len = 0; 13948 if (flags & TH_SEND_URP_MARK) { 13949 13950 13951 ASSERT(tcp->tcp_urp_mark_mp); 13952 tcp->tcp_urp_mark_mp->b_flag &= 13953 ~MSGNOTMARKNEXT; 13954 tcp->tcp_urp_mark_mp->b_flag |= 13955 MSGMARKNEXT; 13956 } 13957 goto ack_check; 13958 } 13959 mp1->b_datap->db_type = M_PROTO; 13960 tei = (struct T_exdata_ind *)mp1->b_rptr; 13961 tei->PRIM_type = T_EXDATA_IND; 13962 tei->MORE_flag = 0; 13963 mp1->b_wptr = (uchar_t *)&tei[1]; 13964 tcp->tcp_urp_mp = mp1; 13965 #ifdef DEBUG 13966 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13967 "tcp_rput: allocated exdata_ind %s", 13968 tcp_display(tcp, NULL, 13969 DISP_PORT_ONLY)); 13970 #endif /* DEBUG */ 13971 /* 13972 * There is no need to send a separate MSG*MARK 13973 * message since the T_EXDATA_IND will be sent 13974 * now. 13975 */ 13976 flags &= ~TH_SEND_URP_MARK; 13977 freemsg(tcp->tcp_urp_mark_mp); 13978 tcp->tcp_urp_mark_mp = NULL; 13979 } 13980 /* 13981 * Now we are all set. On the next putnext upstream, 13982 * tcp_urp_mp will be non-NULL and will get prepended 13983 * to what has to be this piece containing the urgent 13984 * byte. If for any reason we abort this segment below, 13985 * if it comes back, we will have this ready, or it 13986 * will get blown off in close. 13987 */ 13988 } else if (urp == seg_len) { 13989 /* 13990 * The urgent byte is the next byte after this sequence 13991 * number. If there is data it is marked with 13992 * MSGMARKNEXT and any tcp_urp_mark_mp is discarded 13993 * since it is not needed. Otherwise, if the code 13994 * above just allocated a zero-length tcp_urp_mark_mp 13995 * message, that message is tagged with MSGMARKNEXT. 13996 * Sending up these MSGMARKNEXT messages makes 13997 * SIOCATMARK work correctly even though 13998 * the T_EXDATA_IND will not be sent up until the 13999 * urgent byte arrives. 
14000              */
14001             if (seg_len != 0) {
14002                 flags |= TH_MARKNEXT_NEEDED;
14003                 freemsg(tcp->tcp_urp_mark_mp);
14004                 tcp->tcp_urp_mark_mp = NULL;
14005                 flags &= ~TH_SEND_URP_MARK;
14006             } else if (tcp->tcp_urp_mark_mp != NULL) {
14007                 flags |= TH_SEND_URP_MARK;
14008                 tcp->tcp_urp_mark_mp->b_flag &=
14009                     ~MSGNOTMARKNEXT;
14010                 tcp->tcp_urp_mark_mp->b_flag |= MSGMARKNEXT;
14011             }
14012 #ifdef DEBUG
14013             (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14014                 "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
14015                 seg_len, flags,
14016                 tcp_display(tcp, NULL, DISP_PORT_ONLY));
14017 #endif /* DEBUG */
14018         } else {
14019             /* Data left until we hit mark */
14020 #ifdef DEBUG
14021             (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14022                 "tcp_rput: URP %d bytes left, %s",
14023                 urp - seg_len, tcp_display(tcp, NULL,
14024                 DISP_PORT_ONLY));
14025 #endif /* DEBUG */
14026         }
14027     }
14028 
14029 process_ack:
14030         if (!(flags & TH_ACK)) {
14031             freemsg(mp);
14032             goto xmit_check;
14033         }
14034     }
14035     bytes_acked = (int)(seg_ack - tcp->tcp_suna);
14036 
14037     if (tcp->tcp_ipversion == IPV6_VERSION && bytes_acked > 0)
14038         tcp->tcp_ip_forward_progress = B_TRUE;
14039     if (tcp->tcp_state == TCPS_SYN_RCVD) {
14040         if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) &&
14041             ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) {
14042             /* 3-way handshake complete - pass up the T_CONN_IND */
14043             tcp_t *listener = tcp->tcp_listener;
14044             mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind;
14045 
14046             tcp->tcp_tconnind_started = B_TRUE;
14047             tcp->tcp_conn.tcp_eager_conn_ind = NULL;
14048             /*
14049              * Being here means the eager is fine, but it can
14050              * get a TH_RST at any point between now and when
14051              * the accept completes, and disappear.  We need to
14052              * ensure that the reference to the eager is valid
14053              * after we get out of the eager's perimeter.  So we
14054              * do an extra refhold.
14055              */
14056             CONN_INC_REF(connp);
14057 
14058             /*
14059              * The listener also exists because of the refhold
14060              * done in tcp_conn_request.  It's possible that it
14061              * might have closed.  We will check that once we
14062              * get inside the listener's context.
14063              */
14064             CONN_INC_REF(listener->tcp_connp);
14065             if (listener->tcp_connp->conn_sqp ==
14066                 connp->conn_sqp) {
14067                 tcp_send_conn_ind(listener->tcp_connp, mp,
14068                     listener->tcp_connp->conn_sqp);
14069                 CONN_DEC_REF(listener->tcp_connp);
14070             } else if (!tcp->tcp_loopback) {
14071                 squeue_fill(listener->tcp_connp->conn_sqp, mp,
14072                     tcp_send_conn_ind,
14073                     listener->tcp_connp, SQTAG_TCP_CONN_IND);
14074             } else {
14075                 squeue_enter(listener->tcp_connp->conn_sqp, mp,
14076                     tcp_send_conn_ind, listener->tcp_connp,
14077                     SQTAG_TCP_CONN_IND);
14078             }
14079         }
14080 
14081         if (tcp->tcp_active_open) {
14082             /*
14083              * We are seeing the final ack in the three-way
14084              * handshake of an actively opened connection,
14085              * so we must send up a T_CONN_CON.
14086              */
14087             if (!tcp_conn_con(tcp, iphdr, tcph, mp, NULL)) {
14088                 freemsg(mp);
14089                 return;
14090             }
14091             /*
14092              * Don't fuse the loopback endpoints for
14093              * simultaneous active opens.
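             * (With crossed SYNs both ends are the active side;
             * neither went through the listener/eager path used
             * by the normal loopback fusion sequence, so mark
             * the endpoint unfusable below.)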
14094              */
14095             if (tcp->tcp_loopback) {
14096                 TCP_STAT(tcps, tcp_fusion_unfusable);
14097                 tcp->tcp_unfusable = B_TRUE;
14098             }
14099         }
14100 
14101         tcp->tcp_suna = tcp->tcp_iss + 1;    /* One for the SYN */
14102         bytes_acked--;
14103         /* SYN was acked - making progress */
14104         if (tcp->tcp_ipversion == IPV6_VERSION)
14105             tcp->tcp_ip_forward_progress = B_TRUE;
14106 
14107         /*
14108          * If SYN was retransmitted, need to reset all
14109          * retransmission info as this segment will be
14110          * treated as a dup ACK.
14111          */
14112         if (tcp->tcp_rexmit) {
14113             tcp->tcp_rexmit = B_FALSE;
14114             tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
14115             tcp->tcp_rexmit_max = tcp->tcp_snxt;
14116             tcp->tcp_snd_burst = tcp->tcp_localnet ?
14117                 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14118             tcp->tcp_ms_we_have_waited = 0;
14119             tcp->tcp_cwnd = mss;
14120         }
14121 
14122         /*
14123          * We set the send window to zero here.
14124          * This is needed if there is data to be
14125          * processed already on the queue.
14126          * Later (at the swnd_update label), the
14127          * "new_swnd > tcp_swnd" condition is satisfied
14128          * and the XMIT_NEEDED flag is set in the current
14129          * (SYN_RCVD) state.  This ensures tcp_wput_data() is
14130          * called if there is already data on the queue in
14131          * this state.
14132          */
14133         tcp->tcp_swnd = 0;
14134 
14135         if (new_swnd > tcp->tcp_max_swnd)
14136             tcp->tcp_max_swnd = new_swnd;
14137         tcp->tcp_swl1 = seg_seq;
14138         tcp->tcp_swl2 = seg_ack;
14139         tcp->tcp_state = TCPS_ESTABLISHED;
14140         tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
14141 
14142         /* Fuse when both sides are in ESTABLISHED state */
14143         if (tcp->tcp_loopback && do_tcp_fusion)
14144             tcp_fuse(tcp, iphdr, tcph);
14145 
14146     }
14147     /* This code follows 4.4BSD-Lite2 mostly. */
14148     if (bytes_acked < 0)
14149         goto est;
14150 
14151     /*
14152      * If TCP is ECN capable and the congestion experience bit is
14153      * set, reduce tcp_cwnd and tcp_ssthresh.  But this should only be
14154      * done once per window (or more loosely, per RTT).
14155      */
14156     if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
14157         tcp->tcp_cwr = B_FALSE;
14158     if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
14159         if (!tcp->tcp_cwr) {
14160             npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
14161             tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
14162             tcp->tcp_cwnd = npkt * mss;
14163             /*
14164              * If the cwnd is 0, use the timer to clock out
14165              * new segments.  This is required by the ECN spec.
14166              */
14167             if (npkt == 0) {
14168                 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14169                 /*
14170                  * This makes sure that when the ACK comes
14171                  * back, we will increase tcp_cwnd by 1 MSS.
14172                  */
14173                 tcp->tcp_cwnd_cnt = 0;
14174             }
14175             tcp->tcp_cwr = B_TRUE;
14176             /*
14177              * This marks the end of the current window of in
14178              * flight data.  That is why we don't use
14179              * tcp_suna + tcp_swnd.  Only data in flight can
14180              * provide ECN info.
14181              */
14182             tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
14183             tcp->tcp_ecn_cwr_sent = B_FALSE;
14184         }
14185     }
14186 
14187     mp1 = tcp->tcp_xmit_head;
14188     if (bytes_acked == 0) {
14189         if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
14190             int dupack_cnt;
14191 
14192             BUMP_MIB(&tcps->tcps_mib, tcpInDupAck);
14193             /*
14194              * Fast retransmit.  When we have seen exactly three
14195              * identical ACKs while we have unacked data
14196              * outstanding we take it as a hint that our peer
14197              * dropped something.
14198              *
14199              * If TCP is retransmitting, don't do fast retransmit.
14200              */
14201             if (mp1 && tcp->tcp_suna != tcp->tcp_snxt &&
14202                 !tcp->tcp_rexmit) {
14203                 /* Do Limited Transmit */
14204                 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
14205                     tcps->tcps_dupack_fast_retransmit) {
14206                     /*
14207                      * RFC 3042
14208                      *
14209                      * What we need to do is temporarily
14210                      * increase tcp_cwnd so that new
14211                      * data can be sent if it is allowed
14212                      * by the receive window (tcp_rwnd).
14213                      * tcp_wput_data() will take care of
14214                      * the rest.
14215                      *
14216                      * If the connection is SACK capable,
14217                      * only do limited xmit when there
14218                      * is SACK info.
14219                      *
14220                      * Note how tcp_cwnd is incremented.
14221                      * The first dup ACK will increase
14222                      * it by 1 MSS.  The second dup ACK
14223                      * will increase it by 2 MSS.  This
14224                      * means that only 1 new segment will
14225                      * be sent for each dup ACK.
14226                      */
14227                     if (tcp->tcp_unsent > 0 &&
14228                         (!tcp->tcp_snd_sack_ok ||
14229                         (tcp->tcp_snd_sack_ok &&
14230                         tcp->tcp_notsack_list != NULL))) {
14231                         tcp->tcp_cwnd += mss <<
14232                             (tcp->tcp_dupack_cnt - 1);
14233                         flags |= TH_LIMIT_XMIT;
14234                     }
14235                 } else if (dupack_cnt ==
14236                     tcps->tcps_dupack_fast_retransmit) {
14237 
14238                     /*
14239                      * If we have reduced tcp_ssthresh
14240                      * because of ECN, do not reduce it again
14241                      * unless it is already one window of data
14242                      * away.  After one window of data, tcp_cwr
14243                      * should then be cleared.  Note that
14244                      * for a non ECN capable connection, tcp_cwr
14245                      * should always be false.
14246                      *
14247                      * Adjust cwnd since the duplicate
14248                      * ack indicates that a packet was
14249                      * dropped (due to congestion.)
14250                      */
14251                     if (!tcp->tcp_cwr) {
14252                         npkt = ((tcp->tcp_snxt -
14253                             tcp->tcp_suna) >> 1) / mss;
14254                         tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
14255                             mss;
14256                         tcp->tcp_cwnd = (npkt +
14257                             tcp->tcp_dupack_cnt) * mss;
14258                     }
14259                     if (tcp->tcp_ecn_ok) {
14260                         tcp->tcp_cwr = B_TRUE;
14261                         tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
14262                         tcp->tcp_ecn_cwr_sent = B_FALSE;
14263                     }
14264 
14265                     /*
14266                      * We do Hoe's algorithm.  Refer to her
14267                      * paper "Improving the Start-up Behavior
14268                      * of a Congestion Control Scheme for TCP",
14269                      * which appeared in SIGCOMM'96.
14270                      *
14271                      * Save the highest seq no we have sent so far.
14272                      * Be careful about the invisible FIN byte.
14273                      */
14274                     if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
14275                         (tcp->tcp_unsent == 0)) {
14276                         tcp->tcp_rexmit_max = tcp->tcp_fss;
14277                     } else {
14278                         tcp->tcp_rexmit_max = tcp->tcp_snxt;
14279                     }
14280 
14281                     /*
14282                      * Do not allow bursty traffic during
14283                      * fast recovery.  Refer to Fall and Floyd's
14284                      * paper "Simulation-based Comparisons of
14285                      * Tahoe, Reno and SACK TCP" (in CCR).
14286                      * This is a best current practice.
14287                      */
14288                     tcp->tcp_snd_burst = TCP_CWND_SS;
14289 
14290                     /*
14291                      * For SACK:
14292                      * Calculate tcp_pipe, which is the
14293                      * estimated number of bytes in the
14294                      * network.
14295                      *
14296                      * tcp_fack is the highest sack'ed seq num
14297                      * TCP has received.
14298                      *
14299                      * tcp_pipe is explained in the above quoted
14300                      * Fall and Floyd's paper.  tcp_fack is
14301                      * explained in Mathis and Mahdavi's
14302                      * "Forward Acknowledgment: Refining TCP
14303                      * Congestion Control" in SIGCOMM '96.
14304                      */
14305                     if (tcp->tcp_snd_sack_ok) {
14306                         ASSERT(tcp->tcp_sack_info != NULL);
14307                         if (tcp->tcp_notsack_list != NULL) {
14308                             tcp->tcp_pipe = tcp->tcp_snxt -
14309                                 tcp->tcp_fack;
14310                             tcp->tcp_sack_snxt = seg_ack;
14311                             flags |= TH_NEED_SACK_REXMIT;
14312                         } else {
14313                             /*
14314                              * Always initialize tcp_pipe
14315                              * even though we don't have
14316                              * any SACK info.
If later 14317 * we get SACK info and 14318 * tcp_pipe is not initialized, 14319 * funny things will happen. 14320 */ 14321 tcp->tcp_pipe = 14322 tcp->tcp_cwnd_ssthresh; 14323 } 14324 } else { 14325 flags |= TH_REXMIT_NEEDED; 14326 } /* tcp_snd_sack_ok */ 14327 14328 } else { 14329 /* 14330 * Here we perform congestion 14331 * avoidance, but NOT slow start. 14332 * This is known as the Fast 14333 * Recovery Algorithm. 14334 */ 14335 if (tcp->tcp_snd_sack_ok && 14336 tcp->tcp_notsack_list != NULL) { 14337 flags |= TH_NEED_SACK_REXMIT; 14338 tcp->tcp_pipe -= mss; 14339 if (tcp->tcp_pipe < 0) 14340 tcp->tcp_pipe = 0; 14341 } else { 14342 /* 14343 * We know that one more packet has 14344 * left the pipe thus we can update 14345 * cwnd. 14346 */ 14347 cwnd = tcp->tcp_cwnd + mss; 14348 if (cwnd > tcp->tcp_cwnd_max) 14349 cwnd = tcp->tcp_cwnd_max; 14350 tcp->tcp_cwnd = cwnd; 14351 if (tcp->tcp_unsent > 0) 14352 flags |= TH_XMIT_NEEDED; 14353 } 14354 } 14355 } 14356 } else if (tcp->tcp_zero_win_probe) { 14357 /* 14358 * If the window has opened, need to arrange 14359 * to send additional data. 14360 */ 14361 if (new_swnd != 0) { 14362 /* tcp_suna != tcp_snxt */ 14363 /* Packet contains a window update */ 14364 BUMP_MIB(&tcps->tcps_mib, tcpInWinUpdate); 14365 tcp->tcp_zero_win_probe = 0; 14366 tcp->tcp_timer_backoff = 0; 14367 tcp->tcp_ms_we_have_waited = 0; 14368 14369 /* 14370 * Transmit starting with tcp_suna since 14371 * the one byte probe is not ack'ed. 14372 * If TCP has sent more than one identical 14373 * probe, tcp_rexmit will be set. That means 14374 * tcp_ss_rexmit() will send out the one 14375 * byte along with new data. Otherwise, 14376 * fake the retransmission. 14377 */ 14378 flags |= TH_XMIT_NEEDED; 14379 if (!tcp->tcp_rexmit) { 14380 tcp->tcp_rexmit = B_TRUE; 14381 tcp->tcp_dupack_cnt = 0; 14382 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 14383 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 14384 } 14385 } 14386 } 14387 goto swnd_update; 14388 } 14389 14390 /* 14391 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 14392 * If the ACK value acks something that we have not yet sent, it might 14393 * be an old duplicate segment. Send an ACK to re-synchronize the 14394 * other side. 14395 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 14396 * state is handled above, so we can always just drop the segment and 14397 * send an ACK here. 14398 * 14399 * Should we send ACKs in response to ACK only segments? 14400 */ 14401 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 14402 BUMP_MIB(&tcps->tcps_mib, tcpInAckUnsent); 14403 /* drop the received segment */ 14404 freemsg(mp); 14405 14406 /* 14407 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 14408 * greater than 0, check if the number of such 14409 * bogus ACks is greater than that count. If yes, 14410 * don't send back any ACK. This prevents TCP from 14411 * getting into an ACK storm if somehow an attacker 14412 * successfully spoofs an acceptable segment to our 14413 * peer. 
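     * (Two stacks each ACKing the other's unacceptable ACK would
     * ping-pong forever; giving up after tcp_drop_ack_unsent_cnt
     * bogus ACKs in a row breaks that cycle.)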
14414 */
14415 if (tcp_drop_ack_unsent_cnt > 0 &&
14416 ++tcp->tcp_in_ack_unsent > tcp_drop_ack_unsent_cnt) {
14417 TCP_STAT(tcps, tcp_in_ack_unsent_drop);
14418 return;
14419 }
14420 mp = tcp_ack_mp(tcp);
14421 if (mp != NULL) {
14422 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
14423 BUMP_LOCAL(tcp->tcp_obsegs);
14424 BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
14425 tcp_send_data(tcp, tcp->tcp_wq, mp);
14426 }
14427 return;
14428 }
14429
14430 /*
14431 * TCP gets a new ACK, update the notsack'ed list to delete those
14432 * blocks that are covered by this ACK.
14433 */
14434 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
14435 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
14436 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
14437 }
14438
14439 /*
14440 * If we got an ACK after fast retransmit, check to see
14441 * if it is a partial ACK. If it is not and the congestion
14442 * window was inflated to account for the other side's
14443 * cached packets, retract it. If it is, do Hoe's algorithm.
14444 */
14445 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
14446 ASSERT(tcp->tcp_rexmit == B_FALSE);
14447 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
14448 tcp->tcp_dupack_cnt = 0;
14449 /*
14450 * Restore the orig tcp_cwnd_ssthresh after
14451 * fast retransmit phase.
14452 */
14453 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
14454 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
14455 }
14456 tcp->tcp_rexmit_max = seg_ack;
14457 tcp->tcp_cwnd_cnt = 0;
14458 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14459 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14460
14461 /*
14462 * Remove all notsack info to avoid confusion with
14463 * the next fast retransmit/recovery phase.
14464 */
14465 if (tcp->tcp_snd_sack_ok &&
14466 tcp->tcp_notsack_list != NULL) {
14467 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list);
14468 }
14469 } else {
14470 if (tcp->tcp_snd_sack_ok &&
14471 tcp->tcp_notsack_list != NULL) {
14472 flags |= TH_NEED_SACK_REXMIT;
14473 tcp->tcp_pipe -= mss;
14474 if (tcp->tcp_pipe < 0)
14475 tcp->tcp_pipe = 0;
14476 } else {
14477 /*
14478 * Hoe's algorithm:
14479 *
14480 * Retransmit the unack'ed segment and
14481 * restart fast recovery. Note that we
14482 * need to scale back tcp_cwnd to the
14483 * original value when we started fast
14484 * recovery. This is to prevent overly
14485 * aggressive behaviour in sending new
14486 * segments.
14487 */
14488 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
14489 tcps->tcps_dupack_fast_retransmit * mss;
14490 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
14491 flags |= TH_REXMIT_NEEDED;
14492 }
14493 }
14494 } else {
14495 tcp->tcp_dupack_cnt = 0;
14496 if (tcp->tcp_rexmit) {
14497 /*
14498 * TCP is retransmitting. If the ACK acks all
14499 * outstanding data, update tcp_rexmit_max and
14500 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
14501 * to the correct value.
14502 *
14503 * Note that SEQ_LEQ() is used. This is to avoid
14504 * unnecessary fast retransmit caused by dup ACKs
14505 * received when TCP does slow start retransmission
14506 * after a time out. During this phase, TCP may
14507 * send out segments which are already received.
14508 * This causes dup ACKs to be sent back.
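*
* [Editor's aside -- a hedged sketch of that rule; the variable
* names here are hypothetical, while SEQ_LEQ()/SEQ_GT() are the
* sequence-space macros this file already uses:]
*
*	// ACKs at or below rexmit_max only advance the
*	// retransmission pointer; they never count as dup ACKs,
*	// so timeout-driven slow start cannot re-enter fast
*	// retransmit.
*	if (SEQ_LEQ(ack, rexmit_max)) {
*		if (SEQ_GT(ack, rexmit_nxt))
*			rexmit_nxt = ack;	// resume after this point
*	} else {
*		rexmitting = B_FALSE;		// past the old send window
*	}
*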
14509 */
14510 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
14511 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
14512 tcp->tcp_rexmit_nxt = seg_ack;
14513 }
14514 if (seg_ack != tcp->tcp_rexmit_max) {
14515 flags |= TH_XMIT_NEEDED;
14516 }
14517 } else {
14518 tcp->tcp_rexmit = B_FALSE;
14519 tcp->tcp_xmit_zc_clean = B_FALSE;
14520 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
14521 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14522 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14523 }
14524 tcp->tcp_ms_we_have_waited = 0;
14525 }
14526 }
14527
14528 BUMP_MIB(&tcps->tcps_mib, tcpInAckSegs);
14529 UPDATE_MIB(&tcps->tcps_mib, tcpInAckBytes, bytes_acked);
14530 tcp->tcp_suna = seg_ack;
14531 if (tcp->tcp_zero_win_probe != 0) {
14532 tcp->tcp_zero_win_probe = 0;
14533 tcp->tcp_timer_backoff = 0;
14534 }
14535
14536 /*
14537 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
14538 * Note that it cannot be the SYN being ack'ed. The code flow
14539 * will not reach here.
14540 */
14541 if (mp1 == NULL) {
14542 goto fin_acked;
14543 }
14544
14545 /*
14546 * Update the congestion window.
14547 *
14548 * If TCP is not ECN capable or TCP is ECN capable but the
14549 * congestion experienced bit is not set, increase the tcp_cwnd as
14550 * usual.
14551 */
14552 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
14553 cwnd = tcp->tcp_cwnd;
14554 add = mss;
14555
14556 if (cwnd >= tcp->tcp_cwnd_ssthresh) {
14557 /*
14558 * This is to prevent an increase of less than 1 MSS of
14559 * tcp_cwnd. With partial increase, tcp_wput_data()
14560 * may send out tinygrams in order to preserve mblk
14561 * boundaries.
14562 *
14563 * By initializing tcp_cwnd_cnt to the new tcp_cwnd and
14564 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
14565 * increased by 1 MSS for every RTT.
14566 */
14567 if (tcp->tcp_cwnd_cnt <= 0) {
14568 tcp->tcp_cwnd_cnt = cwnd + add;
14569 } else {
14570 tcp->tcp_cwnd_cnt -= add;
14571 add = 0;
14572 }
14573 }
14574 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
14575 }
14576
14577 /* See if the latest urgent data has been acknowledged */
14578 if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
14579 SEQ_GT(seg_ack, tcp->tcp_urg))
14580 tcp->tcp_valid_bits &= ~TCP_URG_VALID;
14581
14582 /* Can we update the RTT estimates? */
14583 if (tcp->tcp_snd_ts_ok) {
14584 /* Ignore zero timestamp echo-reply. */
14585 if (tcpopt.tcp_opt_ts_ecr != 0) {
14586 tcp_set_rto(tcp, (int32_t)lbolt -
14587 (int32_t)tcpopt.tcp_opt_ts_ecr);
14588 }
14589
14590 /* If needed, restart the timer. */
14591 if (tcp->tcp_set_timer == 1) {
14592 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14593 tcp->tcp_set_timer = 0;
14594 }
14595 /*
14596 * Update tcp_csuna in case the other side stops sending
14597 * us timestamps.
14598 */
14599 tcp->tcp_csuna = tcp->tcp_snxt;
14600 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
14601 /*
14602 * An ACK sequence we haven't seen before, so get the RTT
14603 * and update the RTO. But first check if the timestamp is
14604 * valid to use.
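*
* [Editor's aside -- tcp_set_rto() is defined elsewhere; as a
* hedged sketch, estimators of this vintage typically apply the
* classic Jacobson/Karels update, where sa is the smoothed RTT
* scaled by 8, sv the mean deviation scaled by 4, and m the new
* sample in clock ticks:]
*
*	m -= sa >> 3; sa += m;		// srtt += (m - srtt) / 8
*	if (m < 0)
*		m = -m;
*	m -= sv >> 2; sv += m;		// rttvar += (|err| - rttvar) / 4
*	rto = (sa >> 3) + sv;		// RTO = srtt + 4 * rttvar
*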
14605 */
14606 if ((mp1->b_next != NULL) &&
14607 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
14608 tcp_set_rto(tcp, (int32_t)lbolt -
14609 (int32_t)(intptr_t)mp1->b_prev);
14610 else
14611 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14612
14613 /* Remember the last sequence to be ACKed */
14614 tcp->tcp_csuna = seg_ack;
14615 if (tcp->tcp_set_timer == 1) {
14616 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14617 tcp->tcp_set_timer = 0;
14618 }
14619 } else {
14620 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14621 }
14622
14623 /* Eat acknowledged bytes off the xmit queue. */
14624 for (;;) {
14625 mblk_t *mp2;
14626 uchar_t *wptr;
14627
14628 wptr = mp1->b_wptr;
14629 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
14630 bytes_acked -= (int)(wptr - mp1->b_rptr);
14631 if (bytes_acked < 0) {
14632 mp1->b_rptr = wptr + bytes_acked;
14633 /*
14634 * Set a new timestamp if all the bytes timed by the
14635 * old timestamp have been ack'ed.
14636 */
14637 if (SEQ_GT(seg_ack,
14638 (uint32_t)(uintptr_t)(mp1->b_next))) {
14639 mp1->b_prev = (mblk_t *)(uintptr_t)lbolt;
14640 mp1->b_next = NULL;
14641 }
14642 break;
14643 }
14644 mp1->b_next = NULL;
14645 mp1->b_prev = NULL;
14646 mp2 = mp1;
14647 mp1 = mp1->b_cont;
14648
14649 /*
14650 * This notification is required for some zero-copy
14651 * clients to maintain a copy semantic. After the data
14652 * is ack'ed, the client is safe to modify or reuse the buffer.
14653 */
14654 if (tcp->tcp_snd_zcopy_aware &&
14655 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
14656 tcp_zcopy_notify(tcp);
14657 freeb(mp2);
14658 if (bytes_acked == 0) {
14659 if (mp1 == NULL) {
14660 /* Everything is ack'ed, clear the tail. */
14661 tcp->tcp_xmit_tail = NULL;
14662 /*
14663 * Cancel the timer unless we are still
14664 * waiting for an ACK for the FIN packet.
14665 */
14666 if (tcp->tcp_timer_tid != 0 &&
14667 tcp->tcp_snxt == tcp->tcp_suna) {
14668 (void) TCP_TIMER_CANCEL(tcp,
14669 tcp->tcp_timer_tid);
14670 tcp->tcp_timer_tid = 0;
14671 }
14672 goto pre_swnd_update;
14673 }
14674 if (mp2 != tcp->tcp_xmit_tail)
14675 break;
14676 tcp->tcp_xmit_tail = mp1;
14677 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
14678 (uintptr_t)INT_MAX);
14679 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
14680 mp1->b_rptr);
14681 break;
14682 }
14683 if (mp1 == NULL) {
14684 /*
14685 * More was acked but there is nothing more
14686 * outstanding. This means that the FIN was
14687 * just acked or that we're talking to a clown.
14688 */
14689 fin_acked:
14690 ASSERT(tcp->tcp_fin_sent);
14691 tcp->tcp_xmit_tail = NULL;
14692 if (tcp->tcp_fin_sent) {
14693 /* FIN was acked - making progress */
14694 if (tcp->tcp_ipversion == IPV6_VERSION &&
14695 !tcp->tcp_fin_acked)
14696 tcp->tcp_ip_forward_progress = B_TRUE;
14697 tcp->tcp_fin_acked = B_TRUE;
14698 if (tcp->tcp_linger_tid != 0 &&
14699 TCP_TIMER_CANCEL(tcp,
14700 tcp->tcp_linger_tid) >= 0) {
14701 tcp_stop_lingering(tcp);
14702 freemsg(mp);
14703 mp = NULL;
14704 }
14705 } else {
14706 /*
14707 * We should never get here because
14708 * we have already checked that the
14709 * number of bytes ack'ed should be
14710 * smaller than or equal to what we
14711 * have sent so far (it is the
14712 * acceptability check of the ACK).
14713 * We can only get here if the send
14714 * queue is corrupted.
14715 *
14716 * Terminate the connection and
14717 * panic the system. It is better
14718 * for us to panic than to continue
14719 * and risk further disaster.
14720 */
14721 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
14722 tcp->tcp_rnxt, TH_RST|TH_ACK);
14723 panic("Memory corruption "
14724 "detected for connection %s.",
14725 tcp_display(tcp, NULL,
14726 DISP_ADDR_AND_PORT));
14727 /*NOTREACHED*/
14728 }
14729 goto pre_swnd_update;
14730 }
14731 ASSERT(mp2 != tcp->tcp_xmit_tail);
14732 }
14733 if (tcp->tcp_unsent) {
14734 flags |= TH_XMIT_NEEDED;
14735 }
14736 pre_swnd_update:
14737 tcp->tcp_xmit_head = mp1;
14738 swnd_update:
14739 /*
14740 * The following check is different from most other implementations.
14741 * For bi-directional transfer, when segments are dropped, the
14742 * "normal" check will not accept a window update in those
14743 * retransmitted segments. Failing to do that, TCP may send out
14744 * segments which are outside the receiver's window. As TCP accepts
14745 * the ack in those retransmitted segments, if the window update in
14746 * the same segment is not accepted, TCP will incorrectly calculate
14747 * that it can send more segments. This can create a deadlock
14748 * with the receiver if its window becomes zero.
14749 */
14750 if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
14751 SEQ_LT(tcp->tcp_swl1, seg_seq) ||
14752 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
14753 /*
14754 * The criteria for an update are:
14755 *
14756 * 1. the segment acknowledges some data. Or
14757 * 2. the segment is new, i.e. it has a higher seq num. Or
14758 * 3. the segment is not old and the advertised window is
14759 * larger than the previous advertised window.
14760 */
14761 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
14762 flags |= TH_XMIT_NEEDED;
14763 tcp->tcp_swnd = new_swnd;
14764 if (new_swnd > tcp->tcp_max_swnd)
14765 tcp->tcp_max_swnd = new_swnd;
14766 tcp->tcp_swl1 = seg_seq;
14767 tcp->tcp_swl2 = seg_ack;
14768 }
14769 est:
14770 if (tcp->tcp_state > TCPS_ESTABLISHED) {
14771
14772 switch (tcp->tcp_state) {
14773 case TCPS_FIN_WAIT_1:
14774 if (tcp->tcp_fin_acked) {
14775 tcp->tcp_state = TCPS_FIN_WAIT_2;
14776 /*
14777 * We implement the non-standard BSD/SunOS
14778 * FIN_WAIT_2 flushing algorithm.
14779 * If there is no user attached to this
14780 * TCP endpoint, then this TCP struct
14781 * could hang around forever in FIN_WAIT_2
14782 * state if the peer forgets to send us
14783 * a FIN. To prevent this, we wait only
14784 * 2*MSL (a convenient time value) for
14785 * the FIN to arrive. If it doesn't show up,
14786 * we flush the TCP endpoint. This algorithm,
14787 * though a violation of RFC-793, has worked
14788 * for over 10 years in BSD systems.
14789 * Note: SunOS 4.x waits 675 seconds before
14790 * flushing the FIN_WAIT_2 connection.
14791 */
14792 TCP_TIMER_RESTART(tcp,
14793 tcps->tcps_fin_wait_2_flush_interval);
14794 }
14795 break;
14796 case TCPS_FIN_WAIT_2:
14797 break; /* Shutdown hook? */
14798 case TCPS_LAST_ACK:
14799 freemsg(mp);
14800 if (tcp->tcp_fin_acked) {
14801 (void) tcp_clean_death(tcp, 0, 19);
14802 return;
14803 }
14804 goto xmit_check;
14805 case TCPS_CLOSING:
14806 if (tcp->tcp_fin_acked) {
14807 tcp->tcp_state = TCPS_TIME_WAIT;
14808 /*
14809 * Unconditionally clear the exclusive binding
14810 * bit so this TIME-WAIT connection won't
14811 * interfere with new ones.
14812 */ 14813 tcp->tcp_exclbind = 0; 14814 if (!TCP_IS_DETACHED(tcp)) { 14815 TCP_TIMER_RESTART(tcp, 14816 tcps->tcps_time_wait_interval); 14817 } else { 14818 tcp_time_wait_append(tcp); 14819 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14820 } 14821 } 14822 /*FALLTHRU*/ 14823 case TCPS_CLOSE_WAIT: 14824 freemsg(mp); 14825 goto xmit_check; 14826 default: 14827 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 14828 break; 14829 } 14830 } 14831 if (flags & TH_FIN) { 14832 /* Make sure we ack the fin */ 14833 flags |= TH_ACK_NEEDED; 14834 if (!tcp->tcp_fin_rcvd) { 14835 tcp->tcp_fin_rcvd = B_TRUE; 14836 tcp->tcp_rnxt++; 14837 tcph = tcp->tcp_tcph; 14838 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack); 14839 14840 /* 14841 * Generate the ordrel_ind at the end unless we 14842 * are an eager guy. 14843 * In the eager case tcp_rsrv will do this when run 14844 * after tcp_accept is done. 14845 */ 14846 if (tcp->tcp_listener == NULL && 14847 !TCP_IS_DETACHED(tcp) && (!tcp->tcp_hard_binding)) 14848 flags |= TH_ORDREL_NEEDED; 14849 switch (tcp->tcp_state) { 14850 case TCPS_SYN_RCVD: 14851 case TCPS_ESTABLISHED: 14852 tcp->tcp_state = TCPS_CLOSE_WAIT; 14853 /* Keepalive? */ 14854 break; 14855 case TCPS_FIN_WAIT_1: 14856 if (!tcp->tcp_fin_acked) { 14857 tcp->tcp_state = TCPS_CLOSING; 14858 break; 14859 } 14860 /* FALLTHRU */ 14861 case TCPS_FIN_WAIT_2: 14862 tcp->tcp_state = TCPS_TIME_WAIT; 14863 /* 14864 * Unconditionally clear the exclusive binding 14865 * bit so this TIME-WAIT connection won't 14866 * interfere with new ones. 14867 */ 14868 tcp->tcp_exclbind = 0; 14869 if (!TCP_IS_DETACHED(tcp)) { 14870 TCP_TIMER_RESTART(tcp, 14871 tcps->tcps_time_wait_interval); 14872 } else { 14873 tcp_time_wait_append(tcp); 14874 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14875 } 14876 if (seg_len) { 14877 /* 14878 * implies data piggybacked on FIN. 14879 * break to handle data. 14880 */ 14881 break; 14882 } 14883 freemsg(mp); 14884 goto ack_check; 14885 } 14886 } 14887 } 14888 if (mp == NULL) 14889 goto xmit_check; 14890 if (seg_len == 0) { 14891 freemsg(mp); 14892 goto xmit_check; 14893 } 14894 if (mp->b_rptr == mp->b_wptr) { 14895 /* 14896 * The header has been consumed, so we remove the 14897 * zero-length mblk here. 14898 */ 14899 mp1 = mp; 14900 mp = mp->b_cont; 14901 freeb(mp1); 14902 } 14903 tcph = tcp->tcp_tcph; 14904 tcp->tcp_rack_cnt++; 14905 { 14906 uint32_t cur_max; 14907 14908 cur_max = tcp->tcp_rack_cur_max; 14909 if (tcp->tcp_rack_cnt >= cur_max) { 14910 /* 14911 * We have more unacked data than we should - send 14912 * an ACK now. 14913 */ 14914 flags |= TH_ACK_NEEDED; 14915 cur_max++; 14916 if (cur_max > tcp->tcp_rack_abs_max) 14917 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 14918 else 14919 tcp->tcp_rack_cur_max = cur_max; 14920 } else if (TCP_IS_DETACHED(tcp)) { 14921 /* We don't have an ACK timer for detached TCP. */ 14922 flags |= TH_ACK_NEEDED; 14923 } else if (seg_len < mss) { 14924 /* 14925 * If we get a segment that is less than an mss, and we 14926 * already have unacknowledged data, and the amount 14927 * unacknowledged is not a multiple of mss, then we 14928 * better generate an ACK now. Otherwise, this may be 14929 * the tail piece of a transaction, and we would rather 14930 * wait for the response. 
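*
* [Editor's aside -- the test below can be read as this hedged,
* hypothetical helper:]
*
*	// ACK at once when the un-ACKed receive span is not a
*	// whole number of MSS-sized segments; otherwise let the
*	// delayed-ACK timer cover it.
*	static boolean_t
*	ack_now_for_odd_tail(uint32_t rnxt, uint32_t rack, uint32_t mss)
*	{
*		uint32_t udif = rnxt - rack;	// bytes not yet ACKed
*
*		return (udif != 0 && (udif % mss) != 0);
*	}
*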
14931 */
14932 uint32_t udif;
14933 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
14934 (uintptr_t)INT_MAX);
14935 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
14936 if (udif && (udif % mss))
14937 flags |= TH_ACK_NEEDED;
14938 else
14939 flags |= TH_ACK_TIMER_NEEDED;
14940 } else {
14941 /* Start delayed ack timer */
14942 flags |= TH_ACK_TIMER_NEEDED;
14943 }
14944 }
14945 tcp->tcp_rnxt += seg_len;
14946 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack);
14947
14948 /* Update SACK list */
14949 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
14950 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
14951 &(tcp->tcp_num_sack_blk));
14952 }
14953
14954 if (tcp->tcp_urp_mp) {
14955 tcp->tcp_urp_mp->b_cont = mp;
14956 mp = tcp->tcp_urp_mp;
14957 tcp->tcp_urp_mp = NULL;
14958 /* Ready for a new signal. */
14959 tcp->tcp_urp_last_valid = B_FALSE;
14960 #ifdef DEBUG
14961 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14962 "tcp_rput: sending exdata_ind %s",
14963 tcp_display(tcp, NULL, DISP_PORT_ONLY));
14964 #endif /* DEBUG */
14965 }
14966
14967 /*
14968 * Check for ancillary data changes compared to last segment.
14969 */
14970 if (tcp->tcp_ipv6_recvancillary != 0) {
14971 mp = tcp_rput_add_ancillary(tcp, mp, &ipp);
14972 if (mp == NULL)
14973 return;
14974 }
14975
14976 if (tcp->tcp_listener || tcp->tcp_hard_binding) {
14977 /*
14978 * Side queue inbound data until the accept happens.
14979 * tcp_accept/tcp_rput drains this when the accept happens.
14980 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
14981 * T_EXDATA_IND) it is queued on b_next.
14982 * XXX Make urgent data use this. Requires:
14983 * Removing tcp_listener check for TH_URG
14984 * Making M_PCPROTO and MARK messages skip the eager case
14985 */
14986
14987 if (tcp->tcp_kssl_pending) {
14988 DTRACE_PROBE1(kssl_mblk__ksslinput_pending,
14989 mblk_t *, mp);
14990 tcp_kssl_input(tcp, mp);
14991 } else {
14992 tcp_rcv_enqueue(tcp, mp, seg_len);
14993 }
14994 } else {
14995 if (mp->b_datap->db_type != M_DATA ||
14996 (flags & TH_MARKNEXT_NEEDED)) {
14997 if (tcp->tcp_rcv_list != NULL) {
14998 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp);
14999 }
15000 ASSERT(tcp->tcp_rcv_list == NULL ||
15001 tcp->tcp_fused_sigurg);
15002 if (flags & TH_MARKNEXT_NEEDED) {
15003 #ifdef DEBUG
15004 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
15005 "tcp_rput: sending MSGMARKNEXT %s",
15006 tcp_display(tcp, NULL,
15007 DISP_PORT_ONLY));
15008 #endif /* DEBUG */
15009 mp->b_flag |= MSGMARKNEXT;
15010 flags &= ~TH_MARKNEXT_NEEDED;
15011 }
15012
15013 /* Does this need SSL processing first? */
15014 if ((tcp->tcp_kssl_ctx != NULL) &&
15015 (DB_TYPE(mp) == M_DATA)) {
15016 DTRACE_PROBE1(kssl_mblk__ksslinput_data1,
15017 mblk_t *, mp);
15018 tcp_kssl_input(tcp, mp);
15019 } else {
15020 putnext(tcp->tcp_rq, mp);
15021 if (!canputnext(tcp->tcp_rq))
15022 tcp->tcp_rwnd -= seg_len;
15023 }
15024 } else if ((flags & (TH_PUSH|TH_FIN)) ||
15025 tcp->tcp_rcv_cnt + seg_len >= tcp->tcp_rq->q_hiwat >> 3) {
15026 if (tcp->tcp_rcv_list != NULL) {
15027 /*
15028 * Enqueue the new segment first and then
15029 * call tcp_rcv_drain() to send all data
15030 * up. The other way to do this is to
15031 * send all queued data up and then call
15032 * putnext() to send the new segment up;
15033 * that would let us remove the else part
15034 * later on.
15035 *
15036 * We don't do this, to avoid one more call
15037 * to canputnext(), as tcp_rcv_drain() needs
15038 * to call canputnext().
15039 */ 15040 tcp_rcv_enqueue(tcp, mp, seg_len); 15041 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15042 } else { 15043 /* Does this need SSL processing first? */ 15044 if ((tcp->tcp_kssl_ctx != NULL) && 15045 (DB_TYPE(mp) == M_DATA)) { 15046 DTRACE_PROBE1( 15047 kssl_mblk__ksslinput_data2, 15048 mblk_t *, mp); 15049 tcp_kssl_input(tcp, mp); 15050 } else { 15051 putnext(tcp->tcp_rq, mp); 15052 if (!canputnext(tcp->tcp_rq)) 15053 tcp->tcp_rwnd -= seg_len; 15054 } 15055 } 15056 } else { 15057 /* 15058 * Enqueue all packets when processing an mblk 15059 * from the co queue and also enqueue normal packets. 15060 * For packets which belong to SSL stream do SSL 15061 * processing first. 15062 */ 15063 if ((tcp->tcp_kssl_ctx != NULL) && 15064 (DB_TYPE(mp) == M_DATA)) { 15065 DTRACE_PROBE1(kssl_mblk__tcpksslin3, 15066 mblk_t *, mp); 15067 tcp_kssl_input(tcp, mp); 15068 } else { 15069 tcp_rcv_enqueue(tcp, mp, seg_len); 15070 } 15071 } 15072 /* 15073 * Make sure the timer is running if we have data waiting 15074 * for a push bit. This provides resiliency against 15075 * implementations that do not correctly generate push bits. 15076 */ 15077 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 15078 /* 15079 * The connection may be closed at this point, so don't 15080 * do anything for a detached tcp. 15081 */ 15082 if (!TCP_IS_DETACHED(tcp)) 15083 tcp->tcp_push_tid = TCP_TIMER(tcp, 15084 tcp_push_timer, 15085 MSEC_TO_TICK( 15086 tcps->tcps_push_timer_interval)); 15087 } 15088 } 15089 xmit_check: 15090 /* Is there anything left to do? */ 15091 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15092 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 15093 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 15094 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15095 goto done; 15096 15097 /* Any transmit work to do and a non-zero window? */ 15098 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 15099 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 15100 if (flags & TH_REXMIT_NEEDED) { 15101 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 15102 15103 BUMP_MIB(&tcps->tcps_mib, tcpOutFastRetrans); 15104 if (snd_size > mss) 15105 snd_size = mss; 15106 if (snd_size > tcp->tcp_swnd) 15107 snd_size = tcp->tcp_swnd; 15108 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 15109 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 15110 B_TRUE); 15111 15112 if (mp1 != NULL) { 15113 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15114 tcp->tcp_csuna = tcp->tcp_snxt; 15115 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 15116 UPDATE_MIB(&tcps->tcps_mib, 15117 tcpRetransBytes, snd_size); 15118 TCP_RECORD_TRACE(tcp, mp1, 15119 TCP_TRACE_SEND_PKT); 15120 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15121 } 15122 } 15123 if (flags & TH_NEED_SACK_REXMIT) { 15124 tcp_sack_rxmit(tcp, &flags); 15125 } 15126 /* 15127 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 15128 * out new segment. Note that tcp_rexmit should not be 15129 * set, otherwise TH_LIMIT_XMIT should not be set. 15130 */ 15131 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 15132 if (!tcp->tcp_rexmit) { 15133 tcp_wput_data(tcp, NULL, B_FALSE); 15134 } else { 15135 tcp_ss_rexmit(tcp); 15136 } 15137 } 15138 /* 15139 * Adjust tcp_cwnd back to normal value after sending 15140 * new data segments. 15141 */ 15142 if (flags & TH_LIMIT_XMIT) { 15143 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 15144 /* 15145 * This will restart the timer. 
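*
* [Editor's aside -- this subtraction undoes the temporary
* RFC 3042 inflation applied when the first two duplicate ACKs
* arrived; a hedged sketch of the pairing, with dupack_cnt being
* 1 or 2 in both places:]
*
*	inflate = mss << (dupack_cnt - 1);	// +1 MSS, then +2 MSS
*	cwnd += inflate;			// on the dup ACK
*	// ... new data is transmitted ...
*	cwnd -= inflate;			// here, after the send
*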
Restarting the 15146 * timer is used to avoid a timeout before the 15147 * limited transmitted segment's ACK gets back. 15148 */ 15149 if (tcp->tcp_xmit_head != NULL) 15150 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15151 } 15152 15153 /* Anything more to do? */ 15154 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 15155 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15156 goto done; 15157 } 15158 ack_check: 15159 if (flags & TH_SEND_URP_MARK) { 15160 ASSERT(tcp->tcp_urp_mark_mp); 15161 /* 15162 * Send up any queued data and then send the mark message 15163 */ 15164 if (tcp->tcp_rcv_list != NULL) { 15165 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15166 } 15167 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15168 15169 mp1 = tcp->tcp_urp_mark_mp; 15170 tcp->tcp_urp_mark_mp = NULL; 15171 #ifdef DEBUG 15172 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 15173 "tcp_rput: sending zero-length %s %s", 15174 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 15175 "MSGNOTMARKNEXT"), 15176 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 15177 #endif /* DEBUG */ 15178 putnext(tcp->tcp_rq, mp1); 15179 flags &= ~TH_SEND_URP_MARK; 15180 } 15181 if (flags & TH_ACK_NEEDED) { 15182 /* 15183 * Time to send an ack for some reason. 15184 */ 15185 mp1 = tcp_ack_mp(tcp); 15186 15187 if (mp1 != NULL) { 15188 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 15189 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15190 BUMP_LOCAL(tcp->tcp_obsegs); 15191 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 15192 } 15193 if (tcp->tcp_ack_tid != 0) { 15194 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 15195 tcp->tcp_ack_tid = 0; 15196 } 15197 } 15198 if (flags & TH_ACK_TIMER_NEEDED) { 15199 /* 15200 * Arrange for deferred ACK or push wait timeout. 15201 * Start timer if it is not already running. 15202 */ 15203 if (tcp->tcp_ack_tid == 0) { 15204 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 15205 MSEC_TO_TICK(tcp->tcp_localnet ? 15206 (clock_t)tcps->tcps_local_dack_interval : 15207 (clock_t)tcps->tcps_deferred_ack_interval)); 15208 } 15209 } 15210 if (flags & TH_ORDREL_NEEDED) { 15211 /* 15212 * Send up the ordrel_ind unless we are an eager guy. 15213 * In the eager case tcp_rsrv will do this when run 15214 * after tcp_accept is done. 15215 */ 15216 ASSERT(tcp->tcp_listener == NULL); 15217 if (tcp->tcp_rcv_list != NULL) { 15218 /* 15219 * Push any mblk(s) enqueued from co processing. 15220 */ 15221 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15222 } 15223 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15224 if ((mp1 = mi_tpi_ordrel_ind()) != NULL) { 15225 tcp->tcp_ordrel_done = B_TRUE; 15226 putnext(tcp->tcp_rq, mp1); 15227 if (tcp->tcp_deferred_clean_death) { 15228 /* 15229 * tcp_clean_death was deferred 15230 * for T_ORDREL_IND - do it now 15231 */ 15232 (void) tcp_clean_death(tcp, 15233 tcp->tcp_client_errno, 20); 15234 tcp->tcp_deferred_clean_death = B_FALSE; 15235 } 15236 } else { 15237 /* 15238 * Run the orderly release in the 15239 * service routine. 15240 */ 15241 qenable(tcp->tcp_rq); 15242 /* 15243 * Caveat(XXX): The machine may be so 15244 * overloaded that tcp_rsrv() is not scheduled 15245 * until after the endpoint has transitioned 15246 * to TCPS_TIME_WAIT 15247 * and tcp_time_wait_interval expires. Then 15248 * tcp_timer() will blow away state in tcp_t 15249 * and T_ORDREL_IND will never be delivered 15250 * upstream. Unlikely but potentially 15251 * a problem. 15252 */ 15253 } 15254 } 15255 done: 15256 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15257 } 15258 15259 /* 15260 * This function does PAWS protection check. 
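*
* [Editor's aside -- the essence of the PAWS test (RFC 1323,
* section 4.2) as a hedged sketch; idle_too_long stands in for
* the PAWS_TIMEOUT idle check made below:]
*
*	// Drop a non-RST segment whose timestamp is older than the
*	// last accepted one, unless the connection idled so long
*	// that the 32-bit timestamp clock may have wrapped.
*	if (!(flags & TH_RST) &&
*	    TSTMP_LT(ts_val, ts_recent) && !idle_too_long)
*		return (B_FALSE);	// segment fails PAWS
*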
* Returns B_TRUE if the
15261 * segment passes the PAWS test, else returns B_FALSE.
15262 */
15263 boolean_t
15264 tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp)
15265 {
15266 uint8_t flags;
15267 int options;
15268 uint8_t *up;
15269
15270 flags = (unsigned int)tcph->th_flags[0] & 0xFF;
15271 /*
15272 * If timestamp option is aligned nicely, get values inline,
15273 * otherwise call general routine to parse. Only do that
15274 * if timestamp is the only option.
15275 */
15276 if (TCP_HDR_LENGTH(tcph) == (uint32_t)TCP_MIN_HEADER_LENGTH +
15277 TCPOPT_REAL_TS_LEN &&
15278 OK_32PTR((up = ((uint8_t *)tcph) +
15279 TCP_MIN_HEADER_LENGTH)) &&
15280 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
15281 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
15282 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));
15283
15284 options = TCP_OPT_TSTAMP_PRESENT;
15285 } else {
15286 if (tcp->tcp_snd_sack_ok) {
15287 tcpoptp->tcp = tcp;
15288 } else {
15289 tcpoptp->tcp = NULL;
15290 }
15291 options = tcp_parse_options(tcph, tcpoptp);
15292 }
15293
15294 if (options & TCP_OPT_TSTAMP_PRESENT) {
15295 /*
15296 * Do PAWS per RFC 1323 section 4.2. Accept RST
15297 * regardless of the timestamp, page 18 RFC 1323.bis.
15298 */
15299 if ((flags & TH_RST) == 0 &&
15300 TSTMP_LT(tcpoptp->tcp_opt_ts_val,
15301 tcp->tcp_ts_recent)) {
15302 if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt +
15303 PAWS_TIMEOUT)) {
15304 /* This segment is not acceptable. */
15305 return (B_FALSE);
15306 } else {
15307 /*
15308 * Connection has been idle for
15309 * too long. Reset the timestamp
15310 * and assume the segment is valid.
15311 */
15312 tcp->tcp_ts_recent =
15313 tcpoptp->tcp_opt_ts_val;
15314 }
15315 }
15316 } else {
15317 /*
15318 * If we don't get a timestamp on every packet, we
15319 * figure we can't really trust 'em, so we stop sending
15320 * and parsing them.
15321 */
15322 tcp->tcp_snd_ts_ok = B_FALSE;
15323
15324 tcp->tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15325 tcp->tcp_tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15326 tcp->tcp_tcph->th_offset_and_rsrvd[0] -= (3 << 4);
15327 /*
15328 * Adjust the tcp_mss accordingly. We also need to
15329 * adjust tcp_cwnd here in accordance with the new mss.
15330 * But we avoid doing a slow start here so as not to
15331 * lose on the transfer rate built up so far.
15332 */
15333 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN, B_FALSE);
15334 if (tcp->tcp_snd_sack_ok) {
15335 ASSERT(tcp->tcp_sack_info != NULL);
15336 tcp->tcp_max_sack_blk = 4;
15337 }
15338 }
15339 return (B_TRUE);
15340 }
15341
15342 /*
15343 * Attach ancillary data to received TCP segments for the
15344 * ancillary pieces requested by the application that are
15345 * different from what they were in the previous data segment.
15346 *
15347 * Save the "current" values once memory allocation is ok so that
15348 * when memory allocation fails we can just wait for the next data segment.
15349 */
15350 static mblk_t *
15351 tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp)
15352 {
15353 struct T_optdata_ind *todi;
15354 int optlen;
15355 uchar_t *optptr;
15356 struct T_opthdr *toh;
15357 uint_t addflag; /* Which pieces to add */
15358 mblk_t *mp1;
15359
15360 optlen = 0;
15361 addflag = 0;
15362 /* If app asked for pktinfo and the index has changed ...
*/ 15363 if ((ipp->ipp_fields & IPPF_IFINDEX) && 15364 ipp->ipp_ifindex != tcp->tcp_recvifindex && 15365 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO)) { 15366 optlen += sizeof (struct T_opthdr) + 15367 sizeof (struct in6_pktinfo); 15368 addflag |= TCP_IPV6_RECVPKTINFO; 15369 } 15370 /* If app asked for hoplimit and it has changed ... */ 15371 if ((ipp->ipp_fields & IPPF_HOPLIMIT) && 15372 ipp->ipp_hoplimit != tcp->tcp_recvhops && 15373 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPLIMIT)) { 15374 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15375 addflag |= TCP_IPV6_RECVHOPLIMIT; 15376 } 15377 /* If app asked for tclass and it has changed ... */ 15378 if ((ipp->ipp_fields & IPPF_TCLASS) && 15379 ipp->ipp_tclass != tcp->tcp_recvtclass && 15380 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS)) { 15381 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15382 addflag |= TCP_IPV6_RECVTCLASS; 15383 } 15384 /* 15385 * If app asked for hopbyhop headers and it has changed ... 15386 * For security labels, note that (1) security labels can't change on 15387 * a connected socket at all, (2) we're connected to at most one peer, 15388 * (3) if anything changes, then it must be some other extra option. 15389 */ 15390 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) && 15391 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 15392 (ipp->ipp_fields & IPPF_HOPOPTS), 15393 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 15394 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen - 15395 tcp->tcp_label_len; 15396 addflag |= TCP_IPV6_RECVHOPOPTS; 15397 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 15398 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 15399 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 15400 return (mp); 15401 } 15402 /* If app asked for dst headers before routing headers ... */ 15403 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTDSTOPTS) && 15404 ip_cmpbuf(tcp->tcp_rtdstopts, tcp->tcp_rtdstoptslen, 15405 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15406 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) { 15407 optlen += sizeof (struct T_opthdr) + 15408 ipp->ipp_rtdstoptslen; 15409 addflag |= TCP_IPV6_RECVRTDSTOPTS; 15410 if (!ip_allocbuf((void **)&tcp->tcp_rtdstopts, 15411 &tcp->tcp_rtdstoptslen, (ipp->ipp_fields & IPPF_RTDSTOPTS), 15412 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) 15413 return (mp); 15414 } 15415 /* If app asked for routing headers and it has changed ... */ 15416 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) && 15417 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 15418 (ipp->ipp_fields & IPPF_RTHDR), 15419 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 15420 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 15421 addflag |= TCP_IPV6_RECVRTHDR; 15422 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 15423 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 15424 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 15425 return (mp); 15426 } 15427 /* If app asked for dest headers and it has changed ... 
*/ 15428 if ((tcp->tcp_ipv6_recvancillary & 15429 (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) && 15430 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 15431 (ipp->ipp_fields & IPPF_DSTOPTS), 15432 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 15433 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 15434 addflag |= TCP_IPV6_RECVDSTOPTS; 15435 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 15436 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 15437 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 15438 return (mp); 15439 } 15440 15441 if (optlen == 0) { 15442 /* Nothing to add */ 15443 return (mp); 15444 } 15445 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 15446 if (mp1 == NULL) { 15447 /* 15448 * Defer sending ancillary data until the next TCP segment 15449 * arrives. 15450 */ 15451 return (mp); 15452 } 15453 mp1->b_cont = mp; 15454 mp = mp1; 15455 mp->b_wptr += sizeof (*todi) + optlen; 15456 mp->b_datap->db_type = M_PROTO; 15457 todi = (struct T_optdata_ind *)mp->b_rptr; 15458 todi->PRIM_type = T_OPTDATA_IND; 15459 todi->DATA_flag = 1; /* MORE data */ 15460 todi->OPT_length = optlen; 15461 todi->OPT_offset = sizeof (*todi); 15462 optptr = (uchar_t *)&todi[1]; 15463 /* 15464 * If app asked for pktinfo and the index has changed ... 15465 * Note that the local address never changes for the connection. 15466 */ 15467 if (addflag & TCP_IPV6_RECVPKTINFO) { 15468 struct in6_pktinfo *pkti; 15469 15470 toh = (struct T_opthdr *)optptr; 15471 toh->level = IPPROTO_IPV6; 15472 toh->name = IPV6_PKTINFO; 15473 toh->len = sizeof (*toh) + sizeof (*pkti); 15474 toh->status = 0; 15475 optptr += sizeof (*toh); 15476 pkti = (struct in6_pktinfo *)optptr; 15477 if (tcp->tcp_ipversion == IPV6_VERSION) 15478 pkti->ipi6_addr = tcp->tcp_ip6h->ip6_src; 15479 else 15480 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 15481 &pkti->ipi6_addr); 15482 pkti->ipi6_ifindex = ipp->ipp_ifindex; 15483 optptr += sizeof (*pkti); 15484 ASSERT(OK_32PTR(optptr)); 15485 /* Save as "last" value */ 15486 tcp->tcp_recvifindex = ipp->ipp_ifindex; 15487 } 15488 /* If app asked for hoplimit and it has changed ... */ 15489 if (addflag & TCP_IPV6_RECVHOPLIMIT) { 15490 toh = (struct T_opthdr *)optptr; 15491 toh->level = IPPROTO_IPV6; 15492 toh->name = IPV6_HOPLIMIT; 15493 toh->len = sizeof (*toh) + sizeof (uint_t); 15494 toh->status = 0; 15495 optptr += sizeof (*toh); 15496 *(uint_t *)optptr = ipp->ipp_hoplimit; 15497 optptr += sizeof (uint_t); 15498 ASSERT(OK_32PTR(optptr)); 15499 /* Save as "last" value */ 15500 tcp->tcp_recvhops = ipp->ipp_hoplimit; 15501 } 15502 /* If app asked for tclass and it has changed ... 
*/ 15503 if (addflag & TCP_IPV6_RECVTCLASS) { 15504 toh = (struct T_opthdr *)optptr; 15505 toh->level = IPPROTO_IPV6; 15506 toh->name = IPV6_TCLASS; 15507 toh->len = sizeof (*toh) + sizeof (uint_t); 15508 toh->status = 0; 15509 optptr += sizeof (*toh); 15510 *(uint_t *)optptr = ipp->ipp_tclass; 15511 optptr += sizeof (uint_t); 15512 ASSERT(OK_32PTR(optptr)); 15513 /* Save as "last" value */ 15514 tcp->tcp_recvtclass = ipp->ipp_tclass; 15515 } 15516 if (addflag & TCP_IPV6_RECVHOPOPTS) { 15517 toh = (struct T_opthdr *)optptr; 15518 toh->level = IPPROTO_IPV6; 15519 toh->name = IPV6_HOPOPTS; 15520 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen - 15521 tcp->tcp_label_len; 15522 toh->status = 0; 15523 optptr += sizeof (*toh); 15524 bcopy((uchar_t *)ipp->ipp_hopopts + tcp->tcp_label_len, optptr, 15525 ipp->ipp_hopoptslen - tcp->tcp_label_len); 15526 optptr += ipp->ipp_hopoptslen - tcp->tcp_label_len; 15527 ASSERT(OK_32PTR(optptr)); 15528 /* Save as last value */ 15529 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 15530 (ipp->ipp_fields & IPPF_HOPOPTS), 15531 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 15532 } 15533 if (addflag & TCP_IPV6_RECVRTDSTOPTS) { 15534 toh = (struct T_opthdr *)optptr; 15535 toh->level = IPPROTO_IPV6; 15536 toh->name = IPV6_RTHDRDSTOPTS; 15537 toh->len = sizeof (*toh) + ipp->ipp_rtdstoptslen; 15538 toh->status = 0; 15539 optptr += sizeof (*toh); 15540 bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen); 15541 optptr += ipp->ipp_rtdstoptslen; 15542 ASSERT(OK_32PTR(optptr)); 15543 /* Save as last value */ 15544 ip_savebuf((void **)&tcp->tcp_rtdstopts, 15545 &tcp->tcp_rtdstoptslen, 15546 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15547 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen); 15548 } 15549 if (addflag & TCP_IPV6_RECVRTHDR) { 15550 toh = (struct T_opthdr *)optptr; 15551 toh->level = IPPROTO_IPV6; 15552 toh->name = IPV6_RTHDR; 15553 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 15554 toh->status = 0; 15555 optptr += sizeof (*toh); 15556 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 15557 optptr += ipp->ipp_rthdrlen; 15558 ASSERT(OK_32PTR(optptr)); 15559 /* Save as last value */ 15560 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 15561 (ipp->ipp_fields & IPPF_RTHDR), 15562 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 15563 } 15564 if (addflag & (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) { 15565 toh = (struct T_opthdr *)optptr; 15566 toh->level = IPPROTO_IPV6; 15567 toh->name = IPV6_DSTOPTS; 15568 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 15569 toh->status = 0; 15570 optptr += sizeof (*toh); 15571 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 15572 optptr += ipp->ipp_dstoptslen; 15573 ASSERT(OK_32PTR(optptr)); 15574 /* Save as last value */ 15575 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 15576 (ipp->ipp_fields & IPPF_DSTOPTS), 15577 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 15578 } 15579 ASSERT(optptr == mp->b_wptr); 15580 return (mp); 15581 } 15582 15583 15584 /* 15585 * Handle a *T_BIND_REQ that has failed either due to a T_ERROR_ACK 15586 * or a "bad" IRE detected by tcp_adapt_ire. 15587 * We can't tell if the failure was due to the laddr or the faddr 15588 * thus we clear out all addresses and ports. 
15589 */ 15590 static void 15591 tcp_bind_failed(tcp_t *tcp, mblk_t *mp, int error) 15592 { 15593 queue_t *q = tcp->tcp_rq; 15594 tcph_t *tcph; 15595 struct T_error_ack *tea; 15596 conn_t *connp = tcp->tcp_connp; 15597 15598 15599 ASSERT(mp->b_datap->db_type == M_PCPROTO); 15600 15601 if (mp->b_cont) { 15602 freemsg(mp->b_cont); 15603 mp->b_cont = NULL; 15604 } 15605 tea = (struct T_error_ack *)mp->b_rptr; 15606 switch (tea->PRIM_type) { 15607 case T_BIND_ACK: 15608 /* 15609 * Need to unbind with classifier since we were just told that 15610 * our bind succeeded. 15611 */ 15612 tcp->tcp_hard_bound = B_FALSE; 15613 tcp->tcp_hard_binding = B_FALSE; 15614 15615 ipcl_hash_remove(connp); 15616 /* Reuse the mblk if possible */ 15617 ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >= 15618 sizeof (*tea)); 15619 mp->b_rptr = mp->b_datap->db_base; 15620 mp->b_wptr = mp->b_rptr + sizeof (*tea); 15621 tea = (struct T_error_ack *)mp->b_rptr; 15622 tea->PRIM_type = T_ERROR_ACK; 15623 tea->TLI_error = TSYSERR; 15624 tea->UNIX_error = error; 15625 if (tcp->tcp_state >= TCPS_SYN_SENT) { 15626 tea->ERROR_prim = T_CONN_REQ; 15627 } else { 15628 tea->ERROR_prim = O_T_BIND_REQ; 15629 } 15630 break; 15631 15632 case T_ERROR_ACK: 15633 if (tcp->tcp_state >= TCPS_SYN_SENT) 15634 tea->ERROR_prim = T_CONN_REQ; 15635 break; 15636 default: 15637 panic("tcp_bind_failed: unexpected TPI type"); 15638 /*NOTREACHED*/ 15639 } 15640 15641 tcp->tcp_state = TCPS_IDLE; 15642 if (tcp->tcp_ipversion == IPV4_VERSION) 15643 tcp->tcp_ipha->ipha_src = 0; 15644 else 15645 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 15646 /* 15647 * Copy of the src addr. in tcp_t is needed since 15648 * the lookup funcs. can only look at tcp_t 15649 */ 15650 V6_SET_ZERO(tcp->tcp_ip_src_v6); 15651 15652 tcph = tcp->tcp_tcph; 15653 tcph->th_lport[0] = 0; 15654 tcph->th_lport[1] = 0; 15655 tcp_bind_hash_remove(tcp); 15656 bzero(&connp->u_port, sizeof (connp->u_port)); 15657 /* blow away saved option results if any */ 15658 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 15659 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 15660 15661 conn_delete_ire(tcp->tcp_connp, NULL); 15662 putnext(q, mp); 15663 } 15664 15665 /* 15666 * tcp_rput_other is called by tcp_rput to handle everything other than M_DATA 15667 * messages. 15668 */ 15669 void 15670 tcp_rput_other(tcp_t *tcp, mblk_t *mp) 15671 { 15672 mblk_t *mp1; 15673 uchar_t *rptr = mp->b_rptr; 15674 queue_t *q = tcp->tcp_rq; 15675 struct T_error_ack *tea; 15676 uint32_t mss; 15677 mblk_t *syn_mp; 15678 mblk_t *mdti; 15679 mblk_t *lsoi; 15680 int retval; 15681 mblk_t *ire_mp; 15682 tcp_stack_t *tcps = tcp->tcp_tcps; 15683 15684 switch (mp->b_datap->db_type) { 15685 case M_PROTO: 15686 case M_PCPROTO: 15687 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 15688 if ((mp->b_wptr - rptr) < sizeof (t_scalar_t)) 15689 break; 15690 tea = (struct T_error_ack *)rptr; 15691 switch (tea->PRIM_type) { 15692 case T_BIND_ACK: 15693 /* 15694 * Adapt Multidata information, if any. The 15695 * following tcp_mdt_update routine will free 15696 * the message. 15697 */ 15698 if ((mdti = tcp_mdt_info_mp(mp)) != NULL) { 15699 tcp_mdt_update(tcp, &((ip_mdt_info_t *)mdti-> 15700 b_rptr)->mdt_capab, B_TRUE); 15701 freemsg(mdti); 15702 } 15703 15704 /* 15705 * Check to update LSO information with tcp, and 15706 * tcp_lso_update routine will free the message. 
15707 */ 15708 if ((lsoi = tcp_lso_info_mp(mp)) != NULL) { 15709 tcp_lso_update(tcp, &((ip_lso_info_t *)lsoi-> 15710 b_rptr)->lso_capab); 15711 freemsg(lsoi); 15712 } 15713 15714 /* Get the IRE, if we had requested for it */ 15715 ire_mp = tcp_ire_mp(mp); 15716 15717 if (tcp->tcp_hard_binding) { 15718 tcp->tcp_hard_binding = B_FALSE; 15719 tcp->tcp_hard_bound = B_TRUE; 15720 CL_INET_CONNECT(tcp); 15721 } else { 15722 if (ire_mp != NULL) 15723 freeb(ire_mp); 15724 goto after_syn_sent; 15725 } 15726 15727 retval = tcp_adapt_ire(tcp, ire_mp); 15728 if (ire_mp != NULL) 15729 freeb(ire_mp); 15730 if (retval == 0) { 15731 tcp_bind_failed(tcp, mp, 15732 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15733 ENETUNREACH : EADDRNOTAVAIL)); 15734 return; 15735 } 15736 /* 15737 * Don't let an endpoint connect to itself. 15738 * Also checked in tcp_connect() but that 15739 * check can't handle the case when the 15740 * local IP address is INADDR_ANY. 15741 */ 15742 if (tcp->tcp_ipversion == IPV4_VERSION) { 15743 if ((tcp->tcp_ipha->ipha_dst == 15744 tcp->tcp_ipha->ipha_src) && 15745 (BE16_EQL(tcp->tcp_tcph->th_lport, 15746 tcp->tcp_tcph->th_fport))) { 15747 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15748 return; 15749 } 15750 } else { 15751 if (IN6_ARE_ADDR_EQUAL( 15752 &tcp->tcp_ip6h->ip6_dst, 15753 &tcp->tcp_ip6h->ip6_src) && 15754 (BE16_EQL(tcp->tcp_tcph->th_lport, 15755 tcp->tcp_tcph->th_fport))) { 15756 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15757 return; 15758 } 15759 } 15760 ASSERT(tcp->tcp_state == TCPS_SYN_SENT); 15761 /* 15762 * This should not be possible! Just for 15763 * defensive coding... 15764 */ 15765 if (tcp->tcp_state != TCPS_SYN_SENT) 15766 goto after_syn_sent; 15767 15768 if (is_system_labeled() && 15769 !tcp_update_label(tcp, CONN_CRED(tcp->tcp_connp))) { 15770 tcp_bind_failed(tcp, mp, EHOSTUNREACH); 15771 return; 15772 } 15773 15774 ASSERT(q == tcp->tcp_rq); 15775 /* 15776 * tcp_adapt_ire() does not adjust 15777 * for TCP/IP header length. 15778 */ 15779 mss = tcp->tcp_mss - tcp->tcp_hdr_len; 15780 15781 /* 15782 * Just make sure our rwnd is at 15783 * least tcp_recv_hiwat_mss * MSS 15784 * large, and round up to the nearest 15785 * MSS. 15786 * 15787 * We do the round up here because 15788 * we need to get the interface 15789 * MTU first before we can do the 15790 * round up. 15791 */ 15792 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss), 15793 tcps->tcps_recv_hiwat_minmss * mss); 15794 q->q_hiwat = tcp->tcp_rwnd; 15795 tcp_set_ws_value(tcp); 15796 U32_TO_ABE16((tcp->tcp_rwnd >> tcp->tcp_rcv_ws), 15797 tcp->tcp_tcph->th_win); 15798 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always) 15799 tcp->tcp_snd_ws_ok = B_TRUE; 15800 15801 /* 15802 * Set tcp_snd_ts_ok to true 15803 * so that tcp_xmit_mp will 15804 * include the timestamp 15805 * option in the SYN segment. 15806 */ 15807 if (tcps->tcps_tstamp_always || 15808 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) { 15809 tcp->tcp_snd_ts_ok = B_TRUE; 15810 } 15811 15812 /* 15813 * tcp_snd_sack_ok can be set in 15814 * tcp_adapt_ire() if the sack metric 15815 * is set. So check it here also. 15816 */ 15817 if (tcps->tcps_sack_permitted == 2 || 15818 tcp->tcp_snd_sack_ok) { 15819 if (tcp->tcp_sack_info == NULL) { 15820 tcp->tcp_sack_info = 15821 kmem_cache_alloc( 15822 tcp_sack_info_cache, 15823 KM_SLEEP); 15824 } 15825 tcp->tcp_snd_sack_ok = B_TRUE; 15826 } 15827 15828 /* 15829 * Should we use ECN? Note that the current 15830 * default value (SunOS 5.9) of tcp_ecn_permitted 15831 * is 1. 
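*
* [Editor's aside -- a hedged reading of the tunable: 0 disables
* ECN, 1 answers ECN if the peer asks for it, and 2 also asks
* actively; only the active case is taken here, e.g.:]
*
*	if (tcps->tcps_ecn_permitted == 2)
*		tcp->tcp_ecn_ok = B_TRUE;	// negotiate ECN on the SYN
*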
The reason for doing this is that there 15832 * are equipments out there that will drop ECN 15833 * enabled IP packets. Setting it to 1 avoids 15834 * compatibility problems. 15835 */ 15836 if (tcps->tcps_ecn_permitted == 2) 15837 tcp->tcp_ecn_ok = B_TRUE; 15838 15839 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 15840 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, 15841 tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 15842 if (syn_mp) { 15843 cred_t *cr; 15844 pid_t pid; 15845 15846 /* 15847 * Obtain the credential from the 15848 * thread calling connect(); the credential 15849 * lives on in the second mblk which 15850 * originated from T_CONN_REQ and is echoed 15851 * with the T_BIND_ACK from ip. If none 15852 * can be found, default to the creator 15853 * of the socket. 15854 */ 15855 if (mp->b_cont == NULL || 15856 (cr = DB_CRED(mp->b_cont)) == NULL) { 15857 cr = tcp->tcp_cred; 15858 pid = tcp->tcp_cpid; 15859 } else { 15860 pid = DB_CPID(mp->b_cont); 15861 } 15862 15863 TCP_RECORD_TRACE(tcp, syn_mp, 15864 TCP_TRACE_SEND_PKT); 15865 mblk_setcred(syn_mp, cr); 15866 DB_CPID(syn_mp) = pid; 15867 tcp_send_data(tcp, tcp->tcp_wq, syn_mp); 15868 } 15869 after_syn_sent: 15870 /* 15871 * A trailer mblk indicates a waiting client upstream. 15872 * We complete here the processing begun in 15873 * either tcp_bind() or tcp_connect() by passing 15874 * upstream the reply message they supplied. 15875 */ 15876 mp1 = mp; 15877 mp = mp->b_cont; 15878 freeb(mp1); 15879 if (mp) 15880 break; 15881 return; 15882 case T_ERROR_ACK: 15883 if (tcp->tcp_debug) { 15884 (void) strlog(TCP_MOD_ID, 0, 1, 15885 SL_TRACE|SL_ERROR, 15886 "tcp_rput_other: case T_ERROR_ACK, " 15887 "ERROR_prim == %d", 15888 tea->ERROR_prim); 15889 } 15890 switch (tea->ERROR_prim) { 15891 case O_T_BIND_REQ: 15892 case T_BIND_REQ: 15893 tcp_bind_failed(tcp, mp, 15894 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15895 ENETUNREACH : EADDRNOTAVAIL)); 15896 return; 15897 case T_UNBIND_REQ: 15898 tcp->tcp_hard_binding = B_FALSE; 15899 tcp->tcp_hard_bound = B_FALSE; 15900 if (mp->b_cont) { 15901 freemsg(mp->b_cont); 15902 mp->b_cont = NULL; 15903 } 15904 if (tcp->tcp_unbind_pending) 15905 tcp->tcp_unbind_pending = 0; 15906 else { 15907 /* From tcp_ip_unbind() - free */ 15908 freemsg(mp); 15909 return; 15910 } 15911 break; 15912 case T_SVR4_OPTMGMT_REQ: 15913 if (tcp->tcp_drop_opt_ack_cnt > 0) { 15914 /* T_OPTMGMT_REQ generated by TCP */ 15915 printf("T_SVR4_OPTMGMT_REQ failed " 15916 "%d/%d - dropped (cnt %d)\n", 15917 tea->TLI_error, tea->UNIX_error, 15918 tcp->tcp_drop_opt_ack_cnt); 15919 freemsg(mp); 15920 tcp->tcp_drop_opt_ack_cnt--; 15921 return; 15922 } 15923 break; 15924 } 15925 if (tea->ERROR_prim == T_SVR4_OPTMGMT_REQ && 15926 tcp->tcp_drop_opt_ack_cnt > 0) { 15927 printf("T_SVR4_OPTMGMT_REQ failed %d/%d " 15928 "- dropped (cnt %d)\n", 15929 tea->TLI_error, tea->UNIX_error, 15930 tcp->tcp_drop_opt_ack_cnt); 15931 freemsg(mp); 15932 tcp->tcp_drop_opt_ack_cnt--; 15933 return; 15934 } 15935 break; 15936 case T_OPTMGMT_ACK: 15937 if (tcp->tcp_drop_opt_ack_cnt > 0) { 15938 /* T_OPTMGMT_REQ generated by TCP */ 15939 freemsg(mp); 15940 tcp->tcp_drop_opt_ack_cnt--; 15941 return; 15942 } 15943 break; 15944 default: 15945 break; 15946 } 15947 break; 15948 case M_FLUSH: 15949 if (*rptr & FLUSHR) 15950 flushq(q, FLUSHDATA); 15951 break; 15952 default: 15953 /* M_CTL will be directly sent to tcp_icmp_error() */ 15954 ASSERT(DB_TYPE(mp) != M_CTL); 15955 break; 15956 } 15957 /* 15958 * Make sure we set this bit before sending the ACK for 15959 * bind. 
Otherwise accept could possibly run and free 15960 * this tcp struct. 15961 */ 15962 putnext(q, mp); 15963 } 15964 15965 /* 15966 * Called as the result of a qbufcall or a qtimeout to remedy a failure 15967 * to allocate a T_ordrel_ind in tcp_rsrv(). qenable(q) will make 15968 * tcp_rsrv() try again. 15969 */ 15970 static void 15971 tcp_ordrel_kick(void *arg) 15972 { 15973 conn_t *connp = (conn_t *)arg; 15974 tcp_t *tcp = connp->conn_tcp; 15975 15976 tcp->tcp_ordrelid = 0; 15977 tcp->tcp_timeout = B_FALSE; 15978 if (!TCP_IS_DETACHED(tcp) && tcp->tcp_rq != NULL && 15979 tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 15980 qenable(tcp->tcp_rq); 15981 } 15982 } 15983 15984 /* ARGSUSED */ 15985 static void 15986 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2) 15987 { 15988 conn_t *connp = (conn_t *)arg; 15989 tcp_t *tcp = connp->conn_tcp; 15990 queue_t *q = tcp->tcp_rq; 15991 uint_t thwin; 15992 tcp_stack_t *tcps = tcp->tcp_tcps; 15993 15994 freeb(mp); 15995 15996 TCP_STAT(tcps, tcp_rsrv_calls); 15997 15998 if (TCP_IS_DETACHED(tcp) || q == NULL) { 15999 return; 16000 } 16001 16002 if (tcp->tcp_fused) { 16003 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 16004 16005 ASSERT(tcp->tcp_fused); 16006 ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused); 16007 ASSERT(peer_tcp->tcp_loopback_peer == tcp); 16008 ASSERT(!TCP_IS_DETACHED(tcp)); 16009 ASSERT(tcp->tcp_connp->conn_sqp == 16010 peer_tcp->tcp_connp->conn_sqp); 16011 16012 /* 16013 * Normally we would not get backenabled in synchronous 16014 * streams mode, but in case this happens, we need to plug 16015 * synchronous streams during our drain to prevent a race 16016 * with tcp_fuse_rrw() or tcp_fuse_rinfop(). 16017 */ 16018 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 16019 if (tcp->tcp_rcv_list != NULL) 16020 (void) tcp_rcv_drain(tcp->tcp_rq, tcp); 16021 16022 if (peer_tcp > tcp) { 16023 mutex_enter(&peer_tcp->tcp_non_sq_lock); 16024 mutex_enter(&tcp->tcp_non_sq_lock); 16025 } else { 16026 mutex_enter(&tcp->tcp_non_sq_lock); 16027 mutex_enter(&peer_tcp->tcp_non_sq_lock); 16028 } 16029 16030 if (peer_tcp->tcp_flow_stopped && 16031 (TCP_UNSENT_BYTES(peer_tcp) <= 16032 peer_tcp->tcp_xmit_lowater)) { 16033 tcp_clrqfull(peer_tcp); 16034 } 16035 mutex_exit(&peer_tcp->tcp_non_sq_lock); 16036 mutex_exit(&tcp->tcp_non_sq_lock); 16037 16038 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 16039 TCP_STAT(tcps, tcp_fusion_backenabled); 16040 return; 16041 } 16042 16043 if (canputnext(q)) { 16044 tcp->tcp_rwnd = q->q_hiwat; 16045 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 16046 << tcp->tcp_rcv_ws; 16047 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 16048 /* 16049 * Send back a window update immediately if TCP is above 16050 * ESTABLISHED state and the increase of the rcv window 16051 * that the other side knows is at least 1 MSS after flow 16052 * control is lifted. 16053 */ 16054 if (tcp->tcp_state >= TCPS_ESTABLISHED && 16055 (q->q_hiwat - thwin >= tcp->tcp_mss)) { 16056 tcp_xmit_ctl(NULL, tcp, 16057 (tcp->tcp_swnd == 0) ? 
tcp->tcp_suna :
16058 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
16059 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
16060 }
16061 }
16062 /* Handle a failure to allocate a T_ORDREL_IND here */
16063 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
16064 ASSERT(tcp->tcp_listener == NULL);
16065 if (tcp->tcp_rcv_list != NULL) {
16066 (void) tcp_rcv_drain(q, tcp);
16067 }
16068 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
16069 mp = mi_tpi_ordrel_ind();
16070 if (mp) {
16071 tcp->tcp_ordrel_done = B_TRUE;
16072 putnext(q, mp);
16073 if (tcp->tcp_deferred_clean_death) {
16074 /*
16075 * tcp_clean_death was deferred for
16076 * T_ORDREL_IND - do it now
16077 */
16078 tcp->tcp_deferred_clean_death = B_FALSE;
16079 (void) tcp_clean_death(tcp,
16080 tcp->tcp_client_errno, 22);
16081 }
16082 } else if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16083 /*
16084 * If there isn't already a timer running
16085 * start one. Use a 4 second
16086 * timer as a fallback since it can't fail.
16087 */
16088 tcp->tcp_timeout = B_TRUE;
16089 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16090 MSEC_TO_TICK(4000));
16091 }
16092 }
16093 }
16094
16095 /*
16096 * The read side service routine is called mostly when we get back-enabled as a
16097 * result of flow control relief. Since we don't actually queue anything in
16098 * TCP, we have no data to send out of here. What we do is clear the receive
16099 * window, and send out a window update.
16100 * This routine is also called to drive an orderly release message upstream
16101 * if the attempt in tcp_rput failed.
16102 */
16103 static void
16104 tcp_rsrv(queue_t *q)
16105 {
16106 conn_t *connp = Q_TO_CONN(q);
16107 tcp_t *tcp = connp->conn_tcp;
16108 mblk_t *mp;
16109 tcp_stack_t *tcps = tcp->tcp_tcps;
16110
16111 /* No code does a putq on the read side */
16112 ASSERT(q->q_first == NULL);
16113
16114 /* Nothing to do for the default queue */
16115 if (q == tcps->tcps_g_q) {
16116 return;
16117 }
16118
16119 mp = allocb(0, BPRI_HI);
16120 if (mp == NULL) {
16121 /*
16122 * We are under memory pressure. Return for now and
16123 * we will be called again later.
16124 */
16125 if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16126 /*
16127 * If there isn't already a timer running
16128 * start one. Use a 4 second
16129 * timer as a fallback since it can't fail.
16130 */
16131 tcp->tcp_timeout = B_TRUE;
16132 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16133 MSEC_TO_TICK(4000));
16134 }
16135 return;
16136 }
16137 CONN_INC_REF(connp);
16138 squeue_enter(connp->conn_sqp, mp, tcp_rsrv_input, connp,
16139 SQTAG_TCP_RSRV);
16140 }
16141
16142 /*
16143 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
16144 * We do not allow the receive window to shrink. After setting rwnd,
16145 * set the flow control hiwat of the stream.
16146 *
16147 * This function is called in 2 cases:
16148 *
16149 * 1) Before data transfer begins, in tcp_accept_comm() for accepting a
16150 * connection (passive open) and in tcp_rput_data() for active connect.
16151 * This is called after tcp_mss_set() when the desired MSS value is known.
16152 * This makes sure that our window size is a multiple of the other side's
16153 * MSS.
16154 * 2) Handling SO_RCVBUF option.
16155 *
16156 * It is ASSUMED that the requested size is a multiple of the current MSS.
16157 *
16158 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
16159 * user requests so.
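*
* [Editor's aside -- a hedged sketch of the floor and rounding the
* function enforces; MSS_ROUNDUP is the macro used by the real
* code, the composition here is illustrative:]
*
*	// Never advertise less than the configured minimum, and
*	// keep the window a whole number of MSS units.
*	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);
*	rwnd = MSS_ROUNDUP(rwnd, mss);
*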
16160 */ 16161 static int 16162 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd) 16163 { 16164 uint32_t mss = tcp->tcp_mss; 16165 uint32_t old_max_rwnd; 16166 uint32_t max_transmittable_rwnd; 16167 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 16168 tcp_stack_t *tcps = tcp->tcp_tcps; 16169 16170 if (tcp->tcp_fused) { 16171 size_t sth_hiwat; 16172 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 16173 16174 ASSERT(peer_tcp != NULL); 16175 /* 16176 * Record the stream head's high water mark for 16177 * this endpoint; this is used for flow-control 16178 * purposes in tcp_fuse_output(). 16179 */ 16180 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd); 16181 if (!tcp_detached) 16182 (void) mi_set_sth_hiwat(tcp->tcp_rq, sth_hiwat); 16183 16184 /* 16185 * In the fusion case, the maxpsz stream head value of 16186 * our peer is set according to its send buffer size 16187 * and our receive buffer size; since the latter may 16188 * have changed we need to update the peer's maxpsz. 16189 */ 16190 (void) tcp_maxpsz_set(peer_tcp, B_TRUE); 16191 return (rwnd); 16192 } 16193 16194 if (tcp_detached) 16195 old_max_rwnd = tcp->tcp_rwnd; 16196 else 16197 old_max_rwnd = tcp->tcp_rq->q_hiwat; 16198 16199 /* 16200 * Insist on a receive window that is at least 16201 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid 16202 * funny TCP interactions of Nagle algorithm, SWS avoidance 16203 * and delayed acknowledgement. 16204 */ 16205 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss); 16206 16207 /* 16208 * If window size info has already been exchanged, TCP should not 16209 * shrink the window. Shrinking window is doable if done carefully. 16210 * We may add that support later. But so far there is not a real 16211 * need to do that. 16212 */ 16213 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) { 16214 /* MSS may have changed, do a round up again. */ 16215 rwnd = MSS_ROUNDUP(old_max_rwnd, mss); 16216 } 16217 16218 /* 16219 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check 16220 * can be applied even before the window scale option is decided. 16221 */ 16222 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws; 16223 if (rwnd > max_transmittable_rwnd) { 16224 rwnd = max_transmittable_rwnd - 16225 (max_transmittable_rwnd % mss); 16226 if (rwnd < mss) 16227 rwnd = max_transmittable_rwnd; 16228 /* 16229 * If we're over the limit we may have to back down tcp_rwnd. 16230 * The increment below won't work for us. So we set all three 16231 * here and the increment below will have no effect. 16232 */ 16233 tcp->tcp_rwnd = old_max_rwnd = rwnd; 16234 } 16235 if (tcp->tcp_localnet) { 16236 tcp->tcp_rack_abs_max = 16237 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2); 16238 } else { 16239 /* 16240 * For a remote host on a different subnet (through a router), 16241 * we ack every other packet to be conforming to RFC1122. 16242 * tcp_deferred_acks_max is default to 2. 16243 */ 16244 tcp->tcp_rack_abs_max = 16245 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2); 16246 } 16247 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max) 16248 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 16249 else 16250 tcp->tcp_rack_cur_max = 0; 16251 /* 16252 * Increment the current rwnd by the amount the maximum grew (we 16253 * can not overwrite it since we might be in the middle of a 16254 * connection.) 
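*
* [Editor's aside -- i.e. only the delta is applied, so bytes
* already advertised to the peer are never taken back; a hedged
* restatement:]
*
*	// old_max_rwnd is what was advertised before; tcp_rwnd may
*	// be transiently smaller while data is in flight, so grow
*	// it by the difference rather than overwriting it.
*	tcp->tcp_rwnd += rwnd - old_max_rwnd;
*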
16255 */ 16256 tcp->tcp_rwnd += rwnd - old_max_rwnd; 16257 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 16258 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max) 16259 tcp->tcp_cwnd_max = rwnd; 16260 16261 if (tcp_detached) 16262 return (rwnd); 16263 /* 16264 * We set the maximum receive window into rq->q_hiwat. 16265 * This is not actually used for flow control. 16266 */ 16267 tcp->tcp_rq->q_hiwat = rwnd; 16268 /* 16269 * Set the Stream head high water mark. This doesn't have to be 16270 * here, since we are simply using default values, but we would 16271 * prefer to choose these values algorithmically, with a likely 16272 * relationship to rwnd. 16273 */ 16274 (void) mi_set_sth_hiwat(tcp->tcp_rq, 16275 MAX(rwnd, tcps->tcps_sth_rcv_hiwat)); 16276 return (rwnd); 16277 } 16278 16279 /* 16280 * Return SNMP stuff in buffer in mpdata. 16281 */ 16282 mblk_t * 16283 tcp_snmp_get(queue_t *q, mblk_t *mpctl) 16284 { 16285 mblk_t *mpdata; 16286 mblk_t *mp_conn_ctl = NULL; 16287 mblk_t *mp_conn_tail; 16288 mblk_t *mp_attr_ctl = NULL; 16289 mblk_t *mp_attr_tail; 16290 mblk_t *mp6_conn_ctl = NULL; 16291 mblk_t *mp6_conn_tail; 16292 mblk_t *mp6_attr_ctl = NULL; 16293 mblk_t *mp6_attr_tail; 16294 struct opthdr *optp; 16295 mib2_tcpConnEntry_t tce; 16296 mib2_tcp6ConnEntry_t tce6; 16297 mib2_transportMLPEntry_t mlp; 16298 connf_t *connfp; 16299 int i; 16300 boolean_t ispriv; 16301 zoneid_t zoneid; 16302 int v4_conn_idx; 16303 int v6_conn_idx; 16304 conn_t *connp = Q_TO_CONN(q); 16305 tcp_stack_t *tcps; 16306 ip_stack_t *ipst; 16307 mblk_t *mp2ctl; 16308 16309 /* 16310 * make a copy of the original message 16311 */ 16312 mp2ctl = copymsg(mpctl); 16313 16314 if (mpctl == NULL || 16315 (mpdata = mpctl->b_cont) == NULL || 16316 (mp_conn_ctl = copymsg(mpctl)) == NULL || 16317 (mp_attr_ctl = copymsg(mpctl)) == NULL || 16318 (mp6_conn_ctl = copymsg(mpctl)) == NULL || 16319 (mp6_attr_ctl = copymsg(mpctl)) == NULL) { 16320 freemsg(mp_conn_ctl); 16321 freemsg(mp_attr_ctl); 16322 freemsg(mp6_conn_ctl); 16323 freemsg(mp6_attr_ctl); 16324 freemsg(mpctl); 16325 freemsg(mp2ctl); 16326 return (NULL); 16327 } 16328 16329 ipst = connp->conn_netstack->netstack_ip; 16330 tcps = connp->conn_netstack->netstack_tcp; 16331 16332 /* build table of connections -- need count in fixed part */ 16333 SET_MIB(tcps->tcps_mib.tcpRtoAlgorithm, 4); /* vanj */ 16334 SET_MIB(tcps->tcps_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min); 16335 SET_MIB(tcps->tcps_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max); 16336 SET_MIB(tcps->tcps_mib.tcpMaxConn, -1); 16337 SET_MIB(tcps->tcps_mib.tcpCurrEstab, 0); 16338 16339 ispriv = 16340 secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0; 16341 zoneid = Q_TO_CONN(q)->conn_zoneid; 16342 16343 v4_conn_idx = v6_conn_idx = 0; 16344 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL; 16345 16346 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16347 ipst = tcps->tcps_netstack->netstack_ip; 16348 16349 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16350 16351 connp = NULL; 16352 16353 while ((connp = 16354 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16355 tcp_t *tcp; 16356 boolean_t needattr; 16357 16358 if (connp->conn_zoneid != zoneid) 16359 continue; /* not in this zone */ 16360 16361 tcp = connp->conn_tcp; 16362 UPDATE_MIB(&tcps->tcps_mib, 16363 tcpHCInSegs, tcp->tcp_ibsegs); 16364 tcp->tcp_ibsegs = 0; 16365 UPDATE_MIB(&tcps->tcps_mib, 16366 tcpHCOutSegs, tcp->tcp_obsegs); 16367 tcp->tcp_obsegs = 0; 16368 16369 tce6.tcp6ConnState = tce.tcpConnState = 
16370 tcp_snmp_state(tcp); 16371 if (tce.tcpConnState == MIB2_TCP_established || 16372 tce.tcpConnState == MIB2_TCP_closeWait) 16373 BUMP_MIB(&tcps->tcps_mib, tcpCurrEstab); 16374 16375 needattr = B_FALSE; 16376 bzero(&mlp, sizeof (mlp)); 16377 if (connp->conn_mlp_type != mlptSingle) { 16378 if (connp->conn_mlp_type == mlptShared || 16379 connp->conn_mlp_type == mlptBoth) 16380 mlp.tme_flags |= MIB2_TMEF_SHARED; 16381 if (connp->conn_mlp_type == mlptPrivate || 16382 connp->conn_mlp_type == mlptBoth) 16383 mlp.tme_flags |= MIB2_TMEF_PRIVATE; 16384 needattr = B_TRUE; 16385 } 16386 if (connp->conn_peercred != NULL) { 16387 ts_label_t *tsl; 16388 16389 tsl = crgetlabel(connp->conn_peercred); 16390 mlp.tme_doi = label2doi(tsl); 16391 mlp.tme_label = *label2bslabel(tsl); 16392 needattr = B_TRUE; 16393 } 16394 16395 /* Create a message to report on IPv6 entries */ 16396 if (tcp->tcp_ipversion == IPV6_VERSION) { 16397 tce6.tcp6ConnLocalAddress = tcp->tcp_ip_src_v6; 16398 tce6.tcp6ConnRemAddress = tcp->tcp_remote_v6; 16399 tce6.tcp6ConnLocalPort = ntohs(tcp->tcp_lport); 16400 tce6.tcp6ConnRemPort = ntohs(tcp->tcp_fport); 16401 tce6.tcp6ConnIfIndex = tcp->tcp_bound_if; 16402 /* Don't want just anybody seeing these... */ 16403 if (ispriv) { 16404 tce6.tcp6ConnEntryInfo.ce_snxt = 16405 tcp->tcp_snxt; 16406 tce6.tcp6ConnEntryInfo.ce_suna = 16407 tcp->tcp_suna; 16408 tce6.tcp6ConnEntryInfo.ce_rnxt = 16409 tcp->tcp_rnxt; 16410 tce6.tcp6ConnEntryInfo.ce_rack = 16411 tcp->tcp_rack; 16412 } else { 16413 /* 16414 * Netstat, unfortunately, uses this to 16415 * get send/receive queue sizes. How to fix? 16416 * Why not compute the difference only? 16417 */ 16418 tce6.tcp6ConnEntryInfo.ce_snxt = 16419 tcp->tcp_snxt - tcp->tcp_suna; 16420 tce6.tcp6ConnEntryInfo.ce_suna = 0; 16421 tce6.tcp6ConnEntryInfo.ce_rnxt = 16422 tcp->tcp_rnxt - tcp->tcp_rack; 16423 tce6.tcp6ConnEntryInfo.ce_rack = 0; 16424 } 16425 16426 tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16427 tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16428 tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto; 16429 tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss; 16430 tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state; 16431 16432 tce6.tcp6ConnCreationProcess = 16433 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16434 tcp->tcp_cpid; 16435 tce6.tcp6ConnCreationTime = tcp->tcp_open_time; 16436 16437 (void) snmp_append_data2(mp6_conn_ctl->b_cont, 16438 &mp6_conn_tail, (char *)&tce6, sizeof (tce6)); 16439 16440 mlp.tme_connidx = v6_conn_idx++; 16441 if (needattr) 16442 (void) snmp_append_data2(mp6_attr_ctl->b_cont, 16443 &mp6_attr_tail, (char *)&mlp, sizeof (mlp)); 16444 } 16445 /* 16446 * Create an IPv4 table entry for IPv4 entries and also 16447 * for IPv6 entries which are bound to in6addr_any 16448 * but don't have IPV6_V6ONLY set. 16449 * (i.e. anything an IPv4 peer could connect to) 16450 */ 16451 if (tcp->tcp_ipversion == IPV4_VERSION || 16452 (tcp->tcp_state <= TCPS_LISTEN && 16453 !tcp->tcp_connp->conn_ipv6_v6only && 16454 IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip_src_v6))) { 16455 if (tcp->tcp_ipversion == IPV6_VERSION) { 16456 tce.tcpConnRemAddress = INADDR_ANY; 16457 tce.tcpConnLocalAddress = INADDR_ANY; 16458 } else { 16459 tce.tcpConnRemAddress = 16460 tcp->tcp_remote; 16461 tce.tcpConnLocalAddress = 16462 tcp->tcp_ip_src; 16463 } 16464 tce.tcpConnLocalPort = ntohs(tcp->tcp_lport); 16465 tce.tcpConnRemPort = ntohs(tcp->tcp_fport); 16466 /* Don't want just anybody seeing these... 
*/ 16467 if (ispriv) { 16468 tce.tcpConnEntryInfo.ce_snxt = 16469 tcp->tcp_snxt; 16470 tce.tcpConnEntryInfo.ce_suna = 16471 tcp->tcp_suna; 16472 tce.tcpConnEntryInfo.ce_rnxt = 16473 tcp->tcp_rnxt; 16474 tce.tcpConnEntryInfo.ce_rack = 16475 tcp->tcp_rack; 16476 } else { 16477 /* 16478 * Netstat, unfortunately, uses this to 16479 * get send/receive queue sizes. How 16480 * to fix? 16481 * Why not compute the difference only? 16482 */ 16483 tce.tcpConnEntryInfo.ce_snxt = 16484 tcp->tcp_snxt - tcp->tcp_suna; 16485 tce.tcpConnEntryInfo.ce_suna = 0; 16486 tce.tcpConnEntryInfo.ce_rnxt = 16487 tcp->tcp_rnxt - tcp->tcp_rack; 16488 tce.tcpConnEntryInfo.ce_rack = 0; 16489 } 16490 16491 tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16492 tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16493 tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto; 16494 tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss; 16495 tce.tcpConnEntryInfo.ce_state = 16496 tcp->tcp_state; 16497 16498 tce.tcpConnCreationProcess = 16499 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16500 tcp->tcp_cpid; 16501 tce.tcpConnCreationTime = tcp->tcp_open_time; 16502 16503 (void) snmp_append_data2(mp_conn_ctl->b_cont, 16504 &mp_conn_tail, (char *)&tce, sizeof (tce)); 16505 16506 mlp.tme_connidx = v4_conn_idx++; 16507 if (needattr) 16508 (void) snmp_append_data2( 16509 mp_attr_ctl->b_cont, 16510 &mp_attr_tail, (char *)&mlp, 16511 sizeof (mlp)); 16512 } 16513 } 16514 } 16515 16516 /* fixed length structure for IPv4 and IPv6 counters */ 16517 SET_MIB(tcps->tcps_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t)); 16518 SET_MIB(tcps->tcps_mib.tcp6ConnTableSize, 16519 sizeof (mib2_tcp6ConnEntry_t)); 16520 /* synchronize 32- and 64-bit counters */ 16521 SYNC32_MIB(&tcps->tcps_mib, tcpInSegs, tcpHCInSegs); 16522 SYNC32_MIB(&tcps->tcps_mib, tcpOutSegs, tcpHCOutSegs); 16523 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 16524 optp->level = MIB2_TCP; 16525 optp->name = 0; 16526 (void) snmp_append_data(mpdata, (char *)&tcps->tcps_mib, 16527 sizeof (tcps->tcps_mib)); 16528 optp->len = msgdsize(mpdata); 16529 qreply(q, mpctl); 16530 16531 /* table of connections... */ 16532 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[ 16533 sizeof (struct T_optmgmt_ack)]; 16534 optp->level = MIB2_TCP; 16535 optp->name = MIB2_TCP_CONN; 16536 optp->len = msgdsize(mp_conn_ctl->b_cont); 16537 qreply(q, mp_conn_ctl); 16538 16539 /* table of MLP attributes... */ 16540 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[ 16541 sizeof (struct T_optmgmt_ack)]; 16542 optp->level = MIB2_TCP; 16543 optp->name = EXPER_XPORT_MLP; 16544 optp->len = msgdsize(mp_attr_ctl->b_cont); 16545 if (optp->len == 0) 16546 freemsg(mp_attr_ctl); 16547 else 16548 qreply(q, mp_attr_ctl); 16549 16550 /* table of IPv6 connections... */ 16551 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[ 16552 sizeof (struct T_optmgmt_ack)]; 16553 optp->level = MIB2_TCP6; 16554 optp->name = MIB2_TCP6_CONN; 16555 optp->len = msgdsize(mp6_conn_ctl->b_cont); 16556 qreply(q, mp6_conn_ctl); 16557 16558 /* table of IPv6 MLP attributes... 
*/ 16559 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[ 16560 sizeof (struct T_optmgmt_ack)]; 16561 optp->level = MIB2_TCP6; 16562 optp->name = EXPER_XPORT_MLP; 16563 optp->len = msgdsize(mp6_attr_ctl->b_cont); 16564 if (optp->len == 0) 16565 freemsg(mp6_attr_ctl); 16566 else 16567 qreply(q, mp6_attr_ctl); 16568 return (mp2ctl); 16569 } 16570 16571 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */ 16572 /* ARGSUSED */ 16573 int 16574 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len) 16575 { 16576 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr; 16577 16578 switch (level) { 16579 case MIB2_TCP: 16580 switch (name) { 16581 case 13: 16582 if (tce->tcpConnState != MIB2_TCP_deleteTCB) 16583 return (0); 16584 /* TODO: delete entry defined by tce */ 16585 return (1); 16586 default: 16587 return (0); 16588 } 16589 default: 16590 return (1); 16591 } 16592 } 16593 16594 /* Translate TCP state to MIB2 TCP state. */ 16595 static int 16596 tcp_snmp_state(tcp_t *tcp) 16597 { 16598 if (tcp == NULL) 16599 return (0); 16600 16601 switch (tcp->tcp_state) { 16602 case TCPS_CLOSED: 16603 case TCPS_IDLE: /* RFC1213 doesn't have an analogue for IDLE & BOUND */ 16604 case TCPS_BOUND: 16605 return (MIB2_TCP_closed); 16606 case TCPS_LISTEN: 16607 return (MIB2_TCP_listen); 16608 case TCPS_SYN_SENT: 16609 return (MIB2_TCP_synSent); 16610 case TCPS_SYN_RCVD: 16611 return (MIB2_TCP_synReceived); 16612 case TCPS_ESTABLISHED: 16613 return (MIB2_TCP_established); 16614 case TCPS_CLOSE_WAIT: 16615 return (MIB2_TCP_closeWait); 16616 case TCPS_FIN_WAIT_1: 16617 return (MIB2_TCP_finWait1); 16618 case TCPS_CLOSING: 16619 return (MIB2_TCP_closing); 16620 case TCPS_LAST_ACK: 16621 return (MIB2_TCP_lastAck); 16622 case TCPS_FIN_WAIT_2: 16623 return (MIB2_TCP_finWait2); 16624 case TCPS_TIME_WAIT: 16625 return (MIB2_TCP_timeWait); 16626 default: 16627 return (0); 16628 } 16629 } 16630 16631 static char tcp_report_header[] = 16632 "TCP " MI_COL_HDRPAD_STR 16633 "zone dest snxt suna " 16634 "swnd rnxt rack rwnd rto mss w sw rw t " 16635 "recent [lport,fport] state"; 16636 16637 /* 16638 * TCP status report triggered via the Named Dispatch mechanism. 16639 */ 16640 /* ARGSUSED */ 16641 static void 16642 tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval, tcp_t *thisstream, 16643 cred_t *cr) 16644 { 16645 char hash[10], addrbuf[INET6_ADDRSTRLEN]; 16646 boolean_t ispriv = secpolicy_ip_config(cr, B_TRUE) == 0; 16647 char cflag; 16648 in6_addr_t v6dst; 16649 char buf[80]; 16650 uint_t print_len, buf_len; 16651 16652 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16653 if (buf_len <= 0) 16654 return; 16655 16656 if (hashval >= 0) 16657 (void) sprintf(hash, "%03d ", hashval); 16658 else 16659 hash[0] = '\0'; 16660 16661 /* 16662 * Note that we use the remote address in the tcp_b structure. 16663 * This means that it will print out the real destination address, 16664 * not the next hop's address if source routing is used. This 16665 * avoids confusion in the output because the user may not 16666 * know that source routing is used for a connection. 16667 */ 16668 if (tcp->tcp_ipversion == IPV4_VERSION) { 16669 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &v6dst); 16670 } else { 16671 v6dst = tcp->tcp_remote_v6; 16672 } 16673 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16674 /* 16675 * The ispriv checks are so that normal users cannot determine 16676 * sequence number information using NDD.
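 *
 * (Editor's note: absolute sequence numbers would let an unprivileged
 * observer forge in-window RST or data segments, hence the masking
 * idiom used by the report below, e.g.
 *
 *	(ispriv) ? tcp->tcp_snxt : 0
 *
 * while tcp_snmp_get() hides the absolute values by reporting only
 * the snxt - suna and rnxt - rack differences.)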
16677 */ 16678 16679 if (TCP_IS_DETACHED(tcp)) 16680 cflag = '*'; 16681 else 16682 cflag = ' '; 16683 print_len = snprintf((char *)mp->b_wptr, buf_len, 16684 "%s " MI_COL_PTRFMT_STR "%d %s %08x %08x %010d %08x %08x " 16685 "%010d %05ld %05d %1d %02d %02d %1d %08x %s%c\n", 16686 hash, 16687 (void *)tcp, 16688 tcp->tcp_connp->conn_zoneid, 16689 addrbuf, 16690 (ispriv) ? tcp->tcp_snxt : 0, 16691 (ispriv) ? tcp->tcp_suna : 0, 16692 tcp->tcp_swnd, 16693 (ispriv) ? tcp->tcp_rnxt : 0, 16694 (ispriv) ? tcp->tcp_rack : 0, 16695 tcp->tcp_rwnd, 16696 tcp->tcp_rto, 16697 tcp->tcp_mss, 16698 tcp->tcp_snd_ws_ok, 16699 tcp->tcp_snd_ws, 16700 tcp->tcp_rcv_ws, 16701 tcp->tcp_snd_ts_ok, 16702 tcp->tcp_ts_recent, 16703 tcp_display(tcp, buf, DISP_PORT_ONLY), cflag); 16704 if (print_len < buf_len) { 16705 ((mblk_t *)mp)->b_wptr += print_len; 16706 } else { 16707 ((mblk_t *)mp)->b_wptr += buf_len; 16708 } 16709 } 16710 16711 /* 16712 * TCP status report (for listeners only) triggered via the Named Dispatch 16713 * mechanism. 16714 */ 16715 /* ARGSUSED */ 16716 static void 16717 tcp_report_listener(mblk_t *mp, tcp_t *tcp, int hashval) 16718 { 16719 char addrbuf[INET6_ADDRSTRLEN]; 16720 in6_addr_t v6dst; 16721 uint_t print_len, buf_len; 16722 16723 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16724 if (buf_len <= 0) 16725 return; 16726 16727 if (tcp->tcp_ipversion == IPV4_VERSION) { 16728 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, &v6dst); 16729 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16730 } else { 16731 (void) inet_ntop(AF_INET6, &tcp->tcp_ip6h->ip6_src, 16732 addrbuf, sizeof (addrbuf)); 16733 } 16734 print_len = snprintf((char *)mp->b_wptr, buf_len, 16735 "%03d " 16736 MI_COL_PTRFMT_STR 16737 "%d %s %05u %08u %d/%d/%d%c\n", 16738 hashval, (void *)tcp, 16739 tcp->tcp_connp->conn_zoneid, 16740 addrbuf, 16741 (uint_t)BE16_TO_U16(tcp->tcp_tcph->th_lport), 16742 tcp->tcp_conn_req_seqnum, 16743 tcp->tcp_conn_req_cnt_q0, tcp->tcp_conn_req_cnt_q, 16744 tcp->tcp_conn_req_max, 16745 tcp->tcp_syn_defense ? '*' : ' '); 16746 if (print_len < buf_len) { 16747 ((mblk_t *)mp)->b_wptr += print_len; 16748 } else { 16749 ((mblk_t *)mp)->b_wptr += buf_len; 16750 } 16751 } 16752 16753 /* TCP status report triggered via the Named Dispatch mechanism. */ 16754 /* ARGSUSED */ 16755 static int 16756 tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16757 { 16758 tcp_t *tcp; 16759 int i; 16760 conn_t *connp; 16761 connf_t *connfp; 16762 zoneid_t zoneid; 16763 tcp_stack_t *tcps; 16764 ip_stack_t *ipst; 16765 16766 zoneid = Q_TO_CONN(q)->conn_zoneid; 16767 tcps = Q_TO_TCP(q)->tcp_tcps; 16768 16769 /* 16770 * Because of the ndd constraint, at most we can have 64K buffer 16771 * to put in all TCP info. So to be more efficient, just 16772 * allocate a 64K buffer here, assuming we need that large buffer. 16773 * This may be a problem as any user can read tcp_status. Therefore 16774 * we limit the rate of doing this using tcp_ndd_get_info_interval. 16775 * This should be OK as normal users should not do this too often. 16776 */ 16777 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16778 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16779 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16780 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16781 return (0); 16782 } 16783 } 16784 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16785 /* The following may work even if we cannot get a large buf. 
*/ 16786 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16787 return (0); 16788 } 16789 16790 (void) mi_mpprintf(mp, "%s", tcp_report_header); 16791 16792 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16793 16794 ipst = tcps->tcps_netstack->netstack_ip; 16795 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16796 16797 connp = NULL; 16798 16799 while ((connp = 16800 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16801 tcp = connp->conn_tcp; 16802 if (zoneid != GLOBAL_ZONEID && 16803 zoneid != connp->conn_zoneid) 16804 continue; 16805 tcp_report_item(mp->b_cont, tcp, -1, tcp, 16806 cr); 16807 } 16808 16809 } 16810 16811 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16812 return (0); 16813 } 16814 16815 /* TCP status report triggered via the Named Dispatch mechanism. */ 16816 /* ARGSUSED */ 16817 static int 16818 tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16819 { 16820 tf_t *tbf; 16821 tcp_t *tcp; 16822 int i; 16823 zoneid_t zoneid; 16824 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 16825 16826 zoneid = Q_TO_CONN(q)->conn_zoneid; 16827 16828 /* Refer to comments in tcp_status_report(). */ 16829 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16830 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16831 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16832 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16833 return (0); 16834 } 16835 } 16836 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16837 /* The following may work even if we cannot get a large buf. */ 16838 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16839 return (0); 16840 } 16841 16842 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16843 16844 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 16845 tbf = &tcps->tcps_bind_fanout[i]; 16846 mutex_enter(&tbf->tf_lock); 16847 for (tcp = tbf->tf_tcp; tcp != NULL; 16848 tcp = tcp->tcp_bind_hash) { 16849 if (zoneid != GLOBAL_ZONEID && 16850 zoneid != tcp->tcp_connp->conn_zoneid) 16851 continue; 16852 CONN_INC_REF(tcp->tcp_connp); 16853 tcp_report_item(mp->b_cont, tcp, i, 16854 Q_TO_TCP(q), cr); 16855 CONN_DEC_REF(tcp->tcp_connp); 16856 } 16857 mutex_exit(&tbf->tf_lock); 16858 } 16859 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16860 return (0); 16861 } 16862 16863 /* TCP status report triggered via the Named Dispatch mechanism. */ 16864 /* ARGSUSED */ 16865 static int 16866 tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16867 { 16868 connf_t *connfp; 16869 conn_t *connp; 16870 tcp_t *tcp; 16871 int i; 16872 zoneid_t zoneid; 16873 tcp_stack_t *tcps; 16874 ip_stack_t *ipst; 16875 16876 zoneid = Q_TO_CONN(q)->conn_zoneid; 16877 tcps = Q_TO_TCP(q)->tcp_tcps; 16878 16879 /* Refer to comments in tcp_status_report(). */ 16880 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16881 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16882 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16883 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16884 return (0); 16885 } 16886 } 16887 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16888 /* The following may work even if we cannot get a large buf. 
*/ 16889 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16890 return (0); 16891 } 16892 16893 (void) mi_mpprintf(mp, 16894 " TCP " MI_COL_HDRPAD_STR 16895 "zone IP addr port seqnum backlog (q0/q/max)"); 16896 16897 ipst = tcps->tcps_netstack->netstack_ip; 16898 16899 for (i = 0; i < ipst->ips_ipcl_bind_fanout_size; i++) { 16900 connfp = &ipst->ips_ipcl_bind_fanout[i]; 16901 connp = NULL; 16902 while ((connp = 16903 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16904 tcp = connp->conn_tcp; 16905 if (zoneid != GLOBAL_ZONEID && 16906 zoneid != connp->conn_zoneid) 16907 continue; 16908 tcp_report_listener(mp->b_cont, tcp, i); 16909 } 16910 } 16911 16912 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16913 return (0); 16914 } 16915 16916 /* TCP status report triggered via the Named Dispatch mechanism. */ 16917 /* ARGSUSED */ 16918 static int 16919 tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16920 { 16921 connf_t *connfp; 16922 conn_t *connp; 16923 tcp_t *tcp; 16924 int i; 16925 zoneid_t zoneid; 16926 tcp_stack_t *tcps; 16927 ip_stack_t *ipst; 16928 16929 zoneid = Q_TO_CONN(q)->conn_zoneid; 16930 tcps = Q_TO_TCP(q)->tcp_tcps; 16931 ipst = tcps->tcps_netstack->netstack_ip; 16932 16933 /* Refer to comments in tcp_status_report(). */ 16934 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16935 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16936 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16937 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16938 return (0); 16939 } 16940 } 16941 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16942 /* The following may work even if we cannot get a large buf. */ 16943 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16944 return (0); 16945 } 16946 16947 (void) mi_mpprintf(mp, "tcp_conn_hash_size = %d", 16948 ipst->ips_ipcl_conn_fanout_size); 16949 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16950 16951 for (i = 0; i < ipst->ips_ipcl_conn_fanout_size; i++) { 16952 connfp = &ipst->ips_ipcl_conn_fanout[i]; 16953 connp = NULL; 16954 while ((connp = 16955 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16956 tcp = connp->conn_tcp; 16957 if (zoneid != GLOBAL_ZONEID && 16958 zoneid != connp->conn_zoneid) 16959 continue; 16960 tcp_report_item(mp->b_cont, tcp, i, 16961 Q_TO_TCP(q), cr); 16962 } 16963 } 16964 16965 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16966 return (0); 16967 } 16968 16969 /* TCP status report triggered via the Named Dispatch mechanism. */ 16970 /* ARGSUSED */ 16971 static int 16972 tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16973 { 16974 tf_t *tf; 16975 tcp_t *tcp; 16976 int i; 16977 zoneid_t zoneid; 16978 tcp_stack_t *tcps; 16979 16980 zoneid = Q_TO_CONN(q)->conn_zoneid; 16981 tcps = Q_TO_TCP(q)->tcp_tcps; 16982 16983 /* Refer to comments in tcp_status_report(). */ 16984 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16985 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16986 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16987 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16988 return (0); 16989 } 16990 } 16991 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16992 /* The following may work even if we cannot get a large buf. 
*/ 16993 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16994 return (0); 16995 } 16996 16997 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16998 16999 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 17000 tf = &tcps->tcps_acceptor_fanout[i]; 17001 mutex_enter(&tf->tf_lock); 17002 for (tcp = tf->tf_tcp; tcp != NULL; 17003 tcp = tcp->tcp_acceptor_hash) { 17004 if (zoneid != GLOBAL_ZONEID && 17005 zoneid != tcp->tcp_connp->conn_zoneid) 17006 continue; 17007 tcp_report_item(mp->b_cont, tcp, i, 17008 Q_TO_TCP(q), cr); 17009 } 17010 mutex_exit(&tf->tf_lock); 17011 } 17012 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 17013 return (0); 17014 } 17015 17016 /* 17017 * tcp_timer is the timer service routine. It handles the retransmission, 17018 * FIN_WAIT_2 flush, and zero window probe timeout events. It figures out 17019 * from the state of the tcp instance what kind of action needs to be done 17020 * at the time it is called. 17021 */ 17022 static void 17023 tcp_timer(void *arg) 17024 { 17025 mblk_t *mp; 17026 clock_t first_threshold; 17027 clock_t second_threshold; 17028 clock_t ms; 17029 uint32_t mss; 17030 conn_t *connp = (conn_t *)arg; 17031 tcp_t *tcp = connp->conn_tcp; 17032 tcp_stack_t *tcps = tcp->tcp_tcps; 17033 17034 tcp->tcp_timer_tid = 0; 17035 17036 if (tcp->tcp_fused) 17037 return; 17038 17039 first_threshold = tcp->tcp_first_timer_threshold; 17040 second_threshold = tcp->tcp_second_timer_threshold; 17041 switch (tcp->tcp_state) { 17042 case TCPS_IDLE: 17043 case TCPS_BOUND: 17044 case TCPS_LISTEN: 17045 return; 17046 case TCPS_SYN_RCVD: { 17047 tcp_t *listener = tcp->tcp_listener; 17048 17049 if (tcp->tcp_syn_rcvd_timeout == 0 && (listener != NULL)) { 17050 ASSERT(tcp->tcp_rq == listener->tcp_rq); 17051 /* it's our first timeout */ 17052 tcp->tcp_syn_rcvd_timeout = 1; 17053 mutex_enter(&listener->tcp_eager_lock); 17054 listener->tcp_syn_rcvd_timeout++; 17055 if (!tcp->tcp_dontdrop && !tcp->tcp_closemp_used) { 17056 /* 17057 * Make this eager available for drop if we 17058 * need to drop one to accommodate a new 17059 * incoming SYN request. 17060 */ 17061 MAKE_DROPPABLE(listener, tcp); 17062 } 17063 if (!listener->tcp_syn_defense && 17064 (listener->tcp_syn_rcvd_timeout > 17065 (tcps->tcps_conn_req_max_q0 >> 2)) && 17066 (tcps->tcps_conn_req_max_q0 > 200)) { 17067 /* We may be under attack. Put on a defense. */ 17068 listener->tcp_syn_defense = B_TRUE; 17069 cmn_err(CE_WARN, "High TCP connect timeout " 17070 "rate! System (port %d) may be under a " 17071 "SYN flood attack!", 17072 BE16_TO_U16(listener->tcp_tcph->th_lport)); 17073 17074 listener->tcp_ip_addr_cache = kmem_zalloc( 17075 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t), 17076 KM_NOSLEEP); 17077 } 17078 mutex_exit(&listener->tcp_eager_lock); 17079 } else if (listener != NULL) { 17080 mutex_enter(&listener->tcp_eager_lock); 17081 tcp->tcp_syn_rcvd_timeout++; 17082 if (tcp->tcp_syn_rcvd_timeout > 1 && 17083 !tcp->tcp_closemp_used) { 17084 /* 17085 * This is our second timeout. Put the tcp in 17086 * the list of droppable eagers to allow it to 17087 * be dropped, if needed. We don't check 17088 * whether tcp_dontdrop is set or not, to 17089 * protect ourselves from a SYN attack where a 17090 * remote host can spoof itself as one of the 17091 * good IP sources and continue to hold 17092 * resources for too long.
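 *
 * Editor's illustration (the tunable value is made up): with
 * tcps_conn_req_max_q0 = 1024, the defense above engages once more
 * than 1024 >> 2 = 256 eagers have seen a first SYN-ACK timeout, and
 * the 1024 > 200 test keeps tiny backlogs from tripping the warning.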
17093 */ 17094 MAKE_DROPPABLE(listener, tcp); 17095 } 17096 mutex_exit(&listener->tcp_eager_lock); 17097 } 17098 } 17099 /* FALLTHRU */ 17100 case TCPS_SYN_SENT: 17101 first_threshold = tcp->tcp_first_ctimer_threshold; 17102 second_threshold = tcp->tcp_second_ctimer_threshold; 17103 break; 17104 case TCPS_ESTABLISHED: 17105 case TCPS_FIN_WAIT_1: 17106 case TCPS_CLOSING: 17107 case TCPS_CLOSE_WAIT: 17108 case TCPS_LAST_ACK: 17109 /* If we have data to rexmit */ 17110 if (tcp->tcp_suna != tcp->tcp_snxt) { 17111 clock_t time_to_wait; 17112 17113 BUMP_MIB(&tcps->tcps_mib, tcpTimRetrans); 17114 if (!tcp->tcp_xmit_head) 17115 break; 17116 time_to_wait = lbolt - 17117 (clock_t)tcp->tcp_xmit_head->b_prev; 17118 time_to_wait = tcp->tcp_rto - 17119 TICK_TO_MSEC(time_to_wait); 17120 /* 17121 * If the timer fired more than one clock tick too 17122 * early, restart it. 17123 */ 17124 if (time_to_wait > msec_per_tick) { 17125 TCP_STAT(tcps, tcp_timer_fire_early); 17126 TCP_TIMER_RESTART(tcp, time_to_wait); 17127 return; 17128 } 17129 /* 17130 * When we probe zero windows, we force the swnd open. 17131 * If our peer acks with a closed window, swnd will be 17132 * set to zero by tcp_rput(). As long as we are 17133 * receiving acks, tcp_rput will 17134 * reset 'tcp_ms_we_have_waited' so as not to trip the 17135 * first and second interval actions. NOTE: the timer 17136 * interval is allowed to continue its exponential 17137 * backoff. 17138 */ 17139 if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) { 17140 if (tcp->tcp_debug) { 17141 (void) strlog(TCP_MOD_ID, 0, 1, 17142 SL_TRACE, "tcp_timer: zero win"); 17143 } 17144 } else { 17145 /* 17146 * After retransmission, we need to do 17147 * slow start. Set the ssthresh to one 17148 * half of current effective window and 17149 * cwnd to one MSS. Also reset 17150 * tcp_cwnd_cnt. 17151 * 17152 * Note that if tcp_ssthresh is reduced because 17153 * of ECN, do not reduce it again unless it is 17154 * already one window of data away (tcp_cwr 17155 * should then be cleared) or this is a 17156 * timeout for a retransmitted segment. 17157 */ 17158 uint32_t npkt; 17159 17160 if (!tcp->tcp_cwr || tcp->tcp_rexmit) { 17161 npkt = ((tcp->tcp_timer_backoff ? 17162 tcp->tcp_cwnd_ssthresh : 17163 tcp->tcp_snxt - 17164 tcp->tcp_suna) >> 1) / tcp->tcp_mss; 17165 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 17166 tcp->tcp_mss; 17167 } 17168 tcp->tcp_cwnd = tcp->tcp_mss; 17169 tcp->tcp_cwnd_cnt = 0; 17170 if (tcp->tcp_ecn_ok) { 17171 tcp->tcp_cwr = B_TRUE; 17172 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 17173 tcp->tcp_ecn_cwr_sent = B_FALSE; 17174 } 17175 } 17176 break; 17177 } 17178 /* 17179 * We have something to send yet we cannot send. The 17180 * reason can be: 17181 * 17182 * 1. Zero send window: we need to do zero window probe. 17183 * 2. Zero cwnd: because of ECN, we need to "clock out" 17184 * segments. 17185 * 3. SWS avoidance: the receiver may have shrunk its window; 17186 * reset our knowledge. 17187 * 17188 * Note that condition 2 can happen with either 1 or 17189 * 3. But 1 and 3 are exclusive. 17190 */ 17191 if (tcp->tcp_unsent != 0) { 17192 if (tcp->tcp_cwnd == 0) { 17193 /* 17194 * Set tcp_cwnd to 1 MSS so that a 17195 * new segment can be sent out. We 17196 * are "clocking out" new data when 17197 * the network is really congested.
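 *
 * Editor's illustration of the slow start restart above (made-up
 * numbers): with 11680 bytes outstanding (snxt - suna) and
 * mss = 1460, npkt = (11680 >> 1) / 1460 = 4, so tcp_cwnd_ssthresh
 * becomes 4 * 1460 = 5840 while tcp_cwnd drops to a single
 * 1460-byte segment.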
17198 */ 17199 ASSERT(tcp->tcp_ecn_ok); 17200 tcp->tcp_cwnd = tcp->tcp_mss; 17201 } 17202 if (tcp->tcp_swnd == 0) { 17203 /* Extend window for zero window probe */ 17204 tcp->tcp_swnd++; 17205 tcp->tcp_zero_win_probe = B_TRUE; 17206 BUMP_MIB(&tcps->tcps_mib, tcpOutWinProbe); 17207 } else { 17208 /* 17209 * Handle timeout from sender SWS avoidance. 17210 * Reset our knowledge of the max send window 17211 * since the receiver might have reduced its 17212 * receive buffer. Avoid setting tcp_max_swnd 17213 * to one since that will essentially disable 17214 * the SWS checks. 17215 * 17216 * Note that since we don't have a SWS 17217 * state variable, if the timeout is set 17218 * for ECN but not for SWS, this 17219 * code will also be executed. This is 17220 * fine as tcp_max_swnd is updated 17221 * constantly and it will not affect 17222 * anything. 17223 */ 17224 tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2); 17225 } 17226 tcp_wput_data(tcp, NULL, B_FALSE); 17227 return; 17228 } 17229 /* Is there a FIN that needs to be retransmitted? */ 17230 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 17231 !tcp->tcp_fin_acked) 17232 break; 17233 /* Nothing to do, return without restarting timer. */ 17234 TCP_STAT(tcps, tcp_timer_fire_miss); 17235 return; 17236 case TCPS_FIN_WAIT_2: 17237 /* 17238 * User closed the TCP endpoint and peer ACK'ed our FIN. 17239 * We waited some time for the peer's FIN, but it hasn't 17240 * arrived. We flush the connection now to avoid the 17241 * case where the peer has rebooted. 17242 */ 17243 if (TCP_IS_DETACHED(tcp)) { 17244 (void) tcp_clean_death(tcp, 0, 23); 17245 } else { 17246 TCP_TIMER_RESTART(tcp, 17247 tcps->tcps_fin_wait_2_flush_interval); 17248 } 17249 return; 17250 case TCPS_TIME_WAIT: 17251 (void) tcp_clean_death(tcp, 0, 24); 17252 return; 17253 default: 17254 if (tcp->tcp_debug) { 17255 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 17256 "tcp_timer: strange state (%d) %s", 17257 tcp->tcp_state, tcp_display(tcp, NULL, 17258 DISP_PORT_ONLY)); 17259 } 17260 return; 17261 } 17262 if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) { 17263 /* 17264 * For a zero window probe, we need to send indefinitely, 17265 * unless we have not heard from the other side for some 17266 * time... 17267 */ 17268 if ((tcp->tcp_zero_win_probe == 0) || 17269 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) > 17270 second_threshold)) { 17271 BUMP_MIB(&tcps->tcps_mib, tcpTimRetransDrop); 17272 /* 17273 * If TCP is in SYN_RCVD state, send back a 17274 * RST|ACK as BSD does. Note that tcp_zero_win_probe 17275 * should be zero in TCPS_SYN_RCVD state. 17276 */ 17277 if (tcp->tcp_state == TCPS_SYN_RCVD) { 17278 tcp_xmit_ctl("tcp_timer: RST sent on timeout " 17279 "in SYN_RCVD", 17280 tcp, tcp->tcp_snxt, 17281 tcp->tcp_rnxt, TH_RST | TH_ACK); 17282 } 17283 (void) tcp_clean_death(tcp, 17284 tcp->tcp_client_errno ? 17285 tcp->tcp_client_errno : ETIMEDOUT, 25); 17286 return; 17287 } else { 17288 /* 17289 * Set tcp_ms_we_have_waited to second_threshold 17290 * so that on the next timeout, we will do the above 17291 * check (lbolt - tcp_last_recv_time). This is 17292 * also to avoid overflow. 17293 * 17294 * We don't need to decrement tcp_timer_backoff 17295 * to avoid overflow because it will be decremented 17296 * later if the new timeout value is greater than 17297 * tcp_rexmit_interval_max. In the case when 17298 * tcp_rexmit_interval_max is greater than 17299 * second_threshold, it means that we will wait 17300 * longer than second_threshold to send the next 17301 * window probe.
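 *
 * Editor's illustration (the threshold value is made up): with
 * second_threshold at 480000 ms, a zero window probe cycle keeps
 * running as long as ACKs keep arriving, but once
 * lbolt - tcp_last_recv_time exceeds 480000 ms the check above tears
 * the connection down with ETIMEDOUT via tcp_clean_death().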
17302 */ 17303 tcp->tcp_ms_we_have_waited = second_threshold; 17304 } 17305 } else if (ms > first_threshold) { 17306 if (tcp->tcp_snd_zcopy_aware && (!tcp->tcp_xmit_zc_clean) && 17307 tcp->tcp_xmit_head != NULL) { 17308 tcp->tcp_xmit_head = 17309 tcp_zcopy_backoff(tcp, tcp->tcp_xmit_head, 1); 17310 } 17311 /* 17312 * We have been retransmitting for too long... The RTT 17313 * we calculated is probably incorrect. Reinitialize it. 17314 * Need to compensate for 0 tcp_rtt_sa. Reset 17315 * tcp_rtt_update so that we won't accidentally cache a 17316 * bad value. But only do this if this is not a zero 17317 * window probe. 17318 */ 17319 if (tcp->tcp_rtt_sa != 0 && tcp->tcp_zero_win_probe == 0) { 17320 tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) + 17321 (tcp->tcp_rtt_sa >> 5); 17322 tcp->tcp_rtt_sa = 0; 17323 tcp_ip_notify(tcp); 17324 tcp->tcp_rtt_update = 0; 17325 } 17326 } 17327 tcp->tcp_timer_backoff++; 17328 if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 17329 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) < 17330 tcps->tcps_rexmit_interval_min) { 17331 /* 17332 * This means the original RTO is tcp_rexmit_interval_min. 17333 * So we will use tcp_rexmit_interval_min as the RTO value 17334 * and do the backoff. 17335 */ 17336 ms = tcps->tcps_rexmit_interval_min << tcp->tcp_timer_backoff; 17337 } else { 17338 ms <<= tcp->tcp_timer_backoff; 17339 } 17340 if (ms > tcps->tcps_rexmit_interval_max) { 17341 ms = tcps->tcps_rexmit_interval_max; 17342 /* 17343 * ms is at max, decrement tcp_timer_backoff to avoid 17344 * overflow. 17345 */ 17346 tcp->tcp_timer_backoff--; 17347 } 17348 tcp->tcp_ms_we_have_waited += ms; 17349 if (tcp->tcp_zero_win_probe == 0) { 17350 tcp->tcp_rto = ms; 17351 } 17352 TCP_TIMER_RESTART(tcp, ms); 17353 /* 17354 * This is after a timeout and tcp_rto is backed off. Set 17355 * tcp_set_timer to 1 so that the next time RTO is updated, we will 17356 * restart the timer with a correct value. 17357 */ 17358 tcp->tcp_set_timer = 1; 17359 mss = tcp->tcp_snxt - tcp->tcp_suna; 17360 if (mss > tcp->tcp_mss) 17361 mss = tcp->tcp_mss; 17362 if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0) 17363 mss = tcp->tcp_swnd; 17364 17365 if ((mp = tcp->tcp_xmit_head) != NULL) 17366 mp->b_prev = (mblk_t *)lbolt; 17367 mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss, 17368 B_TRUE); 17369 17370 /* 17371 * When slow start after retransmission begins, start with 17372 * this seq no. tcp_rexmit_max marks the end of the special slow 17373 * start phase. tcp_snd_burst controls how many segments 17374 * can be sent because of an ack. 17375 */ 17376 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 17377 tcp->tcp_snd_burst = TCP_CWND_SS; 17378 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 17379 (tcp->tcp_unsent == 0)) { 17380 tcp->tcp_rexmit_max = tcp->tcp_fss; 17381 } else { 17382 tcp->tcp_rexmit_max = tcp->tcp_snxt; 17383 } 17384 tcp->tcp_rexmit = B_TRUE; 17385 tcp->tcp_dupack_cnt = 0; 17386 17387 /* 17388 * Remove all rexmit SACK blocks to start fresh. 17389 */ 17390 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 17391 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 17392 tcp->tcp_num_notsack_blk = 0; 17393 tcp->tcp_cnt_notsack_list = 0; 17394 } 17395 if (mp == NULL) { 17396 return; 17397 } 17398 /* Attach credentials to retransmitted initial SYNs.
*/ 17399 if (tcp->tcp_state == TCPS_SYN_SENT) { 17400 mblk_setcred(mp, tcp->tcp_cred); 17401 DB_CPID(mp) = tcp->tcp_cpid; 17402 } 17403 17404 tcp->tcp_csuna = tcp->tcp_snxt; 17405 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 17406 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, mss); 17407 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 17408 tcp_send_data(tcp, tcp->tcp_wq, mp); 17409 17410 } 17411 17412 /* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */ 17413 static void 17414 tcp_unbind(tcp_t *tcp, mblk_t *mp) 17415 { 17416 conn_t *connp; 17417 17418 switch (tcp->tcp_state) { 17419 case TCPS_BOUND: 17420 case TCPS_LISTEN: 17421 break; 17422 default: 17423 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 17424 return; 17425 } 17426 17427 /* 17428 * Need to clean up all the eagers since after the unbind, segments 17429 * will no longer be delivered to this listener stream. 17430 */ 17431 mutex_enter(&tcp->tcp_eager_lock); 17432 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 17433 tcp_eager_cleanup(tcp, 0); 17434 } 17435 mutex_exit(&tcp->tcp_eager_lock); 17436 17437 if (tcp->tcp_ipversion == IPV4_VERSION) { 17438 tcp->tcp_ipha->ipha_src = 0; 17439 } else { 17440 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 17441 } 17442 V6_SET_ZERO(tcp->tcp_ip_src_v6); 17443 bzero(tcp->tcp_tcph->th_lport, sizeof (tcp->tcp_tcph->th_lport)); 17444 tcp_bind_hash_remove(tcp); 17445 tcp->tcp_state = TCPS_IDLE; 17446 tcp->tcp_mdt = B_FALSE; 17447 /* Send M_FLUSH according to TPI */ 17448 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 17449 connp = tcp->tcp_connp; 17450 connp->conn_mdt_ok = B_FALSE; 17451 ipcl_hash_remove(connp); 17452 bzero(&connp->conn_ports, sizeof (connp->conn_ports)); 17453 mp = mi_tpi_ok_ack_alloc(mp); 17454 putnext(tcp->tcp_rq, mp); 17455 } 17456 17457 /* 17458 * Don't let port fall into the privileged range. 17459 * Since the extra privileged ports can be arbitrary we also 17460 * ensure that we exclude those from consideration. 17461 * tcp_g_epriv_ports is not sorted thus we loop over it until 17462 * there are no changes. 17463 * 17464 * Note: No locks are held when inspecting tcp_g_*epriv_ports 17465 * but instead the code relies on: 17466 * - the fact that the address of the array and its size never changes 17467 * - the atomic assignment of the elements of the array 17468 * 17469 * Returns 0 if there are no more ports available. 17470 * 17471 * TS note: skip multilevel ports. 17472 */ 17473 static in_port_t 17474 tcp_update_next_port(in_port_t port, const tcp_t *tcp, boolean_t random) 17475 { 17476 int i; 17477 boolean_t restart = B_FALSE; 17478 tcp_stack_t *tcps = tcp->tcp_tcps; 17479 17480 if (random && tcp_random_anon_port != 0) { 17481 (void) random_get_pseudo_bytes((uint8_t *)&port, 17482 sizeof (in_port_t)); 17483 /* 17484 * Unless changed by a sys admin, the smallest anon port 17485 * is 32768 and the largest anon port is 65535. It is 17486 * very likely (50%) for the random port to be smaller 17487 * than the smallest anon port. When that happens, 17488 * add port % (anon port range) to the smallest anon 17489 * port to get the random port. It should fall into the 17490 * valid anon port range. 
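 *
 * Editor's illustration (the random draw is made up): with the default
 * range [32768, 65535], a draw of 12345 maps to
 * 32768 + 12345 % (65535 - 32768) = 45113, while a draw of 40000 is
 * already in range and is used unchanged.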
17491 */ 17492 if (port < tcps->tcps_smallest_anon_port) { 17493 port = tcps->tcps_smallest_anon_port + 17494 port % (tcps->tcps_largest_anon_port - 17495 tcps->tcps_smallest_anon_port); 17496 } 17497 } 17498 17499 retry: 17500 if (port < tcps->tcps_smallest_anon_port) 17501 port = (in_port_t)tcps->tcps_smallest_anon_port; 17502 17503 if (port > tcps->tcps_largest_anon_port) { 17504 if (restart) 17505 return (0); 17506 restart = B_TRUE; 17507 port = (in_port_t)tcps->tcps_smallest_anon_port; 17508 } 17509 17510 if (port < tcps->tcps_smallest_nonpriv_port) 17511 port = (in_port_t)tcps->tcps_smallest_nonpriv_port; 17512 17513 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 17514 if (port == tcps->tcps_g_epriv_ports[i]) { 17515 port++; 17516 /* 17517 * Make sure the port is still in the 17518 * valid range. 17519 */ 17520 goto retry; 17521 } 17522 } 17523 if (is_system_labeled() && 17524 (i = tsol_next_port(crgetzone(tcp->tcp_cred), port, 17525 IPPROTO_TCP, B_TRUE)) != 0) { 17526 port = i; 17527 goto retry; 17528 } 17529 return (port); 17530 } 17531 17532 /* 17533 * Return the next anonymous port in the privileged port range for 17534 * bind checking. It starts at IPPORT_RESERVED - 1 and goes 17535 * downwards. This is the same behavior as documented in the userland 17536 * library call rresvport(3N). 17537 * 17538 * TS note: skip multilevel ports. 17539 */ 17540 static in_port_t 17541 tcp_get_next_priv_port(const tcp_t *tcp) 17542 { 17543 static in_port_t next_priv_port = IPPORT_RESERVED - 1; 17544 in_port_t nextport; 17545 boolean_t restart = B_FALSE; 17546 tcp_stack_t *tcps = tcp->tcp_tcps; 17547 retry: 17548 if (next_priv_port < tcps->tcps_min_anonpriv_port || 17549 next_priv_port >= IPPORT_RESERVED) { 17550 next_priv_port = IPPORT_RESERVED - 1; 17551 if (restart) 17552 return (0); 17553 restart = B_TRUE; 17554 } 17555 if (is_system_labeled() && 17556 (nextport = tsol_next_port(crgetzone(tcp->tcp_cred), 17557 next_priv_port, IPPROTO_TCP, B_FALSE)) != 0) { 17558 next_priv_port = nextport; 17559 goto retry; 17560 } 17561 return (next_priv_port--); 17562 } 17563 17564 /* The write side r/w procedure. */ 17565 17566 #if CCS_STATS 17567 struct { 17568 struct { 17569 int64_t count, bytes; 17570 } tot, hit; 17571 } wrw_stats; 17572 #endif 17573 17574 /* 17575 * Called by tcp_wput() to handle all non-data messages, except M_PROTO and 17576 * M_PCPROTO. 17577 */ 17578 /* ARGSUSED */ 17579 static void 17580 tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2) 17581 { 17582 conn_t *connp = (conn_t *)arg; 17583 tcp_t *tcp = connp->conn_tcp; 17584 queue_t *q = tcp->tcp_wq; 17585 17586 ASSERT(DB_TYPE(mp) != M_IOCTL); 17587 /* 17588 * TCP is D_MP and qprocsoff() is done towards the end of tcp_close. 17589 * Once the close starts, streamhead and sockfs will not let any data 17590 * packets come down (close ensures that there are no threads using the 17591 * queue and no new threads will come down) but since qprocsoff() 17592 * hasn't happened yet, an M_FLUSH or some non-data message might 17593 * get reflected back (in response to our own FLUSHRW) and get 17594 * processed after tcp_close() is done. The conn would still be valid 17595 * because a ref would have been added, but we need to check the state 17596 * before actually processing the packet.
17597 */ 17598 if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) { 17599 freemsg(mp); 17600 return; 17601 } 17602 17603 switch (DB_TYPE(mp)) { 17604 case M_IOCDATA: 17605 tcp_wput_iocdata(tcp, mp); 17606 break; 17607 case M_FLUSH: 17608 tcp_wput_flush(tcp, mp); 17609 break; 17610 default: 17611 CALL_IP_WPUT(connp, q, mp); 17612 break; 17613 } 17614 } 17615 17616 /* 17617 * The TCP fast path write put procedure. 17618 * NOTE: the logic of the fast path is duplicated from tcp_wput_data() 17619 */ 17620 /* ARGSUSED */ 17621 void 17622 tcp_output(void *arg, mblk_t *mp, void *arg2) 17623 { 17624 int len; 17625 int hdrlen; 17626 int plen; 17627 mblk_t *mp1; 17628 uchar_t *rptr; 17629 uint32_t snxt; 17630 tcph_t *tcph; 17631 struct datab *db; 17632 uint32_t suna; 17633 uint32_t mss; 17634 ipaddr_t *dst; 17635 ipaddr_t *src; 17636 uint32_t sum; 17637 int usable; 17638 conn_t *connp = (conn_t *)arg; 17639 tcp_t *tcp = connp->conn_tcp; 17640 uint32_t msize; 17641 tcp_stack_t *tcps = tcp->tcp_tcps; 17642 17643 /* 17644 * Try and ASSERT the minimum possible references on the 17645 * conn early enough. Since we are executing on write side, 17646 * the connection is obviously not detached and that means 17647 * there is a ref each for TCP and IP. Since we are behind 17648 * the squeue, the minimum references needed are 3. If the 17649 * conn is in classifier hash list, there should be an 17650 * extra ref for that (we check both the possibilities). 17651 */ 17652 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 17653 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 17654 17655 ASSERT(DB_TYPE(mp) == M_DATA); 17656 msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp); 17657 17658 mutex_enter(&tcp->tcp_non_sq_lock); 17659 tcp->tcp_squeue_bytes -= msize; 17660 mutex_exit(&tcp->tcp_non_sq_lock); 17661 17662 /* Bypass tcp protocol for fused tcp loopback */ 17663 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 17664 return; 17665 17666 mss = tcp->tcp_mss; 17667 if (tcp->tcp_xmit_zc_clean) 17668 mp = tcp_zcopy_backoff(tcp, mp, 0); 17669 17670 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 17671 len = (int)(mp->b_wptr - mp->b_rptr); 17672 17673 /* 17674 * Criteria for fast path: 17675 * 17676 * 1. no unsent data 17677 * 2. single mblk in request 17678 * 3. connection established 17679 * 4. data in mblk 17680 * 5. len <= mss 17681 * 6. no tcp_valid bits 17682 */ 17683 if ((tcp->tcp_unsent != 0) || 17684 (tcp->tcp_cork) || 17685 (mp->b_cont != NULL) || 17686 (tcp->tcp_state != TCPS_ESTABLISHED) || 17687 (len == 0) || 17688 (len > mss) || 17689 (tcp->tcp_valid_bits != 0)) { 17690 tcp_wput_data(tcp, mp, B_FALSE); 17691 return; 17692 } 17693 17694 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 17695 ASSERT(tcp->tcp_fin_sent == 0); 17696 17697 /* queue new packet onto retransmission queue */ 17698 if (tcp->tcp_xmit_head == NULL) { 17699 tcp->tcp_xmit_head = mp; 17700 } else { 17701 tcp->tcp_xmit_last->b_cont = mp; 17702 } 17703 tcp->tcp_xmit_last = mp; 17704 tcp->tcp_xmit_tail = mp; 17705 17706 /* find out how much we can send */ 17707 /* BEGIN CSTYLED */ 17708 /* 17709 * un-acked usable 17710 * |--------------|-----------------| 17711 * tcp_suna tcp_snxt tcp_suna+tcp_swnd 17712 */ 17713 /* END CSTYLED */ 17714 17715 /* start sending from tcp_snxt */ 17716 snxt = tcp->tcp_snxt; 17717 17718 /* 17719 * Check to see if this connection has been idled for some 17720 * time and no ACK is expected. 
If it is, we need to slow 17721 * start again to get back the connection's "self-clock" as 17722 * described in VJ's paper. 17723 * 17724 * Refer to the comment in tcp_mss_set() for the calculation 17725 * of tcp_cwnd after idle. 17726 */ 17727 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 17728 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 17729 SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle); 17730 } 17731 17732 usable = tcp->tcp_swnd; /* tcp window size */ 17733 if (usable > tcp->tcp_cwnd) 17734 usable = tcp->tcp_cwnd; /* congestion window smaller */ 17735 usable -= snxt; /* subtract stuff already sent */ 17736 suna = tcp->tcp_suna; 17737 usable += suna; 17738 /* usable can be < 0 if the congestion window is smaller */ 17739 if (len > usable) { 17740 /* Can't send complete M_DATA in one shot */ 17741 goto slow; 17742 } 17743 17744 mutex_enter(&tcp->tcp_non_sq_lock); 17745 if (tcp->tcp_flow_stopped && 17746 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 17747 tcp_clrqfull(tcp); 17748 } 17749 mutex_exit(&tcp->tcp_non_sq_lock); 17750 17751 /* 17752 * determine if anything to send (Nagle). 17753 * 17754 * 1. len < tcp_mss (i.e. small) 17755 * 2. unacknowledged data present 17756 * 3. len < nagle limit 17757 * 4. last packet sent < nagle limit (previous packet sent) 17758 */ 17759 if ((len < mss) && (snxt != suna) && 17760 (len < (int)tcp->tcp_naglim) && 17761 (tcp->tcp_last_sent_len < tcp->tcp_naglim)) { 17762 /* 17763 * This was the first unsent packet and normally 17764 * mss < xmit_hiwater so there is no need to worry 17765 * about flow control. The next packet will go 17766 * through the flow control check in tcp_wput_data(). 17767 */ 17768 /* leftover work from above */ 17769 tcp->tcp_unsent = len; 17770 tcp->tcp_xmit_tail_unsent = len; 17771 17772 return; 17773 } 17774 17775 /* len <= tcp->tcp_mss && len == unsent so no silly window */ 17776 17777 if (snxt == suna) { 17778 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 17779 } 17780 17781 /* we have always sent something */ 17782 tcp->tcp_rack_cnt = 0; 17783 17784 tcp->tcp_snxt = snxt + len; 17785 tcp->tcp_rack = tcp->tcp_rnxt; 17786 17787 if ((mp1 = dupb(mp)) == 0) 17788 goto no_memory; 17789 mp->b_prev = (mblk_t *)(uintptr_t)lbolt; 17790 mp->b_next = (mblk_t *)(uintptr_t)snxt; 17791 17792 /* adjust tcp header information */ 17793 tcph = tcp->tcp_tcph; 17794 tcph->th_flags[0] = (TH_ACK|TH_PUSH); 17795 17796 sum = len + tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 17797 sum = (sum >> 16) + (sum & 0xFFFF); 17798 U16_TO_ABE16(sum, tcph->th_sum); 17799 17800 U32_TO_ABE32(snxt, tcph->th_seq); 17801 17802 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 17803 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 17804 BUMP_LOCAL(tcp->tcp_obsegs); 17805 17806 /* Update the latest receive window size in TCP header. 
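 *
 * Editor's illustration (made-up values): with tcp_rwnd = 262144 and
 * tcp_rcv_ws = 3, the 16-bit th_win field carries 262144 >> 3 = 32768,
 * and the peer shifts it back up by the same scale factor negotiated
 * at connection setup.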
*/ 17807 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 17808 tcph->th_win); 17809 17810 tcp->tcp_last_sent_len = (ushort_t)len; 17811 17812 plen = len + tcp->tcp_hdr_len; 17813 17814 if (tcp->tcp_ipversion == IPV4_VERSION) { 17815 tcp->tcp_ipha->ipha_length = htons(plen); 17816 } else { 17817 tcp->tcp_ip6h->ip6_plen = htons(plen - 17818 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 17819 } 17820 17821 /* see if we need to allocate a mblk for the headers */ 17822 hdrlen = tcp->tcp_hdr_len; 17823 rptr = mp1->b_rptr - hdrlen; 17824 db = mp1->b_datap; 17825 if ((db->db_ref != 2) || rptr < db->db_base || 17826 (!OK_32PTR(rptr))) { 17827 /* NOTE: we assume allocb returns an OK_32PTR */ 17828 mp = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 17829 tcps->tcps_wroff_xtra, BPRI_MED); 17830 if (!mp) { 17831 freemsg(mp1); 17832 goto no_memory; 17833 } 17834 mp->b_cont = mp1; 17835 mp1 = mp; 17836 /* Leave room for Link Level header */ 17837 /* hdrlen = tcp->tcp_hdr_len; */ 17838 rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra]; 17839 mp1->b_wptr = &rptr[hdrlen]; 17840 } 17841 mp1->b_rptr = rptr; 17842 17843 /* Fill in the timestamp option. */ 17844 if (tcp->tcp_snd_ts_ok) { 17845 U32_TO_BE32((uint32_t)lbolt, 17846 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 17847 U32_TO_BE32(tcp->tcp_ts_recent, 17848 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 17849 } else { 17850 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 17851 } 17852 17853 /* copy header into outgoing packet */ 17854 dst = (ipaddr_t *)rptr; 17855 src = (ipaddr_t *)tcp->tcp_iphc; 17856 dst[0] = src[0]; 17857 dst[1] = src[1]; 17858 dst[2] = src[2]; 17859 dst[3] = src[3]; 17860 dst[4] = src[4]; 17861 dst[5] = src[5]; 17862 dst[6] = src[6]; 17863 dst[7] = src[7]; 17864 dst[8] = src[8]; 17865 dst[9] = src[9]; 17866 if (hdrlen -= 40) { 17867 hdrlen >>= 2; 17868 dst += 10; 17869 src += 10; 17870 do { 17871 *dst++ = *src++; 17872 } while (--hdrlen); 17873 } 17874 17875 /* 17876 * Set the ECN info in the TCP header. Note that this 17877 * is not the template header. 17878 */ 17879 if (tcp->tcp_ecn_ok) { 17880 SET_ECT(tcp, rptr); 17881 17882 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 17883 if (tcp->tcp_ecn_echo_on) 17884 tcph->th_flags[0] |= TH_ECE; 17885 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 17886 tcph->th_flags[0] |= TH_CWR; 17887 tcp->tcp_ecn_cwr_sent = B_TRUE; 17888 } 17889 } 17890 17891 if (tcp->tcp_ip_forward_progress) { 17892 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 17893 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 17894 tcp->tcp_ip_forward_progress = B_FALSE; 17895 } 17896 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 17897 tcp_send_data(tcp, tcp->tcp_wq, mp1); 17898 return; 17899 17900 /* 17901 * If we ran out of memory, we pretend to have sent the packet 17902 * and that it was lost on the wire. 17903 */ 17904 no_memory: 17905 return; 17906 17907 slow: 17908 /* leftover work from above */ 17909 tcp->tcp_unsent = len; 17910 tcp->tcp_xmit_tail_unsent = len; 17911 tcp_wput_data(tcp, NULL, B_FALSE); 17912 } 17913 17914 /* 17915 * The function called through squeue to get behind eager's perimeter to 17916 * finish the accept processing. 
17917 */ 17918 /* ARGSUSED */ 17919 void 17920 tcp_accept_finish(void *arg, mblk_t *mp, void *arg2) 17921 { 17922 conn_t *connp = (conn_t *)arg; 17923 tcp_t *tcp = connp->conn_tcp; 17924 queue_t *q = tcp->tcp_rq; 17925 mblk_t *mp1; 17926 mblk_t *stropt_mp = mp; 17927 struct stroptions *stropt; 17928 uint_t thwin; 17929 tcp_stack_t *tcps = tcp->tcp_tcps; 17930 17931 /* 17932 * Drop the eager's ref on the listener that was placed when 17933 * this eager began life in tcp_conn_request. 17934 */ 17935 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp); 17936 17937 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) { 17938 /* 17939 * Someone blew off the eager before we could finish 17940 * the accept. 17941 * 17942 * The only reason the eager exists is that we put 17943 * a ref on it when the conn ind went up. We need to send 17944 * a disconnect indication up while the last reference 17945 * on the eager will be dropped by the squeue when we 17946 * return. 17947 */ 17948 ASSERT(tcp->tcp_listener == NULL); 17949 if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) { 17950 struct T_discon_ind *tdi; 17951 17952 (void) putnextctl1(q, M_FLUSH, FLUSHRW); 17953 /* 17954 * Let us reuse the incoming mblk to avoid memory 17955 * allocation failure problems. We know that the 17956 * size of the incoming mblk, i.e. stroptions, is greater 17957 * than sizeof T_discon_ind. So the reallocb below 17958 * can't fail. 17959 */ 17960 freemsg(mp->b_cont); 17961 mp->b_cont = NULL; 17962 ASSERT(DB_REF(mp) == 1); 17963 mp = reallocb(mp, sizeof (struct T_discon_ind), 17964 B_FALSE); 17965 ASSERT(mp != NULL); 17966 DB_TYPE(mp) = M_PROTO; 17967 ((union T_primitives *)mp->b_rptr)->type = T_DISCON_IND; 17968 tdi = (struct T_discon_ind *)mp->b_rptr; 17969 if (tcp->tcp_issocket) { 17970 tdi->DISCON_reason = ECONNREFUSED; 17971 tdi->SEQ_number = 0; 17972 } else { 17973 tdi->DISCON_reason = ENOPROTOOPT; 17974 tdi->SEQ_number = 17975 tcp->tcp_conn_req_seqnum; 17976 } 17977 mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind); 17978 putnext(q, mp); 17979 } else { 17980 freemsg(mp); 17981 } 17982 if (tcp->tcp_hard_binding) { 17983 tcp->tcp_hard_binding = B_FALSE; 17984 tcp->tcp_hard_bound = B_TRUE; 17985 } 17986 tcp->tcp_detached = B_FALSE; 17987 return; 17988 } 17989 17990 mp1 = stropt_mp->b_cont; 17991 stropt_mp->b_cont = NULL; 17992 ASSERT(DB_TYPE(stropt_mp) == M_SETOPTS); 17993 stropt = (struct stroptions *)stropt_mp->b_rptr; 17994 17995 while (mp1 != NULL) { 17996 mp = mp1; 17997 mp1 = mp1->b_cont; 17998 mp->b_cont = NULL; 17999 tcp->tcp_drop_opt_ack_cnt++; 18000 CALL_IP_WPUT(connp, tcp->tcp_wq, mp); 18001 } 18002 mp = NULL; 18003 18004 /* 18005 * For a loopback connection with tcp_direct_sockfs on, note that 18006 * we don't have to protect tcp_rcv_list yet because synchronous 18007 * streams have not yet been enabled and tcp_fuse_rrw() cannot 18008 * possibly race with us. 18009 */ 18010 18011 /* 18012 * Set the max window size (tcp_rq->q_hiwat) of the acceptor 18013 * properly. This is the first time we know of the acceptor's 18014 * queue. So we do it here. 18015 */ 18016 if (tcp->tcp_rcv_list == NULL) { 18017 /* 18018 * Recv queue is empty, tcp_rwnd should not have changed. 18019 * That means it should be equal to the listener's tcp_rwnd.
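 *
 * Editor's illustration (made-up size): if 8192 bytes accumulated on
 * tcp_rcv_list while the eager was detached, the else branch below
 * sets q_hiwat to tcp_rwnd + 8192 so that draining that backlog does
 * not immediately flow-control the stream.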
18020 */ 18021 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd; 18022 } else { 18023 #ifdef DEBUG 18024 uint_t cnt = 0; 18025 18026 mp1 = tcp->tcp_rcv_list; 18027 while ((mp = mp1) != NULL) { 18028 mp1 = mp->b_next; 18029 cnt += msgdsize(mp); 18030 } 18031 ASSERT(cnt != 0 && tcp->tcp_rcv_cnt == cnt); 18032 #endif 18033 /* There is some data; add it back to get the max. */ 18034 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd + tcp->tcp_rcv_cnt; 18035 } 18036 18037 stropt->so_flags = SO_HIWAT; 18038 stropt->so_hiwat = MAX(q->q_hiwat, tcps->tcps_sth_rcv_hiwat); 18039 18040 stropt->so_flags |= SO_MAXBLK; 18041 stropt->so_maxblk = tcp_maxpsz_set(tcp, B_FALSE); 18042 18043 /* 18044 * This is the first time we run on the correct 18045 * queue after tcp_accept. So fix all the q parameters 18046 * here. 18047 */ 18048 /* Allocate room for SACK options if needed. */ 18049 stropt->so_flags |= SO_WROFF; 18050 if (tcp->tcp_fused) { 18051 ASSERT(tcp->tcp_loopback); 18052 ASSERT(tcp->tcp_loopback_peer != NULL); 18053 /* 18054 * For fused tcp loopback, set the stream head's write 18055 * offset value to zero since we won't be needing any room 18056 * for TCP/IP headers. This would also improve performance 18057 * since it would reduce the amount of work done by kmem. 18058 * Non-fused tcp loopback case is handled separately below. 18059 */ 18060 stropt->so_wroff = 0; 18061 /* 18062 * Record the stream head's high water mark for this endpoint; 18063 * this is used for flow-control purposes in tcp_fuse_output(). 18064 */ 18065 stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(tcp, q->q_hiwat); 18066 /* 18067 * Update the peer's transmit parameters according to 18068 * our recently calculated high water mark value. 18069 */ 18070 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE); 18071 } else if (tcp->tcp_snd_sack_ok) { 18072 stropt->so_wroff = tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 18073 (tcp->tcp_loopback ? 0 : tcps->tcps_wroff_xtra); 18074 } else { 18075 stropt->so_wroff = tcp->tcp_hdr_len + (tcp->tcp_loopback ? 0 : 18076 tcps->tcps_wroff_xtra); 18077 } 18078 18079 /* 18080 * If this endpoint is handling SSL, then reserve extra 18081 * offset and space at the end. 18082 * Also have the stream head allocate SSL3_MAX_RECORD_LEN packets, 18083 * overriding the previous setting. The extra cost of signing and 18084 * encrypting multiple MSS-size records (12 of them with Ethernet), 18085 * instead of a single contiguous one by the stream head, 18086 * largely outweighs the statistical reduction of ACKs, when 18087 * applicable. The peer will also save on decryption and verification 18088 * costs. 18089 */ 18090 if (tcp->tcp_kssl_ctx != NULL) { 18091 stropt->so_wroff += SSL3_WROFFSET; 18092 18093 stropt->so_flags |= SO_TAIL; 18094 stropt->so_tail = SSL3_MAX_TAIL_LEN; 18095 18096 stropt->so_maxblk = SSL3_MAX_RECORD_LEN; 18097 } 18098 18099 /* Send the options up */ 18100 putnext(q, stropt_mp); 18101 18102 /* 18103 * Pass up any data and/or a FIN that has been received. 18104 * 18105 * Adjust the receive window in case it had decreased 18106 * (because there is data <=> tcp_rcv_list != NULL) 18107 * while the connection was detached. Note that 18108 * in case the eager was flow-controlled, w/o this 18109 * code, the rwnd may never open up again!
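 *
 * Editor's illustration (made-up numbers, tcp_rcv_ws = 0): if the peer
 * was last advertised a 49152-byte window and 20480 received bytes are
 * not yet ACKed, thwin works out to 49152 - 20480 = 28672; with
 * q_hiwat = 49152 the shortfall of 20480 >= mss, so the code below
 * sends a window-update ACK.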
18110 	 */
18111 	if (tcp->tcp_rcv_list != NULL) {
18112 		/* We drain directly in case of fused tcp loopback */
18113 		if (!tcp->tcp_fused && canputnext(q)) {
18114 			tcp->tcp_rwnd = q->q_hiwat;
18115 			thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win))
18116 			    << tcp->tcp_rcv_ws;
18117 			thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
18118 			if (tcp->tcp_state >= TCPS_ESTABLISHED &&
18119 			    (q->q_hiwat - thwin >= tcp->tcp_mss)) {
18120 				tcp_xmit_ctl(NULL,
18121 				    tcp, (tcp->tcp_swnd == 0) ?
18122 				    tcp->tcp_suna : tcp->tcp_snxt,
18123 				    tcp->tcp_rnxt, TH_ACK);
18124 				BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
18125 			}
18126
18127 		}
18128 		(void) tcp_rcv_drain(q, tcp);
18129
18130 		/*
18131 		 * For fused tcp loopback, back-enable the peer endpoint
18132 		 * if it's currently flow-controlled.
18133 		 */
18134 		if (tcp->tcp_fused) {
18135 			tcp_t *peer_tcp = tcp->tcp_loopback_peer;
18136
18137 			ASSERT(peer_tcp != NULL);
18138 			ASSERT(peer_tcp->tcp_fused);
18139 			/*
18140 			 * In order to change the peer's tcp_flow_stopped,
18141 			 * we need to take locks for both end points. The
18142 			 * highest address is taken first.
18143 			 */
18144 			if (peer_tcp > tcp) {
18145 				mutex_enter(&peer_tcp->tcp_non_sq_lock);
18146 				mutex_enter(&tcp->tcp_non_sq_lock);
18147 			} else {
18148 				mutex_enter(&tcp->tcp_non_sq_lock);
18149 				mutex_enter(&peer_tcp->tcp_non_sq_lock);
18150 			}
18151 			if (peer_tcp->tcp_flow_stopped) {
18152 				tcp_clrqfull(peer_tcp);
18153 				TCP_STAT(tcps, tcp_fusion_backenabled);
18154 			}
18155 			mutex_exit(&peer_tcp->tcp_non_sq_lock);
18156 			mutex_exit(&tcp->tcp_non_sq_lock);
18157 		}
18158 	}
18159 	ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
18160 	if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
18161 		mp = mi_tpi_ordrel_ind();
18162 		if (mp) {
18163 			tcp->tcp_ordrel_done = B_TRUE;
18164 			putnext(q, mp);
18165 			if (tcp->tcp_deferred_clean_death) {
18166 				/*
18167 				 * tcp_clean_death was deferred
18168 				 * for T_ORDREL_IND - do it now
18169 				 */
18170 				(void) tcp_clean_death(tcp,
18171 				    tcp->tcp_client_errno, 21);
18172 				tcp->tcp_deferred_clean_death = B_FALSE;
18173 			}
18174 		} else {
18175 			/*
18176 			 * Run the orderly release in the
18177 			 * service routine.
18178 			 */
18179 			qenable(q);
18180 		}
18181 	}
18182 	if (tcp->tcp_hard_binding) {
18183 		tcp->tcp_hard_binding = B_FALSE;
18184 		tcp->tcp_hard_bound = B_TRUE;
18185 	}
18186
18187 	tcp->tcp_detached = B_FALSE;
18188
18189 	/* We can enable synchronous streams now */
18190 	if (tcp->tcp_fused) {
18191 		tcp_fuse_syncstr_enable_pair(tcp);
18192 	}
18193
18194 	if (tcp->tcp_ka_enabled) {
18195 		tcp->tcp_ka_last_intrvl = 0;
18196 		tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
18197 		    MSEC_TO_TICK(tcp->tcp_ka_interval));
18198 	}
18199
18200 	/*
18201 	 * At this point, the eager is fully established and will
18202 	 * have the following references -
18203 	 *
18204 	 * 2 references for the connection to exist (1 for TCP and 1 for IP).
18205 	 * 1 reference for the squeue which will be dropped by the squeue as
18206 	 * soon as this function returns.
18207 	 * There will be 1 additional reference for being in the classifier
18208 	 * hash list, provided something bad hasn't happened.
18209 	 */
18210 	ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
18211 	    (connp->conn_fanout == NULL && connp->conn_ref >= 3));
18212 }
18213
18214 /*
18215 * The function called through the squeue to get behind the listener's
18216 * perimeter to send a deferred conn_ind.
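 *
 * As an illustration, the enqueue side in tcp_wput_accept() below does
 * roughly the following (the listener's conn is ref-held before
 * crossing the perimeter):
 *
 *	CONN_INC_REF(listener->tcp_connp);
 *	squeue_fill(listener->tcp_connp->conn_sqp, mp1,
 *	    tcp_send_pending, listener->tcp_connp, SQTAG_TCP_SEND_PENDING);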
18217  */
18218 /* ARGSUSED */
18219 void
18220 tcp_send_pending(void *arg, mblk_t *mp, void *arg2)
18221 {
18222 	conn_t *connp = (conn_t *)arg;
18223 	tcp_t *listener = connp->conn_tcp;
18224
18225 	if (listener->tcp_state == TCPS_CLOSED ||
18226 	    TCP_IS_DETACHED(listener)) {
18227 		/*
18228 		 * If the listener has closed, it would have caused a
18229 		 * cleanup/blowoff to happen for the eager.
18230 		 */
18231 		tcp_t *tcp;
18232 		struct T_conn_ind *conn_ind;
18233
18234 		conn_ind = (struct T_conn_ind *)mp->b_rptr;
18235 		bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
18236 		    conn_ind->OPT_length);
18237 		/*
18238 		 * We need to drop the ref on the eager that was put in
18239 		 * tcp_rput_data() before trying to send the conn_ind
18240 		 * to the listener. The conn_ind was deferred in
18241 		 * tcp_send_conn_ind and tcp_wput_accept() is sending this
18242 		 * deferred conn_ind, but the listener is closed, so we drop the ref.
18243 		 */
18244 		CONN_DEC_REF(tcp->tcp_connp);
18245 		freemsg(mp);
18246 		return;
18247 	}
18248 	putnext(listener->tcp_rq, mp);
18249 }
18250
18251
18252 /*
18253 * This is the STREAMS entry point for T_CONN_RES coming down on the
18254 * Acceptor STREAM when the sockfs listener does accept processing.
18255 * Read the block comment on top of tcp_conn_request().
18256 */
18257 void
18258 tcp_wput_accept(queue_t *q, mblk_t *mp)
18259 {
18260 	queue_t *rq = RD(q);
18261 	struct T_conn_res *conn_res;
18262 	tcp_t *eager;
18263 	tcp_t *listener;
18264 	struct T_ok_ack *ok;
18265 	t_scalar_t PRIM_type;
18266 	mblk_t *opt_mp;
18267 	conn_t *econnp;
18268
18269 	ASSERT(DB_TYPE(mp) == M_PROTO);
18270
18271 	conn_res = (struct T_conn_res *)mp->b_rptr;
18272 	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
18273 	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) {
18274 		mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
18275 		if (mp != NULL)
18276 			putnext(rq, mp);
18277 		return;
18278 	}
18279 	switch (conn_res->PRIM_type) {
18280 	case O_T_CONN_RES:
18281 	case T_CONN_RES:
18282 		/*
18283 		 * We pass up an err ack if allocb fails. This will
18284 		 * cause sockfs to issue a T_DISCON_REQ which will cause
18285 		 * tcp_eager_blowoff to be called. sockfs will then call
18286 		 * rq->q_qinfo->qi_qclose to clean up the acceptor stream.
18287 		 * We need to do the allocb up here because we have to
18288 		 * make sure rq->q_qinfo->qi_qclose still points to the
18289 		 * correct function (tcpclose_accept) in case allocb
18290 		 * fails.
18291 */ 18292 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 18293 if (opt_mp == NULL) { 18294 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 18295 if (mp != NULL) 18296 putnext(rq, mp); 18297 return; 18298 } 18299 18300 bcopy(mp->b_rptr + conn_res->OPT_offset, 18301 &eager, conn_res->OPT_length); 18302 PRIM_type = conn_res->PRIM_type; 18303 mp->b_datap->db_type = M_PCPROTO; 18304 mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack); 18305 ok = (struct T_ok_ack *)mp->b_rptr; 18306 ok->PRIM_type = T_OK_ACK; 18307 ok->CORRECT_prim = PRIM_type; 18308 econnp = eager->tcp_connp; 18309 econnp->conn_dev = (dev_t)RD(q)->q_ptr; 18310 econnp->conn_minor_arena = (vmem_t *)(WR(q)->q_ptr); 18311 eager->tcp_rq = rq; 18312 eager->tcp_wq = q; 18313 rq->q_ptr = econnp; 18314 rq->q_qinfo = &tcp_rinitv4; /* No open - same as rinitv6 */ 18315 q->q_ptr = econnp; 18316 q->q_qinfo = &tcp_winit; 18317 listener = eager->tcp_listener; 18318 eager->tcp_issocket = B_TRUE; 18319 18320 econnp->conn_zoneid = listener->tcp_connp->conn_zoneid; 18321 econnp->conn_allzones = listener->tcp_connp->conn_allzones; 18322 ASSERT(econnp->conn_netstack == 18323 listener->tcp_connp->conn_netstack); 18324 ASSERT(eager->tcp_tcps == listener->tcp_tcps); 18325 18326 /* Put the ref for IP */ 18327 CONN_INC_REF(econnp); 18328 18329 /* 18330 * We should have minimum of 3 references on the conn 18331 * at this point. One each for TCP and IP and one for 18332 * the T_conn_ind that was sent up when the 3-way handshake 18333 * completed. In the normal case we would also have another 18334 * reference (making a total of 4) for the conn being in the 18335 * classifier hash list. However the eager could have received 18336 * an RST subsequently and tcp_closei_local could have removed 18337 * the eager from the classifier hash list, hence we can't 18338 * assert that reference. 18339 */ 18340 ASSERT(econnp->conn_ref >= 3); 18341 18342 /* 18343 * Send the new local address also up to sockfs. There 18344 * should already be enough space in the mp that came 18345 * down from soaccept(). 18346 */ 18347 if (eager->tcp_family == AF_INET) { 18348 sin_t *sin; 18349 18350 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18351 (sizeof (struct T_ok_ack) + sizeof (sin_t))); 18352 sin = (sin_t *)mp->b_wptr; 18353 mp->b_wptr += sizeof (sin_t); 18354 sin->sin_family = AF_INET; 18355 sin->sin_port = eager->tcp_lport; 18356 sin->sin_addr.s_addr = eager->tcp_ipha->ipha_src; 18357 } else { 18358 sin6_t *sin6; 18359 18360 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18361 sizeof (struct T_ok_ack) + sizeof (sin6_t)); 18362 sin6 = (sin6_t *)mp->b_wptr; 18363 mp->b_wptr += sizeof (sin6_t); 18364 sin6->sin6_family = AF_INET6; 18365 sin6->sin6_port = eager->tcp_lport; 18366 if (eager->tcp_ipversion == IPV4_VERSION) { 18367 sin6->sin6_flowinfo = 0; 18368 IN6_IPADDR_TO_V4MAPPED( 18369 eager->tcp_ipha->ipha_src, 18370 &sin6->sin6_addr); 18371 } else { 18372 ASSERT(eager->tcp_ip6h != NULL); 18373 sin6->sin6_flowinfo = 18374 eager->tcp_ip6h->ip6_vcf & 18375 ~IPV6_VERS_AND_FLOW_MASK; 18376 sin6->sin6_addr = eager->tcp_ip6h->ip6_src; 18377 } 18378 sin6->sin6_scope_id = 0; 18379 sin6->__sin6_src_id = 0; 18380 } 18381 18382 putnext(rq, mp); 18383 18384 opt_mp->b_datap->db_type = M_SETOPTS; 18385 opt_mp->b_wptr += sizeof (struct stroptions); 18386 18387 /* 18388 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 18389 * from listener to acceptor. The message is chained on the 18390 * bind_mp which tcp_rput_other will send down to IP. 
18391 	 */
18392 	if (listener->tcp_bound_if != 0) {
18393 		/* allocate optmgmt req */
18394 		mp = tcp_setsockopt_mp(IPPROTO_IPV6,
18395 		    IPV6_BOUND_IF, (char *)&listener->tcp_bound_if,
18396 		    sizeof (int));
18397 		if (mp != NULL)
18398 			linkb(opt_mp, mp);
18399 	}
18400 	if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) {
18401 		uint_t on = 1;
18402
18403 		/* allocate optmgmt req */
18404 		mp = tcp_setsockopt_mp(IPPROTO_IPV6,
18405 		    IPV6_RECVPKTINFO, (char *)&on, sizeof (on));
18406 		if (mp != NULL)
18407 			linkb(opt_mp, mp);
18408 	}
18409
18410
18411 	mutex_enter(&listener->tcp_eager_lock);
18412
18413 	if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
18414
18415 		tcp_t *tail;
18416 		tcp_t *tcp;
18417 		mblk_t *mp1;
18418
18419 		tcp = listener->tcp_eager_prev_q0;
18420 		/*
18421 		 * listener->tcp_eager_prev_q0 points to the TAIL of the
18422 		 * deferred T_conn_ind queue. We need to get to the head
18423 		 * of the queue in order to send up the T_conn_ind's in the
18424 		 * same order as the 3WHS completed.
18425 		 */
18426 		while (tcp != listener) {
18427 			if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0 &&
18428 			    !tcp->tcp_kssl_pending)
18429 				break;
18430 			else
18431 				tcp = tcp->tcp_eager_prev_q0;
18432 		}
18433 		/* None of the pending eagers can be sent up now */
18434 		if (tcp == listener)
18435 			goto no_more_eagers;
18436
18437 		mp1 = tcp->tcp_conn.tcp_eager_conn_ind;
18438 		tcp->tcp_conn.tcp_eager_conn_ind = NULL;
18439 		/* Move from q0 to q */
18440 		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
18441 		listener->tcp_conn_req_cnt_q0--;
18442 		listener->tcp_conn_req_cnt_q++;
18443 		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
18444 		    tcp->tcp_eager_prev_q0;
18445 		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
18446 		    tcp->tcp_eager_next_q0;
18447 		tcp->tcp_eager_prev_q0 = NULL;
18448 		tcp->tcp_eager_next_q0 = NULL;
18449 		tcp->tcp_conn_def_q0 = B_FALSE;
18450
18451 		/* Make sure the tcp isn't in the list of droppables */
18452 		ASSERT(tcp->tcp_eager_next_drop_q0 == NULL &&
18453 		    tcp->tcp_eager_prev_drop_q0 == NULL);
18454
18455 		/*
18456 		 * Insert at the end of the queue because sockfs sends
18457 		 * down T_CONN_RES in chronological order. Leaving
18458 		 * the older conn indications at the front of the queue
18459 		 * helps reduce search time.
18460 		 */
18461 		tail = listener->tcp_eager_last_q;
18462 		if (tail != NULL) {
18463 			tail->tcp_eager_next_q = tcp;
18464 		} else {
18465 			listener->tcp_eager_next_q = tcp;
18466 		}
18467 		listener->tcp_eager_last_q = tcp;
18468 		tcp->tcp_eager_next_q = NULL;
18469
18470 		/* Need to get inside the listener perimeter */
18471 		CONN_INC_REF(listener->tcp_connp);
18472 		squeue_fill(listener->tcp_connp->conn_sqp, mp1,
18473 		    tcp_send_pending, listener->tcp_connp,
18474 		    SQTAG_TCP_SEND_PENDING);
18475 	}
18476 no_more_eagers:
18477 	tcp_eager_unlink(eager);
18478 	mutex_exit(&listener->tcp_eager_lock);
18479
18480 	/*
18481 	 * At this point, the eager is detached from the listener
18482 	 * but we still have an extra ref on the eager (apart from the
18483 	 * usual tcp references). The ref was placed in tcp_rput_data
18484 	 * before sending the conn_ind in tcp_send_conn_ind.
18485 	 * The ref will be dropped in tcp_accept_finish().
18486 */ 18487 squeue_enter_nodrain(econnp->conn_sqp, opt_mp, 18488 tcp_accept_finish, econnp, SQTAG_TCP_ACCEPT_FINISH_Q0); 18489 return; 18490 default: 18491 mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0); 18492 if (mp != NULL) 18493 putnext(rq, mp); 18494 return; 18495 } 18496 } 18497 18498 void 18499 tcp_wput(queue_t *q, mblk_t *mp) 18500 { 18501 conn_t *connp = Q_TO_CONN(q); 18502 tcp_t *tcp; 18503 void (*output_proc)(); 18504 t_scalar_t type; 18505 uchar_t *rptr; 18506 struct iocblk *iocp; 18507 uint32_t msize; 18508 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 18509 18510 ASSERT(connp->conn_ref >= 2); 18511 18512 switch (DB_TYPE(mp)) { 18513 case M_DATA: 18514 tcp = connp->conn_tcp; 18515 ASSERT(tcp != NULL); 18516 18517 msize = msgdsize(mp); 18518 18519 mutex_enter(&tcp->tcp_non_sq_lock); 18520 tcp->tcp_squeue_bytes += msize; 18521 if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater) { 18522 tcp_setqfull(tcp); 18523 } 18524 mutex_exit(&tcp->tcp_non_sq_lock); 18525 18526 CONN_INC_REF(connp); 18527 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18528 tcp_output, connp, SQTAG_TCP_OUTPUT); 18529 return; 18530 case M_PROTO: 18531 case M_PCPROTO: 18532 /* 18533 * if it is a snmp message, don't get behind the squeue 18534 */ 18535 tcp = connp->conn_tcp; 18536 rptr = mp->b_rptr; 18537 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 18538 type = ((union T_primitives *)rptr)->type; 18539 } else { 18540 if (tcp->tcp_debug) { 18541 (void) strlog(TCP_MOD_ID, 0, 1, 18542 SL_ERROR|SL_TRACE, 18543 "tcp_wput_proto, dropping one..."); 18544 } 18545 freemsg(mp); 18546 return; 18547 } 18548 if (type == T_SVR4_OPTMGMT_REQ) { 18549 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 18550 if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get, 18551 cr)) { 18552 /* 18553 * This was a SNMP request 18554 */ 18555 return; 18556 } else { 18557 output_proc = tcp_wput_proto; 18558 } 18559 } else { 18560 output_proc = tcp_wput_proto; 18561 } 18562 break; 18563 case M_IOCTL: 18564 /* 18565 * Most ioctls can be processed right away without going via 18566 * squeues - process them right here. Those that do require 18567 * squeue (currently TCP_IOC_DEFAULT_Q and _SIOCSOCKFALLBACK) 18568 * are processed by tcp_wput_ioctl(). 18569 */ 18570 iocp = (struct iocblk *)mp->b_rptr; 18571 tcp = connp->conn_tcp; 18572 18573 switch (iocp->ioc_cmd) { 18574 case TCP_IOC_ABORT_CONN: 18575 tcp_ioctl_abort_conn(q, mp); 18576 return; 18577 case TI_GETPEERNAME: 18578 if (tcp->tcp_state < TCPS_SYN_RCVD) { 18579 iocp->ioc_error = ENOTCONN; 18580 iocp->ioc_count = 0; 18581 mp->b_datap->db_type = M_IOCACK; 18582 qreply(q, mp); 18583 return; 18584 } 18585 /* FALLTHRU */ 18586 case TI_GETMYNAME: 18587 mi_copyin(q, mp, NULL, 18588 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 18589 return; 18590 case ND_SET: 18591 /* nd_getset does the necessary checks */ 18592 case ND_GET: 18593 if (!nd_getset(q, tcps->tcps_g_nd, mp)) { 18594 CALL_IP_WPUT(connp, q, mp); 18595 return; 18596 } 18597 qreply(q, mp); 18598 return; 18599 case TCP_IOC_DEFAULT_Q: 18600 /* 18601 * Wants to be the default wq. Check the credentials 18602 * first, the rest is executed via squeue. 
18603 */ 18604 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 18605 iocp->ioc_error = EPERM; 18606 iocp->ioc_count = 0; 18607 mp->b_datap->db_type = M_IOCACK; 18608 qreply(q, mp); 18609 return; 18610 } 18611 output_proc = tcp_wput_ioctl; 18612 break; 18613 default: 18614 output_proc = tcp_wput_ioctl; 18615 break; 18616 } 18617 break; 18618 default: 18619 output_proc = tcp_wput_nondata; 18620 break; 18621 } 18622 18623 CONN_INC_REF(connp); 18624 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18625 output_proc, connp, SQTAG_TCP_WPUT_OTHER); 18626 } 18627 18628 /* 18629 * Initial STREAMS write side put() procedure for sockets. It tries to 18630 * handle the T_CAPABILITY_REQ which sockfs sends down while setting 18631 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages 18632 * are handled by tcp_wput() as usual. 18633 * 18634 * All further messages will also be handled by tcp_wput() because we cannot 18635 * be sure that the above short cut is safe later. 18636 */ 18637 static void 18638 tcp_wput_sock(queue_t *wq, mblk_t *mp) 18639 { 18640 conn_t *connp = Q_TO_CONN(wq); 18641 tcp_t *tcp = connp->conn_tcp; 18642 struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr; 18643 18644 ASSERT(wq->q_qinfo == &tcp_sock_winit); 18645 wq->q_qinfo = &tcp_winit; 18646 18647 ASSERT(IPCL_IS_TCP(connp)); 18648 ASSERT(TCP_IS_SOCKET(tcp)); 18649 18650 if (DB_TYPE(mp) == M_PCPROTO && 18651 MBLKL(mp) == sizeof (struct T_capability_req) && 18652 car->PRIM_type == T_CAPABILITY_REQ) { 18653 tcp_capability_req(tcp, mp); 18654 return; 18655 } 18656 18657 tcp_wput(wq, mp); 18658 } 18659 18660 static boolean_t 18661 tcp_zcopy_check(tcp_t *tcp) 18662 { 18663 conn_t *connp = tcp->tcp_connp; 18664 ire_t *ire; 18665 boolean_t zc_enabled = B_FALSE; 18666 tcp_stack_t *tcps = tcp->tcp_tcps; 18667 18668 if (do_tcpzcopy == 2) 18669 zc_enabled = B_TRUE; 18670 else if (tcp->tcp_ipversion == IPV4_VERSION && 18671 IPCL_IS_CONNECTED(connp) && 18672 (connp->conn_flags & IPCL_CHECK_POLICY) == 0 && 18673 connp->conn_dontroute == 0 && 18674 !connp->conn_nexthop_set && 18675 connp->conn_outgoing_ill == NULL && 18676 connp->conn_nofailover_ill == NULL && 18677 do_tcpzcopy == 1) { 18678 /* 18679 * the checks above closely resemble the fast path checks 18680 * in tcp_send_data(). 
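	 *
	 * To summarize how do_tcpzcopy is consulted here (a reader's note,
	 * not an authoritative table):
	 *	2 - enable zero-copy unconditionally;
	 *	1 - enable it when the IPv4 fast path checks above pass;
	 *	any other value (e.g. 0) - leave zero-copy off.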
18681 */ 18682 mutex_enter(&connp->conn_lock); 18683 ire = connp->conn_ire_cache; 18684 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18685 if (ire != NULL && !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18686 IRE_REFHOLD(ire); 18687 if (ire->ire_stq != NULL) { 18688 ill_t *ill = (ill_t *)ire->ire_stq->q_ptr; 18689 18690 zc_enabled = ill && (ill->ill_capabilities & 18691 ILL_CAPAB_ZEROCOPY) && 18692 (ill->ill_zerocopy_capab-> 18693 ill_zerocopy_flags != 0); 18694 } 18695 IRE_REFRELE(ire); 18696 } 18697 mutex_exit(&connp->conn_lock); 18698 } 18699 tcp->tcp_snd_zcopy_on = zc_enabled; 18700 if (!TCP_IS_DETACHED(tcp)) { 18701 if (zc_enabled) { 18702 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMSAFE); 18703 TCP_STAT(tcps, tcp_zcopy_on); 18704 } else { 18705 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18706 TCP_STAT(tcps, tcp_zcopy_off); 18707 } 18708 } 18709 return (zc_enabled); 18710 } 18711 18712 static mblk_t * 18713 tcp_zcopy_disable(tcp_t *tcp, mblk_t *bp) 18714 { 18715 tcp_stack_t *tcps = tcp->tcp_tcps; 18716 18717 if (do_tcpzcopy == 2) 18718 return (bp); 18719 else if (tcp->tcp_snd_zcopy_on) { 18720 tcp->tcp_snd_zcopy_on = B_FALSE; 18721 if (!TCP_IS_DETACHED(tcp)) { 18722 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18723 TCP_STAT(tcps, tcp_zcopy_disable); 18724 } 18725 } 18726 return (tcp_zcopy_backoff(tcp, bp, 0)); 18727 } 18728 18729 /* 18730 * Backoff from a zero-copy mblk by copying data to a new mblk and freeing 18731 * the original desballoca'ed segmapped mblk. 18732 */ 18733 static mblk_t * 18734 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, int fix_xmitlist) 18735 { 18736 mblk_t *head, *tail, *nbp; 18737 tcp_stack_t *tcps = tcp->tcp_tcps; 18738 18739 if (IS_VMLOANED_MBLK(bp)) { 18740 TCP_STAT(tcps, tcp_zcopy_backoff); 18741 if ((head = copyb(bp)) == NULL) { 18742 /* fail to backoff; leave it for the next backoff */ 18743 tcp->tcp_xmit_zc_clean = B_FALSE; 18744 return (bp); 18745 } 18746 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18747 if (fix_xmitlist) 18748 tcp_zcopy_notify(tcp); 18749 else 18750 head->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 18751 } 18752 nbp = bp->b_cont; 18753 if (fix_xmitlist) { 18754 head->b_prev = bp->b_prev; 18755 head->b_next = bp->b_next; 18756 if (tcp->tcp_xmit_tail == bp) 18757 tcp->tcp_xmit_tail = head; 18758 } 18759 bp->b_next = NULL; 18760 bp->b_prev = NULL; 18761 freeb(bp); 18762 } else { 18763 head = bp; 18764 nbp = bp->b_cont; 18765 } 18766 tail = head; 18767 while (nbp) { 18768 if (IS_VMLOANED_MBLK(nbp)) { 18769 TCP_STAT(tcps, tcp_zcopy_backoff); 18770 if ((tail->b_cont = copyb(nbp)) == NULL) { 18771 tcp->tcp_xmit_zc_clean = B_FALSE; 18772 tail->b_cont = nbp; 18773 return (head); 18774 } 18775 tail = tail->b_cont; 18776 if (nbp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18777 if (fix_xmitlist) 18778 tcp_zcopy_notify(tcp); 18779 else 18780 tail->b_datap->db_struioflag |= 18781 STRUIO_ZCNOTIFY; 18782 } 18783 bp = nbp; 18784 nbp = nbp->b_cont; 18785 if (fix_xmitlist) { 18786 tail->b_prev = bp->b_prev; 18787 tail->b_next = bp->b_next; 18788 if (tcp->tcp_xmit_tail == bp) 18789 tcp->tcp_xmit_tail = tail; 18790 } 18791 bp->b_next = NULL; 18792 bp->b_prev = NULL; 18793 freeb(bp); 18794 } else { 18795 tail->b_cont = nbp; 18796 tail = nbp; 18797 nbp = nbp->b_cont; 18798 } 18799 } 18800 if (fix_xmitlist) { 18801 tcp->tcp_xmit_last = tail; 18802 tcp->tcp_xmit_zc_clean = B_TRUE; 18803 } 18804 return (head); 18805 } 18806 18807 static void 18808 tcp_zcopy_notify(tcp_t *tcp) 18809 { 18810 struct stdata *stp; 18811 18812 if 
(tcp->tcp_detached)
18813 		return;
18814 	stp = STREAM(tcp->tcp_rq);
18815 	mutex_enter(&stp->sd_lock);
18816 	stp->sd_flag |= STZCNOTIFY;
18817 	cv_broadcast(&stp->sd_zcopy_wait);
18818 	mutex_exit(&stp->sd_lock);
18819 }
18820
18821 static boolean_t
18822 tcp_send_find_ire(tcp_t *tcp, ipaddr_t *dst, ire_t **irep)
18823 {
18824 	ire_t *ire;
18825 	conn_t *connp = tcp->tcp_connp;
18826 	tcp_stack_t *tcps = tcp->tcp_tcps;
18827 	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
18828
18829 	mutex_enter(&connp->conn_lock);
18830 	ire = connp->conn_ire_cache;
18831 	ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT));
18832
18833 	if ((ire != NULL) &&
18834 	    (((dst != NULL) && (ire->ire_addr == *dst)) || ((dst == NULL) &&
18835 	    IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, &tcp->tcp_ip6h->ip6_dst))) &&
18836 	    !(ire->ire_marks & IRE_MARK_CONDEMNED)) {
18837 		IRE_REFHOLD(ire);
18838 		mutex_exit(&connp->conn_lock);
18839 	} else {
18840 		boolean_t cached = B_FALSE;
18841 		ts_label_t *tsl;
18842
18843 		/* force a recheck later on */
18844 		tcp->tcp_ire_ill_check_done = B_FALSE;
18845
18846 		TCP_DBGSTAT(tcps, tcp_ire_null1);
18847 		connp->conn_ire_cache = NULL;
18848 		mutex_exit(&connp->conn_lock);
18849
18850 		if (ire != NULL)
18851 			IRE_REFRELE_NOTR(ire);
18852
18853 		tsl = crgetlabel(CONN_CRED(connp));
18854 		ire = (dst ?
18855 		    ire_cache_lookup(*dst, connp->conn_zoneid, tsl, ipst) :
18856 		    ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst,
18857 		    connp->conn_zoneid, tsl, ipst));
18858
18859 		if (ire == NULL) {
18860 			TCP_STAT(tcps, tcp_ire_null);
18861 			return (B_FALSE);
18862 		}
18863
18864 		IRE_REFHOLD_NOTR(ire);
18865 		/*
18866 		 * Since we are inside the squeue, there cannot be another
18867 		 * thread in TCP trying to set the conn_ire_cache now. The
18868 		 * check for IRE_MARK_CONDEMNED ensures that an interface
18869 		 * unplumb thread has not yet started cleaning up the conns.
18870 		 * Hence we don't need to grab the conn lock.
18871 		 */
18872 		if (CONN_CACHE_IRE(connp)) {
18873 			rw_enter(&ire->ire_bucket->irb_lock, RW_READER);
18874 			if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) {
18875 				TCP_CHECK_IREINFO(tcp, ire);
18876 				connp->conn_ire_cache = ire;
18877 				cached = B_TRUE;
18878 			}
18879 			rw_exit(&ire->ire_bucket->irb_lock);
18880 		}
18881
18882 		/*
18883 		 * We can continue to use the ire but since it was
18884 		 * not cached, we should drop the extra reference.
18885 		 */
18886 		if (!cached)
18887 			IRE_REFRELE_NOTR(ire);
18888
18889 		/*
18890 		 * Rampart note: no need to select a new label here, since
18891 		 * labels are not allowed to change during the life of a TCP
18892 		 * connection.
18893 		 */
18894 	}
18895
18896 	*irep = ire;
18897
18898 	return (B_TRUE);
18899 }
18900
18901 /*
18902 * Called from tcp_send() or tcp_send_data() to find a workable IRE and ILL.
18903 *
18904 * B_TRUE = success; *irep and *illp have been set.
18905 * B_FALSE = failed to find a usable ire and ill.
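 *
 * A caller is expected to fall back to the legacy IP path on failure,
 * much as tcp_send_data() below does (modulo its zero-copy backoff):
 *
 *	if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) {
 *		CALL_IP_WPUT(connp, q, mp);
 *		return;
 *	}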
18906 */ 18907 static boolean_t 18908 tcp_send_find_ire_ill(tcp_t *tcp, mblk_t *mp, ire_t **irep, ill_t **illp) 18909 { 18910 ipha_t *ipha; 18911 ipaddr_t dst; 18912 ire_t *ire; 18913 ill_t *ill; 18914 conn_t *connp = tcp->tcp_connp; 18915 mblk_t *ire_fp_mp; 18916 tcp_stack_t *tcps = tcp->tcp_tcps; 18917 18918 if (mp != NULL) 18919 ipha = (ipha_t *)mp->b_rptr; 18920 else 18921 ipha = tcp->tcp_ipha; 18922 dst = ipha->ipha_dst; 18923 18924 if (!tcp_send_find_ire(tcp, &dst, &ire)) 18925 return (B_FALSE); 18926 18927 if ((ire->ire_flags & RTF_MULTIRT) || 18928 (ire->ire_stq == NULL) || 18929 (ire->ire_nce == NULL) || 18930 ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) || 18931 ((mp != NULL) && (ire->ire_max_frag < ntohs(ipha->ipha_length) || 18932 MBLKL(ire_fp_mp) > MBLKHEAD(mp)))) { 18933 TCP_STAT(tcps, tcp_ip_ire_send); 18934 IRE_REFRELE(ire); 18935 return (B_FALSE); 18936 } 18937 18938 ill = ire_to_ill(ire); 18939 if (connp->conn_outgoing_ill != NULL) { 18940 ill_t *conn_outgoing_ill = NULL; 18941 /* 18942 * Choose a good ill in the group to send the packets on. 18943 */ 18944 ire = conn_set_outgoing_ill(connp, ire, &conn_outgoing_ill); 18945 ill = ire_to_ill(ire); 18946 } 18947 ASSERT(ill != NULL); 18948 18949 if (!tcp->tcp_ire_ill_check_done) { 18950 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 18951 tcp->tcp_ire_ill_check_done = B_TRUE; 18952 } 18953 18954 *irep = ire; 18955 *illp = ill; 18956 18957 return (B_TRUE); 18958 } 18959 18960 static void 18961 tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp) 18962 { 18963 ipha_t *ipha; 18964 ipaddr_t src; 18965 ipaddr_t dst; 18966 uint32_t cksum; 18967 ire_t *ire; 18968 uint16_t *up; 18969 ill_t *ill; 18970 conn_t *connp = tcp->tcp_connp; 18971 uint32_t hcksum_txflags = 0; 18972 mblk_t *ire_fp_mp; 18973 uint_t ire_fp_mp_len; 18974 tcp_stack_t *tcps = tcp->tcp_tcps; 18975 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 18976 18977 ASSERT(DB_TYPE(mp) == M_DATA); 18978 18979 if (DB_CRED(mp) == NULL) 18980 mblk_setcred(mp, CONN_CRED(connp)); 18981 18982 ipha = (ipha_t *)mp->b_rptr; 18983 src = ipha->ipha_src; 18984 dst = ipha->ipha_dst; 18985 18986 /* 18987 * Drop off fast path for IPv6 and also if options are present or 18988 * we need to resolve a TS label. 
18989 */ 18990 if (tcp->tcp_ipversion != IPV4_VERSION || 18991 !IPCL_IS_CONNECTED(connp) || 18992 !CONN_IS_LSO_MD_FASTPATH(connp) || 18993 (connp->conn_flags & IPCL_CHECK_POLICY) != 0 || 18994 !connp->conn_ulp_labeled || 18995 ipha->ipha_ident == IP_HDR_INCLUDED || 18996 ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION || 18997 IPP_ENABLED(IPP_LOCAL_OUT, ipst)) { 18998 if (tcp->tcp_snd_zcopy_aware) 18999 mp = tcp_zcopy_disable(tcp, mp); 19000 TCP_STAT(tcps, tcp_ip_send); 19001 CALL_IP_WPUT(connp, q, mp); 19002 return; 19003 } 19004 19005 if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) { 19006 if (tcp->tcp_snd_zcopy_aware) 19007 mp = tcp_zcopy_backoff(tcp, mp, 0); 19008 CALL_IP_WPUT(connp, q, mp); 19009 return; 19010 } 19011 ire_fp_mp = ire->ire_nce->nce_fp_mp; 19012 ire_fp_mp_len = MBLKL(ire_fp_mp); 19013 19014 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED); 19015 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1); 19016 #ifndef _BIG_ENDIAN 19017 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8); 19018 #endif 19019 19020 /* 19021 * Check to see if we need to re-enable LSO/MDT for this connection 19022 * because it was previously disabled due to changes in the ill; 19023 * note that by doing it here, this re-enabling only applies when 19024 * the packet is not dispatched through CALL_IP_WPUT(). 19025 * 19026 * That means for IPv4, it is worth re-enabling LSO/MDT for the fastpath 19027 * case, since that's how we ended up here. For IPv6, we do the 19028 * re-enabling work in ip_xmit_v6(), albeit indirectly via squeue. 19029 */ 19030 if (connp->conn_lso_ok && !tcp->tcp_lso && ILL_LSO_TCP_USABLE(ill)) { 19031 /* 19032 * Restore LSO for this connection, so that next time around 19033 * it is eligible to go through tcp_lsosend() path again. 19034 */ 19035 TCP_STAT(tcps, tcp_lso_enabled); 19036 tcp->tcp_lso = B_TRUE; 19037 ip1dbg(("tcp_send_data: reenabling LSO for connp %p on " 19038 "interface %s\n", (void *)connp, ill->ill_name)); 19039 } else if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) { 19040 /* 19041 * Restore MDT for this connection, so that next time around 19042 * it is eligible to go through tcp_multisend() path again. 19043 */ 19044 TCP_STAT(tcps, tcp_mdt_conn_resumed1); 19045 tcp->tcp_mdt = B_TRUE; 19046 ip1dbg(("tcp_send_data: reenabling MDT for connp %p on " 19047 "interface %s\n", (void *)connp, ill->ill_name)); 19048 } 19049 19050 if (tcp->tcp_snd_zcopy_aware) { 19051 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 || 19052 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0)) 19053 mp = tcp_zcopy_disable(tcp, mp); 19054 /* 19055 * we shouldn't need to reset ipha as the mp containing 19056 * ipha should never be a zero-copy mp. 19057 */ 19058 } 19059 19060 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) { 19061 ASSERT(ill->ill_hcksum_capab != NULL); 19062 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags; 19063 } 19064 19065 /* pseudo-header checksum (do it in parts for IP header checksum) */ 19066 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF); 19067 19068 ASSERT(ipha->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION); 19069 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 19070 19071 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up, 19072 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum); 19073 19074 /* Software checksum? 
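	 * If IP_CKSUM_XMIT_FAST left no offload flags on the dblk, the sum
	 * was computed in software; account for that in the stats below.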
 */
19075 	if (DB_CKSUMFLAGS(mp) == 0) {
19076 		TCP_STAT(tcps, tcp_out_sw_cksum);
19077 		TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes,
19078 		    ntohs(ipha->ipha_length) - IP_SIMPLE_HDR_LENGTH);
19079 	}
19080
19081 	ipha->ipha_fragment_offset_and_flags |=
19082 	    (uint32_t)htons(ire->ire_frag_flag);
19083
19084 	/* Calculate the IP header checksum if hardware isn't capable */
19085 	if (!(DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM)) {
19086 		IP_HDR_CKSUM(ipha, cksum, ((uint32_t *)ipha)[0],
19087 		    ((uint16_t *)ipha)[4]);
19088 	}
19089
19090 	ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
19091 	mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
19092 	bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
19093
19094 	UPDATE_OB_PKT_COUNT(ire);
19095 	ire->ire_last_used_time = lbolt;
19096
19097 	BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
19098 	BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
19099 	UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
19100 	    ntohs(ipha->ipha_length));
19101
19102 	if (ILL_DLS_CAPABLE(ill)) {
19103 		/*
19104 		 * Send the packet directly to DLD, where it may be queued
19105 		 * depending on the availability of transmit resources at
19106 		 * the media layer.
19107 		 */
19108 		IP_DLS_ILL_TX(ill, ipha, mp, ipst);
19109 	} else {
19110 		ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
19111 		DTRACE_PROBE4(ip4__physical__out__start,
19112 		    ill_t *, NULL, ill_t *, out_ill,
19113 		    ipha_t *, ipha, mblk_t *, mp);
19114 		FW_HOOKS(ipst->ips_ip4_physical_out_event,
19115 		    ipst->ips_ipv4firewall_physical_out,
19116 		    NULL, out_ill, ipha, mp, mp, 0, ipst);
19117 		DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
19118 		if (mp != NULL)
19119 			putnext(ire->ire_stq, mp);
19120 	}
19121 	IRE_REFRELE(ire);
19122 }
19123
19124 /*
19125 * This handles the case when the receiver has shrunk its window. Per RFC
19126 * 1122, if the receiver shrinks the window, i.e. moves the right window
19127 * edge to the left, then we should not send new data, but should retransmit
19128 * normally the old unacked data between suna and suna + swnd. We might have
19129 * sent data that is now outside the new window; pretend that we didn't send it.
19130 */
19131 static void
19132 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
19133 {
19134 	uint32_t snxt = tcp->tcp_snxt;
19135 	mblk_t *xmit_tail;
19136 	int32_t offset;
19137
19138 	ASSERT(shrunk_count > 0);
19139
19140 	/* Pretend we didn't send the data outside the window */
19141 	snxt -= shrunk_count;
19142
19143 	/* Get the mblk and the offset in it per the shrunk window */
19144 	xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
19145
19146 	ASSERT(xmit_tail != NULL);
19147
19148 	/* Reset all the values per the now shrunk window */
19149 	tcp->tcp_snxt = snxt;
19150 	tcp->tcp_xmit_tail = xmit_tail;
19151 	tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr - xmit_tail->b_rptr -
19152 	    offset;
19153 	tcp->tcp_unsent += shrunk_count;
19154
19155 	if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
19156 		/*
19157 		 * Make sure the timer is running so that we will probe a zero
19158 		 * window.
19159 		 */
19160 		TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
19161 }
19162
19163
19164 /*
19165 * The normal TCP data output path.
19166 * NOTE: the logic of the fast path is duplicated from this function.
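 *
 * In outline (a reader's summary of the code below, not a contract):
 * append the new mblk chain to the transmit list, compute the usable
 * window as MIN(swnd, cwnd) - unacked bytes, possibly hold back a small
 * tail per Nagle, then cut and send segments via tcp_send() or, when
 * profitable, tcp_multisend().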
19167 */ 19168 static void 19169 tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent) 19170 { 19171 int len; 19172 mblk_t *local_time; 19173 mblk_t *mp1; 19174 uint32_t snxt; 19175 int tail_unsent; 19176 int tcpstate; 19177 int usable = 0; 19178 mblk_t *xmit_tail; 19179 queue_t *q = tcp->tcp_wq; 19180 int32_t mss; 19181 int32_t num_sack_blk = 0; 19182 int32_t tcp_hdr_len; 19183 int32_t tcp_tcp_hdr_len; 19184 int mdt_thres; 19185 int rc; 19186 tcp_stack_t *tcps = tcp->tcp_tcps; 19187 ip_stack_t *ipst; 19188 19189 tcpstate = tcp->tcp_state; 19190 if (mp == NULL) { 19191 /* 19192 * tcp_wput_data() with NULL mp should only be called when 19193 * there is unsent data. 19194 */ 19195 ASSERT(tcp->tcp_unsent > 0); 19196 /* Really tacky... but we need this for detached closes. */ 19197 len = tcp->tcp_unsent; 19198 goto data_null; 19199 } 19200 19201 #if CCS_STATS 19202 wrw_stats.tot.count++; 19203 wrw_stats.tot.bytes += msgdsize(mp); 19204 #endif 19205 ASSERT(mp->b_datap->db_type == M_DATA); 19206 /* 19207 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ, 19208 * or before a connection attempt has begun. 19209 */ 19210 if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT || 19211 (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19212 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19213 #ifdef DEBUG 19214 cmn_err(CE_WARN, 19215 "tcp_wput_data: data after ordrel, %s", 19216 tcp_display(tcp, NULL, 19217 DISP_ADDR_AND_PORT)); 19218 #else 19219 if (tcp->tcp_debug) { 19220 (void) strlog(TCP_MOD_ID, 0, 1, 19221 SL_TRACE|SL_ERROR, 19222 "tcp_wput_data: data after ordrel, %s\n", 19223 tcp_display(tcp, NULL, 19224 DISP_ADDR_AND_PORT)); 19225 } 19226 #endif /* DEBUG */ 19227 } 19228 if (tcp->tcp_snd_zcopy_aware && 19229 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) != 0) 19230 tcp_zcopy_notify(tcp); 19231 freemsg(mp); 19232 mutex_enter(&tcp->tcp_non_sq_lock); 19233 if (tcp->tcp_flow_stopped && 19234 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19235 tcp_clrqfull(tcp); 19236 } 19237 mutex_exit(&tcp->tcp_non_sq_lock); 19238 return; 19239 } 19240 19241 /* Strip empties */ 19242 for (;;) { 19243 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 19244 (uintptr_t)INT_MAX); 19245 len = (int)(mp->b_wptr - mp->b_rptr); 19246 if (len > 0) 19247 break; 19248 mp1 = mp; 19249 mp = mp->b_cont; 19250 freeb(mp1); 19251 if (!mp) { 19252 return; 19253 } 19254 } 19255 19256 /* If we are the first on the list ... */ 19257 if (tcp->tcp_xmit_head == NULL) { 19258 tcp->tcp_xmit_head = mp; 19259 tcp->tcp_xmit_tail = mp; 19260 tcp->tcp_xmit_tail_unsent = len; 19261 } else { 19262 /* If tiny tx and room in txq tail, pullup to save mblks. 
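	 * (That is, if the write is smaller than tcp_tx_pull_len and the
	 * last mblk on the transmit list is unshared (db_ref == 1) with
	 * enough room up to db_lim, the bytes are copied in and the small
	 * mblk freed.)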
 */
19263 		struct datab *dp;
19264
19265 		mp1 = tcp->tcp_xmit_last;
19266 		if (len < tcp_tx_pull_len &&
19267 		    (dp = mp1->b_datap)->db_ref == 1 &&
19268 		    dp->db_lim - mp1->b_wptr >= len) {
19269 			ASSERT(len > 0);
19270 			ASSERT(!mp1->b_cont);
19271 			if (len == 1) {
19272 				*mp1->b_wptr++ = *mp->b_rptr;
19273 			} else {
19274 				bcopy(mp->b_rptr, mp1->b_wptr, len);
19275 				mp1->b_wptr += len;
19276 			}
19277 			if (mp1 == tcp->tcp_xmit_tail)
19278 				tcp->tcp_xmit_tail_unsent += len;
19279 			mp1->b_cont = mp->b_cont;
19280 			if (tcp->tcp_snd_zcopy_aware &&
19281 			    (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
19282 				mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
19283 			freeb(mp);
19284 			mp = mp1;
19285 		} else {
19286 			tcp->tcp_xmit_last->b_cont = mp;
19287 		}
19288 		len += tcp->tcp_unsent;
19289 	}
19290
19291 	/* Tack on however many more positive length mblks we have */
19292 	if ((mp1 = mp->b_cont) != NULL) {
19293 		do {
19294 			int tlen;
19295 			ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
19296 			    (uintptr_t)INT_MAX);
19297 			tlen = (int)(mp1->b_wptr - mp1->b_rptr);
19298 			if (tlen <= 0) {
19299 				mp->b_cont = mp1->b_cont;
19300 				freeb(mp1);
19301 			} else {
19302 				len += tlen;
19303 				mp = mp1;
19304 			}
19305 		} while ((mp1 = mp->b_cont) != NULL);
19306 	}
19307 	tcp->tcp_xmit_last = mp;
19308 	tcp->tcp_unsent = len;
19309
19310 	if (urgent)
19311 		usable = 1;
19312
19313 data_null:
19314 	snxt = tcp->tcp_snxt;
19315 	xmit_tail = tcp->tcp_xmit_tail;
19316 	tail_unsent = tcp->tcp_xmit_tail_unsent;
19317
19318 	/*
19319 	 * Note that tcp_mss has been adjusted to take into account the
19320 	 * timestamp option if applicable. Because SACK options do not
19321 	 * appear in every TCP segment and they are of variable lengths,
19322 	 * they cannot be included in tcp_mss. Thus we need to calculate
19323 	 * the actual segment length when we need to send a segment which
19324 	 * includes SACK options.
19325 	 */
19326 	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
19327 		int32_t opt_len;
19328
19329 		num_sack_blk = MIN(tcp->tcp_max_sack_blk,
19330 		    tcp->tcp_num_sack_blk);
19331 		opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
19332 		    2 + TCPOPT_HEADER_LEN;
19333 		mss = tcp->tcp_mss - opt_len;
19334 		tcp_hdr_len = tcp->tcp_hdr_len + opt_len;
19335 		tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + opt_len;
19336 	} else {
19337 		mss = tcp->tcp_mss;
19338 		tcp_hdr_len = tcp->tcp_hdr_len;
19339 		tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len;
19340 	}
19341
19342 	if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
19343 	    (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
19344 		SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
19345 	}
19346 	if (tcpstate == TCPS_SYN_RCVD) {
19347 		/*
19348 		 * The three-way connection establishment handshake is not
19349 		 * complete yet. We want to queue the data for transmission
19350 		 * after entering ESTABLISHED state (RFC793). A jump to
19351 		 * the "done" label effectively leaves data on the queue.
19352 		 */
19353 		goto done;
19354 	} else {
19355 		int usable_r;
19356
19357 		/*
19358 		 * In the special case when cwnd is zero, which can only
19359 		 * happen if the connection is ECN capable, return now.
19360 		 * New segments are sent using tcp_timer(). The timer
19361 		 * is set in tcp_rput_data().
19362 		 */
19363 		if (tcp->tcp_cwnd == 0) {
19364 			/*
19365 			 * Note that tcp_cwnd is 0 before the 3-way handshake
19366 			 * is finished.
19367 			 */
19368 			ASSERT(tcp->tcp_ecn_ok ||
19369 			    tcp->tcp_state < TCPS_ESTABLISHED);
19370 			return;
19371 		}
19372
19373 		/* NOTE: trouble if xmitting while SYN not acked?
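		 *
		 * The next two lines compute the remaining send window,
		 * i.e. usable_r = swnd - (snxt - suna). For example, with
		 * suna = 1000, snxt = 1400 and swnd = 8192, usable_r is
		 * 7792 bytes.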
 */
19374 		usable_r = snxt - tcp->tcp_suna;
19375 		usable_r = tcp->tcp_swnd - usable_r;
19376
19377 		/*
19378 		 * Check if the receiver has shrunk the window. If
19379 		 * tcp_wput_data() with a NULL mp is called, tcp_fin_sent
19380 		 * cannot be set as there is unsent data, so the FIN cannot
19381 		 * be sent out. Otherwise, we need to take the FIN into
19382 		 * account, as it consumes an "invisible" sequence number.
19383 		 */
19384 		ASSERT(tcp->tcp_fin_sent == 0);
19385 		if (usable_r < 0) {
19386 			/*
19387 			 * The receiver has shrunk the window and we have sent
19388 			 * -usable_r bytes of data beyond the window; re-adjust.
19389 			 *
19390 			 * If TCP window scaling is enabled, there can be
19391 			 * a round down error as the advertised receive window
19392 			 * is actually right shifted n bits. This means that
19393 			 * the lower n bits of info are wiped out. It will look
19394 			 * like the window has shrunk. Do a check here to
19395 			 * see if the shrunk amount is actually within the
19396 			 * error in the window calculation. If it is, just
19397 			 * return. Note that this check is inside the
19398 			 * shrunk window check. This makes sure that even
19399 			 * though tcp_process_shrunk_swnd() is not called,
19400 			 * we will stop further processing.
19401 			 */
19402 			if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
19403 				tcp_process_shrunk_swnd(tcp, -usable_r);
19404 			}
19405 			return;
19406 		}
19407
19408 		/* usable = MIN(swnd, cwnd) - unacked_bytes */
19409 		if (tcp->tcp_swnd > tcp->tcp_cwnd)
19410 			usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;
19411
19412 		/* usable = MIN(usable, unsent) */
19413 		if (usable_r > len)
19414 			usable_r = len;
19415
19416 		/* usable = MAX(usable, {1 for urgent, 0 for data}) */
19417 		if (usable_r > 0) {
19418 			usable = usable_r;
19419 		} else {
19420 			/* Bypass all other unnecessary processing. */
19421 			goto done;
19422 		}
19423 	}
19424
19425 	local_time = (mblk_t *)lbolt;
19426
19427 	/*
19428 	 * "Our" Nagle Algorithm. This is not the same as in the old
19429 	 * BSD. This is more in line with the true intent of Nagle.
19430 	 *
19431 	 * The conditions are:
19432 	 * 1. The amount of unsent data (or the amount of data which can be
19433 	 *    sent, whichever is smaller) is less than the Nagle limit.
19434 	 * 2. The last sent size is also less than the Nagle limit.
19435 	 * 3. There is unack'ed data.
19436 	 * 4. The urgent pointer is not set. Urgent data is sent ignoring
19437 	 *    the Nagle algorithm. This reduces the probability that urgent
19438 	 *    bytes get "merged" together.
19439 	 * 5. The app has not closed the connection. This eliminates the
19440 	 *    wait time of the receiving side waiting for the last piece of
19441 	 *    (small) data.
19442 	 *
19443 	 * If all are satisfied, exit without sending anything. Note
19444 	 * that the Nagle limit can be smaller than 1 MSS. The Nagle limit
19445 	 * is the smaller of 1 MSS and the global tcp_naglim_def (which
19446 	 * defaults to 4095).
19447 	 */
19448 	if (usable < (int)tcp->tcp_naglim &&
19449 	    tcp->tcp_naglim > tcp->tcp_last_sent_len &&
19450 	    snxt != tcp->tcp_suna &&
19451 	    !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
19452 	    !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
19453 		goto done;
19454 	}
19455
19456 	if (tcp->tcp_cork) {
19457 		/*
19458 		 * If the tcp->tcp_cork option is set, then we have to force
19459 		 * TCP not to send partial segments (smaller than MSS bytes).
19460 		 * We calculate the usable window based on the full mss now
19461 		 * and save the rest of the remaining data for later.
19462 		 */
19463 		if (usable < mss)
19464 			goto done;
19465 		usable = (usable / mss) * mss;
19466 	}
19467
19468 	/* Update the latest receive window size in the TCP header.
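	 * (The advertised value is tcp_rwnd right-shifted by the receive
	 * window scale, tcp_rcv_ws, per RFC 1323 window scaling.)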
*/ 19469 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 19470 tcp->tcp_tcph->th_win); 19471 19472 /* 19473 * Determine if it's worthwhile to attempt LSO or MDT, based on: 19474 * 19475 * 1. Simple TCP/IP{v4,v6} (no options). 19476 * 2. IPSEC/IPQoS processing is not needed for the TCP connection. 19477 * 3. If the TCP connection is in ESTABLISHED state. 19478 * 4. The TCP is not detached. 19479 * 19480 * If any of the above conditions have changed during the 19481 * connection, stop using LSO/MDT and restore the stream head 19482 * parameters accordingly. 19483 */ 19484 ipst = tcps->tcps_netstack->netstack_ip; 19485 19486 if ((tcp->tcp_lso || tcp->tcp_mdt) && 19487 ((tcp->tcp_ipversion == IPV4_VERSION && 19488 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 19489 (tcp->tcp_ipversion == IPV6_VERSION && 19490 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN) || 19491 tcp->tcp_state != TCPS_ESTABLISHED || 19492 TCP_IS_DETACHED(tcp) || !CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp) || 19493 CONN_IPSEC_OUT_ENCAPSULATED(tcp->tcp_connp) || 19494 IPP_ENABLED(IPP_LOCAL_OUT, ipst))) { 19495 if (tcp->tcp_lso) { 19496 tcp->tcp_connp->conn_lso_ok = B_FALSE; 19497 tcp->tcp_lso = B_FALSE; 19498 } else { 19499 tcp->tcp_connp->conn_mdt_ok = B_FALSE; 19500 tcp->tcp_mdt = B_FALSE; 19501 } 19502 19503 /* Anything other than detached is considered pathological */ 19504 if (!TCP_IS_DETACHED(tcp)) { 19505 if (tcp->tcp_lso) 19506 TCP_STAT(tcps, tcp_lso_disabled); 19507 else 19508 TCP_STAT(tcps, tcp_mdt_conn_halted1); 19509 (void) tcp_maxpsz_set(tcp, B_TRUE); 19510 } 19511 } 19512 19513 /* Use MDT if sendable amount is greater than the threshold */ 19514 if (tcp->tcp_mdt && 19515 (mdt_thres = mss << tcp_mdt_smss_threshold, usable > mdt_thres) && 19516 (tail_unsent > mdt_thres || (xmit_tail->b_cont != NULL && 19517 MBLKL(xmit_tail->b_cont) > mdt_thres)) && 19518 (tcp->tcp_valid_bits == 0 || 19519 tcp->tcp_valid_bits == TCP_FSS_VALID)) { 19520 ASSERT(tcp->tcp_connp->conn_mdt_ok); 19521 rc = tcp_multisend(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 19522 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 19523 local_time, mdt_thres); 19524 } else { 19525 rc = tcp_send(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 19526 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 19527 local_time, INT_MAX); 19528 } 19529 19530 /* Pretend that all we were trying to send really got sent */ 19531 if (rc < 0 && tail_unsent < 0) { 19532 do { 19533 xmit_tail = xmit_tail->b_cont; 19534 xmit_tail->b_prev = local_time; 19535 ASSERT((uintptr_t)(xmit_tail->b_wptr - 19536 xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 19537 tail_unsent += (int)(xmit_tail->b_wptr - 19538 xmit_tail->b_rptr); 19539 } while (tail_unsent < 0); 19540 } 19541 done:; 19542 tcp->tcp_xmit_tail = xmit_tail; 19543 tcp->tcp_xmit_tail_unsent = tail_unsent; 19544 len = tcp->tcp_snxt - snxt; 19545 if (len) { 19546 /* 19547 * If new data was sent, need to update the notsack 19548 * list, which is, afterall, data blocks that have 19549 * not been sack'ed by the receiver. New data is 19550 * not sack'ed. 19551 */ 19552 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 19553 /* len is a negative value. 
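		 * (tcp->tcp_snxt still holds the pre-send value here, so
		 * len = tcp_snxt - snxt is <= 0; subtracting it below grows
		 * tcp_pipe by the number of bytes just sent.)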
*/ 19554 tcp->tcp_pipe -= len; 19555 tcp_notsack_update(&(tcp->tcp_notsack_list), 19556 tcp->tcp_snxt, snxt, 19557 &(tcp->tcp_num_notsack_blk), 19558 &(tcp->tcp_cnt_notsack_list)); 19559 } 19560 tcp->tcp_snxt = snxt + tcp->tcp_fin_sent; 19561 tcp->tcp_rack = tcp->tcp_rnxt; 19562 tcp->tcp_rack_cnt = 0; 19563 if ((snxt + len) == tcp->tcp_suna) { 19564 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19565 } 19566 } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) { 19567 /* 19568 * Didn't send anything. Make sure the timer is running 19569 * so that we will probe a zero window. 19570 */ 19571 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19572 } 19573 /* Note that len is the amount we just sent but with a negative sign */ 19574 tcp->tcp_unsent += len; 19575 mutex_enter(&tcp->tcp_non_sq_lock); 19576 if (tcp->tcp_flow_stopped) { 19577 if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19578 tcp_clrqfull(tcp); 19579 } 19580 } else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) { 19581 tcp_setqfull(tcp); 19582 } 19583 mutex_exit(&tcp->tcp_non_sq_lock); 19584 } 19585 19586 /* 19587 * tcp_fill_header is called by tcp_send() and tcp_multisend() to fill the 19588 * outgoing TCP header with the template header, as well as other 19589 * options such as time-stamp, ECN and/or SACK. 19590 */ 19591 static void 19592 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk) 19593 { 19594 tcph_t *tcp_tmpl, *tcp_h; 19595 uint32_t *dst, *src; 19596 int hdrlen; 19597 19598 ASSERT(OK_32PTR(rptr)); 19599 19600 /* Template header */ 19601 tcp_tmpl = tcp->tcp_tcph; 19602 19603 /* Header of outgoing packet */ 19604 tcp_h = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 19605 19606 /* dst and src are opaque 32-bit fields, used for copying */ 19607 dst = (uint32_t *)rptr; 19608 src = (uint32_t *)tcp->tcp_iphc; 19609 hdrlen = tcp->tcp_hdr_len; 19610 19611 /* Fill time-stamp option if needed */ 19612 if (tcp->tcp_snd_ts_ok) { 19613 U32_TO_BE32((uint32_t)now, 19614 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4); 19615 U32_TO_BE32(tcp->tcp_ts_recent, 19616 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8); 19617 } else { 19618 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 19619 } 19620 19621 /* 19622 * Copy the template header; is this really more efficient than 19623 * calling bcopy()? For simple IPv4/TCP, it may be the case, 19624 * but perhaps not for other scenarios. 19625 */ 19626 dst[0] = src[0]; 19627 dst[1] = src[1]; 19628 dst[2] = src[2]; 19629 dst[3] = src[3]; 19630 dst[4] = src[4]; 19631 dst[5] = src[5]; 19632 dst[6] = src[6]; 19633 dst[7] = src[7]; 19634 dst[8] = src[8]; 19635 dst[9] = src[9]; 19636 if (hdrlen -= 40) { 19637 hdrlen >>= 2; 19638 dst += 10; 19639 src += 10; 19640 do { 19641 *dst++ = *src++; 19642 } while (--hdrlen); 19643 } 19644 19645 /* 19646 * Set the ECN info in the TCP header if it is not a zero 19647 * window probe. Zero window probe is only sent in 19648 * tcp_wput_data() and tcp_timer(). 
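	 *
	 * (Per RFC 3168: ECT marks the segment as ECN-capable, ECE echoes
	 * a congestion indication back to the peer, and CWR tells the peer
	 * that we have reduced our congestion window in response to its
	 * ECE.)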
19649 	 */
19650 	if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
19651 		SET_ECT(tcp, rptr);
19652
19653 		if (tcp->tcp_ecn_echo_on)
19654 			tcp_h->th_flags[0] |= TH_ECE;
19655 		if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
19656 			tcp_h->th_flags[0] |= TH_CWR;
19657 			tcp->tcp_ecn_cwr_sent = B_TRUE;
19658 		}
19659 	}
19660
19661 	/* Fill in SACK options */
19662 	if (num_sack_blk > 0) {
19663 		uchar_t *wptr = rptr + tcp->tcp_hdr_len;
19664 		sack_blk_t *tmp;
19665 		int32_t i;
19666
19667 		wptr[0] = TCPOPT_NOP;
19668 		wptr[1] = TCPOPT_NOP;
19669 		wptr[2] = TCPOPT_SACK;
19670 		wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
19671 		    sizeof (sack_blk_t);
19672 		wptr += TCPOPT_REAL_SACK_LEN;
19673
19674 		tmp = tcp->tcp_sack_list;
19675 		for (i = 0; i < num_sack_blk; i++) {
19676 			U32_TO_BE32(tmp[i].begin, wptr);
19677 			wptr += sizeof (tcp_seq);
19678 			U32_TO_BE32(tmp[i].end, wptr);
19679 			wptr += sizeof (tcp_seq);
19680 		}
19681 		tcp_h->th_offset_and_rsrvd[0] +=
19682 		    ((num_sack_blk * 2 + 1) << 4);
19683 	}
19684 }
19685
19686 /*
19687 * tcp_mdt_add_attrs() is called by tcp_multisend() in order to attach
19688 * the destination address and SAP attribute, and if necessary, the
19689 * hardware checksum offload attribute, to a Multidata message.
19690 */
19691 static int
19692 tcp_mdt_add_attrs(multidata_t *mmd, const mblk_t *dlmp, const boolean_t hwcksum,
19693     const uint32_t start, const uint32_t stuff, const uint32_t end,
19694     const uint32_t flags, tcp_stack_t *tcps)
19695 {
19696 	/* Add global destination address & SAP attribute */
19697 	if (dlmp == NULL || !ip_md_addr_attr(mmd, NULL, dlmp)) {
19698 		ip1dbg(("tcp_mdt_add_attrs: can't add global physical "
19699 		    "destination address+SAP\n"));
19700
19701 		if (dlmp != NULL)
19702 			TCP_STAT(tcps, tcp_mdt_allocfail);
19703 		return (-1);
19704 	}
19705
19706 	/* Add global hwcksum attribute */
19707 	if (hwcksum &&
19708 	    !ip_md_hcksum_attr(mmd, NULL, start, stuff, end, flags)) {
19709 		ip1dbg(("tcp_mdt_add_attrs: can't add global hardware "
19710 		    "checksum attribute\n"));
19711
19712 		TCP_STAT(tcps, tcp_mdt_allocfail);
19713 		return (-1);
19714 	}
19715
19716 	return (0);
19717 }
19718
19719 /*
19720 * A smaller, private version of pdescinfo_t, used specifically for TCP,
19721 * which allows for only two payload spans per packet.
19722 */
19723 typedef struct tcp_pdescinfo_s PDESCINFO_STRUCT(2) tcp_pdescinfo_t;
19724
19725 /*
19726 * tcp_multisend() is called by tcp_wput_data() for the Multidata Transmit
19727 * scheme, and returns one of the following:
19728 *
19729 * -1 = failed allocation.
19730 *  0 = success; burst count reached, or the usable send window is too small,
19731 *     and we'd rather wait until later before sending again.
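 *
 * (Multidata Transmit batches many segments into one message: the TCP/IP
 * headers are carved out of a shared header buffer, packet descriptors
 * point at payload spans in the caller's mblks, and the whole batch is
 * handed to the lower layer in a single putnext. The details follow
 * below.)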
19732 */ 19733 static int 19734 tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len, 19735 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable, 19736 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time, 19737 const int mdt_thres) 19738 { 19739 mblk_t *md_mp_head, *md_mp, *md_pbuf, *md_pbuf_nxt, *md_hbuf; 19740 multidata_t *mmd; 19741 uint_t obsegs, obbytes, hdr_frag_sz; 19742 uint_t cur_hdr_off, cur_pld_off, base_pld_off, first_snxt; 19743 int num_burst_seg, max_pld; 19744 pdesc_t *pkt; 19745 tcp_pdescinfo_t tcp_pkt_info; 19746 pdescinfo_t *pkt_info; 19747 int pbuf_idx, pbuf_idx_nxt; 19748 int seg_len, len, spill, af; 19749 boolean_t add_buffer, zcopy, clusterwide; 19750 boolean_t buf_trunked = B_FALSE; 19751 boolean_t rconfirm = B_FALSE; 19752 boolean_t done = B_FALSE; 19753 uint32_t cksum; 19754 uint32_t hwcksum_flags; 19755 ire_t *ire = NULL; 19756 ill_t *ill; 19757 ipha_t *ipha; 19758 ip6_t *ip6h; 19759 ipaddr_t src, dst; 19760 ill_zerocopy_capab_t *zc_cap = NULL; 19761 uint16_t *up; 19762 int err; 19763 conn_t *connp; 19764 mblk_t *mp, *mp1, *fw_mp_head = NULL; 19765 uchar_t *pld_start; 19766 tcp_stack_t *tcps = tcp->tcp_tcps; 19767 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 19768 19769 #ifdef _BIG_ENDIAN 19770 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 28) & 0x7) 19771 #else 19772 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 4) & 0x7) 19773 #endif 19774 19775 #define PREP_NEW_MULTIDATA() { \ 19776 mmd = NULL; \ 19777 md_mp = md_hbuf = NULL; \ 19778 cur_hdr_off = 0; \ 19779 max_pld = tcp->tcp_mdt_max_pld; \ 19780 pbuf_idx = pbuf_idx_nxt = -1; \ 19781 add_buffer = B_TRUE; \ 19782 zcopy = B_FALSE; \ 19783 } 19784 19785 #define PREP_NEW_PBUF() { \ 19786 md_pbuf = md_pbuf_nxt = NULL; \ 19787 pbuf_idx = pbuf_idx_nxt = -1; \ 19788 cur_pld_off = 0; \ 19789 first_snxt = *snxt; \ 19790 ASSERT(*tail_unsent > 0); \ 19791 base_pld_off = MBLKL(*xmit_tail) - *tail_unsent; \ 19792 } 19793 19794 ASSERT(mdt_thres >= mss); 19795 ASSERT(*usable > 0 && *usable > mdt_thres); 19796 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED); 19797 ASSERT(!TCP_IS_DETACHED(tcp)); 19798 ASSERT(tcp->tcp_valid_bits == 0 || 19799 tcp->tcp_valid_bits == TCP_FSS_VALID); 19800 ASSERT((tcp->tcp_ipversion == IPV4_VERSION && 19801 tcp->tcp_ip_hdr_len == IP_SIMPLE_HDR_LENGTH) || 19802 (tcp->tcp_ipversion == IPV6_VERSION && 19803 tcp->tcp_ip_hdr_len == IPV6_HDR_LEN)); 19804 19805 connp = tcp->tcp_connp; 19806 ASSERT(connp != NULL); 19807 ASSERT(CONN_IS_LSO_MD_FASTPATH(connp)); 19808 ASSERT(!CONN_IPSEC_OUT_ENCAPSULATED(connp)); 19809 19810 /* 19811 * Note that tcp will only declare at most 2 payload spans per 19812 * packet, which is much lower than the maximum allowable number 19813 * of packet spans per Multidata. For this reason, we use the 19814 * privately declared and smaller descriptor info structure, in 19815 * order to save some stack space. 19816 */ 19817 pkt_info = (pdescinfo_t *)&tcp_pkt_info; 19818 19819 af = (tcp->tcp_ipversion == IPV4_VERSION) ? AF_INET : AF_INET6; 19820 if (af == AF_INET) { 19821 dst = tcp->tcp_ipha->ipha_dst; 19822 src = tcp->tcp_ipha->ipha_src; 19823 ASSERT(!CLASSD(dst)); 19824 } 19825 ASSERT(af == AF_INET || 19826 !IN6_IS_ADDR_MULTICAST(&tcp->tcp_ip6h->ip6_dst)); 19827 19828 obsegs = obbytes = 0; 19829 num_burst_seg = tcp->tcp_snd_burst; 19830 md_mp_head = NULL; 19831 PREP_NEW_MULTIDATA(); 19832 19833 /* 19834 * Before we go on further, make sure there is an IRE that we can 19835 * use, and that the ILL supports MDT. 
Otherwise, there's no point 19836 * in proceeding any further, and we should just hand everything 19837 * off to the legacy path. 19838 */ 19839 if (!tcp_send_find_ire(tcp, (af == AF_INET) ? &dst : NULL, &ire)) 19840 goto legacy_send_no_md; 19841 19842 ASSERT(ire != NULL); 19843 ASSERT(af != AF_INET || ire->ire_ipversion == IPV4_VERSION); 19844 ASSERT(af == AF_INET || !IN6_IS_ADDR_V4MAPPED(&(ire->ire_addr_v6))); 19845 ASSERT(af == AF_INET || ire->ire_nce != NULL); 19846 ASSERT(!(ire->ire_type & IRE_BROADCAST)); 19847 /* 19848 * If we do support loopback for MDT (which requires modifications 19849 * to the receiving paths), the following assertions should go away, 19850 * and we would be sending the Multidata to loopback conn later on. 19851 */ 19852 ASSERT(!IRE_IS_LOCAL(ire)); 19853 ASSERT(ire->ire_stq != NULL); 19854 19855 ill = ire_to_ill(ire); 19856 ASSERT(ill != NULL); 19857 ASSERT(!ILL_MDT_CAPABLE(ill) || ill->ill_mdt_capab != NULL); 19858 19859 if (!tcp->tcp_ire_ill_check_done) { 19860 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 19861 tcp->tcp_ire_ill_check_done = B_TRUE; 19862 } 19863 19864 /* 19865 * If the underlying interface conditions have changed, or if the 19866 * new interface does not support MDT, go back to legacy path. 19867 */ 19868 if (!ILL_MDT_USABLE(ill) || (ire->ire_flags & RTF_MULTIRT) != 0) { 19869 /* don't go through this path anymore for this connection */ 19870 TCP_STAT(tcps, tcp_mdt_conn_halted2); 19871 tcp->tcp_mdt = B_FALSE; 19872 ip1dbg(("tcp_multisend: disabling MDT for connp %p on " 19873 "interface %s\n", (void *)connp, ill->ill_name)); 19874 /* IRE will be released prior to returning */ 19875 goto legacy_send_no_md; 19876 } 19877 19878 if (ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) 19879 zc_cap = ill->ill_zerocopy_capab; 19880 19881 /* 19882 * Check if we can take tcp fast-path. Note that "incomplete" 19883 * ire's (where the link-layer for next hop is not resolved 19884 * or where the fast-path header in nce_fp_mp is not available 19885 * yet) are sent down the legacy (slow) path. 19886 * NOTE: We should fix ip_xmit_v4 to handle M_MULTIDATA 19887 */ 19888 if (ire->ire_nce && ire->ire_nce->nce_state != ND_REACHABLE) { 19889 /* IRE will be released prior to returning */ 19890 goto legacy_send_no_md; 19891 } 19892 19893 /* go to legacy path if interface doesn't support zerocopy */ 19894 if (tcp->tcp_snd_zcopy_aware && do_tcpzcopy != 2 && 19895 (zc_cap == NULL || zc_cap->ill_zerocopy_flags == 0)) { 19896 /* IRE will be released prior to returning */ 19897 goto legacy_send_no_md; 19898 } 19899 19900 /* does the interface support hardware checksum offload? */ 19901 hwcksum_flags = 0; 19902 if (ILL_HCKSUM_CAPABLE(ill) && 19903 (ill->ill_hcksum_capab->ill_hcksum_txflags & 19904 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 | HCKSUM_INET_PARTIAL | 19905 HCKSUM_IPHDRCKSUM)) && dohwcksum) { 19906 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19907 HCKSUM_IPHDRCKSUM) 19908 hwcksum_flags = HCK_IPV4_HDRCKSUM; 19909 19910 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19911 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6)) 19912 hwcksum_flags |= HCK_FULLCKSUM; 19913 else if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19914 HCKSUM_INET_PARTIAL) 19915 hwcksum_flags |= HCK_PARTIALCKSUM; 19916 } 19917 19918 /* 19919 * Each header fragment consists of the leading extra space, 19920 * followed by the TCP/IP header, and the trailing extra space. 
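 * (For simple IPv4/TCP with no extra space on either side, that is
 * roundup(0 + 40 + 0, 4) = 40 bytes per header fragment.)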
19921 * We make sure that each header fragment begins on a 32-bit 19922 * aligned memory address (tcp_mdt_hdr_head is already 32-bit 19923 * aligned in tcp_mdt_update). 19924 */ 19925 hdr_frag_sz = roundup((tcp->tcp_mdt_hdr_head + tcp_hdr_len + 19926 tcp->tcp_mdt_hdr_tail), 4); 19927 19928 /* are we starting from the beginning of data block? */ 19929 if (*tail_unsent == 0) { 19930 *xmit_tail = (*xmit_tail)->b_cont; 19931 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= (uintptr_t)INT_MAX); 19932 *tail_unsent = (int)MBLKL(*xmit_tail); 19933 } 19934 19935 /* 19936 * Here we create one or more Multidata messages, each made up of 19937 * one header buffer and up to N payload buffers. This entire 19938 * operation is done within two loops: 19939 * 19940 * The outer loop mostly deals with creating the Multidata message, 19941 * as well as the header buffer that gets added to it. It also 19942 * links the Multidata messages together such that all of them can 19943 * be sent down to the lower layer in a single putnext call; this 19944 * linking behavior depends on the tcp_mdt_chain tunable. 19945 * 19946 * The inner loop takes an existing Multidata message, and adds 19947 * one or more (up to tcp_mdt_max_pld) payload buffers to it. It 19948 * packetizes those buffers by filling up the corresponding header 19949 * buffer fragments with the proper IP and TCP headers, and by 19950 * describing the layout of each packet in the packet descriptors 19951 * that get added to the Multidata. 19952 */ 19953 do { 19954 /* 19955 * If usable send window is too small, or data blocks in 19956 * transmit list are smaller than our threshold (i.e. app 19957 * performs large writes followed by small ones), we hand 19958 * off the control over to the legacy path. Note that we'll 19959 * get back the control once it encounters a large block. 19960 */ 19961 if (*usable < mss || (*tail_unsent <= mdt_thres && 19962 (*xmit_tail)->b_cont != NULL && 19963 MBLKL((*xmit_tail)->b_cont) <= mdt_thres)) { 19964 /* send down what we've got so far */ 19965 if (md_mp_head != NULL) { 19966 tcp_multisend_data(tcp, ire, ill, md_mp_head, 19967 obsegs, obbytes, &rconfirm); 19968 } 19969 /* 19970 * Pass control over to tcp_send(), but tell it to 19971 * return to us once a large-size transmission is 19972 * possible. 19973 */ 19974 TCP_STAT(tcps, tcp_mdt_legacy_small); 19975 if ((err = tcp_send(q, tcp, mss, tcp_hdr_len, 19976 tcp_tcp_hdr_len, num_sack_blk, usable, snxt, 19977 tail_unsent, xmit_tail, local_time, 19978 mdt_thres)) <= 0) { 19979 /* burst count reached, or alloc failed */ 19980 IRE_REFRELE(ire); 19981 return (err); 19982 } 19983 19984 /* tcp_send() may have sent everything, so check */ 19985 if (*usable <= 0) { 19986 IRE_REFRELE(ire); 19987 return (0); 19988 } 19989 19990 TCP_STAT(tcps, tcp_mdt_legacy_ret); 19991 /* 19992 * We may have delivered the Multidata, so make sure 19993 * to re-initialize before the next round. 19994 */ 19995 md_mp_head = NULL; 19996 obsegs = obbytes = 0; 19997 num_burst_seg = tcp->tcp_snd_burst; 19998 PREP_NEW_MULTIDATA(); 19999 20000 /* are we starting from the beginning of data block? */ 20001 if (*tail_unsent == 0) { 20002 *xmit_tail = (*xmit_tail)->b_cont; 20003 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20004 (uintptr_t)INT_MAX); 20005 *tail_unsent = (int)MBLKL(*xmit_tail); 20006 } 20007 } 20008 20009 /* 20010 * max_pld limits the number of mblks in tcp's transmit 20011 * queue that can be added to a Multidata message. 
Once 20012 * this counter reaches zero, no more additional mblks 20013 * can be added to it. What happens afterwards depends 20014 * on whether or not we are set to chain the Multidata 20015 * messages. If we are to link them together, reset 20016 * max_pld to its original value (tcp_mdt_max_pld) and 20017 * prepare to create a new Multidata message which will 20018 * get linked to md_mp_head. Else, leave it alone and 20019 * let the inner loop break on its own. 20020 */ 20021 if (tcp_mdt_chain && max_pld == 0) 20022 PREP_NEW_MULTIDATA(); 20023 20024 /* adding a payload buffer; re-initialize values */ 20025 if (add_buffer) 20026 PREP_NEW_PBUF(); 20027 20028 /* 20029 * If we don't have a Multidata, either because we just 20030 * (re)entered this outer loop, or after we branched off 20031 * to tcp_send above, setup the Multidata and header 20032 * buffer to be used. 20033 */ 20034 if (md_mp == NULL) { 20035 int md_hbuflen; 20036 uint32_t start, stuff; 20037 20038 /* 20039 * Calculate Multidata header buffer size large enough 20040 * to hold all of the headers that can possibly be 20041 * sent at this moment. We'd rather over-estimate 20042 * the size than running out of space; this is okay 20043 * since this buffer is small anyway. 20044 */ 20045 md_hbuflen = (howmany(*usable, mss) + 1) * hdr_frag_sz; 20046 20047 /* 20048 * Start and stuff offset for partial hardware 20049 * checksum offload; these are currently for IPv4. 20050 * For full checksum offload, they are set to zero. 20051 */ 20052 if ((hwcksum_flags & HCK_PARTIALCKSUM)) { 20053 if (af == AF_INET) { 20054 start = IP_SIMPLE_HDR_LENGTH; 20055 stuff = IP_SIMPLE_HDR_LENGTH + 20056 TCP_CHECKSUM_OFFSET; 20057 } else { 20058 start = IPV6_HDR_LEN; 20059 stuff = IPV6_HDR_LEN + 20060 TCP_CHECKSUM_OFFSET; 20061 } 20062 } else { 20063 start = stuff = 0; 20064 } 20065 20066 /* 20067 * Create the header buffer, Multidata, as well as 20068 * any necessary attributes (destination address, 20069 * SAP and hardware checksum offload) that should 20070 * be associated with the Multidata message. 20071 */ 20072 ASSERT(cur_hdr_off == 0); 20073 if ((md_hbuf = allocb(md_hbuflen, BPRI_HI)) == NULL || 20074 ((md_hbuf->b_wptr += md_hbuflen), 20075 (mmd = mmd_alloc(md_hbuf, &md_mp, 20076 KM_NOSLEEP)) == NULL) || (tcp_mdt_add_attrs(mmd, 20077 /* fastpath mblk */ 20078 ire->ire_nce->nce_res_mp, 20079 /* hardware checksum enabled */ 20080 (hwcksum_flags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)), 20081 /* hardware checksum offsets */ 20082 start, stuff, 0, 20083 /* hardware checksum flag */ 20084 hwcksum_flags, tcps) != 0)) { 20085 legacy_send: 20086 if (md_mp != NULL) { 20087 /* Unlink message from the chain */ 20088 if (md_mp_head != NULL) { 20089 err = (intptr_t)rmvb(md_mp_head, 20090 md_mp); 20091 /* 20092 * We can't assert that rmvb 20093 * did not return -1, since we 20094 * may get here before linkb 20095 * happens. We do, however, 20096 * check if we just removed the 20097 * only element in the list. 
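*
* Illustrative restatement (an assumption based on rmvb(9F), not
* text from the source): the unlink amounts to
*
*	err = (intptr_t)rmvb(md_mp_head, md_mp);
*
* where rmvb() returns the remaining message, NULL (0) when md_mp
* was the only block in the chain, or (mblk_t *)-1 when md_mp was
* not found -- hence the err == 0 test below to drop md_mp_head.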
20098 */
20099 if (err == 0)
20100 md_mp_head = NULL;
20101 }
20102 /* md_hbuf gets freed automatically */
20103 TCP_STAT(tcps, tcp_mdt_discarded);
20104 freeb(md_mp);
20105 } else {
20106 /* Either allocb or mmd_alloc failed */
20107 TCP_STAT(tcps, tcp_mdt_allocfail);
20108 if (md_hbuf != NULL)
20109 freeb(md_hbuf);
20110 }
20111
20112 /* send down what we've got so far */
20113 if (md_mp_head != NULL) {
20114 tcp_multisend_data(tcp, ire, ill,
20115 md_mp_head, obsegs, obbytes,
20116 &rconfirm);
20117 }
20118 legacy_send_no_md:
20119 if (ire != NULL)
20120 IRE_REFRELE(ire);
20121 /*
20122 * Too bad; let the legacy path handle this.
20123 * We specify INT_MAX for the threshold, since
20124 * we have given up on the Multidata processing
20125 * and let the old path have it all.
20126 */
20127 TCP_STAT(tcps, tcp_mdt_legacy_all);
20128 return (tcp_send(q, tcp, mss, tcp_hdr_len,
20129 tcp_tcp_hdr_len, num_sack_blk, usable,
20130 snxt, tail_unsent, xmit_tail, local_time,
20131 INT_MAX));
20132 }
20133
20134 /* link to any existing ones, if applicable */
20135 TCP_STAT(tcps, tcp_mdt_allocd);
20136 if (md_mp_head == NULL) {
20137 md_mp_head = md_mp;
20138 } else if (tcp_mdt_chain) {
20139 TCP_STAT(tcps, tcp_mdt_linked);
20140 linkb(md_mp_head, md_mp);
20141 }
20142 }
20143
20144 ASSERT(md_mp_head != NULL);
20145 ASSERT(tcp_mdt_chain || md_mp_head->b_cont == NULL);
20146 ASSERT(md_mp != NULL && mmd != NULL);
20147 ASSERT(md_hbuf != NULL);
20148
20149 /*
20150 * Packetize the transmittable portion of the data block;
20151 * each data block is essentially added to the Multidata
20152 * as a payload buffer. We also deal with adding more
20153 * than one payload buffer, which happens when the remaining
20154 * packetized portion of the current payload buffer is less
20155 * than MSS, while the next data block in the transmit queue
20156 * has enough data to make up for one. This "spillover"
20157 * case essentially creates a split-packet, where portions
20158 * of the packet's payload fragments may span across two
20159 * virtually discontiguous address blocks.
20160 */
20161 seg_len = mss;
20162 do {
20163 len = seg_len;
20164
20165 ASSERT(len > 0);
20166 ASSERT(max_pld >= 0);
20167 ASSERT(!add_buffer || cur_pld_off == 0);
20168
20169 /*
20170 * First time around for this payload buffer; note
20171 * in the case of a spillover, the following has
20172 * been done prior to adding the split-packet
20173 * descriptor to Multidata, and we don't want to
20174 * repeat the process.
20175 */
20176 if (add_buffer) {
20177 ASSERT(mmd != NULL);
20178 ASSERT(md_pbuf == NULL);
20179 ASSERT(md_pbuf_nxt == NULL);
20180 ASSERT(pbuf_idx == -1 && pbuf_idx_nxt == -1);
20181
20182 /*
20183 * Have we reached the limit? We'd get to
20184 * this case when we're not chaining the
20185 * Multidata messages together, and since
20186 * we're done, terminate this loop.
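*
* A sketch of the budget (illustrative only): PREP_NEW_MULTIDATA()
* sets max_pld = tcp->tcp_mdt_max_pld once per Multidata, each
* payload buffer added below does --max_pld, and with tcp_mdt_chain
* disabled a zero budget simply ends the inner loop; with chaining
* enabled, the outer loop instead resets the budget and links a
* fresh Multidata onto md_mp_head.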
20187 */
20188 if (max_pld == 0)
20189 break; /* done */
20190
20191 if ((md_pbuf = dupb(*xmit_tail)) == NULL) {
20192 TCP_STAT(tcps, tcp_mdt_allocfail);
20193 goto legacy_send; /* out_of_mem */
20194 }
20195
20196 if (IS_VMLOANED_MBLK(md_pbuf) && !zcopy &&
20197 zc_cap != NULL) {
20198 if (!ip_md_zcopy_attr(mmd, NULL,
20199 zc_cap->ill_zerocopy_flags)) {
20200 freeb(md_pbuf);
20201 TCP_STAT(tcps,
20202 tcp_mdt_allocfail);
20203 /* out_of_mem */
20204 goto legacy_send;
20205 }
20206 zcopy = B_TRUE;
20207 }
20208
20209 md_pbuf->b_rptr += base_pld_off;
20210
20211 /*
20212 * Add a payload buffer to the Multidata; this
20213 * operation must not fail, or otherwise our
20214 * logic in this routine is broken. There
20215 * is no memory allocation done by the
20216 * routine, so any returned failure simply
20217 * tells us that we've done something wrong.
20218 *
20219 * A failure tells us that either we're adding
20220 * the same payload buffer more than once, or
20221 * we're trying to add more buffers than
20222 * allowed (max_pld calculation is wrong).
20223 * None of the above cases should happen, and
20224 * we panic because there is either horrible
20225 * heap corruption or a programming mistake.
20226 */
20227 pbuf_idx = mmd_addpldbuf(mmd, md_pbuf);
20228 if (pbuf_idx < 0) {
20229 cmn_err(CE_PANIC, "tcp_multisend: "
20230 "payload buffer logic error "
20231 "detected for tcp %p mmd %p "
20232 "pbuf %p (%d)\n",
20233 (void *)tcp, (void *)mmd,
20234 (void *)md_pbuf, pbuf_idx);
20235 }
20236
20237 ASSERT(max_pld > 0);
20238 --max_pld;
20239 add_buffer = B_FALSE;
20240 }
20241
20242 ASSERT(md_mp_head != NULL);
20243 ASSERT(md_pbuf != NULL);
20244 ASSERT(md_pbuf_nxt == NULL);
20245 ASSERT(pbuf_idx != -1);
20246 ASSERT(pbuf_idx_nxt == -1);
20247 ASSERT(*usable > 0);
20248
20249 /*
20250 * We spill over to the next payload buffer only
20251 * if all of the following are true:
20252 *
20253 * 1. There is not enough data on the current
20254 * payload buffer to make up `len',
20255 * 2. We are allowed to send `len',
20256 * 3. The next payload buffer length is large
20257 * enough to accommodate `spill'.
20258 */
20259 if ((spill = len - *tail_unsent) > 0 &&
20260 *usable >= len &&
20261 MBLKL((*xmit_tail)->b_cont) >= spill &&
20262 max_pld > 0) {
20263 md_pbuf_nxt = dupb((*xmit_tail)->b_cont);
20264 if (md_pbuf_nxt == NULL) {
20265 TCP_STAT(tcps, tcp_mdt_allocfail);
20266 goto legacy_send; /* out_of_mem */
20267 }
20268
20269 if (IS_VMLOANED_MBLK(md_pbuf_nxt) && !zcopy &&
20270 zc_cap != NULL) {
20271 if (!ip_md_zcopy_attr(mmd, NULL,
20272 zc_cap->ill_zerocopy_flags)) {
20273 freeb(md_pbuf_nxt);
20274 TCP_STAT(tcps,
20275 tcp_mdt_allocfail);
20276 /* out_of_mem */
20277 goto legacy_send;
20278 }
20279 zcopy = B_TRUE;
20280 }
20281
20282 /*
20283 * See comments above on the first call to
20284 * mmd_addpldbuf for an explanation of the panic.
20285 */
20286 pbuf_idx_nxt = mmd_addpldbuf(mmd, md_pbuf_nxt);
20287 if (pbuf_idx_nxt < 0) {
20288 panic("tcp_multisend: "
20289 "next payload buffer logic error "
20290 "detected for tcp %p mmd %p "
20291 "pbuf %p (%d)\n",
20292 (void *)tcp, (void *)mmd,
20293 (void *)md_pbuf_nxt, pbuf_idx_nxt);
20294 }
20295
20296 ASSERT(max_pld > 0);
20297 --max_pld;
20298 } else if (spill > 0) {
20299 /*
20300 * If there's a spillover, but the following
20301 * xmit_tail couldn't give us enough octets
20302 * to reach "len", then stop the current
20303 * Multidata creation and let the legacy
20304 * tcp_send() path take over.
We don't want 20305 * to send the tiny segment as part of this 20306 * Multidata for performance reasons; instead, 20307 * we let the legacy path deal with grouping 20308 * it with the subsequent small mblks. 20309 */ 20310 if (*usable >= len && 20311 MBLKL((*xmit_tail)->b_cont) < spill) { 20312 max_pld = 0; 20313 break; /* done */ 20314 } 20315 20316 /* 20317 * We can't spillover, and we are near 20318 * the end of the current payload buffer, 20319 * so send what's left. 20320 */ 20321 ASSERT(*tail_unsent > 0); 20322 len = *tail_unsent; 20323 } 20324 20325 /* tail_unsent is negated if there is a spillover */ 20326 *tail_unsent -= len; 20327 *usable -= len; 20328 ASSERT(*usable >= 0); 20329 20330 if (*usable < mss) 20331 seg_len = *usable; 20332 /* 20333 * Sender SWS avoidance; see comments in tcp_send(); 20334 * everything else is the same, except that we only 20335 * do this here if there is no more data to be sent 20336 * following the current xmit_tail. We don't check 20337 * for 1-byte urgent data because we shouldn't get 20338 * here if TCP_URG_VALID is set. 20339 */ 20340 if (*usable > 0 && *usable < mss && 20341 ((md_pbuf_nxt == NULL && 20342 (*xmit_tail)->b_cont == NULL) || 20343 (md_pbuf_nxt != NULL && 20344 (*xmit_tail)->b_cont->b_cont == NULL)) && 20345 seg_len < (tcp->tcp_max_swnd >> 1) && 20346 (tcp->tcp_unsent - 20347 ((*snxt + len) - tcp->tcp_snxt)) > seg_len && 20348 !tcp->tcp_zero_win_probe) { 20349 if ((*snxt + len) == tcp->tcp_snxt && 20350 (*snxt + len) == tcp->tcp_suna) { 20351 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 20352 } 20353 done = B_TRUE; 20354 } 20355 20356 /* 20357 * Prime pump for IP's checksumming on our behalf; 20358 * include the adjustment for a source route if any. 20359 * Do this only for software/partial hardware checksum 20360 * offload, as this field gets zeroed out later for 20361 * the full hardware checksum offload case. 20362 */ 20363 if (!(hwcksum_flags & HCK_FULLCKSUM)) { 20364 cksum = len + tcp_tcp_hdr_len + tcp->tcp_sum; 20365 cksum = (cksum >> 16) + (cksum & 0xFFFF); 20366 U16_TO_ABE16(cksum, tcp->tcp_tcph->th_sum); 20367 } 20368 20369 U32_TO_ABE32(*snxt, tcp->tcp_tcph->th_seq); 20370 *snxt += len; 20371 20372 tcp->tcp_tcph->th_flags[0] = TH_ACK; 20373 /* 20374 * We set the PUSH bit only if TCP has no more buffered 20375 * data to be transmitted (or if sender SWS avoidance 20376 * takes place), as opposed to setting it for every 20377 * last packet in the burst. 20378 */ 20379 if (done || 20380 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) == 0) 20381 tcp->tcp_tcph->th_flags[0] |= TH_PUSH; 20382 20383 /* 20384 * Set FIN bit if this is our last segment; snxt 20385 * already includes its length, and it will not 20386 * be adjusted after this point. 20387 */ 20388 if (tcp->tcp_valid_bits == TCP_FSS_VALID && 20389 *snxt == tcp->tcp_fss) { 20390 if (!tcp->tcp_fin_acked) { 20391 tcp->tcp_tcph->th_flags[0] |= TH_FIN; 20392 BUMP_MIB(&tcps->tcps_mib, 20393 tcpOutControl); 20394 } 20395 if (!tcp->tcp_fin_sent) { 20396 tcp->tcp_fin_sent = B_TRUE; 20397 /* 20398 * tcp state must be ESTABLISHED 20399 * in order for us to get here in 20400 * the first place. 20401 */ 20402 tcp->tcp_state = TCPS_FIN_WAIT_1; 20403 20404 /* 20405 * Upon returning from this routine, 20406 * tcp_wput_data() will set tcp_snxt 20407 * to be equal to snxt + tcp_fin_sent. 20408 * This is essentially the same as 20409 * setting it to tcp_fss + 1. 
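*
* Worked example (illustrative): if the last data octet
* carries sequence number tcp_fss - 1, then after this
* segment *snxt == tcp_fss, tcp_fin_sent == B_TRUE (1),
* and tcp_snxt = *snxt + tcp_fin_sent = tcp_fss + 1,
* accounting for the FIN consuming one sequence number.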
20410 */ 20411 } 20412 } 20413 20414 tcp->tcp_last_sent_len = (ushort_t)len; 20415 20416 len += tcp_hdr_len; 20417 if (tcp->tcp_ipversion == IPV4_VERSION) 20418 tcp->tcp_ipha->ipha_length = htons(len); 20419 else 20420 tcp->tcp_ip6h->ip6_plen = htons(len - 20421 ((char *)&tcp->tcp_ip6h[1] - 20422 tcp->tcp_iphc)); 20423 20424 pkt_info->flags = (PDESC_HBUF_REF | PDESC_PBUF_REF); 20425 20426 /* setup header fragment */ 20427 PDESC_HDR_ADD(pkt_info, 20428 md_hbuf->b_rptr + cur_hdr_off, /* base */ 20429 tcp->tcp_mdt_hdr_head, /* head room */ 20430 tcp_hdr_len, /* len */ 20431 tcp->tcp_mdt_hdr_tail); /* tail room */ 20432 20433 ASSERT(pkt_info->hdr_lim - pkt_info->hdr_base == 20434 hdr_frag_sz); 20435 ASSERT(MBLKIN(md_hbuf, 20436 (pkt_info->hdr_base - md_hbuf->b_rptr), 20437 PDESC_HDRSIZE(pkt_info))); 20438 20439 /* setup first payload fragment */ 20440 PDESC_PLD_INIT(pkt_info); 20441 PDESC_PLD_SPAN_ADD(pkt_info, 20442 pbuf_idx, /* index */ 20443 md_pbuf->b_rptr + cur_pld_off, /* start */ 20444 tcp->tcp_last_sent_len); /* len */ 20445 20446 /* create a split-packet in case of a spillover */ 20447 if (md_pbuf_nxt != NULL) { 20448 ASSERT(spill > 0); 20449 ASSERT(pbuf_idx_nxt > pbuf_idx); 20450 ASSERT(!add_buffer); 20451 20452 md_pbuf = md_pbuf_nxt; 20453 md_pbuf_nxt = NULL; 20454 pbuf_idx = pbuf_idx_nxt; 20455 pbuf_idx_nxt = -1; 20456 cur_pld_off = spill; 20457 20458 /* trim out first payload fragment */ 20459 PDESC_PLD_SPAN_TRIM(pkt_info, 0, spill); 20460 20461 /* setup second payload fragment */ 20462 PDESC_PLD_SPAN_ADD(pkt_info, 20463 pbuf_idx, /* index */ 20464 md_pbuf->b_rptr, /* start */ 20465 spill); /* len */ 20466 20467 if ((*xmit_tail)->b_next == NULL) { 20468 /* 20469 * Store the lbolt used for RTT 20470 * estimation. We can only record one 20471 * timestamp per mblk so we do it when 20472 * we reach the end of the payload 20473 * buffer. Also we only take a new 20474 * timestamp sample when the previous 20475 * timed data from the same mblk has 20476 * been ack'ed. 20477 */ 20478 (*xmit_tail)->b_prev = local_time; 20479 (*xmit_tail)->b_next = 20480 (mblk_t *)(uintptr_t)first_snxt; 20481 } 20482 20483 first_snxt = *snxt - spill; 20484 20485 /* 20486 * Advance xmit_tail; usable could be 0 by 20487 * the time we got here, but we made sure 20488 * above that we would only spillover to 20489 * the next data block if usable includes 20490 * the spilled-over amount prior to the 20491 * subtraction. Therefore, we are sure 20492 * that xmit_tail->b_cont can't be NULL. 20493 */ 20494 ASSERT((*xmit_tail)->b_cont != NULL); 20495 *xmit_tail = (*xmit_tail)->b_cont; 20496 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20497 (uintptr_t)INT_MAX); 20498 *tail_unsent = (int)MBLKL(*xmit_tail) - spill; 20499 } else { 20500 cur_pld_off += tcp->tcp_last_sent_len; 20501 } 20502 20503 /* 20504 * Fill in the header using the template header, and 20505 * add options such as time-stamp, ECN and/or SACK, 20506 * as needed. 20507 */ 20508 tcp_fill_header(tcp, pkt_info->hdr_rptr, 20509 (clock_t)local_time, num_sack_blk); 20510 20511 /* take care of some IP header businesses */ 20512 if (af == AF_INET) { 20513 ipha = (ipha_t *)pkt_info->hdr_rptr; 20514 20515 ASSERT(OK_32PTR((uchar_t *)ipha)); 20516 ASSERT(PDESC_HDRL(pkt_info) >= 20517 IP_SIMPLE_HDR_LENGTH); 20518 ASSERT(ipha->ipha_version_and_hdr_length == 20519 IP_SIMPLE_HDR_VERSION); 20520 20521 /* 20522 * Assign ident value for current packet; see 20523 * related comments in ip_wput_ire() about the 20524 * contract private interface with clustering 20525 * group. 
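*
* Sketch of the non-clustered case below (illustrative
* restatement of the code, not new behavior): each packet
* draws the next value from a per-IRE counter,
*
*	ipha->ipha_ident =
*	    (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1);
*
* and on little-endian machines the subsequent byte swap stores
* it in network byte order. Only uniqueness matters here, since
* the ident is consumed by IP fragment reassembly.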
20526 */
20527 clusterwide = B_FALSE;
20528 if (cl_inet_ipident != NULL) {
20529 ASSERT(cl_inet_isclusterwide != NULL);
20530 if ((*cl_inet_isclusterwide)(IPPROTO_IP,
20531 AF_INET,
20532 (uint8_t *)(uintptr_t)src)) {
20533 ipha->ipha_ident =
20534 (*cl_inet_ipident)
20535 (IPPROTO_IP, AF_INET,
20536 (uint8_t *)(uintptr_t)src,
20537 (uint8_t *)(uintptr_t)dst);
20538 clusterwide = B_TRUE;
20539 }
20540 }
20541
20542 if (!clusterwide) {
20543 ipha->ipha_ident = (uint16_t)
20544 atomic_add_32_nv(
20545 &ire->ire_ident, 1);
20546 }
20547 #ifndef _BIG_ENDIAN
20548 ipha->ipha_ident = (ipha->ipha_ident << 8) |
20549 (ipha->ipha_ident >> 8);
20550 #endif
20551 } else {
20552 ip6h = (ip6_t *)pkt_info->hdr_rptr;
20553
20554 ASSERT(OK_32PTR((uchar_t *)ip6h));
20555 ASSERT(IPVER(ip6h) == IPV6_VERSION);
20556 ASSERT(ip6h->ip6_nxt == IPPROTO_TCP);
20557 ASSERT(PDESC_HDRL(pkt_info) >=
20558 (IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET +
20559 TCP_CHECKSUM_SIZE));
20560 ASSERT(tcp->tcp_ipversion == IPV6_VERSION);
20561
20562 if (tcp->tcp_ip_forward_progress) {
20563 rconfirm = B_TRUE;
20564 tcp->tcp_ip_forward_progress = B_FALSE;
20565 }
20566 }
20567
20568 /* at least one payload span, and at most two */
20569 ASSERT(pkt_info->pld_cnt > 0 && pkt_info->pld_cnt < 3);
20570
20571 /* add the packet descriptor to Multidata */
20572 if ((pkt = mmd_addpdesc(mmd, pkt_info, &err,
20573 KM_NOSLEEP)) == NULL) {
20574 /*
20575 * Any failure other than ENOMEM indicates
20576 * that we have passed in invalid pkt_info
20577 * or parameters to mmd_addpdesc, which must
20578 * not happen.
20579 *
20580 * EINVAL is a result of failure on boundary
20581 * checks against the pkt_info contents. It
20582 * should not happen, and we panic because
20583 * there is either horrible heap corruption
20584 * or a programming mistake.
20585 */ 20586 if (err != ENOMEM) { 20587 cmn_err(CE_PANIC, "tcp_multisend: " 20588 "pdesc logic error detected for " 20589 "tcp %p mmd %p pinfo %p (%d)\n", 20590 (void *)tcp, (void *)mmd, 20591 (void *)pkt_info, err); 20592 } 20593 TCP_STAT(tcps, tcp_mdt_addpdescfail); 20594 goto legacy_send; /* out_of_mem */ 20595 } 20596 ASSERT(pkt != NULL); 20597 20598 /* calculate IP header and TCP checksums */ 20599 if (af == AF_INET) { 20600 /* calculate pseudo-header checksum */ 20601 cksum = (dst >> 16) + (dst & 0xFFFF) + 20602 (src >> 16) + (src & 0xFFFF); 20603 20604 /* offset for TCP header checksum */ 20605 up = IPH_TCPH_CHECKSUMP(ipha, 20606 IP_SIMPLE_HDR_LENGTH); 20607 } else { 20608 up = (uint16_t *)&ip6h->ip6_src; 20609 20610 /* calculate pseudo-header checksum */ 20611 cksum = up[0] + up[1] + up[2] + up[3] + 20612 up[4] + up[5] + up[6] + up[7] + 20613 up[8] + up[9] + up[10] + up[11] + 20614 up[12] + up[13] + up[14] + up[15]; 20615 20616 /* Fold the initial sum */ 20617 cksum = (cksum & 0xffff) + (cksum >> 16); 20618 20619 up = (uint16_t *)(((uchar_t *)ip6h) + 20620 IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET); 20621 } 20622 20623 if (hwcksum_flags & HCK_FULLCKSUM) { 20624 /* clear checksum field for hardware */ 20625 *up = 0; 20626 } else if (hwcksum_flags & HCK_PARTIALCKSUM) { 20627 uint32_t sum; 20628 20629 /* pseudo-header checksumming */ 20630 sum = *up + cksum + IP_TCP_CSUM_COMP; 20631 sum = (sum & 0xFFFF) + (sum >> 16); 20632 *up = (sum & 0xFFFF) + (sum >> 16); 20633 } else { 20634 /* software checksumming */ 20635 TCP_STAT(tcps, tcp_out_sw_cksum); 20636 TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes, 20637 tcp->tcp_hdr_len + tcp->tcp_last_sent_len); 20638 *up = IP_MD_CSUM(pkt, tcp->tcp_ip_hdr_len, 20639 cksum + IP_TCP_CSUM_COMP); 20640 if (*up == 0) 20641 *up = 0xFFFF; 20642 } 20643 20644 /* IPv4 header checksum */ 20645 if (af == AF_INET) { 20646 ipha->ipha_fragment_offset_and_flags |= 20647 (uint32_t)htons(ire->ire_frag_flag); 20648 20649 if (hwcksum_flags & HCK_IPV4_HDRCKSUM) { 20650 ipha->ipha_hdr_checksum = 0; 20651 } else { 20652 IP_HDR_CKSUM(ipha, cksum, 20653 ((uint32_t *)ipha)[0], 20654 ((uint16_t *)ipha)[4]); 20655 } 20656 } 20657 20658 if (af == AF_INET && 20659 HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) || 20660 af == AF_INET6 && 20661 HOOKS6_INTERESTED_PHYSICAL_OUT(ipst)) { 20662 /* build header(IP/TCP) mblk for this segment */ 20663 if ((mp = dupb(md_hbuf)) == NULL) 20664 goto legacy_send; 20665 20666 mp->b_rptr = pkt_info->hdr_rptr; 20667 mp->b_wptr = pkt_info->hdr_wptr; 20668 20669 /* build payload mblk for this segment */ 20670 if ((mp1 = dupb(*xmit_tail)) == NULL) { 20671 freemsg(mp); 20672 goto legacy_send; 20673 } 20674 mp1->b_wptr = md_pbuf->b_rptr + cur_pld_off; 20675 mp1->b_rptr = mp1->b_wptr - 20676 tcp->tcp_last_sent_len; 20677 linkb(mp, mp1); 20678 20679 pld_start = mp1->b_rptr; 20680 20681 if (af == AF_INET) { 20682 DTRACE_PROBE4( 20683 ip4__physical__out__start, 20684 ill_t *, NULL, 20685 ill_t *, ill, 20686 ipha_t *, ipha, 20687 mblk_t *, mp); 20688 FW_HOOKS( 20689 ipst->ips_ip4_physical_out_event, 20690 ipst->ips_ipv4firewall_physical_out, 20691 NULL, ill, ipha, mp, mp, 0, ipst); 20692 DTRACE_PROBE1( 20693 ip4__physical__out__end, 20694 mblk_t *, mp); 20695 } else { 20696 DTRACE_PROBE4( 20697 ip6__physical__out_start, 20698 ill_t *, NULL, 20699 ill_t *, ill, 20700 ip6_t *, ip6h, 20701 mblk_t *, mp); 20702 FW_HOOKS6( 20703 ipst->ips_ip6_physical_out_event, 20704 ipst->ips_ipv6firewall_physical_out, 20705 NULL, ill, ip6h, mp, mp, 0, ipst); 20706 DTRACE_PROBE1( 20707 
ip6__physical__out__end,
20708 mblk_t *, mp);
20709 }
20710
20711 if (buf_trunked && mp != NULL) {
20712 /*
20713 * Need to pass it to the normal path.
20714 */
20715 CALL_IP_WPUT(tcp->tcp_connp, q, mp);
20716 } else if (mp == NULL ||
20717 mp->b_rptr != pkt_info->hdr_rptr ||
20718 mp->b_wptr != pkt_info->hdr_wptr ||
20719 (mp1 = mp->b_cont) == NULL ||
20720 mp1->b_rptr != pld_start ||
20721 mp1->b_wptr != pld_start +
20722 tcp->tcp_last_sent_len ||
20723 mp1->b_cont != NULL) {
20724 /*
20725 * Need to pass all packets of this
20726 * buffer to the normal path, either when
20727 * the packet is blocked, or when the
20728 * boundary of the header or payload buffer
20729 * has been changed by FW_HOOKS[6].
20730 */
20731 buf_trunked = B_TRUE;
20732 if (md_mp_head != NULL) {
20733 err = (intptr_t)rmvb(md_mp_head,
20734 md_mp);
20735 if (err == 0)
20736 md_mp_head = NULL;
20737 }
20738
20739 /* send down what we've got so far */
20740 if (md_mp_head != NULL) {
20741 tcp_multisend_data(tcp, ire,
20742 ill, md_mp_head, obsegs,
20743 obbytes, &rconfirm);
20744 }
20745 md_mp_head = NULL;
20746
20747 if (mp != NULL)
20748 CALL_IP_WPUT(tcp->tcp_connp,
20749 q, mp);
20750
20751 mp1 = fw_mp_head;
20752 do {
20753 mp = mp1;
20754 mp1 = mp1->b_next;
20755 mp->b_next = NULL;
20756 mp->b_prev = NULL;
20757 CALL_IP_WPUT(tcp->tcp_connp,
20758 q, mp);
20759 } while (mp1 != NULL);
20760
20761 fw_mp_head = NULL;
20762 } else {
20763 if (fw_mp_head == NULL)
20764 fw_mp_head = mp;
20765 else
20766 fw_mp_head->b_prev->b_next = mp;
20767 fw_mp_head->b_prev = mp;
20768 }
20769 }
20770
20771 /* advance header offset */
20772 cur_hdr_off += hdr_frag_sz;
20773
20774 obbytes += tcp->tcp_last_sent_len;
20775 ++obsegs;
20776 } while (!done && *usable > 0 && --num_burst_seg > 0 &&
20777 *tail_unsent > 0);
20778
20779 if ((*xmit_tail)->b_next == NULL) {
20780 /*
20781 * Store the lbolt used for RTT estimation. We can only
20782 * record one timestamp per mblk so we do it when we
20783 * reach the end of the payload buffer. Also we only
20784 * take a new timestamp sample when the previous timed
20785 * data from the same mblk has been ack'ed.
20786 */
20787 (*xmit_tail)->b_prev = local_time;
20788 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)first_snxt;
20789 }
20790
20791 ASSERT(*tail_unsent >= 0);
20792 if (*tail_unsent > 0) {
20793 /*
20794 * We got here because we broke out of the above
20795 * loop due to one of the following cases:
20796 *
20797 * 1. len < adjusted MSS (i.e. small),
20798 * 2. Sender SWS avoidance,
20799 * 3. max_pld is zero.
20800 *
20801 * We are done for this Multidata, so trim our
20802 * last payload buffer (if any) accordingly.
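*
* Illustrative numbers (not from the source): if the payload
* buffer spans 8192 bytes but only 5840 (four 1460-byte
* segments) were packetized, *tail_unsent == 2352 and the trim
* below, md_pbuf->b_wptr -= *tail_unsent, keeps the Multidata
* from referencing octets never described by any packet
* descriptor.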
20803 */ 20804 if (md_pbuf != NULL) 20805 md_pbuf->b_wptr -= *tail_unsent; 20806 } else if (*usable > 0) { 20807 *xmit_tail = (*xmit_tail)->b_cont; 20808 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20809 (uintptr_t)INT_MAX); 20810 *tail_unsent = (int)MBLKL(*xmit_tail); 20811 add_buffer = B_TRUE; 20812 } 20813 20814 while (fw_mp_head) { 20815 mp = fw_mp_head; 20816 fw_mp_head = fw_mp_head->b_next; 20817 mp->b_prev = mp->b_next = NULL; 20818 freemsg(mp); 20819 } 20820 if (buf_trunked) { 20821 TCP_STAT(tcps, tcp_mdt_discarded); 20822 freeb(md_mp); 20823 buf_trunked = B_FALSE; 20824 } 20825 } while (!done && *usable > 0 && num_burst_seg > 0 && 20826 (tcp_mdt_chain || max_pld > 0)); 20827 20828 if (md_mp_head != NULL) { 20829 /* send everything down */ 20830 tcp_multisend_data(tcp, ire, ill, md_mp_head, obsegs, obbytes, 20831 &rconfirm); 20832 } 20833 20834 #undef PREP_NEW_MULTIDATA 20835 #undef PREP_NEW_PBUF 20836 #undef IPVER 20837 20838 IRE_REFRELE(ire); 20839 return (0); 20840 } 20841 20842 /* 20843 * A wrapper function for sending one or more Multidata messages down to 20844 * the module below ip; this routine does not release the reference of the 20845 * IRE (caller does that). This routine is analogous to tcp_send_data(). 20846 */ 20847 static void 20848 tcp_multisend_data(tcp_t *tcp, ire_t *ire, const ill_t *ill, mblk_t *md_mp_head, 20849 const uint_t obsegs, const uint_t obbytes, boolean_t *rconfirm) 20850 { 20851 uint64_t delta; 20852 nce_t *nce; 20853 tcp_stack_t *tcps = tcp->tcp_tcps; 20854 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 20855 20856 ASSERT(ire != NULL && ill != NULL); 20857 ASSERT(ire->ire_stq != NULL); 20858 ASSERT(md_mp_head != NULL); 20859 ASSERT(rconfirm != NULL); 20860 20861 /* adjust MIBs and IRE timestamp */ 20862 TCP_RECORD_TRACE(tcp, md_mp_head, TCP_TRACE_SEND_PKT); 20863 tcp->tcp_obsegs += obsegs; 20864 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataSegs, obsegs); 20865 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, obbytes); 20866 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out, obsegs); 20867 20868 if (tcp->tcp_ipversion == IPV4_VERSION) { 20869 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v4, obsegs); 20870 } else { 20871 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v6, obsegs); 20872 } 20873 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests, obsegs); 20874 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits, obsegs); 20875 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets, obbytes); 20876 20877 ire->ire_ob_pkt_count += obsegs; 20878 if (ire->ire_ipif != NULL) 20879 atomic_add_32(&ire->ire_ipif->ipif_ob_pkt_count, obsegs); 20880 ire->ire_last_used_time = lbolt; 20881 20882 /* send it down */ 20883 if (ILL_DLS_CAPABLE(ill)) { 20884 ill_dls_capab_t *ill_dls = ill->ill_dls_capab; 20885 ill_dls->ill_tx(ill_dls->ill_tx_handle, md_mp_head); 20886 } else { 20887 putnext(ire->ire_stq, md_mp_head); 20888 } 20889 20890 /* we're done for TCP/IPv4 */ 20891 if (tcp->tcp_ipversion == IPV4_VERSION) 20892 return; 20893 20894 nce = ire->ire_nce; 20895 20896 ASSERT(nce != NULL); 20897 ASSERT(!(nce->nce_flags & (NCE_F_NONUD|NCE_F_PERMANENT))); 20898 ASSERT(nce->nce_state != ND_INCOMPLETE); 20899 20900 /* reachability confirmation? 
*/ 20901 if (*rconfirm) { 20902 nce->nce_last = TICK_TO_MSEC(lbolt64); 20903 if (nce->nce_state != ND_REACHABLE) { 20904 mutex_enter(&nce->nce_lock); 20905 nce->nce_state = ND_REACHABLE; 20906 nce->nce_pcnt = ND_MAX_UNICAST_SOLICIT; 20907 mutex_exit(&nce->nce_lock); 20908 (void) untimeout(nce->nce_timeout_id); 20909 if (ip_debug > 2) { 20910 /* ip1dbg */ 20911 pr_addr_dbg("tcp_multisend_data: state " 20912 "for %s changed to REACHABLE\n", 20913 AF_INET6, &ire->ire_addr_v6); 20914 } 20915 } 20916 /* reset transport reachability confirmation */ 20917 *rconfirm = B_FALSE; 20918 } 20919 20920 delta = TICK_TO_MSEC(lbolt64) - nce->nce_last; 20921 ip1dbg(("tcp_multisend_data: delta = %" PRId64 20922 " ill_reachable_time = %d \n", delta, ill->ill_reachable_time)); 20923 20924 if (delta > (uint64_t)ill->ill_reachable_time) { 20925 mutex_enter(&nce->nce_lock); 20926 switch (nce->nce_state) { 20927 case ND_REACHABLE: 20928 case ND_STALE: 20929 /* 20930 * ND_REACHABLE is identical to ND_STALE in this 20931 * specific case. If reachable time has expired for 20932 * this neighbor (delta is greater than reachable 20933 * time), conceptually, the neighbor cache is no 20934 * longer in REACHABLE state, but already in STALE 20935 * state. So the correct transition here is to 20936 * ND_DELAY. 20937 */ 20938 nce->nce_state = ND_DELAY; 20939 mutex_exit(&nce->nce_lock); 20940 NDP_RESTART_TIMER(nce, 20941 ipst->ips_delay_first_probe_time); 20942 if (ip_debug > 3) { 20943 /* ip2dbg */ 20944 pr_addr_dbg("tcp_multisend_data: state " 20945 "for %s changed to DELAY\n", 20946 AF_INET6, &ire->ire_addr_v6); 20947 } 20948 break; 20949 case ND_DELAY: 20950 case ND_PROBE: 20951 mutex_exit(&nce->nce_lock); 20952 /* Timers have already started */ 20953 break; 20954 case ND_UNREACHABLE: 20955 /* 20956 * ndp timer has detected that this nce is 20957 * unreachable and initiated deleting this nce 20958 * and all its associated IREs. This is a race 20959 * where we found the ire before it was deleted 20960 * and have just sent out a packet using this 20961 * unreachable nce. 20962 */ 20963 mutex_exit(&nce->nce_lock); 20964 break; 20965 default: 20966 ASSERT(0); 20967 } 20968 } 20969 } 20970 20971 /* 20972 * Derived from tcp_send_data(). 
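*
* Unlike tcp_send_data(), the single mblk handed down here covers
* up to tcp_lso_max bytes and the NIC performs the segmentation.
* The request rides on the dblk (a restatement mirroring the code
* below):
*
*	DB_LSOFLAGS(mp) |= HW_LSO;	/* hardware segmentation */
*	DB_LSOMSS(mp) = mss;		/* at this MSS granularity */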
20973 */
20974 static void
20975 tcp_lsosend_data(tcp_t *tcp, mblk_t *mp, ire_t *ire, ill_t *ill, const int mss,
20976 int num_lso_seg)
20977 {
20978 ipha_t *ipha;
20979 mblk_t *ire_fp_mp;
20980 uint_t ire_fp_mp_len;
20981 uint32_t hcksum_txflags = 0;
20982 ipaddr_t src;
20983 ipaddr_t dst;
20984 uint32_t cksum;
20985 uint16_t *up;
20986 tcp_stack_t *tcps = tcp->tcp_tcps;
20987 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
20988
20989 ASSERT(DB_TYPE(mp) == M_DATA);
20990 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED);
20991 ASSERT(tcp->tcp_ipversion == IPV4_VERSION);
20992 ASSERT(tcp->tcp_connp != NULL);
20993 ASSERT(CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp));
20994
20995 ipha = (ipha_t *)mp->b_rptr;
20996 src = ipha->ipha_src;
20997 dst = ipha->ipha_dst;
20998
20999 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED);
21000 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident,
21001 num_lso_seg);
21002 #ifndef _BIG_ENDIAN
21003 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8);
21004 #endif
21005 if (tcp->tcp_snd_zcopy_aware) {
21006 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 ||
21007 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0))
21008 mp = tcp_zcopy_disable(tcp, mp);
21009 }
21010
21011 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) {
21012 ASSERT(ill->ill_hcksum_capab != NULL);
21013 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
21014 }
21015
21016 /*
21017 * Since the TCP checksum should be recalculated by h/w, we can just
21018 * zero the checksum field for HCK_FULLCKSUM, or calculate the partial
21019 * pseudo-header checksum for HCK_PARTIALCKSUM.
21020 * The partial pseudo-header excludes the TCP length, which was
21021 * calculated in tcp_send(), so we zero *up before further processing.
21022 */
21023 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF);
21024
21025 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
21026 *up = 0;
21027
21028 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up,
21029 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum);
21030
21031 /*
21032 * Append the LSO flag to DB_LSOFLAGS(mp) and set the mss in
21033 * DB_LSOMSS(mp).
21034 */
21034 DB_LSOFLAGS(mp) |= HW_LSO;
21035 DB_LSOMSS(mp) = mss;
21036
21037 ipha->ipha_fragment_offset_and_flags |=
21038 (uint32_t)htons(ire->ire_frag_flag);
21039
21040 ire_fp_mp = ire->ire_nce->nce_fp_mp;
21041 ire_fp_mp_len = MBLKL(ire_fp_mp);
21042 ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
21043 mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
21044 bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
21045
21046 UPDATE_OB_PKT_COUNT(ire);
21047 ire->ire_last_used_time = lbolt;
21048 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
21049 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
21050 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
21051 ntohs(ipha->ipha_length));
21052
21053 if (ILL_DLS_CAPABLE(ill)) {
21054 /*
21055 * Send the packet directly to DLD, where it may be queued
21056 * depending on the availability of transmit resources at
21057 * the media layer.
21058 */
21059 IP_DLS_ILL_TX(ill, ipha, mp, ipst);
21060 } else {
21061 ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
21062 DTRACE_PROBE4(ip4__physical__out__start,
21063 ill_t *, NULL, ill_t *, out_ill,
21064 ipha_t *, ipha, mblk_t *, mp);
21065 FW_HOOKS(ipst->ips_ip4_physical_out_event,
21066 ipst->ips_ipv4firewall_physical_out,
21067 NULL, out_ill, ipha, mp, mp, 0, ipst);
21068 DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
21069 if (mp != NULL)
21070 putnext(ire->ire_stq, mp);
21071 }
21072 }
21073
21074 /*
21075 * tcp_send() is called by tcp_wput_data() for the non-Multidata
21076 * transmission scheme, and returns one of the following:
21077 *
21078 * -1 = failed allocation.
21079 * 0 = success; burst count reached, or the usable send window is too small,
21080 * and we'd rather wait until later before sending again.
21081 * 1 = success; we are called from tcp_multisend(), and both the usable send
21082 * window and tail_unsent are greater than the MDT threshold, and thus
21083 * Multidata Transmit should be used instead.
21084 */
21085 static int
21086 tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
21087 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable,
21088 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
21089 const int mdt_thres)
21090 {
21091 int num_burst_seg = tcp->tcp_snd_burst;
21092 ire_t *ire = NULL;
21093 ill_t *ill = NULL;
21094 mblk_t *ire_fp_mp = NULL;
21095 uint_t ire_fp_mp_len = 0;
21096 int num_lso_seg = 1;
21097 uint_t lso_usable;
21098 boolean_t do_lso_send = B_FALSE;
21099 tcp_stack_t *tcps = tcp->tcp_tcps;
21100
21101 /*
21102 * Check LSO capability before any further work; a similar check
21103 * needs to be done in the for (;;) loop below.
21104 * LSO will be deployed when there is more than one MSS of available
21105 * data and a burst transmission is allowed.
21106 */
21107 if (tcp->tcp_lso &&
21108 (tcp->tcp_valid_bits == 0 ||
21109 tcp->tcp_valid_bits == TCP_FSS_VALID) &&
21110 num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
21111 /*
21112 * Try to find a usable IRE/ILL and do basic checks on the ILL.
21113 */
21114 if (tcp_send_find_ire_ill(tcp, NULL, &ire, &ill)) {
21115 /*
21116 * Enable LSO for this transmission.
21117 * Since the IRE has been held in
21118 * tcp_send_find_ire_ill(), IRE_REFRELE(ire)
21119 * must be called before returning.
21120 */
21121 do_lso_send = B_TRUE;
21122 ire_fp_mp = ire->ire_nce->nce_fp_mp;
21123 ire_fp_mp_len = MBLKL(ire_fp_mp);
21124 /* Round up to multiple of 4 */
21125 ire_fp_mp_len = ((ire_fp_mp_len + 3) / 4) * 4;
21126 } else {
21127 do_lso_send = B_FALSE;
21128 ill = NULL;
21129 }
21130 }
21131
21132 for (;;) {
21133 struct datab *db;
21134 tcph_t *tcph;
21135 uint32_t sum;
21136 mblk_t *mp, *mp1;
21137 uchar_t *rptr;
21138 int len;
21139
21140 /*
21141 * If we're called by tcp_multisend(), and the amount of
21142 * sendable data as well as the size of the current xmit_tail
21143 * is beyond the MDT threshold, return to the caller and
21144 * let the large data transmit be done using MDT.
21145 */
21146 if (*usable > 0 && *usable > mdt_thres &&
21147 (*tail_unsent > mdt_thres || (*tail_unsent == 0 &&
21148 MBLKL((*xmit_tail)->b_cont) > mdt_thres))) {
21149 ASSERT(tcp->tcp_mdt);
21150 return (1); /* success; do large send */
21151 }
21152
21153 if (num_burst_seg == 0)
21154 break; /* success; burst count reached */
21155
21156 /*
21157 * Calculate the maximum payload length we can send in *one*
21158 * time.
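*
* Worked example (illustrative figures): with mss = 1460,
* *usable = 10000, tcp_lso_max = 65535 and num_burst_seg = 10,
*
*	lso_usable = MIN(65535, 10000) = 10000
*	lso_usable = MIN(10000, 10 * 1460) = 10000
*	num_lso_seg = 10000 / 1460 = 6, remainder 1240
*
* so num_lso_seg is bumped to 7 and tcp_last_sent_len = 1240.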
21159 */
21160 if (do_lso_send) {
21161 /*
21162 * Check whether we still need to do LSO.
21163 */
21164 if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
21165 lso_usable = MIN(tcp->tcp_lso_max, *usable);
21166 lso_usable = MIN(lso_usable,
21167 num_burst_seg * mss);
21168
21169 num_lso_seg = lso_usable / mss;
21170 if (lso_usable % mss) {
21171 num_lso_seg++;
21172 tcp->tcp_last_sent_len = (ushort_t)
21173 (lso_usable % mss);
21174 } else {
21175 tcp->tcp_last_sent_len = (ushort_t)mss;
21176 }
21177 } else {
21178 do_lso_send = B_FALSE;
21179 num_lso_seg = 1;
21180 lso_usable = mss;
21181 }
21182 }
21183
21184 ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
21185
21186 /*
21187 * Adjust num_burst_seg here.
21188 */
21189 num_burst_seg -= num_lso_seg;
21190
21191 len = mss;
21192 if (len > *usable) {
21193 ASSERT(do_lso_send == B_FALSE);
21194
21195 len = *usable;
21196 if (len <= 0) {
21197 /* Terminate the loop */
21198 break; /* success; too small */
21199 }
21200 /*
21201 * Sender silly-window avoidance.
21202 * Ignore this if we are going to send a
21203 * zero window probe out.
21204 *
21205 * TODO: force data into microscopic window?
21206 * ==> (!pushed || (unsent > usable))
21207 */
21208 if (len < (tcp->tcp_max_swnd >> 1) &&
21209 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
21210 !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
21211 len == 1) && (! tcp->tcp_zero_win_probe)) {
21212 /*
21213 * If the retransmit timer is not running
21214 * we start it so that we will retransmit
21215 * in the case when the receiver has
21216 * decremented the window.
21217 */
21218 if (*snxt == tcp->tcp_snxt &&
21219 *snxt == tcp->tcp_suna) {
21220 /*
21221 * We are not supposed to send
21222 * anything. So let's wait a little
21223 * bit longer before breaking SWS
21224 * avoidance.
21225 *
21226 * What should the value be?
21227 * Suggestion: MAX(init rexmit time,
21228 * tcp->tcp_rto)
21229 */
21230 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
21231 }
21232 break; /* success; too small */
21233 }
21234 }
21235
21236 tcph = tcp->tcp_tcph;
21237
21238 /*
21239 * We adjust len here because we need it to set the flags
21240 * and calculate the checksum.
21241 */
21242 if (do_lso_send)
21243 len = lso_usable;
21244
21245 *usable -= len; /* Approximate - can be adjusted later */
21246 if (*usable > 0)
21247 tcph->th_flags[0] = TH_ACK;
21248 else
21249 tcph->th_flags[0] = (TH_ACK | TH_PUSH);
21250
21251 /*
21252 * Prime pump for IP's checksumming on our behalf;
21253 * include the adjustment for a source route if any.
21254 */
21255 sum = len + tcp_tcp_hdr_len + tcp->tcp_sum;
21256 sum = (sum >> 16) + (sum & 0xFFFF);
21257 U16_TO_ABE16(sum, tcph->th_sum);
21258
21259 U32_TO_ABE32(*snxt, tcph->th_seq);
21260
21261 /*
21262 * Branch off to tcp_xmit_mp() if any of the VALID bits is
21263 * set. For the case when TCP_FSS_VALID is the only valid
21264 * bit (normal active close), branch off only when we think
21265 * that the FIN flag needs to be set. Note for this case,
21266 * that (snxt + len) may not reflect the actual seg_len,
21267 * as len may be further reduced in tcp_xmit_mp(). If len
21268 * gets modified, we will end up here again.
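*
* Worked example (illustrative): with tcp_valid_bits ==
* TCP_FSS_VALID, tcp_fss = 5000, *snxt = 3540 and len = 1460,
* *snxt + len == tcp_fss, so we branch to tcp_xmit_mp() below
* to get the FIN flag set on this final segment.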
21269 */ 21270 if (tcp->tcp_valid_bits != 0 && 21271 (tcp->tcp_valid_bits != TCP_FSS_VALID || 21272 ((*snxt + len) == tcp->tcp_fss))) { 21273 uchar_t *prev_rptr; 21274 uint32_t prev_snxt = tcp->tcp_snxt; 21275 21276 if (*tail_unsent == 0) { 21277 ASSERT((*xmit_tail)->b_cont != NULL); 21278 *xmit_tail = (*xmit_tail)->b_cont; 21279 prev_rptr = (*xmit_tail)->b_rptr; 21280 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21281 (*xmit_tail)->b_rptr); 21282 } else { 21283 prev_rptr = (*xmit_tail)->b_rptr; 21284 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr - 21285 *tail_unsent; 21286 } 21287 mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL, 21288 *snxt, B_FALSE, (uint32_t *)&len, B_FALSE); 21289 /* Restore tcp_snxt so we get amount sent right. */ 21290 tcp->tcp_snxt = prev_snxt; 21291 if (prev_rptr == (*xmit_tail)->b_rptr) { 21292 /* 21293 * If the previous timestamp is still in use, 21294 * don't stomp on it. 21295 */ 21296 if ((*xmit_tail)->b_next == NULL) { 21297 (*xmit_tail)->b_prev = local_time; 21298 (*xmit_tail)->b_next = 21299 (mblk_t *)(uintptr_t)(*snxt); 21300 } 21301 } else 21302 (*xmit_tail)->b_rptr = prev_rptr; 21303 21304 if (mp == NULL) { 21305 if (ire != NULL) 21306 IRE_REFRELE(ire); 21307 return (-1); 21308 } 21309 mp1 = mp->b_cont; 21310 21311 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21312 tcp->tcp_last_sent_len = (ushort_t)len; 21313 while (mp1->b_cont) { 21314 *xmit_tail = (*xmit_tail)->b_cont; 21315 (*xmit_tail)->b_prev = local_time; 21316 (*xmit_tail)->b_next = 21317 (mblk_t *)(uintptr_t)(*snxt); 21318 mp1 = mp1->b_cont; 21319 } 21320 *snxt += len; 21321 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr; 21322 BUMP_LOCAL(tcp->tcp_obsegs); 21323 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21324 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21325 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21326 tcp_send_data(tcp, q, mp); 21327 continue; 21328 } 21329 21330 *snxt += len; /* Adjust later if we don't send all of len */ 21331 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21332 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21333 21334 if (*tail_unsent) { 21335 /* Are the bytes above us in flight? */ 21336 rptr = (*xmit_tail)->b_wptr - *tail_unsent; 21337 if (rptr != (*xmit_tail)->b_rptr) { 21338 *tail_unsent -= len; 21339 if (len <= mss) /* LSO is unusable */ 21340 tcp->tcp_last_sent_len = (ushort_t)len; 21341 len += tcp_hdr_len; 21342 if (tcp->tcp_ipversion == IPV4_VERSION) 21343 tcp->tcp_ipha->ipha_length = htons(len); 21344 else 21345 tcp->tcp_ip6h->ip6_plen = 21346 htons(len - 21347 ((char *)&tcp->tcp_ip6h[1] - 21348 tcp->tcp_iphc)); 21349 mp = dupb(*xmit_tail); 21350 if (mp == NULL) { 21351 if (ire != NULL) 21352 IRE_REFRELE(ire); 21353 return (-1); /* out_of_mem */ 21354 } 21355 mp->b_rptr = rptr; 21356 /* 21357 * If the old timestamp is no longer in use, 21358 * sample a new timestamp now. 
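*
* The stashing convention (restated for clarity): for an mblk on
* the transmit list, b_prev holds the lbolt at which its data was
* last sent and b_next holds the starting sequence number cast
* through uintptr_t, so b_next == NULL means no timed data from
* this mblk is still awaiting an ACK, and a fresh RTT sample may
* be taken.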
21359 */ 21360 if ((*xmit_tail)->b_next == NULL) { 21361 (*xmit_tail)->b_prev = local_time; 21362 (*xmit_tail)->b_next = 21363 (mblk_t *)(uintptr_t)(*snxt-len); 21364 } 21365 goto must_alloc; 21366 } 21367 } else { 21368 *xmit_tail = (*xmit_tail)->b_cont; 21369 ASSERT((uintptr_t)((*xmit_tail)->b_wptr - 21370 (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX); 21371 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21372 (*xmit_tail)->b_rptr); 21373 } 21374 21375 (*xmit_tail)->b_prev = local_time; 21376 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len); 21377 21378 *tail_unsent -= len; 21379 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21380 tcp->tcp_last_sent_len = (ushort_t)len; 21381 21382 len += tcp_hdr_len; 21383 if (tcp->tcp_ipversion == IPV4_VERSION) 21384 tcp->tcp_ipha->ipha_length = htons(len); 21385 else 21386 tcp->tcp_ip6h->ip6_plen = htons(len - 21387 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 21388 21389 mp = dupb(*xmit_tail); 21390 if (mp == NULL) { 21391 if (ire != NULL) 21392 IRE_REFRELE(ire); 21393 return (-1); /* out_of_mem */ 21394 } 21395 21396 len = tcp_hdr_len; 21397 /* 21398 * There are four reasons to allocate a new hdr mblk: 21399 * 1) The bytes above us are in use by another packet 21400 * 2) We don't have good alignment 21401 * 3) The mblk is being shared 21402 * 4) We don't have enough room for a header 21403 */ 21404 rptr = mp->b_rptr - len; 21405 if (!OK_32PTR(rptr) || 21406 ((db = mp->b_datap), db->db_ref != 2) || 21407 rptr < db->db_base + ire_fp_mp_len) { 21408 /* NOTE: we assume allocb returns an OK_32PTR */ 21409 21410 must_alloc:; 21411 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 21412 tcps->tcps_wroff_xtra + ire_fp_mp_len, BPRI_MED); 21413 if (mp1 == NULL) { 21414 freemsg(mp); 21415 if (ire != NULL) 21416 IRE_REFRELE(ire); 21417 return (-1); /* out_of_mem */ 21418 } 21419 mp1->b_cont = mp; 21420 mp = mp1; 21421 /* Leave room for Link Level header */ 21422 len = tcp_hdr_len; 21423 rptr = 21424 &mp->b_rptr[tcps->tcps_wroff_xtra + ire_fp_mp_len]; 21425 mp->b_wptr = &rptr[len]; 21426 } 21427 21428 /* 21429 * Fill in the header using the template header, and add 21430 * options such as time-stamp, ECN and/or SACK, as needed. 21431 */ 21432 tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk); 21433 21434 mp->b_rptr = rptr; 21435 21436 if (*tail_unsent) { 21437 int spill = *tail_unsent; 21438 21439 mp1 = mp->b_cont; 21440 if (mp1 == NULL) 21441 mp1 = mp; 21442 21443 /* 21444 * If we're a little short, tack on more mblks until 21445 * there is no more spillover. 21446 */ 21447 while (spill < 0) { 21448 mblk_t *nmp; 21449 int nmpsz; 21450 21451 nmp = (*xmit_tail)->b_cont; 21452 nmpsz = MBLKL(nmp); 21453 21454 /* 21455 * Excess data in mblk; can we split it? 21456 * If MDT is enabled for the connection, 21457 * keep on splitting as this is a transient 21458 * send path. 21459 */ 21460 if (!do_lso_send && !tcp->tcp_mdt && 21461 (spill + nmpsz > 0)) { 21462 /* 21463 * Don't split if stream head was 21464 * told to break up larger writes 21465 * into smaller ones. 21466 */ 21467 if (tcp->tcp_maxpsz > 0) 21468 break; 21469 21470 /* 21471 * Next mblk is less than SMSS/2 21472 * rounded up to nearest 64-byte; 21473 * let it get sent as part of the 21474 * next segment. 
21475 */ 21476 if (tcp->tcp_localnet && 21477 !tcp->tcp_cork && 21478 (nmpsz < roundup((mss >> 1), 64))) 21479 break; 21480 } 21481 21482 *xmit_tail = nmp; 21483 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX); 21484 /* Stash for rtt use later */ 21485 (*xmit_tail)->b_prev = local_time; 21486 (*xmit_tail)->b_next = 21487 (mblk_t *)(uintptr_t)(*snxt - len); 21488 mp1->b_cont = dupb(*xmit_tail); 21489 mp1 = mp1->b_cont; 21490 21491 spill += nmpsz; 21492 if (mp1 == NULL) { 21493 *tail_unsent = spill; 21494 freemsg(mp); 21495 if (ire != NULL) 21496 IRE_REFRELE(ire); 21497 return (-1); /* out_of_mem */ 21498 } 21499 } 21500 21501 /* Trim back any surplus on the last mblk */ 21502 if (spill >= 0) { 21503 mp1->b_wptr -= spill; 21504 *tail_unsent = spill; 21505 } else { 21506 /* 21507 * We did not send everything we could in 21508 * order to remain within the b_cont limit. 21509 */ 21510 *usable -= spill; 21511 *snxt += spill; 21512 tcp->tcp_last_sent_len += spill; 21513 UPDATE_MIB(&tcps->tcps_mib, 21514 tcpOutDataBytes, spill); 21515 /* 21516 * Adjust the checksum 21517 */ 21518 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 21519 sum += spill; 21520 sum = (sum >> 16) + (sum & 0xFFFF); 21521 U16_TO_ABE16(sum, tcph->th_sum); 21522 if (tcp->tcp_ipversion == IPV4_VERSION) { 21523 sum = ntohs( 21524 ((ipha_t *)rptr)->ipha_length) + 21525 spill; 21526 ((ipha_t *)rptr)->ipha_length = 21527 htons(sum); 21528 } else { 21529 sum = ntohs( 21530 ((ip6_t *)rptr)->ip6_plen) + 21531 spill; 21532 ((ip6_t *)rptr)->ip6_plen = 21533 htons(sum); 21534 } 21535 *tail_unsent = 0; 21536 } 21537 } 21538 if (tcp->tcp_ip_forward_progress) { 21539 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 21540 *(uint32_t *)mp->b_rptr |= IP_FORWARD_PROG; 21541 tcp->tcp_ip_forward_progress = B_FALSE; 21542 } 21543 21544 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21545 if (do_lso_send) { 21546 tcp_lsosend_data(tcp, mp, ire, ill, mss, 21547 num_lso_seg); 21548 tcp->tcp_obsegs += num_lso_seg; 21549 21550 TCP_STAT(tcps, tcp_lso_times); 21551 TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg); 21552 } else { 21553 tcp_send_data(tcp, q, mp); 21554 BUMP_LOCAL(tcp->tcp_obsegs); 21555 } 21556 } 21557 21558 if (ire != NULL) 21559 IRE_REFRELE(ire); 21560 return (0); 21561 } 21562 21563 /* Unlink and return any mblk that looks like it contains a MDT info */ 21564 static mblk_t * 21565 tcp_mdt_info_mp(mblk_t *mp) 21566 { 21567 mblk_t *prev_mp; 21568 21569 for (;;) { 21570 prev_mp = mp; 21571 /* no more to process? */ 21572 if ((mp = mp->b_cont) == NULL) 21573 break; 21574 21575 switch (DB_TYPE(mp)) { 21576 case M_CTL: 21577 if (*(uint32_t *)mp->b_rptr != MDT_IOC_INFO_UPDATE) 21578 continue; 21579 ASSERT(prev_mp != NULL); 21580 prev_mp->b_cont = mp->b_cont; 21581 mp->b_cont = NULL; 21582 return (mp); 21583 default: 21584 break; 21585 } 21586 } 21587 return (mp); 21588 } 21589 21590 /* MDT info update routine, called when IP notifies us about MDT */ 21591 static void 21592 tcp_mdt_update(tcp_t *tcp, ill_mdt_capab_t *mdt_capab, boolean_t first) 21593 { 21594 boolean_t prev_state; 21595 tcp_stack_t *tcps = tcp->tcp_tcps; 21596 21597 /* 21598 * IP is telling us to abort MDT on this connection? We know 21599 * this because the capability is only turned off when IP 21600 * encounters some pathological cases, e.g. link-layer change 21601 * where the new driver doesn't support MDT, or in situation 21602 * where MDT usage on the link-layer has been switched off. 
21603 * IP would not have sent us the initial MDT_IOC_INFO_UPDATE 21604 * if the link-layer doesn't support MDT, and if it does, it 21605 * will indicate that the feature is to be turned on. 21606 */ 21607 prev_state = tcp->tcp_mdt; 21608 tcp->tcp_mdt = (mdt_capab->ill_mdt_on != 0); 21609 if (!tcp->tcp_mdt && !first) { 21610 TCP_STAT(tcps, tcp_mdt_conn_halted3); 21611 ip1dbg(("tcp_mdt_update: disabling MDT for connp %p\n", 21612 (void *)tcp->tcp_connp)); 21613 } 21614 21615 /* 21616 * We currently only support MDT on simple TCP/{IPv4,IPv6}, 21617 * so disable MDT otherwise. The checks are done here 21618 * and in tcp_wput_data(). 21619 */ 21620 if (tcp->tcp_mdt && 21621 (tcp->tcp_ipversion == IPV4_VERSION && 21622 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 21623 (tcp->tcp_ipversion == IPV6_VERSION && 21624 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)) 21625 tcp->tcp_mdt = B_FALSE; 21626 21627 if (tcp->tcp_mdt) { 21628 if (mdt_capab->ill_mdt_version != MDT_VERSION_2) { 21629 cmn_err(CE_NOTE, "tcp_mdt_update: unknown MDT " 21630 "version (%d), expected version is %d", 21631 mdt_capab->ill_mdt_version, MDT_VERSION_2); 21632 tcp->tcp_mdt = B_FALSE; 21633 return; 21634 } 21635 21636 /* 21637 * We need the driver to be able to handle at least three 21638 * spans per packet in order for tcp MDT to be utilized. 21639 * The first is for the header portion, while the rest are 21640 * needed to handle a packet that straddles across two 21641 * virtually non-contiguous buffers; a typical tcp packet 21642 * therefore consists of only two spans. Note that we take 21643 * a zero as "don't care". 21644 */ 21645 if (mdt_capab->ill_mdt_span_limit > 0 && 21646 mdt_capab->ill_mdt_span_limit < 3) { 21647 tcp->tcp_mdt = B_FALSE; 21648 return; 21649 } 21650 21651 /* a zero means driver wants default value */ 21652 tcp->tcp_mdt_max_pld = MIN(mdt_capab->ill_mdt_max_pld, 21653 tcps->tcps_mdt_max_pbufs); 21654 if (tcp->tcp_mdt_max_pld == 0) 21655 tcp->tcp_mdt_max_pld = tcps->tcps_mdt_max_pbufs; 21656 21657 /* ensure 32-bit alignment */ 21658 tcp->tcp_mdt_hdr_head = roundup(MAX(tcps->tcps_mdt_hdr_head_min, 21659 mdt_capab->ill_mdt_hdr_head), 4); 21660 tcp->tcp_mdt_hdr_tail = roundup(MAX(tcps->tcps_mdt_hdr_tail_min, 21661 mdt_capab->ill_mdt_hdr_tail), 4); 21662 21663 if (!first && !prev_state) { 21664 TCP_STAT(tcps, tcp_mdt_conn_resumed2); 21665 ip1dbg(("tcp_mdt_update: reenabling MDT for connp %p\n", 21666 (void *)tcp->tcp_connp)); 21667 } 21668 } 21669 } 21670 21671 /* Unlink and return any mblk that looks like it contains a LSO info */ 21672 static mblk_t * 21673 tcp_lso_info_mp(mblk_t *mp) 21674 { 21675 mblk_t *prev_mp; 21676 21677 for (;;) { 21678 prev_mp = mp; 21679 /* no more to process? */ 21680 if ((mp = mp->b_cont) == NULL) 21681 break; 21682 21683 switch (DB_TYPE(mp)) { 21684 case M_CTL: 21685 if (*(uint32_t *)mp->b_rptr != LSO_IOC_INFO_UPDATE) 21686 continue; 21687 ASSERT(prev_mp != NULL); 21688 prev_mp->b_cont = mp->b_cont; 21689 mp->b_cont = NULL; 21690 return (mp); 21691 default: 21692 break; 21693 } 21694 } 21695 21696 return (mp); 21697 } 21698 21699 /* LSO info update routine, called when IP notifies us about LSO */ 21700 static void 21701 tcp_lso_update(tcp_t *tcp, ill_lso_capab_t *lso_capab) 21702 { 21703 tcp_stack_t *tcps = tcp->tcp_tcps; 21704 21705 /* 21706 * IP is telling us to abort LSO on this connection? We know 21707 * this because the capability is only turned off when IP 21708 * encounters some pathological cases, e.g. 
link-layer change
21709 * where the new NIC/driver doesn't support LSO, or in situations
21710 * where LSO usage on the link-layer has been switched off.
21711 * IP would not have sent us the initial LSO_IOC_INFO_UPDATE
21712 * if the link-layer doesn't support LSO, and if it does, it
21713 * will indicate that the feature is to be turned on.
21714 */
21715 tcp->tcp_lso = (lso_capab->ill_lso_on != 0);
21716 TCP_STAT(tcps, tcp_lso_enabled);
21717
21718 /*
21719 * We currently only support LSO on simple TCP/IPv4,
21720 * so disable LSO otherwise. The checks are done here
21721 * and in tcp_wput_data().
21722 */
21723 if (tcp->tcp_lso &&
21724 ((tcp->tcp_ipversion == IPV4_VERSION &&
21725 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
21726 (tcp->tcp_ipversion == IPV6_VERSION))) {
21727 tcp->tcp_lso = B_FALSE;
21728 TCP_STAT(tcps, tcp_lso_disabled);
21729 } else {
21730 tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH,
21731 lso_capab->ill_lso_max);
21732 }
21733 }
21734
21735 static void
21736 tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_lso_mdt)
21737 {
21738 conn_t *connp = tcp->tcp_connp;
21739 tcp_stack_t *tcps = tcp->tcp_tcps;
21740 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
21741
21742 ASSERT(ire != NULL);
21743
21744 /*
21745 * We may be in the fastpath here, and although we essentially do
21746 * similar checks as in ip_bind_connected{_v6}/ip_xxinfo_return,
21747 * we try to keep things as brief as possible. After all, these
21748 * are only best-effort checks, and we do more thorough ones prior
21749 * to calling tcp_send()/tcp_multisend().
21750 */
21751 if ((ipst->ips_ip_lso_outbound || ipst->ips_ip_multidata_outbound) &&
21752 check_lso_mdt && !(ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) &&
21753 ill != NULL && !CONN_IPSEC_OUT_ENCAPSULATED(connp) &&
21754 !(ire->ire_flags & RTF_MULTIRT) &&
21755 !IPP_ENABLED(IPP_LOCAL_OUT, ipst) &&
21756 CONN_IS_LSO_MD_FASTPATH(connp)) {
21757 if (ipst->ips_ip_lso_outbound && ILL_LSO_CAPABLE(ill)) {
21758 /* Cache the result */
21759 connp->conn_lso_ok = B_TRUE;
21760
21761 ASSERT(ill->ill_lso_capab != NULL);
21762 if (!ill->ill_lso_capab->ill_lso_on) {
21763 ill->ill_lso_capab->ill_lso_on = 1;
21764 ip1dbg(("tcp_ire_ill_check: connp %p enables "
21765 "LSO for interface %s\n", (void *)connp,
21766 ill->ill_name));
21767 }
21768 tcp_lso_update(tcp, ill->ill_lso_capab);
21769 } else if (ipst->ips_ip_multidata_outbound &&
21770 ILL_MDT_CAPABLE(ill)) {
21771 /* Cache the result */
21772 connp->conn_mdt_ok = B_TRUE;
21773
21774 ASSERT(ill->ill_mdt_capab != NULL);
21775 if (!ill->ill_mdt_capab->ill_mdt_on) {
21776 ill->ill_mdt_capab->ill_mdt_on = 1;
21777 ip1dbg(("tcp_ire_ill_check: connp %p enables "
21778 "MDT for interface %s\n", (void *)connp,
21779 ill->ill_name));
21780 }
21781 tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE);
21782 }
21783 }
21784
21785 /*
21786 * The goal is to reduce the number of generated tcp segments by
21787 * setting the maxpsz multiplier to 0; this will have an effect on
21788 * tcp_maxpsz_set(). With this behavior, tcp will pack more data
21789 * into each packet, up to SMSS bytes. Doing this reduces the number
21790 * of outbound segments and incoming ACKs, thus allowing for better
21791 * network and system performance. In contrast, the legacy behavior
21792 * may result in sending less than SMSS size, because the last mblk
21793 * for some packets may have more data than needed to make up SMSS,
21794 * and the legacy code refused to "split" it.
	 *
	 * We apply the new behavior in the following situations:
	 *
	 *   1) Loopback connections,
	 *   2) Connections in which the remote peer is not on local subnet,
	 *   3) Local subnet connections over the bge interface (see below).
	 *
	 * Ideally, we would like this behavior to apply for interfaces other
	 * than bge.  However, doing so would negatively impact drivers which
	 * perform dynamic mapping and unmapping of DMA resources, which are
	 * increased by setting the maxpsz multiplier to 0 (more mblks per
	 * packet will be generated by tcp).  The bge driver does not suffer
	 * from this, as it copies the mblks into pre-mapped buffers, and
	 * therefore does not require more I/O resources than before.
	 *
	 * Otherwise, this behavior is present on all network interfaces when
	 * the destination endpoint is non-local, since reducing the number
	 * of packets in general is good for the network.
	 *
	 * TODO We need to remove this hard-coded conditional for bge once
	 *	a better "self-tuning" mechanism, or a way to comprehend
	 *	the driver transmit strategy is devised.  Until the solution
	 *	is found and well understood, we live with this hack.
	 */
	if (!tcps->tcps_static_maxpsz &&
	    (tcp->tcp_loopback || !tcp->tcp_localnet ||
	    (ill->ill_name_length > 3 && bcmp(ill->ill_name, "bge", 3) == 0))) {
		/* override the default value */
		tcp->tcp_maxpsz = 0;

		ip3dbg(("tcp_ire_ill_check: connp %p tcp_maxpsz %d on "
		    "interface %s\n", (void *)connp, tcp->tcp_maxpsz,
		    ill != NULL ? ill->ill_name : ipif_loopback_name));
	}

	/* set the stream head parameters accordingly */
	(void) tcp_maxpsz_set(tcp, B_TRUE);
}

/* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */
static void
tcp_wput_flush(tcp_t *tcp, mblk_t *mp)
{
	uchar_t	fval = *mp->b_rptr;
	mblk_t	*tail;
	queue_t	*q = tcp->tcp_wq;

	/* TODO: How should flush interact with urgent data? */
	if ((fval & FLUSHW) && tcp->tcp_xmit_head &&
	    !(tcp->tcp_valid_bits & TCP_URG_VALID)) {
		/*
		 * Flush only data that has not yet been put on the wire.  If
		 * we flush data that we have already transmitted, life, as we
		 * know it, may come to an end.
		 */
		tail = tcp->tcp_xmit_tail;
		tail->b_wptr -= tcp->tcp_xmit_tail_unsent;
		tcp->tcp_xmit_tail_unsent = 0;
		tcp->tcp_unsent = 0;
		if (tail->b_wptr != tail->b_rptr)
			tail = tail->b_cont;
		if (tail) {
			mblk_t **excess = &tcp->tcp_xmit_head;
			for (;;) {
				mblk_t *mp1 = *excess;
				if (mp1 == tail)
					break;
				tcp->tcp_xmit_tail = mp1;
				tcp->tcp_xmit_last = mp1;
				excess = &mp1->b_cont;
			}
			*excess = NULL;
			tcp_close_mpp(&tail);
			if (tcp->tcp_snd_zcopy_aware)
				tcp_zcopy_notify(tcp);
		}
		/*
		 * We have no unsent data, so unsent must be less than
		 * tcp_xmit_lowater, so re-enable flow.
		 */
		mutex_enter(&tcp->tcp_non_sq_lock);
		if (tcp->tcp_flow_stopped) {
			tcp_clrqfull(tcp);
		}
		mutex_exit(&tcp->tcp_non_sq_lock);
	}
	/*
	 * TODO: you can't just flush these, you have to increase rwnd for one
	 * thing.  For another, how should urgent data interact?
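	 * (Read-side flushing would at minimum have to credit the flushed
	 * bytes back to the receive window and advertise the new rwnd,
	 * which is why FLUSHR below is simply reflected back upstream
	 * with FLUSHW cleared.)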
21884 */ 21885 if (fval & FLUSHR) { 21886 *mp->b_rptr = fval & ~FLUSHW; 21887 /* XXX */ 21888 qreply(q, mp); 21889 return; 21890 } 21891 freemsg(mp); 21892 } 21893 21894 /* 21895 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA 21896 * messages. 21897 */ 21898 static void 21899 tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp) 21900 { 21901 mblk_t *mp1; 21902 STRUCT_HANDLE(strbuf, sb); 21903 uint16_t port; 21904 queue_t *q = tcp->tcp_wq; 21905 in6_addr_t v6addr; 21906 ipaddr_t v4addr; 21907 uint32_t flowinfo = 0; 21908 int addrlen; 21909 21910 /* Make sure it is one of ours. */ 21911 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21912 case TI_GETMYNAME: 21913 case TI_GETPEERNAME: 21914 break; 21915 default: 21916 CALL_IP_WPUT(tcp->tcp_connp, q, mp); 21917 return; 21918 } 21919 switch (mi_copy_state(q, mp, &mp1)) { 21920 case -1: 21921 return; 21922 case MI_COPY_CASE(MI_COPY_IN, 1): 21923 break; 21924 case MI_COPY_CASE(MI_COPY_OUT, 1): 21925 /* Copy out the strbuf. */ 21926 mi_copyout(q, mp); 21927 return; 21928 case MI_COPY_CASE(MI_COPY_OUT, 2): 21929 /* All done. */ 21930 mi_copy_done(q, mp, 0); 21931 return; 21932 default: 21933 mi_copy_done(q, mp, EPROTO); 21934 return; 21935 } 21936 /* Check alignment of the strbuf */ 21937 if (!OK_32PTR(mp1->b_rptr)) { 21938 mi_copy_done(q, mp, EINVAL); 21939 return; 21940 } 21941 21942 STRUCT_SET_HANDLE(sb, ((struct iocblk *)mp->b_rptr)->ioc_flag, 21943 (void *)mp1->b_rptr); 21944 addrlen = tcp->tcp_family == AF_INET ? sizeof (sin_t) : sizeof (sin6_t); 21945 21946 if (STRUCT_FGET(sb, maxlen) < addrlen) { 21947 mi_copy_done(q, mp, EINVAL); 21948 return; 21949 } 21950 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21951 case TI_GETMYNAME: 21952 if (tcp->tcp_family == AF_INET) { 21953 if (tcp->tcp_ipversion == IPV4_VERSION) { 21954 v4addr = tcp->tcp_ipha->ipha_src; 21955 } else { 21956 /* can't return an address in this case */ 21957 v4addr = 0; 21958 } 21959 } else { 21960 /* tcp->tcp_family == AF_INET6 */ 21961 if (tcp->tcp_ipversion == IPV4_VERSION) { 21962 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 21963 &v6addr); 21964 } else { 21965 v6addr = tcp->tcp_ip6h->ip6_src; 21966 } 21967 } 21968 port = tcp->tcp_lport; 21969 break; 21970 case TI_GETPEERNAME: 21971 if (tcp->tcp_family == AF_INET) { 21972 if (tcp->tcp_ipversion == IPV4_VERSION) { 21973 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_remote_v6, 21974 v4addr); 21975 } else { 21976 /* can't return an address in this case */ 21977 v4addr = 0; 21978 } 21979 } else { 21980 /* tcp->tcp_family == AF_INET6) */ 21981 v6addr = tcp->tcp_remote_v6; 21982 if (tcp->tcp_ipversion == IPV6_VERSION) { 21983 /* 21984 * No flowinfo if tcp->tcp_ipversion is v4. 21985 * 21986 * flowinfo was already initialized to zero 21987 * where it was declared above, so only 21988 * set it if ipversion is v6. 
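				 *
				 * (ip6_vcf packs the version, traffic class
				 * and flow label fields of the IPv6 header
				 * into one 32-bit word; the mask below
				 * strips the bits that do not belong in
				 * sin6_flowinfo.)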
21989 */ 21990 flowinfo = tcp->tcp_ip6h->ip6_vcf & 21991 ~IPV6_VERS_AND_FLOW_MASK; 21992 } 21993 } 21994 port = tcp->tcp_fport; 21995 break; 21996 default: 21997 mi_copy_done(q, mp, EPROTO); 21998 return; 21999 } 22000 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 22001 if (!mp1) 22002 return; 22003 22004 if (tcp->tcp_family == AF_INET) { 22005 sin_t *sin; 22006 22007 STRUCT_FSET(sb, len, (int)sizeof (sin_t)); 22008 sin = (sin_t *)mp1->b_rptr; 22009 mp1->b_wptr = (uchar_t *)&sin[1]; 22010 *sin = sin_null; 22011 sin->sin_family = AF_INET; 22012 sin->sin_addr.s_addr = v4addr; 22013 sin->sin_port = port; 22014 } else { 22015 /* tcp->tcp_family == AF_INET6 */ 22016 sin6_t *sin6; 22017 22018 STRUCT_FSET(sb, len, (int)sizeof (sin6_t)); 22019 sin6 = (sin6_t *)mp1->b_rptr; 22020 mp1->b_wptr = (uchar_t *)&sin6[1]; 22021 *sin6 = sin6_null; 22022 sin6->sin6_family = AF_INET6; 22023 sin6->sin6_flowinfo = flowinfo; 22024 sin6->sin6_addr = v6addr; 22025 sin6->sin6_port = port; 22026 } 22027 /* Copy out the address */ 22028 mi_copyout(q, mp); 22029 } 22030 22031 /* 22032 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL 22033 * messages. 22034 */ 22035 /* ARGSUSED */ 22036 static void 22037 tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2) 22038 { 22039 conn_t *connp = (conn_t *)arg; 22040 tcp_t *tcp = connp->conn_tcp; 22041 queue_t *q = tcp->tcp_wq; 22042 struct iocblk *iocp; 22043 tcp_stack_t *tcps = tcp->tcp_tcps; 22044 22045 ASSERT(DB_TYPE(mp) == M_IOCTL); 22046 /* 22047 * Try and ASSERT the minimum possible references on the 22048 * conn early enough. Since we are executing on write side, 22049 * the connection is obviously not detached and that means 22050 * there is a ref each for TCP and IP. Since we are behind 22051 * the squeue, the minimum references needed are 3. If the 22052 * conn is in classifier hash list, there should be an 22053 * extra ref for that (we check both the possibilities). 22054 */ 22055 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 22056 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 22057 22058 iocp = (struct iocblk *)mp->b_rptr; 22059 switch (iocp->ioc_cmd) { 22060 case TCP_IOC_DEFAULT_Q: 22061 /* Wants to be the default wq. */ 22062 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 22063 iocp->ioc_error = EPERM; 22064 iocp->ioc_count = 0; 22065 mp->b_datap->db_type = M_IOCACK; 22066 qreply(q, mp); 22067 return; 22068 } 22069 tcp_def_q_set(tcp, mp); 22070 return; 22071 case _SIOCSOCKFALLBACK: 22072 /* 22073 * Either sockmod is about to be popped and the socket 22074 * would now be treated as a plain stream, or a module 22075 * is about to be pushed so we could no longer use read- 22076 * side synchronous streams for fused loopback tcp. 22077 * Drain any queued data and disable direct sockfs 22078 * interface from now on. 22079 */ 22080 if (!tcp->tcp_issocket) { 22081 DB_TYPE(mp) = M_IOCNAK; 22082 iocp->ioc_error = EINVAL; 22083 } else { 22084 #ifdef _ILP32 22085 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 22086 #else 22087 tcp->tcp_acceptor_id = tcp->tcp_connp->conn_dev; 22088 #endif 22089 /* 22090 * Insert this socket into the acceptor hash. 22091 * We might need it for T_CONN_RES message 22092 */ 22093 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 22094 22095 if (tcp->tcp_fused) { 22096 /* 22097 * This is a fused loopback tcp; disable 22098 * read-side synchronous streams interface 22099 * and drain any queued data. 
It is okay 22100 * to do this for non-synchronous streams 22101 * fused tcp as well. 22102 */ 22103 tcp_fuse_disable_pair(tcp, B_FALSE); 22104 } 22105 tcp->tcp_issocket = B_FALSE; 22106 TCP_STAT(tcps, tcp_sock_fallback); 22107 22108 DB_TYPE(mp) = M_IOCACK; 22109 iocp->ioc_error = 0; 22110 } 22111 iocp->ioc_count = 0; 22112 iocp->ioc_rval = 0; 22113 qreply(q, mp); 22114 return; 22115 } 22116 CALL_IP_WPUT(connp, q, mp); 22117 } 22118 22119 /* 22120 * This routine is called by tcp_wput() to handle all TPI requests. 22121 */ 22122 /* ARGSUSED */ 22123 static void 22124 tcp_wput_proto(void *arg, mblk_t *mp, void *arg2) 22125 { 22126 conn_t *connp = (conn_t *)arg; 22127 tcp_t *tcp = connp->conn_tcp; 22128 union T_primitives *tprim = (union T_primitives *)mp->b_rptr; 22129 uchar_t *rptr; 22130 t_scalar_t type; 22131 int len; 22132 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 22133 22134 /* 22135 * Try and ASSERT the minimum possible references on the 22136 * conn early enough. Since we are executing on write side, 22137 * the connection is obviously not detached and that means 22138 * there is a ref each for TCP and IP. Since we are behind 22139 * the squeue, the minimum references needed are 3. If the 22140 * conn is in classifier hash list, there should be an 22141 * extra ref for that (we check both the possibilities). 22142 */ 22143 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 22144 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 22145 22146 rptr = mp->b_rptr; 22147 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 22148 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 22149 type = ((union T_primitives *)rptr)->type; 22150 if (type == T_EXDATA_REQ) { 22151 uint32_t msize = msgdsize(mp->b_cont); 22152 22153 len = msize - 1; 22154 if (len < 0) { 22155 freemsg(mp); 22156 return; 22157 } 22158 /* 22159 * Try to force urgent data out on the wire. 22160 * Even if we have unsent data this will 22161 * at least send the urgent flag. 22162 * XXX does not handle more flag correctly. 22163 */ 22164 len += tcp->tcp_unsent; 22165 len += tcp->tcp_snxt; 22166 tcp->tcp_urg = len; 22167 tcp->tcp_valid_bits |= TCP_URG_VALID; 22168 22169 /* Bypass tcp protocol for fused tcp loopback */ 22170 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 22171 return; 22172 } else if (type != T_DATA_REQ) { 22173 goto non_urgent_data; 22174 } 22175 /* TODO: options, flags, ... from user */ 22176 /* Set length to zero for reclamation below */ 22177 tcp_wput_data(tcp, mp->b_cont, B_TRUE); 22178 freeb(mp); 22179 return; 22180 } else { 22181 if (tcp->tcp_debug) { 22182 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 22183 "tcp_wput_proto, dropping one..."); 22184 } 22185 freemsg(mp); 22186 return; 22187 } 22188 22189 non_urgent_data: 22190 22191 switch ((int)tprim->type) { 22192 case T_SSL_PROXY_BIND_REQ: /* an SSL proxy endpoint bind request */ 22193 /* 22194 * save the kssl_ent_t from the next block, and convert this 22195 * back to a normal bind_req. 
		 */
		if (mp->b_cont != NULL) {
			ASSERT(MBLKL(mp->b_cont) >= sizeof (kssl_ent_t));

			if (tcp->tcp_kssl_ent != NULL) {
				kssl_release_ent(tcp->tcp_kssl_ent, NULL,
				    KSSL_NO_PROXY);
				tcp->tcp_kssl_ent = NULL;
			}
			bcopy(mp->b_cont->b_rptr, &tcp->tcp_kssl_ent,
			    sizeof (kssl_ent_t));
			kssl_hold_ent(tcp->tcp_kssl_ent);
			freemsg(mp->b_cont);
			mp->b_cont = NULL;
		}
		tprim->type = T_BIND_REQ;

	/* FALLTHROUGH */
	case O_T_BIND_REQ:	/* bind request */
	case T_BIND_REQ:	/* new semantics bind request */
		tcp_bind(tcp, mp);
		break;
	case T_UNBIND_REQ:	/* unbind request */
		tcp_unbind(tcp, mp);
		break;
	case O_T_CONN_RES:	/* old connection response XXX */
	case T_CONN_RES:	/* connection response */
		tcp_accept(tcp, mp);
		break;
	case T_CONN_REQ:	/* connection request */
		tcp_connect(tcp, mp);
		break;
	case T_DISCON_REQ:	/* disconnect request */
		tcp_disconnect(tcp, mp);
		break;
	case T_CAPABILITY_REQ:
		tcp_capability_req(tcp, mp);	/* capability request */
		break;
	case T_INFO_REQ:	/* information request */
		tcp_info_req(tcp, mp);
		break;
	case T_SVR4_OPTMGMT_REQ:	/* manage options req */
		(void) svr4_optcom_req(tcp->tcp_wq, mp, cr,
		    &tcp_opt_obj, B_TRUE);
		break;
	case T_OPTMGMT_REQ:
		/*
		 * Note: no support for snmpcom_req() through new
		 * T_OPTMGMT_REQ. See comments in ip.c
		 */
		/* Only IP is allowed to return meaningful value */
		(void) tpi_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj,
		    B_TRUE);
		break;

	case T_UNITDATA_REQ:	/* unitdata request */
		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
		break;
	case T_ORDREL_REQ:	/* orderly release req */
		freemsg(mp);

		if (tcp->tcp_fused)
			tcp_unfuse(tcp);

		if (tcp_xmit_end(tcp) != 0) {
			/*
			 * We were crossing FINs and got a reset from
			 * the other side. Just ignore it.
			 */
			if (tcp->tcp_debug) {
				(void) strlog(TCP_MOD_ID, 0, 1,
				    SL_ERROR|SL_TRACE,
				    "tcp_wput_proto, T_ORDREL_REQ out of "
				    "state %s",
				    tcp_display(tcp, NULL,
				    DISP_ADDR_AND_PORT));
			}
		}
		break;
	case T_ADDR_REQ:
		tcp_addr_req(tcp, mp);
		break;
	default:
		if (tcp->tcp_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_wput_proto, bogus TPI msg, type %d",
			    tprim->type);
		}
		/*
		 * We used to M_ERROR.  Sending TNOTSUPPORT gives the user
		 * a chance to recover.
		 */
		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
		break;
	}
}

/*
 * The TCP write service routine should never be called...
 */
/* ARGSUSED */
static void
tcp_wsrv(queue_t *q)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	TCP_STAT(tcps, tcp_wsrv_called);
}

/* Non overlapping byte exchanger */
static void
tcp_xchg(uchar_t *a, uchar_t *b, int len)
{
	uchar_t	uch;

	while (len-- > 0) {
		uch = a[len];
		a[len] = b[len];
		b[len] = uch;
	}
}

/*
 * Send out a control packet on the tcp connection specified.  This routine
 * is typically called where we need a simple ACK or RST generated.
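 *
 * (Editorial example: the push-timer path, tcp_push_timer(), calls this as
 *
 *	tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
 *
 * to emit a pure ACK covering everything received so far.)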
22321 */ 22322 static void 22323 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl) 22324 { 22325 uchar_t *rptr; 22326 tcph_t *tcph; 22327 ipha_t *ipha = NULL; 22328 ip6_t *ip6h = NULL; 22329 uint32_t sum; 22330 int tcp_hdr_len; 22331 int tcp_ip_hdr_len; 22332 mblk_t *mp; 22333 tcp_stack_t *tcps = tcp->tcp_tcps; 22334 22335 /* 22336 * Save sum for use in source route later. 22337 */ 22338 ASSERT(tcp != NULL); 22339 sum = tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 22340 tcp_hdr_len = tcp->tcp_hdr_len; 22341 tcp_ip_hdr_len = tcp->tcp_ip_hdr_len; 22342 22343 /* If a text string is passed in with the request, pass it to strlog. */ 22344 if (str != NULL && tcp->tcp_debug) { 22345 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22346 "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x", 22347 str, seq, ack, ctl); 22348 } 22349 mp = allocb(tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcps->tcps_wroff_xtra, 22350 BPRI_MED); 22351 if (mp == NULL) { 22352 return; 22353 } 22354 rptr = &mp->b_rptr[tcps->tcps_wroff_xtra]; 22355 mp->b_rptr = rptr; 22356 mp->b_wptr = &rptr[tcp_hdr_len]; 22357 bcopy(tcp->tcp_iphc, rptr, tcp_hdr_len); 22358 22359 if (tcp->tcp_ipversion == IPV4_VERSION) { 22360 ipha = (ipha_t *)rptr; 22361 ipha->ipha_length = htons(tcp_hdr_len); 22362 } else { 22363 ip6h = (ip6_t *)rptr; 22364 ASSERT(tcp != NULL); 22365 ip6h->ip6_plen = htons(tcp->tcp_hdr_len - 22366 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 22367 } 22368 tcph = (tcph_t *)&rptr[tcp_ip_hdr_len]; 22369 tcph->th_flags[0] = (uint8_t)ctl; 22370 if (ctl & TH_RST) { 22371 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22372 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22373 /* 22374 * Don't send TSopt w/ TH_RST packets per RFC 1323. 22375 */ 22376 if (tcp->tcp_snd_ts_ok && 22377 tcp->tcp_state > TCPS_SYN_SENT) { 22378 mp->b_wptr = &rptr[tcp_hdr_len - TCPOPT_REAL_TS_LEN]; 22379 *(mp->b_wptr) = TCPOPT_EOL; 22380 if (tcp->tcp_ipversion == IPV4_VERSION) { 22381 ipha->ipha_length = htons(tcp_hdr_len - 22382 TCPOPT_REAL_TS_LEN); 22383 } else { 22384 ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - 22385 TCPOPT_REAL_TS_LEN); 22386 } 22387 tcph->th_offset_and_rsrvd[0] -= (3 << 4); 22388 sum -= TCPOPT_REAL_TS_LEN; 22389 } 22390 } 22391 if (ctl & TH_ACK) { 22392 if (tcp->tcp_snd_ts_ok) { 22393 U32_TO_BE32(lbolt, 22394 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 22395 U32_TO_BE32(tcp->tcp_ts_recent, 22396 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 22397 } 22398 22399 /* Update the latest receive window size in TCP header. */ 22400 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 22401 tcph->th_win); 22402 tcp->tcp_rack = ack; 22403 tcp->tcp_rack_cnt = 0; 22404 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 22405 } 22406 BUMP_LOCAL(tcp->tcp_obsegs); 22407 U32_TO_BE32(seq, tcph->th_seq); 22408 U32_TO_BE32(ack, tcph->th_ack); 22409 /* 22410 * Include the adjustment for a source route if any. 22411 */ 22412 sum = (sum >> 16) + (sum & 0xFFFF); 22413 U16_TO_BE16(sum, tcph->th_sum); 22414 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 22415 tcp_send_data(tcp, tcp->tcp_wq, mp); 22416 } 22417 22418 /* 22419 * If this routine returns B_TRUE, TCP can generate a RST in response 22420 * to a segment. If it returns B_FALSE, TCP should not respond. 22421 */ 22422 static boolean_t 22423 tcp_send_rst_chk(tcp_stack_t *tcps) 22424 { 22425 clock_t now; 22426 22427 /* 22428 * TCP needs to protect itself from generating too many RSTs. 22429 * This can be a DoS attack by sending us random segments 22430 * soliciting RSTs. 
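	 *
	 * (Editorial worked example, rate value assumed: with
	 * tcps_rst_sent_rate at 40, tcps_rst_cnt restarts at 1 whenever a
	 * new 1-second window, or an lbolt wraparound, is detected; RSTs
	 * 1 through 40 within a window are sent, and any further ones are
	 * suppressed until the next window begins.)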
	 *
	 * What we do here is to have a limit of tcp_rst_sent_rate RSTs
	 * in each 1-second interval.  In this way, TCP still generates
	 * RSTs in normal cases but when under attack, the impact is
	 * limited.
	 */
	if (tcps->tcps_rst_sent_rate_enabled != 0) {
		now = lbolt;
		/* lbolt can wrap around. */
		if ((tcps->tcps_last_rst_intrvl > now) ||
		    (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
		    1*SECONDS)) {
			tcps->tcps_last_rst_intrvl = now;
			tcps->tcps_rst_cnt = 1;
		} else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Send down the advice IP ioctl to tell IP to mark an IRE temporary.
 */
static void
tcp_ip_ire_mark_advice(tcp_t *tcp)
{
	mblk_t *mp;
	ipic_t *ipic;

	if (tcp->tcp_ipversion == IPV4_VERSION) {
		mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN,
		    &ipic);
	} else {
		mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN,
		    &ipic);
	}
	if (mp == NULL)
		return;
	ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY;
	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
}

/*
 * Return an IP advice ioctl mblk and set ipic to be the pointer
 * to the advice structure.
 */
static mblk_t *
tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic)
{
	struct iocblk *ioc;
	mblk_t *mp, *mp1;

	mp = allocb(sizeof (ipic_t) + addr_len, BPRI_HI);
	if (mp == NULL)
		return (NULL);
	bzero(mp->b_rptr, sizeof (ipic_t) + addr_len);
	*ipic = (ipic_t *)mp->b_rptr;
	(*ipic)->ipic_cmd = IP_IOC_IRE_ADVISE_NO_REPLY;
	(*ipic)->ipic_addr_offset = sizeof (ipic_t);

	bcopy(addr, *ipic + 1, addr_len);

	(*ipic)->ipic_addr_length = addr_len;
	mp->b_wptr = &mp->b_rptr[sizeof (ipic_t) + addr_len];

	mp1 = mkiocb(IP_IOCTL);
	if (mp1 == NULL) {
		freemsg(mp);
		return (NULL);
	}
	mp1->b_cont = mp;
	ioc = (struct iocblk *)mp1->b_rptr;
	ioc->ioc_count = sizeof (ipic_t) + addr_len;

	return (mp1);
}

/*
 * Generate a reset based on an inbound packet, connp is set by caller
 * when RST is in response to an unexpected inbound packet for which
 * there is active tcp state in the system.
 *
 * IPSEC NOTE: Try to send the reply with the same protection as it came
 * in.  We still have the ipsec_mp that the packet was attached to.  Thus
 * the packet will go out at the same level of protection as it came in by
 * converting the IPSEC_IN to IPSEC_OUT.
22518 */ 22519 static void 22520 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, 22521 uint32_t ack, int ctl, uint_t ip_hdr_len, zoneid_t zoneid, 22522 tcp_stack_t *tcps, conn_t *connp) 22523 { 22524 ipha_t *ipha = NULL; 22525 ip6_t *ip6h = NULL; 22526 ushort_t len; 22527 tcph_t *tcph; 22528 int i; 22529 mblk_t *ipsec_mp; 22530 boolean_t mctl_present; 22531 ipic_t *ipic; 22532 ipaddr_t v4addr; 22533 in6_addr_t v6addr; 22534 int addr_len; 22535 void *addr; 22536 queue_t *q = tcps->tcps_g_q; 22537 tcp_t *tcp; 22538 cred_t *cr; 22539 mblk_t *nmp; 22540 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 22541 22542 if (tcps->tcps_g_q == NULL) { 22543 /* 22544 * For non-zero stackids the default queue isn't created 22545 * until the first open, thus there can be a need to send 22546 * a reset before then. But we can't do that, hence we just 22547 * drop the packet. Later during boot, when the default queue 22548 * has been setup, a retransmitted packet from the peer 22549 * will result in a reset. 22550 */ 22551 ASSERT(tcps->tcps_netstack->netstack_stackid != 22552 GLOBAL_NETSTACKID); 22553 freemsg(mp); 22554 return; 22555 } 22556 22557 if (connp != NULL) 22558 tcp = connp->conn_tcp; 22559 else 22560 tcp = Q_TO_TCP(q); 22561 22562 if (!tcp_send_rst_chk(tcps)) { 22563 tcps->tcps_rst_unsent++; 22564 freemsg(mp); 22565 return; 22566 } 22567 22568 if (mp->b_datap->db_type == M_CTL) { 22569 ipsec_mp = mp; 22570 mp = mp->b_cont; 22571 mctl_present = B_TRUE; 22572 } else { 22573 ipsec_mp = mp; 22574 mctl_present = B_FALSE; 22575 } 22576 22577 if (str && q && tcps->tcps_dbg) { 22578 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22579 "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, " 22580 "flags 0x%x", 22581 str, seq, ack, ctl); 22582 } 22583 if (mp->b_datap->db_ref != 1) { 22584 mblk_t *mp1 = copyb(mp); 22585 freemsg(mp); 22586 mp = mp1; 22587 if (!mp) { 22588 if (mctl_present) 22589 freeb(ipsec_mp); 22590 return; 22591 } else { 22592 if (mctl_present) { 22593 ipsec_mp->b_cont = mp; 22594 } else { 22595 ipsec_mp = mp; 22596 } 22597 } 22598 } else if (mp->b_cont) { 22599 freemsg(mp->b_cont); 22600 mp->b_cont = NULL; 22601 } 22602 /* 22603 * We skip reversing source route here. 22604 * (for now we replace all IP options with EOL) 22605 */ 22606 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22607 ipha = (ipha_t *)mp->b_rptr; 22608 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++) 22609 mp->b_rptr[i] = IPOPT_EOL; 22610 /* 22611 * Make sure that src address isn't flagrantly invalid. 22612 * Not all broadcast address checking for the src address 22613 * is possible, since we don't know the netmask of the src 22614 * addr. No check for destination address is done, since 22615 * IP will not pass up a packet with a broadcast dest 22616 * address to TCP. Similar checks are done below for IPv6. 
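		 *
		 * Concretely, the IPv4 check below drops a would-be RST
		 * whose source is 0.0.0.0, 255.255.255.255 or a class D
		 * (multicast) address; the IPv6 branch likewise rejects
		 * unspecified and multicast sources.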
22617 */ 22618 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST || 22619 CLASSD(ipha->ipha_src)) { 22620 freemsg(ipsec_mp); 22621 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 22622 return; 22623 } 22624 } else { 22625 ip6h = (ip6_t *)mp->b_rptr; 22626 22627 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) || 22628 IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) { 22629 freemsg(ipsec_mp); 22630 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards); 22631 return; 22632 } 22633 22634 /* Remove any extension headers assuming partial overlay */ 22635 if (ip_hdr_len > IPV6_HDR_LEN) { 22636 uint8_t *to; 22637 22638 to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN; 22639 ovbcopy(ip6h, to, IPV6_HDR_LEN); 22640 mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN; 22641 ip_hdr_len = IPV6_HDR_LEN; 22642 ip6h = (ip6_t *)mp->b_rptr; 22643 ip6h->ip6_nxt = IPPROTO_TCP; 22644 } 22645 } 22646 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 22647 if (tcph->th_flags[0] & TH_RST) { 22648 freemsg(ipsec_mp); 22649 return; 22650 } 22651 tcph->th_offset_and_rsrvd[0] = (5 << 4); 22652 len = ip_hdr_len + sizeof (tcph_t); 22653 mp->b_wptr = &mp->b_rptr[len]; 22654 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22655 ipha->ipha_length = htons(len); 22656 /* Swap addresses */ 22657 v4addr = ipha->ipha_src; 22658 ipha->ipha_src = ipha->ipha_dst; 22659 ipha->ipha_dst = v4addr; 22660 ipha->ipha_ident = 0; 22661 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 22662 addr_len = IP_ADDR_LEN; 22663 addr = &v4addr; 22664 } else { 22665 /* No ip6i_t in this case */ 22666 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN); 22667 /* Swap addresses */ 22668 v6addr = ip6h->ip6_src; 22669 ip6h->ip6_src = ip6h->ip6_dst; 22670 ip6h->ip6_dst = v6addr; 22671 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit; 22672 addr_len = IPV6_ADDR_LEN; 22673 addr = &v6addr; 22674 } 22675 tcp_xchg(tcph->th_fport, tcph->th_lport, 2); 22676 U32_TO_BE32(ack, tcph->th_ack); 22677 U32_TO_BE32(seq, tcph->th_seq); 22678 U16_TO_BE16(0, tcph->th_win); 22679 U16_TO_BE16(sizeof (tcph_t), tcph->th_sum); 22680 tcph->th_flags[0] = (uint8_t)ctl; 22681 if (ctl & TH_RST) { 22682 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22683 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22684 } 22685 22686 /* IP trusts us to set up labels when required. 
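	 * On a labeled (Trusted Extensions) system this means running the
	 * outbound mblk through tsol_check_label(), below, so the RST
	 * carries the same label as the packet that provoked it.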
*/ 22687 if (is_system_labeled() && (cr = DB_CRED(mp)) != NULL && 22688 crgetlabel(cr) != NULL) { 22689 int err, adjust; 22690 22691 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) 22692 err = tsol_check_label(cr, &mp, &adjust, 22693 tcp->tcp_connp->conn_mac_exempt, 22694 tcps->tcps_netstack->netstack_ip); 22695 else 22696 err = tsol_check_label_v6(cr, &mp, &adjust, 22697 tcp->tcp_connp->conn_mac_exempt, 22698 tcps->tcps_netstack->netstack_ip); 22699 if (mctl_present) 22700 ipsec_mp->b_cont = mp; 22701 else 22702 ipsec_mp = mp; 22703 if (err != 0) { 22704 freemsg(ipsec_mp); 22705 return; 22706 } 22707 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22708 ipha = (ipha_t *)mp->b_rptr; 22709 adjust += ntohs(ipha->ipha_length); 22710 ipha->ipha_length = htons(adjust); 22711 } else { 22712 ip6h = (ip6_t *)mp->b_rptr; 22713 } 22714 } 22715 22716 if (mctl_present) { 22717 ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr; 22718 22719 ASSERT(ii->ipsec_in_type == IPSEC_IN); 22720 if (!ipsec_in_to_out(ipsec_mp, ipha, ip6h)) { 22721 return; 22722 } 22723 } 22724 if (zoneid == ALL_ZONES) 22725 zoneid = GLOBAL_ZONEID; 22726 22727 /* Add the zoneid so ip_output routes it properly */ 22728 if ((nmp = ip_prepend_zoneid(ipsec_mp, zoneid, ipst)) == NULL) { 22729 freemsg(ipsec_mp); 22730 return; 22731 } 22732 ipsec_mp = nmp; 22733 22734 /* 22735 * NOTE: one might consider tracing a TCP packet here, but 22736 * this function has no active TCP state and no tcp structure 22737 * that has a trace buffer. If we traced here, we would have 22738 * to keep a local trace buffer in tcp_record_trace(). 22739 * 22740 * TSol note: The mblk that contains the incoming packet was 22741 * reused by tcp_xmit_listener_reset, so it already contains 22742 * the right credentials and we don't need to call mblk_setcred. 22743 * Also the conn's cred is not right since it is associated 22744 * with tcps_g_q. 22745 */ 22746 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, ipsec_mp); 22747 22748 /* 22749 * Tell IP to mark the IRE used for this destination temporary. 22750 * This way, we can limit our exposure to DoS attack because IP 22751 * creates an IRE for each destination. If there are too many, 22752 * the time to do any routing lookup will be extremely long. And 22753 * the lookup can be in interrupt context. 22754 * 22755 * Note that in normal circumstances, this marking should not 22756 * affect anything. It would be nice if only 1 message is 22757 * needed to inform IP that the IRE created for this RST should 22758 * not be added to the cache table. But there is currently 22759 * not such communication mechanism between TCP and IP. So 22760 * the best we can do now is to send the advice ioctl to IP 22761 * to mark the IRE temporary. 22762 */ 22763 if ((mp = tcp_ip_advise_mblk(addr, addr_len, &ipic)) != NULL) { 22764 ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY; 22765 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 22766 } 22767 } 22768 22769 /* 22770 * Initiate closedown sequence on an active connection. (May be called as 22771 * writer.) Return value zero for OK return, non-zero for error return. 
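 *
 * For example, an orderly release (T_ORDREL_REQ, e.g. from shutdown())
 * on an ESTABLISHED connection lands here: tcp_fss is pinned to the end
 * of any unsent data so the FIN rides the last data segment, or is sent
 * immediately via tcp_xmit_mp() when the transmit queue is empty.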
 */
static int
tcp_xmit_end(tcp_t *tcp)
{
	ipic_t	*ipic;
	mblk_t	*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_state < TCPS_SYN_RCVD ||
	    tcp->tcp_state > TCPS_CLOSE_WAIT) {
		/*
		 * Invalid state, only states TCPS_SYN_RCVD,
		 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
		 */
		return (-1);
	}

	tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
	tcp->tcp_valid_bits |= TCP_FSS_VALID;
	/*
	 * If there is nothing more unsent, send the FIN now.
	 * Otherwise, it will go out with the last segment.
	 */
	if (tcp->tcp_unsent == 0) {
		mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
		    tcp->tcp_fss, B_FALSE, NULL, B_FALSE);

		if (mp) {
			TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
			tcp_send_data(tcp, tcp->tcp_wq, mp);
		} else {
			/*
			 * Couldn't allocate msg.  Pretend we got it out.
			 * Wait for rexmit timeout.
			 */
			tcp->tcp_snxt = tcp->tcp_fss + 1;
			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
		}

		/*
		 * If needed, update tcp_rexmit_snxt as tcp_snxt is
		 * changed.
		 */
		if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
		}
	} else {
		/*
		 * If tcp->tcp_cork is set, then the data will not get sent,
		 * so we have to check that and unset it first.
		 */
		if (tcp->tcp_cork)
			tcp->tcp_cork = B_FALSE;
		tcp_wput_data(tcp, NULL, B_FALSE);
	}

	/*
	 * If TCP does not get enough samples of RTT or tcp_rtt_updates
	 * is 0, don't update the cache.
	 */
	if (tcps->tcps_rtt_updates == 0 ||
	    tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
		return (0);

	/*
	 * NOTE: should not update if a source route is used, i.e. if
	 * tcp_remote is different from the destination.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		if (tcp->tcp_remote != tcp->tcp_ipha->ipha_dst) {
			return (0);
		}
		mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN,
		    &ipic);
	} else {
		if (!(IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6,
		    &tcp->tcp_ip6h->ip6_dst))) {
			return (0);
		}
		mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN,
		    &ipic);
	}

	/* Record route attributes in the IRE for use by future connections. */
	if (mp == NULL)
		return (0);

	/*
	 * We do not have a good algorithm to update ssthresh at this time.
	 * So don't do any update.
	 */
	ipic->ipic_rtt = tcp->tcp_rtt_sa;
	ipic->ipic_rtt_sd = tcp->tcp_rtt_sd;

	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
	return (0);
}

/*
 * Generate a "no listener here" RST in response to an "unknown" segment.
 * connp is set by caller when RST is in response to an unexpected
 * inbound packet for which there is active tcp state in the system.
 * Note that we are reusing the incoming mp to construct the outgoing RST.
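 *
 * For example, a SYN arriving on a port with no listener is answered
 * below with RST|ACK (acking seg_seq + seg_len, the SYN counting as one
 * octet), while a stray ACK segment is answered with a bare RST whose
 * sequence number is the offending segment's ACK value, per the RFC 793
 * reset generation rules.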
22875 */ 22876 void 22877 tcp_xmit_listeners_reset(mblk_t *mp, uint_t ip_hdr_len, zoneid_t zoneid, 22878 tcp_stack_t *tcps, conn_t *connp) 22879 { 22880 uchar_t *rptr; 22881 uint32_t seg_len; 22882 tcph_t *tcph; 22883 uint32_t seg_seq; 22884 uint32_t seg_ack; 22885 uint_t flags; 22886 mblk_t *ipsec_mp; 22887 ipha_t *ipha; 22888 ip6_t *ip6h; 22889 boolean_t mctl_present = B_FALSE; 22890 boolean_t check = B_TRUE; 22891 boolean_t policy_present; 22892 ipsec_stack_t *ipss = tcps->tcps_netstack->netstack_ipsec; 22893 22894 TCP_STAT(tcps, tcp_no_listener); 22895 22896 ipsec_mp = mp; 22897 22898 if (mp->b_datap->db_type == M_CTL) { 22899 ipsec_in_t *ii; 22900 22901 mctl_present = B_TRUE; 22902 mp = mp->b_cont; 22903 22904 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 22905 ASSERT(ii->ipsec_in_type == IPSEC_IN); 22906 if (ii->ipsec_in_dont_check) { 22907 check = B_FALSE; 22908 if (!ii->ipsec_in_secure) { 22909 freeb(ipsec_mp); 22910 mctl_present = B_FALSE; 22911 ipsec_mp = mp; 22912 } 22913 } 22914 } 22915 22916 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22917 policy_present = ipss->ipsec_inbound_v4_policy_present; 22918 ipha = (ipha_t *)mp->b_rptr; 22919 ip6h = NULL; 22920 } else { 22921 policy_present = ipss->ipsec_inbound_v6_policy_present; 22922 ipha = NULL; 22923 ip6h = (ip6_t *)mp->b_rptr; 22924 } 22925 22926 if (check && policy_present) { 22927 /* 22928 * The conn_t parameter is NULL because we already know 22929 * nobody's home. 22930 */ 22931 ipsec_mp = ipsec_check_global_policy( 22932 ipsec_mp, (conn_t *)NULL, ipha, ip6h, mctl_present, 22933 tcps->tcps_netstack); 22934 if (ipsec_mp == NULL) 22935 return; 22936 } 22937 if (is_system_labeled() && !tsol_can_reply_error(mp)) { 22938 DTRACE_PROBE2( 22939 tx__ip__log__error__nolistener__tcp, 22940 char *, "Could not reply with RST to mp(1)", 22941 mblk_t *, mp); 22942 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n")); 22943 freemsg(ipsec_mp); 22944 return; 22945 } 22946 22947 rptr = mp->b_rptr; 22948 22949 tcph = (tcph_t *)&rptr[ip_hdr_len]; 22950 seg_seq = BE32_TO_U32(tcph->th_seq); 22951 seg_ack = BE32_TO_U32(tcph->th_ack); 22952 flags = tcph->th_flags[0]; 22953 22954 seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcph) + ip_hdr_len); 22955 if (flags & TH_RST) { 22956 freemsg(ipsec_mp); 22957 } else if (flags & TH_ACK) { 22958 tcp_xmit_early_reset("no tcp, reset", 22959 ipsec_mp, seg_ack, 0, TH_RST, ip_hdr_len, zoneid, tcps, 22960 connp); 22961 } else { 22962 if (flags & TH_SYN) { 22963 seg_len++; 22964 } else { 22965 /* 22966 * Here we violate the RFC. Note that a normal 22967 * TCP will never send a segment without the ACK 22968 * flag, except for RST or SYN segment. This 22969 * segment is neither. Just drop it on the 22970 * floor. 22971 */ 22972 freemsg(ipsec_mp); 22973 tcps->tcps_rst_unsent++; 22974 return; 22975 } 22976 22977 tcp_xmit_early_reset("no tcp, reset/ack", 22978 ipsec_mp, 0, seg_seq + seg_len, 22979 TH_RST | TH_ACK, ip_hdr_len, zoneid, tcps, connp); 22980 } 22981 } 22982 22983 /* 22984 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with 22985 * ip and tcp header ready to pass down to IP. If the mp passed in is 22986 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that 22987 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary 22988 * otherwise it will dup partial mblks.) 22989 * Otherwise, an appropriate ACK packet will be generated. This 22990 * routine is not usually called to send new data for the first time. 
It 22991 * is mostly called out of the timer for retransmits, and to generate ACKs. 22992 * 22993 * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will 22994 * be adjusted by *offset. And after dupb(), the offset and the ending mblk 22995 * of the original mblk chain will be returned in *offset and *end_mp. 22996 */ 22997 mblk_t * 22998 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset, 22999 mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len, 23000 boolean_t rexmit) 23001 { 23002 int data_length; 23003 int32_t off = 0; 23004 uint_t flags; 23005 mblk_t *mp1; 23006 mblk_t *mp2; 23007 uchar_t *rptr; 23008 tcph_t *tcph; 23009 int32_t num_sack_blk = 0; 23010 int32_t sack_opt_len = 0; 23011 tcp_stack_t *tcps = tcp->tcp_tcps; 23012 23013 /* Allocate for our maximum TCP header + link-level */ 23014 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 23015 tcps->tcps_wroff_xtra, BPRI_MED); 23016 if (!mp1) 23017 return (NULL); 23018 data_length = 0; 23019 23020 /* 23021 * Note that tcp_mss has been adjusted to take into account the 23022 * timestamp option if applicable. Because SACK options do not 23023 * appear in every TCP segments and they are of variable lengths, 23024 * they cannot be included in tcp_mss. Thus we need to calculate 23025 * the actual segment length when we need to send a segment which 23026 * includes SACK options. 23027 */ 23028 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 23029 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 23030 tcp->tcp_num_sack_blk); 23031 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 23032 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 23033 if (max_to_send + sack_opt_len > tcp->tcp_mss) 23034 max_to_send -= sack_opt_len; 23035 } 23036 23037 if (offset != NULL) { 23038 off = *offset; 23039 /* We use offset as an indicator that end_mp is not NULL. */ 23040 *end_mp = NULL; 23041 } 23042 for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) { 23043 /* This could be faster with cooperation from downstream */ 23044 if (mp2 != mp1 && !sendall && 23045 data_length + (int)(mp->b_wptr - mp->b_rptr) > 23046 max_to_send) 23047 /* 23048 * Don't send the next mblk since the whole mblk 23049 * does not fit. 23050 */ 23051 break; 23052 mp2->b_cont = dupb(mp); 23053 mp2 = mp2->b_cont; 23054 if (!mp2) { 23055 freemsg(mp1); 23056 return (NULL); 23057 } 23058 mp2->b_rptr += off; 23059 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 23060 (uintptr_t)INT_MAX); 23061 23062 data_length += (int)(mp2->b_wptr - mp2->b_rptr); 23063 if (data_length > max_to_send) { 23064 mp2->b_wptr -= data_length - max_to_send; 23065 data_length = max_to_send; 23066 off = mp2->b_wptr - mp->b_rptr; 23067 break; 23068 } else { 23069 off = 0; 23070 } 23071 } 23072 if (offset != NULL) { 23073 *offset = off; 23074 *end_mp = mp; 23075 } 23076 if (seg_len != NULL) { 23077 *seg_len = data_length; 23078 } 23079 23080 /* Update the latest receive window size in TCP header. */ 23081 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 23082 tcp->tcp_tcph->th_win); 23083 23084 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 23085 mp1->b_rptr = rptr; 23086 mp1->b_wptr = rptr + tcp->tcp_hdr_len + sack_opt_len; 23087 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 23088 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 23089 U32_TO_ABE32(seq, tcph->th_seq); 23090 23091 /* 23092 * Use tcp_unsent to determine if the PUSH bit should be used assumes 23093 * that this function was called from tcp_wput_data. 
Thus, when called
	 * to retransmit data the setting of the PUSH bit may appear
	 * somewhat random in that it might get set when it should not.
	 * This should not pose any performance issues.
	 */
	if (data_length != 0 && (tcp->tcp_unsent == 0 ||
	    tcp->tcp_unsent == data_length)) {
		flags = TH_ACK | TH_PUSH;
	} else {
		flags = TH_ACK;
	}

	if (tcp->tcp_ecn_ok) {
		if (tcp->tcp_ecn_echo_on)
			flags |= TH_ECE;

		/*
		 * Only set ECT bit and ECN_CWR if a segment contains new data.
		 * There is no TCP flow control for non-data segments, and
		 * only data segments are transmitted reliably.
		 */
		if (data_length > 0 && !rexmit) {
			SET_ECT(tcp, rptr);
			if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
				flags |= TH_CWR;
				tcp->tcp_ecn_cwr_sent = B_TRUE;
			}
		}
	}

	if (tcp->tcp_valid_bits) {
		uint32_t u1;

		if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
		    seq == tcp->tcp_iss) {
			uchar_t	*wptr;

			/*
			 * If TCP_ISS_VALID and the seq number is tcp_iss,
			 * TCP can only be in SYN-SENT, SYN-RCVD or
			 * FIN-WAIT-1 state.  It can be FIN-WAIT-1 if
			 * our SYN is not ack'ed but the app closes this
			 * TCP connection.
			 */
			ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD ||
			    tcp->tcp_state == TCPS_FIN_WAIT_1);

			/*
			 * Tack on the MSS option.  It is always needed
			 * for both active and passive open.
			 *
			 * MSS option value should be interface MTU - MIN
			 * TCP/IP header according to RFC 793 as it means
			 * the maximum segment size TCP can receive.  But
			 * to get around some broken middle boxes/end hosts
			 * out there, we allow the option value to be the
			 * same as the MSS option size on the peer side.
			 * In this way, the other side will not send
			 * anything larger than they can receive.
			 *
			 * Note that for SYN_SENT state, the ndd param
			 * tcp_use_smss_as_mss_opt has no effect as we
			 * don't know the peer's MSS option value. So
			 * the only case we need to take care of is in
			 * SYN_RCVD state, which is done later.
			 */
			wptr = mp1->b_wptr;
			wptr[0] = TCPOPT_MAXSEG;
			wptr[1] = TCPOPT_MAXSEG_LEN;
			wptr += 2;
			u1 = tcp->tcp_if_mtu -
			    (tcp->tcp_ipversion == IPV4_VERSION ?
			    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) -
			    TCP_MIN_HEADER_LENGTH;
			U16_TO_BE16(u1, wptr);
			mp1->b_wptr = wptr + 2;
			/* Update the offset to cover the additional word */
			tcph->th_offset_and_rsrvd[0] += (1 << 4);

			/*
			 * Note that the following way of filling in
			 * TCP options is not optimal.  Some NOPs can
			 * be saved.  But there is no need at this time
			 * to optimize it.  When it is needed, we will
			 * do it.
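			 *
			 * (Editorial example: the layouts used below are
			 * NOP,NOP,TSTAMP,len for the timestamp option and
			 * NOP,WSCALE,len,shift for window scale; packing
			 * options more tightly could save a NOP or two at
			 * the cost of fiddlier offset bookkeeping.)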
23179 */ 23180 switch (tcp->tcp_state) { 23181 case TCPS_SYN_SENT: 23182 flags = TH_SYN; 23183 23184 if (tcp->tcp_snd_ts_ok) { 23185 uint32_t llbolt = (uint32_t)lbolt; 23186 23187 wptr = mp1->b_wptr; 23188 wptr[0] = TCPOPT_NOP; 23189 wptr[1] = TCPOPT_NOP; 23190 wptr[2] = TCPOPT_TSTAMP; 23191 wptr[3] = TCPOPT_TSTAMP_LEN; 23192 wptr += 4; 23193 U32_TO_BE32(llbolt, wptr); 23194 wptr += 4; 23195 ASSERT(tcp->tcp_ts_recent == 0); 23196 U32_TO_BE32(0L, wptr); 23197 mp1->b_wptr += TCPOPT_REAL_TS_LEN; 23198 tcph->th_offset_and_rsrvd[0] += 23199 (3 << 4); 23200 } 23201 23202 /* 23203 * Set up all the bits to tell other side 23204 * we are ECN capable. 23205 */ 23206 if (tcp->tcp_ecn_ok) { 23207 flags |= (TH_ECE | TH_CWR); 23208 } 23209 break; 23210 case TCPS_SYN_RCVD: 23211 flags |= TH_SYN; 23212 23213 /* 23214 * Reset the MSS option value to be SMSS 23215 * We should probably add back the bytes 23216 * for timestamp option and IPsec. We 23217 * don't do that as this is a workaround 23218 * for broken middle boxes/end hosts, it 23219 * is better for us to be more cautious. 23220 * They may not take these things into 23221 * account in their SMSS calculation. Thus 23222 * the peer's calculated SMSS may be smaller 23223 * than what it can be. This should be OK. 23224 */ 23225 if (tcps->tcps_use_smss_as_mss_opt) { 23226 u1 = tcp->tcp_mss; 23227 U16_TO_BE16(u1, wptr); 23228 } 23229 23230 /* 23231 * If the other side is ECN capable, reply 23232 * that we are also ECN capable. 23233 */ 23234 if (tcp->tcp_ecn_ok) 23235 flags |= TH_ECE; 23236 break; 23237 default: 23238 /* 23239 * The above ASSERT() makes sure that this 23240 * must be FIN-WAIT-1 state. Our SYN has 23241 * not been ack'ed so retransmit it. 23242 */ 23243 flags |= TH_SYN; 23244 break; 23245 } 23246 23247 if (tcp->tcp_snd_ws_ok) { 23248 wptr = mp1->b_wptr; 23249 wptr[0] = TCPOPT_NOP; 23250 wptr[1] = TCPOPT_WSCALE; 23251 wptr[2] = TCPOPT_WS_LEN; 23252 wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 23253 mp1->b_wptr += TCPOPT_REAL_WS_LEN; 23254 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23255 } 23256 23257 if (tcp->tcp_snd_sack_ok) { 23258 wptr = mp1->b_wptr; 23259 wptr[0] = TCPOPT_NOP; 23260 wptr[1] = TCPOPT_NOP; 23261 wptr[2] = TCPOPT_SACK_PERMITTED; 23262 wptr[3] = TCPOPT_SACK_OK_LEN; 23263 mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 23264 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23265 } 23266 23267 /* allocb() of adequate mblk assures space */ 23268 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 23269 (uintptr_t)INT_MAX); 23270 u1 = (int)(mp1->b_wptr - mp1->b_rptr); 23271 /* 23272 * Get IP set to checksum on our behalf 23273 * Include the adjustment for a source route if any. 23274 */ 23275 u1 += tcp->tcp_sum; 23276 u1 = (u1 >> 16) + (u1 & 0xFFFF); 23277 U16_TO_BE16(u1, tcph->th_sum); 23278 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23279 } 23280 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 23281 (seq + data_length) == tcp->tcp_fss) { 23282 if (!tcp->tcp_fin_acked) { 23283 flags |= TH_FIN; 23284 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23285 } 23286 if (!tcp->tcp_fin_sent) { 23287 tcp->tcp_fin_sent = B_TRUE; 23288 switch (tcp->tcp_state) { 23289 case TCPS_SYN_RCVD: 23290 case TCPS_ESTABLISHED: 23291 tcp->tcp_state = TCPS_FIN_WAIT_1; 23292 break; 23293 case TCPS_CLOSE_WAIT: 23294 tcp->tcp_state = TCPS_LAST_ACK; 23295 break; 23296 } 23297 if (tcp->tcp_suna == tcp->tcp_snxt) 23298 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 23299 tcp->tcp_snxt = tcp->tcp_fss + 1; 23300 } 23301 } 23302 /* 23303 * Note the trick here. u1 is unsigned. 
When tcp_urg 23304 * is smaller than seq, u1 will become a very huge value. 23305 * So the comparison will fail. Also note that tcp_urp 23306 * should be positive, see RFC 793 page 17. 23307 */ 23308 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION; 23309 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 && 23310 u1 < (uint32_t)(64 * 1024)) { 23311 flags |= TH_URG; 23312 BUMP_MIB(&tcps->tcps_mib, tcpOutUrg); 23313 U32_TO_ABE16(u1, tcph->th_urp); 23314 } 23315 } 23316 tcph->th_flags[0] = (uchar_t)flags; 23317 tcp->tcp_rack = tcp->tcp_rnxt; 23318 tcp->tcp_rack_cnt = 0; 23319 23320 if (tcp->tcp_snd_ts_ok) { 23321 if (tcp->tcp_state != TCPS_SYN_SENT) { 23322 uint32_t llbolt = (uint32_t)lbolt; 23323 23324 U32_TO_BE32(llbolt, 23325 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23326 U32_TO_BE32(tcp->tcp_ts_recent, 23327 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23328 } 23329 } 23330 23331 if (num_sack_blk > 0) { 23332 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23333 sack_blk_t *tmp; 23334 int32_t i; 23335 23336 wptr[0] = TCPOPT_NOP; 23337 wptr[1] = TCPOPT_NOP; 23338 wptr[2] = TCPOPT_SACK; 23339 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23340 sizeof (sack_blk_t); 23341 wptr += TCPOPT_REAL_SACK_LEN; 23342 23343 tmp = tcp->tcp_sack_list; 23344 for (i = 0; i < num_sack_blk; i++) { 23345 U32_TO_BE32(tmp[i].begin, wptr); 23346 wptr += sizeof (tcp_seq); 23347 U32_TO_BE32(tmp[i].end, wptr); 23348 wptr += sizeof (tcp_seq); 23349 } 23350 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) << 4); 23351 } 23352 ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX); 23353 data_length += (int)(mp1->b_wptr - rptr); 23354 if (tcp->tcp_ipversion == IPV4_VERSION) { 23355 ((ipha_t *)rptr)->ipha_length = htons(data_length); 23356 } else { 23357 ip6_t *ip6 = (ip6_t *)(rptr + 23358 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23359 sizeof (ip6i_t) : 0)); 23360 23361 ip6->ip6_plen = htons(data_length - 23362 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23363 } 23364 23365 /* 23366 * Prime pump for IP 23367 * Include the adjustment for a source route if any. 23368 */ 23369 data_length -= tcp->tcp_ip_hdr_len; 23370 data_length += tcp->tcp_sum; 23371 data_length = (data_length >> 16) + (data_length & 0xFFFF); 23372 U16_TO_ABE16(data_length, tcph->th_sum); 23373 if (tcp->tcp_ip_forward_progress) { 23374 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 23375 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 23376 tcp->tcp_ip_forward_progress = B_FALSE; 23377 } 23378 return (mp1); 23379 } 23380 23381 /* This function handles the push timeout. */ 23382 void 23383 tcp_push_timer(void *arg) 23384 { 23385 conn_t *connp = (conn_t *)arg; 23386 tcp_t *tcp = connp->conn_tcp; 23387 tcp_stack_t *tcps = tcp->tcp_tcps; 23388 23389 TCP_DBGSTAT(tcps, tcp_push_timer_cnt); 23390 23391 ASSERT(tcp->tcp_listener == NULL); 23392 23393 /* 23394 * We need to plug synchronous streams during our drain to prevent 23395 * a race with tcp_fuse_rrw() or tcp_fusion_rinfop(). 23396 */ 23397 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 23398 tcp->tcp_push_tid = 0; 23399 if ((tcp->tcp_rcv_list != NULL) && 23400 (tcp_rcv_drain(tcp->tcp_rq, tcp) == TH_ACK_NEEDED)) 23401 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 23402 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 23403 } 23404 23405 /* 23406 * This function handles delayed ACK timeout. 
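 *
 * Delayed ACKs follow the RFC 1122 advice of ACKing at least every
 * second full-sized segment; when this timer fires with more than an
 * MSS of unacknowledged data, the code below also backs tcp_rack_abs_max
 * off toward 2 so that deferred ACKs do not keep degenerating into
 * timer-driven ones.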
23407 */ 23408 static void 23409 tcp_ack_timer(void *arg) 23410 { 23411 conn_t *connp = (conn_t *)arg; 23412 tcp_t *tcp = connp->conn_tcp; 23413 mblk_t *mp; 23414 tcp_stack_t *tcps = tcp->tcp_tcps; 23415 23416 TCP_DBGSTAT(tcps, tcp_ack_timer_cnt); 23417 23418 tcp->tcp_ack_tid = 0; 23419 23420 if (tcp->tcp_fused) 23421 return; 23422 23423 /* 23424 * Do not send ACK if there is no outstanding unack'ed data. 23425 */ 23426 if (tcp->tcp_rnxt == tcp->tcp_rack) { 23427 return; 23428 } 23429 23430 if ((tcp->tcp_rnxt - tcp->tcp_rack) > tcp->tcp_mss) { 23431 /* 23432 * Make sure we don't allow deferred ACKs to result in 23433 * timer-based ACKing. If we have held off an ACK 23434 * when there was more than an mss here, and the timer 23435 * goes off, we have to worry about the possibility 23436 * that the sender isn't doing slow-start, or is out 23437 * of step with us for some other reason. We fall 23438 * permanently back in the direction of 23439 * ACK-every-other-packet as suggested in RFC 1122. 23440 */ 23441 if (tcp->tcp_rack_abs_max > 2) 23442 tcp->tcp_rack_abs_max--; 23443 tcp->tcp_rack_cur_max = 2; 23444 } 23445 mp = tcp_ack_mp(tcp); 23446 23447 if (mp != NULL) { 23448 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 23449 BUMP_LOCAL(tcp->tcp_obsegs); 23450 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 23451 BUMP_MIB(&tcps->tcps_mib, tcpOutAckDelayed); 23452 tcp_send_data(tcp, tcp->tcp_wq, mp); 23453 } 23454 } 23455 23456 23457 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 23458 static mblk_t * 23459 tcp_ack_mp(tcp_t *tcp) 23460 { 23461 uint32_t seq_no; 23462 tcp_stack_t *tcps = tcp->tcp_tcps; 23463 23464 /* 23465 * There are a few cases to be considered while setting the sequence no. 23466 * Essentially, we can come here while processing an unacceptable pkt 23467 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 23468 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 23469 * If we are here for a zero window probe, stick with suna. In all 23470 * other cases, we check if suna + swnd encompasses snxt and set 23471 * the sequence number to snxt, if so. If snxt falls outside the 23472 * window (the receiver probably shrunk its window), we will go with 23473 * suna + swnd, otherwise the sequence no will be unacceptable to the 23474 * receiver. 23475 */ 23476 if (tcp->tcp_zero_win_probe) { 23477 seq_no = tcp->tcp_suna; 23478 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 23479 ASSERT(tcp->tcp_swnd == 0); 23480 seq_no = tcp->tcp_snxt; 23481 } else { 23482 seq_no = SEQ_GT(tcp->tcp_snxt, 23483 (tcp->tcp_suna + tcp->tcp_swnd)) ? 23484 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 23485 } 23486 23487 if (tcp->tcp_valid_bits) { 23488 /* 23489 * For the complex case where we have to send some 23490 * controls (FIN or SYN), let tcp_xmit_mp do it. 
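		 *
		 * (For example, a zero-window probe ACK goes out with
		 * seq = tcp_suna so the peer accepts it despite its closed
		 * window, and a peer that shrank its window pulls the
		 * sequence back to tcp_suna + tcp_swnd instead of an
		 * unacceptable tcp_snxt.)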
23491 */ 23492 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 23493 NULL, B_FALSE)); 23494 } else { 23495 /* Generate a simple ACK */ 23496 int data_length; 23497 uchar_t *rptr; 23498 tcph_t *tcph; 23499 mblk_t *mp1; 23500 int32_t tcp_hdr_len; 23501 int32_t tcp_tcp_hdr_len; 23502 int32_t num_sack_blk = 0; 23503 int32_t sack_opt_len; 23504 23505 /* 23506 * Allocate space for TCP + IP headers 23507 * and link-level header 23508 */ 23509 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 23510 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 23511 tcp->tcp_num_sack_blk); 23512 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 23513 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 23514 tcp_hdr_len = tcp->tcp_hdr_len + sack_opt_len; 23515 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + sack_opt_len; 23516 } else { 23517 tcp_hdr_len = tcp->tcp_hdr_len; 23518 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 23519 } 23520 mp1 = allocb(tcp_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 23521 if (!mp1) 23522 return (NULL); 23523 23524 /* Update the latest receive window size in TCP header. */ 23525 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 23526 tcp->tcp_tcph->th_win); 23527 /* copy in prototype TCP + IP header */ 23528 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 23529 mp1->b_rptr = rptr; 23530 mp1->b_wptr = rptr + tcp_hdr_len; 23531 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 23532 23533 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 23534 23535 /* Set the TCP sequence number. */ 23536 U32_TO_ABE32(seq_no, tcph->th_seq); 23537 23538 /* Set up the TCP flag field. */ 23539 tcph->th_flags[0] = (uchar_t)TH_ACK; 23540 if (tcp->tcp_ecn_echo_on) 23541 tcph->th_flags[0] |= TH_ECE; 23542 23543 tcp->tcp_rack = tcp->tcp_rnxt; 23544 tcp->tcp_rack_cnt = 0; 23545 23546 /* fill in timestamp option if in use */ 23547 if (tcp->tcp_snd_ts_ok) { 23548 uint32_t llbolt = (uint32_t)lbolt; 23549 23550 U32_TO_BE32(llbolt, 23551 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23552 U32_TO_BE32(tcp->tcp_ts_recent, 23553 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23554 } 23555 23556 /* Fill in SACK options */ 23557 if (num_sack_blk > 0) { 23558 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23559 sack_blk_t *tmp; 23560 int32_t i; 23561 23562 wptr[0] = TCPOPT_NOP; 23563 wptr[1] = TCPOPT_NOP; 23564 wptr[2] = TCPOPT_SACK; 23565 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23566 sizeof (sack_blk_t); 23567 wptr += TCPOPT_REAL_SACK_LEN; 23568 23569 tmp = tcp->tcp_sack_list; 23570 for (i = 0; i < num_sack_blk; i++) { 23571 U32_TO_BE32(tmp[i].begin, wptr); 23572 wptr += sizeof (tcp_seq); 23573 U32_TO_BE32(tmp[i].end, wptr); 23574 wptr += sizeof (tcp_seq); 23575 } 23576 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 23577 << 4); 23578 } 23579 23580 if (tcp->tcp_ipversion == IPV4_VERSION) { 23581 ((ipha_t *)rptr)->ipha_length = htons(tcp_hdr_len); 23582 } else { 23583 /* Check for ip6i_t header in sticky hdrs */ 23584 ip6_t *ip6 = (ip6_t *)(rptr + 23585 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23586 sizeof (ip6i_t) : 0)); 23587 23588 ip6->ip6_plen = htons(tcp_hdr_len - 23589 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23590 } 23591 23592 /* 23593 * Prime pump for checksum calculation in IP. Include the 23594 * adjustment for a source route if any. 
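		 *
		 * (Editorial note: tcp_sum carries the checksum adjustment
		 * for the pseudo-header and any source route, so the TCP
		 * header length plus tcp_sum, with the carries folded back
		 * in, is the partial sum that IP or the NIC completes for
		 * th_sum.)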
		 */
		data_length = tcp_tcp_hdr_len + tcp->tcp_sum;
		data_length = (data_length >> 16) + (data_length & 0xFFFF);
		U16_TO_ABE16(data_length, tcph->th_sum);

		if (tcp->tcp_ip_forward_progress) {
			ASSERT(tcp->tcp_ipversion == IPV6_VERSION);
			*(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG;
			tcp->tcp_ip_forward_progress = B_FALSE;
		}
		return (mp1);
	}
}

/*
 * To create a temporary tcp structure for insertion into the bind hash list.
 * The parameter is assumed to be in network byte order, ready for use.
 */
/* ARGSUSED */
static tcp_t *
tcp_alloc_temp_tcp(in_port_t port, tcp_stack_t *tcps)
{
	conn_t	*connp;
	tcp_t	*tcp;

	connp = ipcl_conn_create(IPCL_TCPCONN, KM_SLEEP, tcps->tcps_netstack);
	if (connp == NULL)
		return (NULL);

	tcp = connp->conn_tcp;
	tcp->tcp_tcps = tcps;
	TCPS_REFHOLD(tcps);

	/*
	 * Only initialize the necessary info in those structures.  Note
	 * that since INADDR_ANY is all 0, we do not need to set
	 * tcp_bound_source to INADDR_ANY here.
	 */
	tcp->tcp_state = TCPS_BOUND;
	tcp->tcp_lport = port;
	tcp->tcp_exclbind = 1;
	tcp->tcp_reserved_port = 1;

	/* Just for place holding... */
	tcp->tcp_ipversion = IPV4_VERSION;

	return (tcp);
}

/*
 * To remove a port range specified by lo_port and hi_port from the
 * reserved port ranges.  This is one of the three public functions of
 * the reserved port interface.  Note that a port range has to be removed
 * as a whole.  Ports in a range cannot be removed individually.
 *
 * Params:
 *	in_port_t lo_port: the beginning port of the reserved port range to
 *		be deleted.
 *	in_port_t hi_port: the ending port of the reserved port range to
 *		be deleted.
 *
 * Return:
 *	B_TRUE if the deletion is successful, B_FALSE otherwise.
 *
 * Assumes that nca is only for zoneid=0
 */
boolean_t
tcp_reserved_port_del(in_port_t lo_port, in_port_t hi_port)
{
	int i, j;
	int size;
	tcp_t **temp_tcp_array;
	tcp_t *tcp;
	tcp_stack_t	*tcps;

	tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp;
	ASSERT(tcps != NULL);

	rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER);

	/* First make sure that the port range is indeed reserved. */
	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
		if (tcps->tcps_reserved_port[i].lo_port == lo_port) {
			hi_port = tcps->tcps_reserved_port[i].hi_port;
			temp_tcp_array =
			    tcps->tcps_reserved_port[i].temp_tcp_array;
			break;
		}
	}
	if (i == tcps->tcps_reserved_port_array_size) {
		rw_exit(&tcps->tcps_reserved_port_lock);
		netstack_rele(tcps->tcps_netstack);
		return (B_FALSE);
	}

	/*
	 * Remove the range from the array.  This simple loop is possible
	 * because port ranges are inserted in ascending order.
	 */
	for (j = i; j < tcps->tcps_reserved_port_array_size - 1; j++) {
		tcps->tcps_reserved_port[j].lo_port =
		    tcps->tcps_reserved_port[j+1].lo_port;
		tcps->tcps_reserved_port[j].hi_port =
		    tcps->tcps_reserved_port[j+1].hi_port;
		tcps->tcps_reserved_port[j].temp_tcp_array =
		    tcps->tcps_reserved_port[j+1].temp_tcp_array;
	}

	/* Remove all the temporary tcp structures.
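	 * Each one was created by tcp_alloc_temp_tcp() and sits in the
	 * bind hash holding a conn_t reference, so teardown is simply
	 * tcp_bind_hash_remove() plus CONN_DEC_REF() per entry.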
*/
23704 	size = hi_port - lo_port + 1;
23705 	while (size > 0) {
23706 		tcp = temp_tcp_array[size - 1];
23707 		ASSERT(tcp != NULL);
23708 		tcp_bind_hash_remove(tcp);
23709 		CONN_DEC_REF(tcp->tcp_connp);
23710 		size--;
23711 	}
23712 	kmem_free(temp_tcp_array, (hi_port - lo_port + 1) * sizeof (tcp_t *));
23713 	tcps->tcps_reserved_port_array_size--;
23714 	rw_exit(&tcps->tcps_reserved_port_lock);
23715 	netstack_rele(tcps->tcps_netstack);
23716 	return (B_TRUE);
23717 }
23718 
23719 /*
23720  * Macro to remove temporary tcp structures from the bind hash list. The
23721  * first parameter is the array of tcps to be removed; they are conn_t-based,
23722  * so each is released with CONN_DEC_REF() as above, never freed directly. The second parameter is the number of tcps in the array.
23723  */
23724 #define	TCP_TMP_TCP_REMOVE(tcp_array, num, tcps) \
23725 { \
23726 	while ((num) > 0) { \
23727 		tcp_t *tcp = (tcp_array)[(num) - 1]; \
23728 		tf_t *tbf; \
23729 		tcp_t *tcpnext; \
23730 		tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)]; \
23731 		mutex_enter(&tbf->tf_lock); \
23732 		tcpnext = tcp->tcp_bind_hash; \
23733 		if (tcpnext) { \
23734 			tcpnext->tcp_ptpbhn = \
23735 			    tcp->tcp_ptpbhn; \
23736 		} \
23737 		*tcp->tcp_ptpbhn = tcpnext; \
23738 		mutex_exit(&tbf->tf_lock); \
23739 		CONN_DEC_REF(tcp->tcp_connp); \
23740 		(tcp_array)[(num) - 1] = NULL; \
23741 		(num)--; \
23742 	} \
23743 }
23744 
23745 /*
23746  * The public interface for other modules to call to reserve a port range
23747  * in TCP. The caller passes in how large a port range it wants. TCP
23748  * will try to find a range and return it via lo_port and hi_port. This is
23749  * used by NCA's nca_conn_init.
23750  * NCA can only be used in the global zone so this only affects the global
23751  * zone's ports.
23752  *
23753  * Params:
23754  *	int size: the size of the port range to be reserved.
23755  *	in_port_t *lo_port (referenced): returns the beginning port of the
23756  *		reserved port range added.
23757  *	in_port_t *hi_port (referenced): returns the ending port of the
23758  *		reserved port range added.
23759  *
23760  * Return:
23761  *	B_TRUE if the port reservation is successful, B_FALSE otherwise.
23762  *
23763  * Assumes that nca is only for zoneid=0
23764  */
23765 boolean_t
23766 tcp_reserved_port_add(int size, in_port_t *lo_port, in_port_t *hi_port)
23767 {
23768 	tcp_t *tcp;
23769 	tcp_t *tmp_tcp;
23770 	tcp_t **temp_tcp_array;
23771 	tf_t *tbf;
23772 	in_port_t net_port;
23773 	in_port_t port;
23774 	int32_t cur_size;
23775 	int i, j;
23776 	boolean_t used;
23777 	tcp_rport_t tmp_ports[TCP_RESERVED_PORTS_ARRAY_MAX_SIZE];
23778 	zoneid_t zoneid = GLOBAL_ZONEID;
23779 	tcp_stack_t *tcps;
23780 
23781 	/* Sanity check. */
23782 	if (size <= 0 || size > TCP_RESERVED_PORTS_RANGE_MAX) {
23783 		return (B_FALSE);
23784 	}
23785 
23786 	tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp;
23787 	ASSERT(tcps != NULL);
23788 
23789 	rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER);
23790 	if (tcps->tcps_reserved_port_array_size ==
23791 	    TCP_RESERVED_PORTS_ARRAY_MAX_SIZE) {
23792 		rw_exit(&tcps->tcps_reserved_port_lock);
23793 		netstack_rele(tcps->tcps_netstack);
23794 		return (B_FALSE);
23795 	}
23796 
23797 	/*
23798 	 * Find the starting port to try. Since the port ranges are ordered
23799 	 * in the reserved port array, we can do a simple search here.
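 *
 * (Editorial sketch: because the array is kept sorted ascending, the
 * loop below can slide *lo_port past each existing range until a gap
 * of at least 'size' ports appears.  With two reserved ranges
 * [L1-H1] and [L2-H2], L1 < L2, the walk is:
 *
 *	pass 0: *lo_port = TCP_SMALLEST_RESERVED_PORT;
 *	        if (L1 - *lo_port >= size) use [*lo_port, L1 - 1]
 *	pass 1: *lo_port = H1 + 1;
 *	        if (L2 - *lo_port >= size) use [*lo_port, L2 - 1]
 *
 * If no gap between ranges fits, the tail up to
 * TCP_LARGEST_RESERVED_PORT is checked after the loop.)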
23800 */ 23801 *lo_port = TCP_SMALLEST_RESERVED_PORT; 23802 *hi_port = TCP_LARGEST_RESERVED_PORT; 23803 for (i = 0; i < tcps->tcps_reserved_port_array_size; 23804 *lo_port = tcps->tcps_reserved_port[i].hi_port + 1, i++) { 23805 if (tcps->tcps_reserved_port[i].lo_port - *lo_port >= size) { 23806 *hi_port = tcps->tcps_reserved_port[i].lo_port - 1; 23807 break; 23808 } 23809 } 23810 /* No available port range. */ 23811 if (i == tcps->tcps_reserved_port_array_size && 23812 *hi_port - *lo_port < size) { 23813 rw_exit(&tcps->tcps_reserved_port_lock); 23814 netstack_rele(tcps->tcps_netstack); 23815 return (B_FALSE); 23816 } 23817 23818 temp_tcp_array = kmem_zalloc(size * sizeof (tcp_t *), KM_NOSLEEP); 23819 if (temp_tcp_array == NULL) { 23820 rw_exit(&tcps->tcps_reserved_port_lock); 23821 netstack_rele(tcps->tcps_netstack); 23822 return (B_FALSE); 23823 } 23824 23825 /* Go thru the port range to see if some ports are already bound. */ 23826 for (port = *lo_port, cur_size = 0; 23827 cur_size < size && port <= *hi_port; 23828 cur_size++, port++) { 23829 used = B_FALSE; 23830 net_port = htons(port); 23831 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(net_port)]; 23832 mutex_enter(&tbf->tf_lock); 23833 for (tcp = tbf->tf_tcp; tcp != NULL; 23834 tcp = tcp->tcp_bind_hash) { 23835 if (IPCL_ZONE_MATCH(tcp->tcp_connp, zoneid) && 23836 net_port == tcp->tcp_lport) { 23837 /* 23838 * A port is already bound. Search again 23839 * starting from port + 1. Release all 23840 * temporary tcps. 23841 */ 23842 mutex_exit(&tbf->tf_lock); 23843 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23844 tcps); 23845 *lo_port = port + 1; 23846 cur_size = -1; 23847 used = B_TRUE; 23848 break; 23849 } 23850 } 23851 if (!used) { 23852 if ((tmp_tcp = tcp_alloc_temp_tcp(net_port, tcps)) == 23853 NULL) { 23854 /* 23855 * Allocation failure. Just fail the request. 23856 * Need to remove all those temporary tcp 23857 * structures. 23858 */ 23859 mutex_exit(&tbf->tf_lock); 23860 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23861 tcps); 23862 rw_exit(&tcps->tcps_reserved_port_lock); 23863 kmem_free(temp_tcp_array, 23864 (hi_port - lo_port + 1) * 23865 sizeof (tcp_t *)); 23866 netstack_rele(tcps->tcps_netstack); 23867 return (B_FALSE); 23868 } 23869 temp_tcp_array[cur_size] = tmp_tcp; 23870 tcp_bind_hash_insert(tbf, tmp_tcp, B_TRUE); 23871 mutex_exit(&tbf->tf_lock); 23872 } 23873 } 23874 23875 /* 23876 * The current range is not large enough. We can actually do another 23877 * search if this search is done between 2 reserved port ranges. But 23878 * for first release, we just stop here and return saying that no port 23879 * range is available. 23880 */ 23881 if (cur_size < size) { 23882 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, tcps); 23883 rw_exit(&tcps->tcps_reserved_port_lock); 23884 kmem_free(temp_tcp_array, size * sizeof (tcp_t *)); 23885 netstack_rele(tcps->tcps_netstack); 23886 return (B_FALSE); 23887 } 23888 *hi_port = port - 1; 23889 23890 /* 23891 * Insert range into array in ascending order. Since this function 23892 * must not be called often, we choose to use the simplest method. 23893 * The above array should not consume excessive stack space as 23894 * the size must be very small. If in future releases, we find 23895 * that we should provide more reserved port ranges, this function 23896 * has to be modified to be more efficient. 
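 *
 * (Editorial sketch of the splice below: existing entries are copied
 * into tmp_ports[] in order, the new range is inserted at the first
 * slot where it sorts lower, or appended at the end, and tmp_ports is
 * then bcopy'd back over the array.  For example, inserting
 * [5011-5015] into { [5000-5010], [5020-5100] } yields
 * { [5000-5010], [5011-5015], [5020-5100] }.)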
23897 */
23898 	if (tcps->tcps_reserved_port_array_size == 0) {
23899 		tcps->tcps_reserved_port[0].lo_port = *lo_port;
23900 		tcps->tcps_reserved_port[0].hi_port = *hi_port;
23901 		tcps->tcps_reserved_port[0].temp_tcp_array = temp_tcp_array;
23902 	} else {
23903 		for (i = 0, j = 0; i < tcps->tcps_reserved_port_array_size;
23904 		    i++, j++) {
23905 			if (*lo_port < tcps->tcps_reserved_port[i].lo_port &&
23906 			    i == j) {
23907 				tmp_ports[j].lo_port = *lo_port;
23908 				tmp_ports[j].hi_port = *hi_port;
23909 				tmp_ports[j].temp_tcp_array = temp_tcp_array;
23910 				j++;
23911 			}
23912 			tmp_ports[j].lo_port =
23913 			    tcps->tcps_reserved_port[i].lo_port;
23914 			tmp_ports[j].hi_port =
23915 			    tcps->tcps_reserved_port[i].hi_port;
23916 			tmp_ports[j].temp_tcp_array =
23917 			    tcps->tcps_reserved_port[i].temp_tcp_array;
23918 		}
23919 		if (j == i) {
23920 			tmp_ports[j].lo_port = *lo_port;
23921 			tmp_ports[j].hi_port = *hi_port;
23922 			tmp_ports[j].temp_tcp_array = temp_tcp_array;
23923 		}
23924 		bcopy(tmp_ports, tcps->tcps_reserved_port, sizeof (tmp_ports));
23925 	}
23926 	tcps->tcps_reserved_port_array_size++;
23927 	rw_exit(&tcps->tcps_reserved_port_lock);
23928 	netstack_rele(tcps->tcps_netstack);
23929 	return (B_TRUE);
23930 }
23931 
23932 /*
23933  * Check to see if a port is in any reserved port range.
23934  *
23935  * Params:
23936  *	in_port_t port: the port to be verified.
23937  *
23938  * Return:
23939  *	B_TRUE if the port is inside a reserved port range, B_FALSE otherwise.
23940  */
23941 boolean_t
23942 tcp_reserved_port_check(in_port_t port, tcp_stack_t *tcps)
23943 {
23944 	int i;
23945 
23946 	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
23947 	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
23948 		if (port >= tcps->tcps_reserved_port[i].lo_port &&
23949 		    port <= tcps->tcps_reserved_port[i].hi_port) {
23950 			rw_exit(&tcps->tcps_reserved_port_lock);
23951 			return (B_TRUE);
23952 		}
23953 	}
23954 	rw_exit(&tcps->tcps_reserved_port_lock);
23955 	return (B_FALSE);
23956 }
23957 
23958 /*
23959  * To list all reserved port ranges. This is the function to handle
23960  * ndd tcp_reserved_port_list.
23961  */
23962 /* ARGSUSED */
23963 static int
23964 tcp_reserved_port_list(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
23965 {
23966 	int i;
23967 	tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
23968 
23969 	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
23970 	if (tcps->tcps_reserved_port_array_size > 0)
23971 		(void) mi_mpprintf(mp, "The following ports are reserved:");
23972 	else
23973 		(void) mi_mpprintf(mp, "No port is reserved.");
23974 	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
23975 		(void) mi_mpprintf(mp, "%d-%d",
23976 		    tcps->tcps_reserved_port[i].lo_port,
23977 		    tcps->tcps_reserved_port[i].hi_port);
23978 	}
23979 	rw_exit(&tcps->tcps_reserved_port_lock);
23980 	return (0);
23981 }
23982 
23983 /*
23984  * Hash list insertion routine for tcp_t structures.
23985  * Inserts entries with the ones bound to a specific IP address first
23986  * followed by those bound to INADDR_ANY.
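 *
 * (Editorial sketch: with specific binds kept ahead of INADDR_ANY
 * binds, a chain for port 80 looks like
 *
 *	tf_tcp -> [10.0.0.1:80] -> [10.0.0.2:80] -> [0.0.0.0:80]
 *
 * so a forward walk that stops at the first acceptable entry prefers
 * an address-specific listener over the wildcard one.)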
23987 */ 23988 static void 23989 tcp_bind_hash_insert(tf_t *tbf, tcp_t *tcp, int caller_holds_lock) 23990 { 23991 tcp_t **tcpp; 23992 tcp_t *tcpnext; 23993 23994 if (tcp->tcp_ptpbhn != NULL) { 23995 ASSERT(!caller_holds_lock); 23996 tcp_bind_hash_remove(tcp); 23997 } 23998 tcpp = &tbf->tf_tcp; 23999 if (!caller_holds_lock) { 24000 mutex_enter(&tbf->tf_lock); 24001 } else { 24002 ASSERT(MUTEX_HELD(&tbf->tf_lock)); 24003 } 24004 tcpnext = tcpp[0]; 24005 if (tcpnext) { 24006 /* 24007 * If the new tcp bound to the INADDR_ANY address 24008 * and the first one in the list is not bound to 24009 * INADDR_ANY we skip all entries until we find the 24010 * first one bound to INADDR_ANY. 24011 * This makes sure that applications binding to a 24012 * specific address get preference over those binding to 24013 * INADDR_ANY. 24014 */ 24015 if (V6_OR_V4_INADDR_ANY(tcp->tcp_bound_source_v6) && 24016 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) { 24017 while ((tcpnext = tcpp[0]) != NULL && 24018 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) 24019 tcpp = &(tcpnext->tcp_bind_hash); 24020 if (tcpnext) 24021 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 24022 } else 24023 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 24024 } 24025 tcp->tcp_bind_hash = tcpnext; 24026 tcp->tcp_ptpbhn = tcpp; 24027 tcpp[0] = tcp; 24028 if (!caller_holds_lock) 24029 mutex_exit(&tbf->tf_lock); 24030 } 24031 24032 /* 24033 * Hash list removal routine for tcp_t structures. 24034 */ 24035 static void 24036 tcp_bind_hash_remove(tcp_t *tcp) 24037 { 24038 tcp_t *tcpnext; 24039 kmutex_t *lockp; 24040 tcp_stack_t *tcps = tcp->tcp_tcps; 24041 24042 if (tcp->tcp_ptpbhn == NULL) 24043 return; 24044 24045 /* 24046 * Extract the lock pointer in case there are concurrent 24047 * hash_remove's for this instance. 24048 */ 24049 ASSERT(tcp->tcp_lport != 0); 24050 lockp = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)].tf_lock; 24051 24052 ASSERT(lockp != NULL); 24053 mutex_enter(lockp); 24054 if (tcp->tcp_ptpbhn) { 24055 tcpnext = tcp->tcp_bind_hash; 24056 if (tcpnext) { 24057 tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn; 24058 tcp->tcp_bind_hash = NULL; 24059 } 24060 *tcp->tcp_ptpbhn = tcpnext; 24061 tcp->tcp_ptpbhn = NULL; 24062 } 24063 mutex_exit(lockp); 24064 } 24065 24066 24067 /* 24068 * Hash list lookup routine for tcp_t structures. 24069 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF. 24070 */ 24071 static tcp_t * 24072 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps) 24073 { 24074 tf_t *tf; 24075 tcp_t *tcp; 24076 24077 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 24078 mutex_enter(&tf->tf_lock); 24079 for (tcp = tf->tf_tcp; tcp != NULL; 24080 tcp = tcp->tcp_acceptor_hash) { 24081 if (tcp->tcp_acceptor_id == id) { 24082 CONN_INC_REF(tcp->tcp_connp); 24083 mutex_exit(&tf->tf_lock); 24084 return (tcp); 24085 } 24086 } 24087 mutex_exit(&tf->tf_lock); 24088 return (NULL); 24089 } 24090 24091 24092 /* 24093 * Hash list insertion routine for tcp_t structures. 
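 *
 * (Editorial note: unlike the bind hash above, this acceptor hash is
 * keyed by the acceptor id rather than the local port, no INADDR_ANY
 * ordering is needed, and entries are simply pushed on the front of
 * the chain.  A lookup returns a referenced tcp_t, so the assumed
 * usage pattern is:
 *
 *	tcp = tcp_acceptor_hash_lookup(id, tcps);
 *	if (tcp != NULL) {
 *		... use tcp ...
 *		CONN_DEC_REF(tcp->tcp_connp);
 *	}
 * )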
24094 */ 24095 void 24096 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp) 24097 { 24098 tf_t *tf; 24099 tcp_t **tcpp; 24100 tcp_t *tcpnext; 24101 tcp_stack_t *tcps = tcp->tcp_tcps; 24102 24103 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 24104 24105 if (tcp->tcp_ptpahn != NULL) 24106 tcp_acceptor_hash_remove(tcp); 24107 tcpp = &tf->tf_tcp; 24108 mutex_enter(&tf->tf_lock); 24109 tcpnext = tcpp[0]; 24110 if (tcpnext) 24111 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash; 24112 tcp->tcp_acceptor_hash = tcpnext; 24113 tcp->tcp_ptpahn = tcpp; 24114 tcpp[0] = tcp; 24115 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */ 24116 mutex_exit(&tf->tf_lock); 24117 } 24118 24119 /* 24120 * Hash list removal routine for tcp_t structures. 24121 */ 24122 static void 24123 tcp_acceptor_hash_remove(tcp_t *tcp) 24124 { 24125 tcp_t *tcpnext; 24126 kmutex_t *lockp; 24127 24128 /* 24129 * Extract the lock pointer in case there are concurrent 24130 * hash_remove's for this instance. 24131 */ 24132 lockp = tcp->tcp_acceptor_lockp; 24133 24134 if (tcp->tcp_ptpahn == NULL) 24135 return; 24136 24137 ASSERT(lockp != NULL); 24138 mutex_enter(lockp); 24139 if (tcp->tcp_ptpahn) { 24140 tcpnext = tcp->tcp_acceptor_hash; 24141 if (tcpnext) { 24142 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn; 24143 tcp->tcp_acceptor_hash = NULL; 24144 } 24145 *tcp->tcp_ptpahn = tcpnext; 24146 tcp->tcp_ptpahn = NULL; 24147 } 24148 mutex_exit(lockp); 24149 tcp->tcp_acceptor_lockp = NULL; 24150 } 24151 24152 /* ARGSUSED */ 24153 static int 24154 tcp_host_param_setvalue(queue_t *q, mblk_t *mp, char *value, caddr_t cp, int af) 24155 { 24156 int error = 0; 24157 int retval; 24158 char *end; 24159 tcp_hsp_t *hsp; 24160 tcp_hsp_t *hspprev; 24161 ipaddr_t addr = 0; /* Address we're looking for */ 24162 in6_addr_t v6addr; /* Address we're looking for */ 24163 uint32_t hash; /* Hash of that address */ 24164 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24165 24166 /* 24167 * If the following variables are still zero after parsing the input 24168 * string, the user didn't specify them and we don't change them in 24169 * the HSP. 
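 *
 * (Editorial sketch of the accepted input, assuming the usual ndd
 * front end; the exact ndd variable name is whatever this handler was
 * registered under, e.g. tcp_host_param:
 *
 *	ndd -set /dev/tcp tcp_host_param \
 *	    "192.168.1.5 mask 255.255.255.0 sendspace 65536 timestamp 1"
 *
 * Keywords may follow the address in any order; "delete" removes the
 * matching HSP instead of updating it.)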
24170 */
24171 
24172 	ipaddr_t mask = 0;		/* Subnet mask */
24173 	in6_addr_t v6mask;
24174 	long sendspace = 0;		/* Send buffer size */
24175 	long recvspace = 0;		/* Receive buffer size */
24176 	long timestamp = 0;	/* Originate TCP TSTAMP option, 1 = yes */
24177 	boolean_t delete = B_FALSE;	/* User asked to delete this HSP */
24178 
24179 	rw_enter(&tcps->tcps_hsp_lock, RW_WRITER);
24180 
24181 	/* Parse and validate address */
24182 	if (af == AF_INET) {
24183 		retval = inet_pton(af, value, &addr);
24184 		if (retval == 1)
24185 			IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
24186 	} else if (af == AF_INET6) {
24187 		retval = inet_pton(af, value, &v6addr);
24188 	} else {
24189 		error = EINVAL;
24190 		goto done;
24191 	}
24192 	if (retval == 0) {
24193 		error = EINVAL;
24194 		goto done;
24195 	}
24196 
24197 	while ((*value) && *value != ' ')
24198 		value++;
24199 
24200 	/* Parse individual keywords, set variables if found */
24201 	while (*value) {
24202 		/* Skip leading blanks */
24203 
24204 		while (*value == ' ' || *value == '\t')
24205 			value++;
24206 
24207 		/* If at end of string, we're done */
24208 
24209 		if (!*value)
24210 			break;
24211 
24212 		/* We have a word, figure out what it is */
24213 
24214 		if (strncmp("mask", value, 4) == 0) {
24215 			value += 4;
24216 			while (*value == ' ' || *value == '\t')
24217 				value++;
24218 			/* Parse subnet mask */
24219 			if (af == AF_INET) {
24220 				retval = inet_pton(af, value, &mask);
24221 				if (retval == 1) {
24222 					V4MASK_TO_V6(mask, v6mask);
24223 				}
24224 			} else if (af == AF_INET6) {
24225 				retval = inet_pton(af, value, &v6mask);
24226 			}
24227 			if (retval != 1) {
24228 				error = EINVAL;
24229 				goto done;
24230 			}
24231 			while ((*value) && *value != ' ')
24232 				value++;
24233 		} else if (strncmp("sendspace", value, 9) == 0) {
24234 			value += 9;
24235 
24236 			if (ddi_strtol(value, &end, 0, &sendspace) != 0 ||
24237 			    sendspace < TCP_XMIT_HIWATER ||
24238 			    sendspace >= (1L<<30)) {
24239 				error = EINVAL;
24240 				goto done;
24241 			}
24242 			value = end;
24243 		} else if (strncmp("recvspace", value, 9) == 0) {
24244 			value += 9;
24245 
24246 			if (ddi_strtol(value, &end, 0, &recvspace) != 0 ||
24247 			    recvspace < TCP_RECV_HIWATER ||
24248 			    recvspace >= (1L<<30)) {
24249 				error = EINVAL;
24250 				goto done;
24251 			}
24252 			value = end;
24253 		} else if (strncmp("timestamp", value, 9) == 0) {
24254 			value += 9;
24255 
24256 			if (ddi_strtol(value, &end, 0, &timestamp) != 0 ||
24257 			    timestamp < 0 || timestamp > 1) {
24258 				error = EINVAL;
24259 				goto done;
24260 			}
24261 
24262 			/*
24263 			 * We increment timestamp so we know it's been set;
24264 			 * this is undone when we put it in the HSP
24265 			 */
24266 			timestamp++;
24267 			value = end;
24268 		} else if (strncmp("delete", value, 6) == 0) {
24269 			value += 6;
24270 			delete = B_TRUE;
24271 		} else {
24272 			error = EINVAL;
24273 			goto done;
24274 		}
24275 	}
24276 
24277 	/* Hash address for lookup */
24278 
24279 	hash = TCP_HSP_HASH(addr);
24280 
24281 	if (delete) {
24282 		/*
24283 		 * Note that deletes don't return an error if the thing
24284 		 * we're trying to delete isn't there.
24285 */ 24286 if (tcps->tcps_hsp_hash == NULL) 24287 goto done; 24288 hsp = tcps->tcps_hsp_hash[hash]; 24289 24290 if (hsp) { 24291 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24292 &v6addr)) { 24293 tcps->tcps_hsp_hash[hash] = hsp->tcp_hsp_next; 24294 mi_free((char *)hsp); 24295 } else { 24296 hspprev = hsp; 24297 while ((hsp = hsp->tcp_hsp_next) != NULL) { 24298 if (IN6_ARE_ADDR_EQUAL( 24299 &hsp->tcp_hsp_addr_v6, &v6addr)) { 24300 hspprev->tcp_hsp_next = 24301 hsp->tcp_hsp_next; 24302 mi_free((char *)hsp); 24303 break; 24304 } 24305 hspprev = hsp; 24306 } 24307 } 24308 } 24309 } else { 24310 /* 24311 * We're adding/modifying an HSP. If we haven't already done 24312 * so, allocate the hash table. 24313 */ 24314 24315 if (!tcps->tcps_hsp_hash) { 24316 tcps->tcps_hsp_hash = (tcp_hsp_t **) 24317 mi_zalloc(sizeof (tcp_hsp_t *) * TCP_HSP_HASH_SIZE); 24318 if (!tcps->tcps_hsp_hash) { 24319 error = EINVAL; 24320 goto done; 24321 } 24322 } 24323 24324 /* Get head of hash chain */ 24325 24326 hsp = tcps->tcps_hsp_hash[hash]; 24327 24328 /* Try to find pre-existing hsp on hash chain */ 24329 /* Doesn't handle CIDR prefixes. */ 24330 while (hsp) { 24331 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, &v6addr)) 24332 break; 24333 hsp = hsp->tcp_hsp_next; 24334 } 24335 24336 /* 24337 * If we didn't, create one with default values and put it 24338 * at head of hash chain 24339 */ 24340 24341 if (!hsp) { 24342 hsp = (tcp_hsp_t *)mi_zalloc(sizeof (tcp_hsp_t)); 24343 if (!hsp) { 24344 error = EINVAL; 24345 goto done; 24346 } 24347 hsp->tcp_hsp_next = tcps->tcps_hsp_hash[hash]; 24348 tcps->tcps_hsp_hash[hash] = hsp; 24349 } 24350 24351 /* Set values that the user asked us to change */ 24352 24353 hsp->tcp_hsp_addr_v6 = v6addr; 24354 if (IN6_IS_ADDR_V4MAPPED(&v6addr)) 24355 hsp->tcp_hsp_vers = IPV4_VERSION; 24356 else 24357 hsp->tcp_hsp_vers = IPV6_VERSION; 24358 hsp->tcp_hsp_subnet_v6 = v6mask; 24359 if (sendspace > 0) 24360 hsp->tcp_hsp_sendspace = sendspace; 24361 if (recvspace > 0) 24362 hsp->tcp_hsp_recvspace = recvspace; 24363 if (timestamp > 0) 24364 hsp->tcp_hsp_tstamp = timestamp - 1; 24365 } 24366 24367 done: 24368 rw_exit(&tcps->tcps_hsp_lock); 24369 return (error); 24370 } 24371 24372 /* Set callback routine passed to nd_load by tcp_param_register. */ 24373 /* ARGSUSED */ 24374 static int 24375 tcp_host_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 24376 { 24377 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET)); 24378 } 24379 /* ARGSUSED */ 24380 static int 24381 tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 24382 cred_t *cr) 24383 { 24384 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET6)); 24385 } 24386 24387 /* TCP host parameters report triggered via the Named Dispatch mechanism. 
*/ 24388 /* ARGSUSED */ 24389 static int 24390 tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 24391 { 24392 tcp_hsp_t *hsp; 24393 int i; 24394 char addrbuf[INET6_ADDRSTRLEN], subnetbuf[INET6_ADDRSTRLEN]; 24395 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24396 24397 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24398 (void) mi_mpprintf(mp, 24399 "Hash HSP " MI_COL_HDRPAD_STR 24400 "Address Subnet Mask Send Receive TStamp"); 24401 if (tcps->tcps_hsp_hash) { 24402 for (i = 0; i < TCP_HSP_HASH_SIZE; i++) { 24403 hsp = tcps->tcps_hsp_hash[i]; 24404 while (hsp) { 24405 if (hsp->tcp_hsp_vers == IPV4_VERSION) { 24406 (void) inet_ntop(AF_INET, 24407 &hsp->tcp_hsp_addr, 24408 addrbuf, sizeof (addrbuf)); 24409 (void) inet_ntop(AF_INET, 24410 &hsp->tcp_hsp_subnet, 24411 subnetbuf, sizeof (subnetbuf)); 24412 } else { 24413 (void) inet_ntop(AF_INET6, 24414 &hsp->tcp_hsp_addr_v6, 24415 addrbuf, sizeof (addrbuf)); 24416 (void) inet_ntop(AF_INET6, 24417 &hsp->tcp_hsp_subnet_v6, 24418 subnetbuf, sizeof (subnetbuf)); 24419 } 24420 (void) mi_mpprintf(mp, 24421 " %03d " MI_COL_PTRFMT_STR 24422 "%s %s %010d %010d %d", 24423 i, 24424 (void *)hsp, 24425 addrbuf, 24426 subnetbuf, 24427 hsp->tcp_hsp_sendspace, 24428 hsp->tcp_hsp_recvspace, 24429 hsp->tcp_hsp_tstamp); 24430 24431 hsp = hsp->tcp_hsp_next; 24432 } 24433 } 24434 } 24435 rw_exit(&tcps->tcps_hsp_lock); 24436 return (0); 24437 } 24438 24439 24440 /* Data for fast netmask macro used by tcp_hsp_lookup */ 24441 24442 static ipaddr_t netmasks[] = { 24443 IN_CLASSA_NET, IN_CLASSA_NET, IN_CLASSB_NET, 24444 IN_CLASSC_NET | IN_CLASSD_NET /* Class C,D,E */ 24445 }; 24446 24447 #define netmask(addr) (netmasks[(ipaddr_t)(addr) >> 30]) 24448 24449 /* 24450 * XXX This routine should go away and instead we should use the metrics 24451 * associated with the routes to determine the default sndspace and rcvspace. 24452 */ 24453 static tcp_hsp_t * 24454 tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *tcps) 24455 { 24456 tcp_hsp_t *hsp = NULL; 24457 24458 /* Quick check without acquiring the lock. */ 24459 if (tcps->tcps_hsp_hash == NULL) 24460 return (NULL); 24461 24462 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24463 24464 /* This routine finds the best-matching HSP for address addr. */ 24465 24466 if (tcps->tcps_hsp_hash) { 24467 int i; 24468 ipaddr_t srchaddr; 24469 tcp_hsp_t *hsp_net; 24470 24471 /* We do three passes: host, network, and subnet. */ 24472 24473 srchaddr = addr; 24474 24475 for (i = 1; i <= 3; i++) { 24476 /* Look for exact match on srchaddr */ 24477 24478 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH(srchaddr)]; 24479 while (hsp) { 24480 if (hsp->tcp_hsp_vers == IPV4_VERSION && 24481 hsp->tcp_hsp_addr == srchaddr) 24482 break; 24483 hsp = hsp->tcp_hsp_next; 24484 } 24485 ASSERT(hsp == NULL || 24486 hsp->tcp_hsp_vers == IPV4_VERSION); 24487 24488 /* 24489 * If this is the first pass: 24490 * If we found a match, great, return it. 24491 * If not, search for the network on the second pass. 24492 */ 24493 24494 if (i == 1) 24495 if (hsp) 24496 break; 24497 else 24498 { 24499 srchaddr = addr & netmask(addr); 24500 continue; 24501 } 24502 24503 /* 24504 * If this is the second pass: 24505 * If we found a match, but there's a subnet mask, 24506 * save the match but try again using the subnet 24507 * mask on the third pass. 24508 * Otherwise, return whatever we found. 
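 *
 * (Editorial worked example of the three passes: for addr 10.1.2.3,
 * pass 1 probes 10.1.2.3 itself; pass 2, using the classful netmask()
 * macro above on a big-endian host, probes the network 10.0.0.0 (top
 * bits 00 select IN_CLASSA_NET); and if that network HSP carries e.g.
 * tcp_hsp_subnet 255.255.0.0, pass 3 probes 10.1.0.0 before falling
 * back to the saved network HSP.)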
24509 */ 24510 24511 if (i == 2) { 24512 if (hsp && hsp->tcp_hsp_subnet) { 24513 hsp_net = hsp; 24514 srchaddr = addr & hsp->tcp_hsp_subnet; 24515 continue; 24516 } else { 24517 break; 24518 } 24519 } 24520 24521 /* 24522 * This must be the third pass. If we didn't find 24523 * anything, return the saved network HSP instead. 24524 */ 24525 24526 if (!hsp) 24527 hsp = hsp_net; 24528 } 24529 } 24530 24531 rw_exit(&tcps->tcps_hsp_lock); 24532 return (hsp); 24533 } 24534 24535 /* 24536 * XXX Equally broken as the IPv4 routine. Doesn't handle longest 24537 * match lookup. 24538 */ 24539 static tcp_hsp_t * 24540 tcp_hsp_lookup_ipv6(in6_addr_t *v6addr, tcp_stack_t *tcps) 24541 { 24542 tcp_hsp_t *hsp = NULL; 24543 24544 /* Quick check without acquiring the lock. */ 24545 if (tcps->tcps_hsp_hash == NULL) 24546 return (NULL); 24547 24548 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24549 24550 /* This routine finds the best-matching HSP for address addr. */ 24551 24552 if (tcps->tcps_hsp_hash) { 24553 int i; 24554 in6_addr_t v6srchaddr; 24555 tcp_hsp_t *hsp_net; 24556 24557 /* We do three passes: host, network, and subnet. */ 24558 24559 v6srchaddr = *v6addr; 24560 24561 for (i = 1; i <= 3; i++) { 24562 /* Look for exact match on srchaddr */ 24563 24564 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH( 24565 V4_PART_OF_V6(v6srchaddr))]; 24566 while (hsp) { 24567 if (hsp->tcp_hsp_vers == IPV6_VERSION && 24568 IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24569 &v6srchaddr)) 24570 break; 24571 hsp = hsp->tcp_hsp_next; 24572 } 24573 24574 /* 24575 * If this is the first pass: 24576 * If we found a match, great, return it. 24577 * If not, search for the network on the second pass. 24578 */ 24579 24580 if (i == 1) 24581 if (hsp) 24582 break; 24583 else { 24584 /* Assume a 64 bit mask */ 24585 v6srchaddr.s6_addr32[0] = 24586 v6addr->s6_addr32[0]; 24587 v6srchaddr.s6_addr32[1] = 24588 v6addr->s6_addr32[1]; 24589 v6srchaddr.s6_addr32[2] = 0; 24590 v6srchaddr.s6_addr32[3] = 0; 24591 continue; 24592 } 24593 24594 /* 24595 * If this is the second pass: 24596 * If we found a match, but there's a subnet mask, 24597 * save the match but try again using the subnet 24598 * mask on the third pass. 24599 * Otherwise, return whatever we found. 24600 */ 24601 24602 if (i == 2) { 24603 ASSERT(hsp == NULL || 24604 hsp->tcp_hsp_vers == IPV6_VERSION); 24605 if (hsp && 24606 !IN6_IS_ADDR_UNSPECIFIED( 24607 &hsp->tcp_hsp_subnet_v6)) { 24608 hsp_net = hsp; 24609 V6_MASK_COPY(*v6addr, 24610 hsp->tcp_hsp_subnet_v6, v6srchaddr); 24611 continue; 24612 } else { 24613 break; 24614 } 24615 } 24616 24617 /* 24618 * This must be the third pass. If we didn't find 24619 * anything, return the saved network HSP instead. 24620 */ 24621 24622 if (!hsp) 24623 hsp = hsp_net; 24624 } 24625 } 24626 24627 rw_exit(&tcps->tcps_hsp_lock); 24628 return (hsp); 24629 } 24630 24631 /* 24632 * Type three generator adapted from the random() function in 4.4 BSD: 24633 */ 24634 24635 /* 24636 * Copyright (c) 1983, 1993 24637 * The Regents of the University of California. All rights reserved. 24638 * 24639 * Redistribution and use in source and binary forms, with or without 24640 * modification, are permitted provided that the following conditions 24641 * are met: 24642 * 1. Redistributions of source code must retain the above copyright 24643 * notice, this list of conditions and the following disclaimer. 24644 * 2. 
Redistributions in binary form must reproduce the above copyright 24645 * notice, this list of conditions and the following disclaimer in the 24646 * documentation and/or other materials provided with the distribution. 24647 * 3. All advertising materials mentioning features or use of this software 24648 * must display the following acknowledgement: 24649 * This product includes software developed by the University of 24650 * California, Berkeley and its contributors. 24651 * 4. Neither the name of the University nor the names of its contributors 24652 * may be used to endorse or promote products derived from this software 24653 * without specific prior written permission. 24654 * 24655 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24656 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24657 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24658 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24659 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24660 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24661 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24662 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24663 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24664 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24665 * SUCH DAMAGE. 24666 */ 24667 24668 /* Type 3 -- x**31 + x**3 + 1 */ 24669 #define DEG_3 31 24670 #define SEP_3 3 24671 24672 24673 /* Protected by tcp_random_lock */ 24674 static int tcp_randtbl[DEG_3 + 1]; 24675 24676 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1]; 24677 static int *tcp_random_rptr = &tcp_randtbl[1]; 24678 24679 static int *tcp_random_state = &tcp_randtbl[1]; 24680 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1]; 24681 24682 kmutex_t tcp_random_lock; 24683 24684 void 24685 tcp_random_init(void) 24686 { 24687 int i; 24688 hrtime_t hrt; 24689 time_t wallclock; 24690 uint64_t result; 24691 24692 /* 24693 * Use high-res timer and current time for seed. Gethrtime() returns 24694 * a longlong, which may contain resolution down to nanoseconds. 24695 * The current time will either be a 32-bit or a 64-bit quantity. 24696 * XOR the two together in a 64-bit result variable. 24697 * Convert the result to a 32-bit value by multiplying the high-order 24698 * 32-bits by the low-order 32-bits. 24699 */ 24700 24701 hrt = gethrtime(); 24702 (void) drv_getparm(TIME, &wallclock); 24703 result = (uint64_t)wallclock ^ (uint64_t)hrt; 24704 mutex_enter(&tcp_random_lock); 24705 tcp_random_state[0] = ((result >> 32) & 0xffffffff) * 24706 (result & 0xffffffff); 24707 24708 for (i = 1; i < DEG_3; i++) 24709 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1] 24710 + 12345; 24711 tcp_random_fptr = &tcp_random_state[SEP_3]; 24712 tcp_random_rptr = &tcp_random_state[0]; 24713 mutex_exit(&tcp_random_lock); 24714 for (i = 0; i < 10 * DEG_3; i++) 24715 (void) tcp_random(); 24716 } 24717 24718 /* 24719 * tcp_random: Return a random number in the range [1 - (128K + 1)]. 24720 * This range is selected to be approximately centered on TCP_ISS / 2, 24721 * and easy to compute. We get this value by generating a 32-bit random 24722 * number, selecting out the high-order 17 bits, and then adding one so 24723 * that we never return zero. 
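 *
 * (Editorial arithmetic check: ((x >> 15) & 0x1ffff) yields 17 bits,
 * i.e. 0 through 131071 (128K - 1); adding one gives 1 through
 * 131072, so the function never returns zero.)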
24724 */ 24725 int 24726 tcp_random(void) 24727 { 24728 int i; 24729 24730 mutex_enter(&tcp_random_lock); 24731 *tcp_random_fptr += *tcp_random_rptr; 24732 24733 /* 24734 * The high-order bits are more random than the low-order bits, 24735 * so we select out the high-order 17 bits and add one so that 24736 * we never return zero. 24737 */ 24738 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1; 24739 if (++tcp_random_fptr >= tcp_random_end_ptr) { 24740 tcp_random_fptr = tcp_random_state; 24741 ++tcp_random_rptr; 24742 } else if (++tcp_random_rptr >= tcp_random_end_ptr) 24743 tcp_random_rptr = tcp_random_state; 24744 24745 mutex_exit(&tcp_random_lock); 24746 return (i); 24747 } 24748 24749 /* 24750 * XXX This will go away when TPI is extended to send 24751 * info reqs to sockfs/timod ..... 24752 * Given a queue, set the max packet size for the write 24753 * side of the queue below stream head. This value is 24754 * cached on the stream head. 24755 * Returns 1 on success, 0 otherwise. 24756 */ 24757 static int 24758 setmaxps(queue_t *q, int maxpsz) 24759 { 24760 struct stdata *stp; 24761 queue_t *wq; 24762 stp = STREAM(q); 24763 24764 /* 24765 * At this point change of a queue parameter is not allowed 24766 * when a multiplexor is sitting on top. 24767 */ 24768 if (stp->sd_flag & STPLEX) 24769 return (0); 24770 24771 claimstr(stp->sd_wrq); 24772 wq = stp->sd_wrq->q_next; 24773 ASSERT(wq != NULL); 24774 (void) strqset(wq, QMAXPSZ, 0, maxpsz); 24775 releasestr(stp->sd_wrq); 24776 return (1); 24777 } 24778 24779 static int 24780 tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp, 24781 int *t_errorp, int *sys_errorp) 24782 { 24783 int error; 24784 int is_absreq_failure; 24785 t_scalar_t *opt_lenp; 24786 t_scalar_t opt_offset; 24787 int prim_type; 24788 struct T_conn_req *tcreqp; 24789 struct T_conn_res *tcresp; 24790 cred_t *cr; 24791 24792 cr = DB_CREDDEF(mp, tcp->tcp_cred); 24793 24794 prim_type = ((union T_primitives *)mp->b_rptr)->type; 24795 ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES || 24796 prim_type == T_CONN_RES); 24797 24798 switch (prim_type) { 24799 case T_CONN_REQ: 24800 tcreqp = (struct T_conn_req *)mp->b_rptr; 24801 opt_offset = tcreqp->OPT_offset; 24802 opt_lenp = (t_scalar_t *)&tcreqp->OPT_length; 24803 break; 24804 case O_T_CONN_RES: 24805 case T_CONN_RES: 24806 tcresp = (struct T_conn_res *)mp->b_rptr; 24807 opt_offset = tcresp->OPT_offset; 24808 opt_lenp = (t_scalar_t *)&tcresp->OPT_length; 24809 break; 24810 } 24811 24812 *t_errorp = 0; 24813 *sys_errorp = 0; 24814 *do_disconnectp = 0; 24815 24816 error = tpi_optcom_buf(tcp->tcp_wq, mp, opt_lenp, 24817 opt_offset, cr, &tcp_opt_obj, 24818 NULL, &is_absreq_failure); 24819 24820 switch (error) { 24821 case 0: /* no error */ 24822 ASSERT(is_absreq_failure == 0); 24823 return (0); 24824 case ENOPROTOOPT: 24825 *t_errorp = TBADOPT; 24826 break; 24827 case EACCES: 24828 *t_errorp = TACCES; 24829 break; 24830 default: 24831 *t_errorp = TSYSERR; *sys_errorp = error; 24832 break; 24833 } 24834 if (is_absreq_failure != 0) { 24835 /* 24836 * The connection request should get the local ack 24837 * T_OK_ACK and then a T_DISCON_IND. 24838 */ 24839 *do_disconnectp = 1; 24840 } 24841 return (-1); 24842 } 24843 24844 /* 24845 * Split this function out so that if the secret changes, I'm okay. 24846 * 24847 * Initialize the tcp_iss_cookie and tcp_iss_key. 
24848 */
24849 
24850 #define	PASSWD_SIZE 16  /* MUST be multiple of 4 */
24851 
24852 static void
24853 tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
24854 {
24855 	struct {
24856 		int32_t current_time;
24857 		uint32_t randnum;
24858 		uint16_t pad;
24859 		uint8_t ether[6];
24860 		uint8_t passwd[PASSWD_SIZE];
24861 	} tcp_iss_cookie;
24862 	time_t t;
24863 
24864 	/*
24865 	 * Start with the current absolute time.
24866 	 */
24867 	(void) drv_getparm(TIME, &t);
24868 	tcp_iss_cookie.current_time = t;
24869 
24870 	/*
24871 	 * XXX - Need a more random number per RFC 1750, not this crap.
24872 	 * OTOH, if what follows is pretty random, then I'm in better shape.
24873 	 */
24874 	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
24875 	tcp_iss_cookie.pad = 0x365c;	/* Picked from HMAC pad values. */
24876 
24877 	/*
24878 	 * The cpu_type_info is pretty non-random. Ugggh. It does serve
24879 	 * as a good template.
24880 	 */
24881 	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
24882 	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));
24883 
24884 	/*
24885 	 * The pass-phrase. Normally this is supplied by user-called NDD.
24886 	 */
24887 	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));
24888 
24889 	/*
24890 	 * See 4010593 if this section becomes a problem again,
24891 	 * but the local ethernet address is useful here.
24892 	 */
24893 	(void) localetheraddr(NULL,
24894 	    (struct ether_addr *)&tcp_iss_cookie.ether);
24895 
24896 	/*
24897 	 * Hash 'em all together. The MD5Final is called per-connection.
24898 	 */
24899 	mutex_enter(&tcps->tcps_iss_key_lock);
24900 	MD5Init(&tcps->tcps_iss_key);
24901 	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
24902 	    sizeof (tcp_iss_cookie));
24903 	mutex_exit(&tcps->tcps_iss_key_lock);
24904 }
24905 
24906 /*
24907  * Set the RFC 1948 pass phrase
24908  */
24909 /* ARGSUSED */
24910 static int
24911 tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
24912     cred_t *cr)
24913 {
24914 	tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
24915 
24916 	/*
24917 	 * Basically, value contains a new pass phrase. Pass it along!
24918 	 */
24919 	tcp_iss_key_init((uint8_t *)value, strlen(value), tcps);
24920 	return (0);
24921 }
24922 
24923 /* ARGSUSED */
24924 static int
24925 tcp_sack_info_constructor(void *buf, void *cdrarg, int kmflags)
24926 {
24927 	bzero(buf, sizeof (tcp_sack_info_t));
24928 	return (0);
24929 }
24930 
24931 /* ARGSUSED */
24932 static int
24933 tcp_iphc_constructor(void *buf, void *cdrarg, int kmflags)
24934 {
24935 	bzero(buf, TCP_MAX_COMBINED_HEADER_LENGTH);
24936 	return (0);
24937 }
24938 
24939 /*
24940  * Make sure we wait until the default queue is set up, yet allow
24941  * tcp_g_q_create() to open a TCP stream.
24942  * We need to allow tcp_g_q_create() to do an open
24943  * of tcp, hence we compare curthread.
24944  * All others have to wait until the tcps_g_q has been
24945  * set up.
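 *
 * (Editorial sketch of the pattern implemented below, with names
 * shortened and create_queue() standing in for the blocking open;
 * one thread elects itself creator under the lock, drops the lock
 * for the blocking work, then signals the waiters:
 *
 *	mutex_enter(&lock);
 *	if (q != NULL) {			already set up
 *		mutex_exit(&lock);
 *	} else if (creator == NULL) {
 *		creator = curthread;
 *		mutex_exit(&lock);
 *		create_queue();			may block
 *		mutex_enter(&lock);
 *		creator = NULL;
 *		cv_signal(&cv);
 *		mutex_exit(&lock);
 *	} else {
 *		while (q == NULL)		everybody else waits
 *			cv_wait(&cv, &lock);
 *		mutex_exit(&lock);
 *	}
 * )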
24946 */ 24947 void 24948 tcp_g_q_setup(tcp_stack_t *tcps) 24949 { 24950 mutex_enter(&tcps->tcps_g_q_lock); 24951 if (tcps->tcps_g_q != NULL) { 24952 mutex_exit(&tcps->tcps_g_q_lock); 24953 return; 24954 } 24955 if (tcps->tcps_g_q_creator == NULL) { 24956 /* This thread will set it up */ 24957 tcps->tcps_g_q_creator = curthread; 24958 mutex_exit(&tcps->tcps_g_q_lock); 24959 tcp_g_q_create(tcps); 24960 mutex_enter(&tcps->tcps_g_q_lock); 24961 ASSERT(tcps->tcps_g_q_creator == curthread); 24962 tcps->tcps_g_q_creator = NULL; 24963 cv_signal(&tcps->tcps_g_q_cv); 24964 ASSERT(tcps->tcps_g_q != NULL); 24965 mutex_exit(&tcps->tcps_g_q_lock); 24966 return; 24967 } 24968 /* Everybody but the creator has to wait */ 24969 if (tcps->tcps_g_q_creator != curthread) { 24970 while (tcps->tcps_g_q == NULL) 24971 cv_wait(&tcps->tcps_g_q_cv, &tcps->tcps_g_q_lock); 24972 } 24973 mutex_exit(&tcps->tcps_g_q_lock); 24974 } 24975 24976 #define IP "ip" 24977 24978 #define TCP6DEV "/devices/pseudo/tcp6@0:tcp6" 24979 24980 /* 24981 * Create a default tcp queue here instead of in strplumb 24982 */ 24983 void 24984 tcp_g_q_create(tcp_stack_t *tcps) 24985 { 24986 int error; 24987 ldi_handle_t lh = NULL; 24988 ldi_ident_t li = NULL; 24989 int rval; 24990 cred_t *cr; 24991 major_t IP_MAJ; 24992 24993 #ifdef NS_DEBUG 24994 (void) printf("tcp_g_q_create()\n"); 24995 #endif 24996 24997 IP_MAJ = ddi_name_to_major(IP); 24998 24999 ASSERT(tcps->tcps_g_q_creator == curthread); 25000 25001 error = ldi_ident_from_major(IP_MAJ, &li); 25002 if (error) { 25003 #ifdef DEBUG 25004 printf("tcp_g_q_create: lyr ident get failed error %d\n", 25005 error); 25006 #endif 25007 return; 25008 } 25009 25010 cr = zone_get_kcred(netstackid_to_zoneid( 25011 tcps->tcps_netstack->netstack_stackid)); 25012 ASSERT(cr != NULL); 25013 /* 25014 * We set the tcp default queue to IPv6 because IPv4 falls 25015 * back to IPv6 when it can't find a client, but 25016 * IPv6 does not fall back to IPv4. 25017 */ 25018 error = ldi_open_by_name(TCP6DEV, FREAD|FWRITE, cr, &lh, li); 25019 if (error) { 25020 #ifdef DEBUG 25021 printf("tcp_g_q_create: open of TCP6DEV failed error %d\n", 25022 error); 25023 #endif 25024 goto out; 25025 } 25026 25027 /* 25028 * This ioctl causes the tcp framework to cache a pointer to 25029 * this stream, so we don't want to close the stream after 25030 * this operation. 25031 * Use the kernel credentials that are for the zone we're in. 25032 */ 25033 error = ldi_ioctl(lh, TCP_IOC_DEFAULT_Q, 25034 (intptr_t)0, FKIOCTL, cr, &rval); 25035 if (error) { 25036 #ifdef DEBUG 25037 printf("tcp_g_q_create: ioctl TCP_IOC_DEFAULT_Q failed " 25038 "error %d\n", error); 25039 #endif 25040 goto out; 25041 } 25042 tcps->tcps_g_q_lh = lh; /* For tcp_g_q_close */ 25043 lh = NULL; 25044 out: 25045 /* Close layered handles */ 25046 if (li) 25047 ldi_ident_release(li); 25048 /* Keep cred around until _inactive needs it */ 25049 tcps->tcps_g_q_cr = cr; 25050 } 25051 25052 /* 25053 * We keep tcp_g_q set until all other tcp_t's in the zone 25054 * has gone away, and then when tcp_g_q_inactive() is called 25055 * we clear it. 25056 */ 25057 void 25058 tcp_g_q_destroy(tcp_stack_t *tcps) 25059 { 25060 #ifdef NS_DEBUG 25061 (void) printf("tcp_g_q_destroy()for stack %d\n", 25062 tcps->tcps_netstack->netstack_stackid); 25063 #endif 25064 25065 if (tcps->tcps_g_q == NULL) { 25066 return; /* Nothing to cleanup */ 25067 } 25068 /* 25069 * Drop reference corresponding to the default queue. 
* This reference was added from tcp_open when the default queue
25071  * was created, hence we compensate for this extra drop in
25072  * tcp_g_q_close. If the refcnt drops to zero here it means
25073  * the default queue was the last one to be open, in which
25074  * case tcp_g_q_inactive will be
25075  * called as a result of the refrele.
25076  */
25077 	TCPS_REFRELE(tcps);
25078 }
25079 
25080 /*
25081  * Called when last tcp_t drops reference count using TCPS_REFRELE.
25082  * Run by tcp_g_q_inactive using a taskq.
25083  */
25084 static void
25085 tcp_g_q_close(void *arg)
25086 {
25087 	tcp_stack_t *tcps = arg;
25088 	int error;
25089 	ldi_handle_t lh = NULL;
25090 	ldi_ident_t li = NULL;
25091 	cred_t *cr;
25092 	major_t IP_MAJ;
25093 
25094 	IP_MAJ = ddi_name_to_major(IP);
25095 
25096 #ifdef NS_DEBUG
25097 	(void) printf("tcp_g_q_close() for stack %d refcnt %d\n",
25098 	    tcps->tcps_netstack->netstack_stackid,
25099 	    tcps->tcps_netstack->netstack_refcnt);
25100 #endif
25101 	lh = tcps->tcps_g_q_lh;
25102 	if (lh == NULL)
25103 		return;	/* Nothing to cleanup */
25104 
25105 	ASSERT(tcps->tcps_refcnt == 1);
25106 	ASSERT(tcps->tcps_g_q != NULL);
25107 
25108 	error = ldi_ident_from_major(IP_MAJ, &li);
25109 	if (error) {
25110 #ifdef DEBUG
25111 		printf("tcp_g_q_close: lyr ident get failed error %d\n",
25112 		    error);
25113 #endif
25114 		return;
25115 	}
25116 
25117 	cr = tcps->tcps_g_q_cr;
25118 	tcps->tcps_g_q_cr = NULL;
25119 	ASSERT(cr != NULL);
25120 
25121 	/*
25122 	 * Make sure we can break the recursion when tcp_close decrements
25123 	 * the reference count causing g_q_inactive to be called again.
25124 	 */
25125 	tcps->tcps_g_q_lh = NULL;
25126 
25127 	/* close the default queue */
25128 	(void) ldi_close(lh, FREAD|FWRITE, cr);
25129 	/*
25130 	 * At this point in time tcps and the rest of netstack_t might
25131 	 * have been deleted.
25132 	 */
25133 	tcps = NULL;
25134 
25135 	/* Close layered handles */
25136 	ldi_ident_release(li);
25137 	crfree(cr);
25138 }
25139 
25140 /*
25141  * Called when last tcp_t drops reference count using TCPS_REFRELE.
25142  *
25143  * Have to ensure that the ldi routines are not used by an
25144  * interrupt thread by using a taskq.
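 *
 * (Editorial note: ldi_close() can block in the STREAMS framework,
 * which is not allowed from interrupt context, so the close is
 * deferred to a kernel thread via taskq_dispatch() whenever
 * servicing_interrupt() reports that we are on an interrupt thread.)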
25145 */ 25146 void 25147 tcp_g_q_inactive(tcp_stack_t *tcps) 25148 { 25149 if (tcps->tcps_g_q_lh == NULL) 25150 return; /* Nothing to cleanup */ 25151 25152 ASSERT(tcps->tcps_refcnt == 0); 25153 TCPS_REFHOLD(tcps); /* Compensate for what g_q_destroy did */ 25154 25155 if (servicing_interrupt()) { 25156 (void) taskq_dispatch(tcp_taskq, tcp_g_q_close, 25157 (void *) tcps, TQ_SLEEP); 25158 } else { 25159 tcp_g_q_close(tcps); 25160 } 25161 } 25162 25163 /* 25164 * Called by IP when IP is loaded into the kernel 25165 */ 25166 void 25167 tcp_ddi_g_init(void) 25168 { 25169 tcp_timercache = kmem_cache_create("tcp_timercache", 25170 sizeof (tcp_timer_t) + sizeof (mblk_t), 0, 25171 NULL, NULL, NULL, NULL, NULL, 0); 25172 25173 tcp_sack_info_cache = kmem_cache_create("tcp_sack_info_cache", 25174 sizeof (tcp_sack_info_t), 0, 25175 tcp_sack_info_constructor, NULL, NULL, NULL, NULL, 0); 25176 25177 tcp_iphc_cache = kmem_cache_create("tcp_iphc_cache", 25178 TCP_MAX_COMBINED_HEADER_LENGTH, 0, 25179 tcp_iphc_constructor, NULL, NULL, NULL, NULL, 0); 25180 25181 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL); 25182 25183 /* Initialize the random number generator */ 25184 tcp_random_init(); 25185 25186 tcp_squeue_wput_proc = tcp_squeue_switch(tcp_squeue_wput); 25187 tcp_squeue_close_proc = tcp_squeue_switch(tcp_squeue_close); 25188 25189 /* A single callback independently of how many netstacks we have */ 25190 ip_squeue_init(tcp_squeue_add); 25191 25192 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics); 25193 25194 tcp_taskq = taskq_create("tcp_taskq", 1, minclsyspri, 1, 1, 25195 TASKQ_PREPOPULATE); 25196 25197 /* 25198 * We want to be informed each time a stack is created or 25199 * destroyed in the kernel, so we can maintain the 25200 * set of tcp_stack_t's. 25201 */ 25202 netstack_register(NS_TCP, tcp_stack_init, tcp_stack_shutdown, 25203 tcp_stack_fini); 25204 } 25205 25206 25207 /* 25208 * Initialize the TCP stack instance. 25209 */ 25210 static void * 25211 tcp_stack_init(netstackid_t stackid, netstack_t *ns) 25212 { 25213 tcp_stack_t *tcps; 25214 tcpparam_t *pa; 25215 int i; 25216 25217 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP); 25218 tcps->tcps_netstack = ns; 25219 25220 /* Initialize locks */ 25221 rw_init(&tcps->tcps_hsp_lock, NULL, RW_DEFAULT, NULL); 25222 mutex_init(&tcps->tcps_g_q_lock, NULL, MUTEX_DEFAULT, NULL); 25223 cv_init(&tcps->tcps_g_q_cv, NULL, CV_DEFAULT, NULL); 25224 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL); 25225 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 25226 rw_init(&tcps->tcps_reserved_port_lock, NULL, RW_DEFAULT, NULL); 25227 25228 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS; 25229 tcps->tcps_g_epriv_ports[0] = 2049; 25230 tcps->tcps_g_epriv_ports[1] = 4045; 25231 tcps->tcps_min_anonpriv_port = 512; 25232 25233 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) * 25234 TCP_BIND_FANOUT_SIZE, KM_SLEEP); 25235 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) * 25236 TCP_FANOUT_SIZE, KM_SLEEP); 25237 tcps->tcps_reserved_port = kmem_zalloc(sizeof (tcp_rport_t) * 25238 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE, KM_SLEEP); 25239 25240 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 25241 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL, 25242 MUTEX_DEFAULT, NULL); 25243 } 25244 25245 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 25246 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL, 25247 MUTEX_DEFAULT, NULL); 25248 } 25249 25250 /* TCP's IPsec code calls the packet dropper. 
*/ 25251 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement"); 25252 25253 pa = (tcpparam_t *)kmem_alloc(sizeof (lcl_tcp_param_arr), KM_SLEEP); 25254 tcps->tcps_params = pa; 25255 bcopy(lcl_tcp_param_arr, tcps->tcps_params, sizeof (lcl_tcp_param_arr)); 25256 25257 (void) tcp_param_register(&tcps->tcps_g_nd, tcps->tcps_params, 25258 A_CNT(lcl_tcp_param_arr), tcps); 25259 25260 /* 25261 * Note: To really walk the device tree you need the devinfo 25262 * pointer to your device which is only available after probe/attach. 25263 * The following is safe only because it uses ddi_root_node() 25264 */ 25265 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr, 25266 tcp_opt_obj.odb_opt_arr_cnt); 25267 25268 /* 25269 * Initialize RFC 1948 secret values. This will probably be reset once 25270 * by the boot scripts. 25271 * 25272 * Use NULL name, as the name is caught by the new lockstats. 25273 * 25274 * Initialize with some random, non-guessable string, like the global 25275 * T_INFO_ACK. 25276 */ 25277 25278 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack, 25279 sizeof (tcp_g_t_info_ack), tcps); 25280 25281 tcps->tcps_kstat = tcp_kstat2_init(stackid, &tcps->tcps_statistics); 25282 tcps->tcps_mibkp = tcp_kstat_init(stackid, tcps); 25283 25284 return (tcps); 25285 } 25286 25287 /* 25288 * Called when the IP module is about to be unloaded. 25289 */ 25290 void 25291 tcp_ddi_g_destroy(void) 25292 { 25293 tcp_g_kstat_fini(tcp_g_kstat); 25294 tcp_g_kstat = NULL; 25295 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics)); 25296 25297 mutex_destroy(&tcp_random_lock); 25298 25299 kmem_cache_destroy(tcp_timercache); 25300 kmem_cache_destroy(tcp_sack_info_cache); 25301 kmem_cache_destroy(tcp_iphc_cache); 25302 25303 netstack_unregister(NS_TCP); 25304 taskq_destroy(tcp_taskq); 25305 } 25306 25307 /* 25308 * Shut down the TCP stack instance. 25309 */ 25310 /* ARGSUSED */ 25311 static void 25312 tcp_stack_shutdown(netstackid_t stackid, void *arg) 25313 { 25314 tcp_stack_t *tcps = (tcp_stack_t *)arg; 25315 25316 tcp_g_q_destroy(tcps); 25317 } 25318 25319 /* 25320 * Free the TCP stack instance. 
25321 */ 25322 static void 25323 tcp_stack_fini(netstackid_t stackid, void *arg) 25324 { 25325 tcp_stack_t *tcps = (tcp_stack_t *)arg; 25326 int i; 25327 25328 nd_free(&tcps->tcps_g_nd); 25329 kmem_free(tcps->tcps_params, sizeof (lcl_tcp_param_arr)); 25330 tcps->tcps_params = NULL; 25331 kmem_free(tcps->tcps_wroff_xtra_param, sizeof (tcpparam_t)); 25332 tcps->tcps_wroff_xtra_param = NULL; 25333 kmem_free(tcps->tcps_mdt_head_param, sizeof (tcpparam_t)); 25334 tcps->tcps_mdt_head_param = NULL; 25335 kmem_free(tcps->tcps_mdt_tail_param, sizeof (tcpparam_t)); 25336 tcps->tcps_mdt_tail_param = NULL; 25337 kmem_free(tcps->tcps_mdt_max_pbufs_param, sizeof (tcpparam_t)); 25338 tcps->tcps_mdt_max_pbufs_param = NULL; 25339 25340 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 25341 ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL); 25342 mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock); 25343 } 25344 25345 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 25346 ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL); 25347 mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock); 25348 } 25349 25350 kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE); 25351 tcps->tcps_bind_fanout = NULL; 25352 25353 kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) * TCP_FANOUT_SIZE); 25354 tcps->tcps_acceptor_fanout = NULL; 25355 25356 kmem_free(tcps->tcps_reserved_port, sizeof (tcp_rport_t) * 25357 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE); 25358 tcps->tcps_reserved_port = NULL; 25359 25360 mutex_destroy(&tcps->tcps_iss_key_lock); 25361 rw_destroy(&tcps->tcps_hsp_lock); 25362 mutex_destroy(&tcps->tcps_g_q_lock); 25363 cv_destroy(&tcps->tcps_g_q_cv); 25364 mutex_destroy(&tcps->tcps_epriv_port_lock); 25365 rw_destroy(&tcps->tcps_reserved_port_lock); 25366 25367 ip_drop_unregister(&tcps->tcps_dropper); 25368 25369 tcp_kstat2_fini(stackid, tcps->tcps_kstat); 25370 tcps->tcps_kstat = NULL; 25371 bzero(&tcps->tcps_statistics, sizeof (tcps->tcps_statistics)); 25372 25373 tcp_kstat_fini(stackid, tcps->tcps_mibkp); 25374 tcps->tcps_mibkp = NULL; 25375 25376 kmem_free(tcps, sizeof (*tcps)); 25377 } 25378 25379 /* 25380 * Generate ISS, taking into account NDD changes may happen halfway through. 25381 * (If the iss is not zero, set it.) 25382 */ 25383 25384 static void 25385 tcp_iss_init(tcp_t *tcp) 25386 { 25387 MD5_CTX context; 25388 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg; 25389 uint32_t answer[4]; 25390 tcp_stack_t *tcps = tcp->tcp_tcps; 25391 25392 tcps->tcps_iss_incr_extra += (ISS_INCR >> 1); 25393 tcp->tcp_iss = tcps->tcps_iss_incr_extra; 25394 switch (tcps->tcps_strong_iss) { 25395 case 2: 25396 mutex_enter(&tcps->tcps_iss_key_lock); 25397 context = tcps->tcps_iss_key; 25398 mutex_exit(&tcps->tcps_iss_key_lock); 25399 arg.ports = tcp->tcp_ports; 25400 if (tcp->tcp_ipversion == IPV4_VERSION) { 25401 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 25402 &arg.src); 25403 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_dst, 25404 &arg.dst); 25405 } else { 25406 arg.src = tcp->tcp_ip6h->ip6_src; 25407 arg.dst = tcp->tcp_ip6h->ip6_dst; 25408 } 25409 MD5Update(&context, (uchar_t *)&arg, sizeof (arg)); 25410 MD5Final((uchar_t *)answer, &context); 25411 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3]; 25412 /* 25413 * Now that we've hashed into a unique per-connection sequence 25414 * space, add a random increment per strong_iss == 1. So I 25415 * guess we'll have to... 
25416 */ 25417 /* FALLTHRU */ 25418 case 1: 25419 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random(); 25420 break; 25421 default: 25422 tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 25423 break; 25424 } 25425 tcp->tcp_valid_bits = TCP_ISS_VALID; 25426 tcp->tcp_fss = tcp->tcp_iss - 1; 25427 tcp->tcp_suna = tcp->tcp_iss; 25428 tcp->tcp_snxt = tcp->tcp_iss + 1; 25429 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 25430 tcp->tcp_csuna = tcp->tcp_snxt; 25431 } 25432 25433 /* 25434 * Exported routine for extracting active tcp connection status. 25435 * 25436 * This is used by the Solaris Cluster Networking software to 25437 * gather a list of connections that need to be forwarded to 25438 * specific nodes in the cluster when configuration changes occur. 25439 * 25440 * The callback is invoked for each tcp_t structure. Returning 25441 * non-zero from the callback routine terminates the search. 25442 */ 25443 int 25444 cl_tcp_walk_list(int (*cl_callback)(cl_tcp_info_t *, void *), 25445 void *arg) 25446 { 25447 netstack_handle_t nh; 25448 netstack_t *ns; 25449 int ret = 0; 25450 25451 netstack_next_init(&nh); 25452 while ((ns = netstack_next(&nh)) != NULL) { 25453 ret = cl_tcp_walk_list_stack(cl_callback, arg, 25454 ns->netstack_tcp); 25455 netstack_rele(ns); 25456 } 25457 netstack_next_fini(&nh); 25458 return (ret); 25459 } 25460 25461 static int 25462 cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *), void *arg, 25463 tcp_stack_t *tcps) 25464 { 25465 tcp_t *tcp; 25466 cl_tcp_info_t cl_tcpi; 25467 connf_t *connfp; 25468 conn_t *connp; 25469 int i; 25470 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 25471 25472 ASSERT(callback != NULL); 25473 25474 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 25475 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 25476 connp = NULL; 25477 25478 while ((connp = 25479 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 25480 25481 tcp = connp->conn_tcp; 25482 cl_tcpi.cl_tcpi_version = CL_TCPI_V1; 25483 cl_tcpi.cl_tcpi_ipversion = tcp->tcp_ipversion; 25484 cl_tcpi.cl_tcpi_state = tcp->tcp_state; 25485 cl_tcpi.cl_tcpi_lport = tcp->tcp_lport; 25486 cl_tcpi.cl_tcpi_fport = tcp->tcp_fport; 25487 /* 25488 * The macros tcp_laddr and tcp_faddr give the IPv4 25489 * addresses. They are copied implicitly below as 25490 * mapped addresses. 25491 */ 25492 cl_tcpi.cl_tcpi_laddr_v6 = tcp->tcp_ip_src_v6; 25493 if (tcp->tcp_ipversion == IPV4_VERSION) { 25494 cl_tcpi.cl_tcpi_faddr = 25495 tcp->tcp_ipha->ipha_dst; 25496 } else { 25497 cl_tcpi.cl_tcpi_faddr_v6 = 25498 tcp->tcp_ip6h->ip6_dst; 25499 } 25500 25501 /* 25502 * If the callback returns non-zero 25503 * we terminate the traversal. 25504 */ 25505 if ((*callback)(&cl_tcpi, arg) != 0) { 25506 CONN_DEC_REF(tcp->tcp_connp); 25507 return (1); 25508 } 25509 } 25510 } 25511 25512 return (0); 25513 } 25514 25515 /* 25516 * Macros used for accessing the different types of sockaddr 25517 * structures inside a tcp_ioc_abort_conn_t. 
25518 */ 25519 #define TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local) 25520 #define TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote) 25521 #define TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr) 25522 #define TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr) 25523 #define TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port) 25524 #define TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port) 25525 #define TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local) 25526 #define TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote) 25527 #define TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr) 25528 #define TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr) 25529 #define TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port) 25530 #define TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port) 25531 25532 /* 25533 * Return the correct error code to mimic the behavior 25534 * of a connection reset. 25535 */ 25536 #define TCP_AC_GET_ERRCODE(state, err) { \ 25537 switch ((state)) { \ 25538 case TCPS_SYN_SENT: \ 25539 case TCPS_SYN_RCVD: \ 25540 (err) = ECONNREFUSED; \ 25541 break; \ 25542 case TCPS_ESTABLISHED: \ 25543 case TCPS_FIN_WAIT_1: \ 25544 case TCPS_FIN_WAIT_2: \ 25545 case TCPS_CLOSE_WAIT: \ 25546 (err) = ECONNRESET; \ 25547 break; \ 25548 case TCPS_CLOSING: \ 25549 case TCPS_LAST_ACK: \ 25550 case TCPS_TIME_WAIT: \ 25551 (err) = 0; \ 25552 break; \ 25553 default: \ 25554 (err) = ENXIO; \ 25555 } \ 25556 } 25557 25558 /* 25559 * Check if a tcp structure matches the info in acp. 25560 */ 25561 #define TCP_AC_ADDR_MATCH(acp, tcp) \ 25562 (((acp)->ac_local.ss_family == AF_INET) ? \ 25563 ((TCP_AC_V4LOCAL((acp)) == INADDR_ANY || \ 25564 TCP_AC_V4LOCAL((acp)) == (tcp)->tcp_ip_src) && \ 25565 (TCP_AC_V4REMOTE((acp)) == INADDR_ANY || \ 25566 TCP_AC_V4REMOTE((acp)) == (tcp)->tcp_remote) && \ 25567 (TCP_AC_V4LPORT((acp)) == 0 || \ 25568 TCP_AC_V4LPORT((acp)) == (tcp)->tcp_lport) && \ 25569 (TCP_AC_V4RPORT((acp)) == 0 || \ 25570 TCP_AC_V4RPORT((acp)) == (tcp)->tcp_fport) && \ 25571 (acp)->ac_start <= (tcp)->tcp_state && \ 25572 (acp)->ac_end >= (tcp)->tcp_state) : \ 25573 ((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) || \ 25574 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)), \ 25575 &(tcp)->tcp_ip_src_v6)) && \ 25576 (IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) || \ 25577 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)), \ 25578 &(tcp)->tcp_remote_v6)) && \ 25579 (TCP_AC_V6LPORT((acp)) == 0 || \ 25580 TCP_AC_V6LPORT((acp)) == (tcp)->tcp_lport) && \ 25581 (TCP_AC_V6RPORT((acp)) == 0 || \ 25582 TCP_AC_V6RPORT((acp)) == (tcp)->tcp_fport) && \ 25583 (acp)->ac_start <= (tcp)->tcp_state && \ 25584 (acp)->ac_end >= (tcp)->tcp_state)) 25585 25586 #define TCP_AC_MATCH(acp, tcp) \ 25587 (((acp)->ac_zoneid == ALL_ZONES || \ 25588 (acp)->ac_zoneid == tcp->tcp_connp->conn_zoneid) ? \ 25589 TCP_AC_ADDR_MATCH(acp, tcp) : 0) 25590 25591 /* 25592 * Build a message containing a tcp_ioc_abort_conn_t structure 25593 * which is filled in with information from acp and tp. 
25594 */ 25595 static mblk_t * 25596 tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp) 25597 { 25598 mblk_t *mp; 25599 tcp_ioc_abort_conn_t *tacp; 25600 25601 mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO); 25602 if (mp == NULL) 25603 return (NULL); 25604 25605 mp->b_datap->db_type = M_CTL; 25606 25607 *((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN; 25608 tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr + 25609 sizeof (uint32_t)); 25610 25611 tacp->ac_start = acp->ac_start; 25612 tacp->ac_end = acp->ac_end; 25613 tacp->ac_zoneid = acp->ac_zoneid; 25614 25615 if (acp->ac_local.ss_family == AF_INET) { 25616 tacp->ac_local.ss_family = AF_INET; 25617 tacp->ac_remote.ss_family = AF_INET; 25618 TCP_AC_V4LOCAL(tacp) = tp->tcp_ip_src; 25619 TCP_AC_V4REMOTE(tacp) = tp->tcp_remote; 25620 TCP_AC_V4LPORT(tacp) = tp->tcp_lport; 25621 TCP_AC_V4RPORT(tacp) = tp->tcp_fport; 25622 } else { 25623 tacp->ac_local.ss_family = AF_INET6; 25624 tacp->ac_remote.ss_family = AF_INET6; 25625 TCP_AC_V6LOCAL(tacp) = tp->tcp_ip_src_v6; 25626 TCP_AC_V6REMOTE(tacp) = tp->tcp_remote_v6; 25627 TCP_AC_V6LPORT(tacp) = tp->tcp_lport; 25628 TCP_AC_V6RPORT(tacp) = tp->tcp_fport; 25629 } 25630 mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp); 25631 return (mp); 25632 } 25633 25634 /* 25635 * Print a tcp_ioc_abort_conn_t structure. 25636 */ 25637 static void 25638 tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp) 25639 { 25640 char lbuf[128]; 25641 char rbuf[128]; 25642 sa_family_t af; 25643 in_port_t lport, rport; 25644 ushort_t logflags; 25645 25646 af = acp->ac_local.ss_family; 25647 25648 if (af == AF_INET) { 25649 (void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp), 25650 lbuf, 128); 25651 (void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp), 25652 rbuf, 128); 25653 lport = ntohs(TCP_AC_V4LPORT(acp)); 25654 rport = ntohs(TCP_AC_V4RPORT(acp)); 25655 } else { 25656 (void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp), 25657 lbuf, 128); 25658 (void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp), 25659 rbuf, 128); 25660 lport = ntohs(TCP_AC_V6LPORT(acp)); 25661 rport = ntohs(TCP_AC_V6RPORT(acp)); 25662 } 25663 25664 logflags = SL_TRACE | SL_NOTE; 25665 /* 25666 * Don't print this message to the console if the operation was done 25667 * to a non-global zone. 25668 */ 25669 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES) 25670 logflags |= SL_CONSOLE; 25671 (void) strlog(TCP_MOD_ID, 0, 1, logflags, 25672 "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, " 25673 "start = %d, end = %d\n", lbuf, lport, rbuf, rport, 25674 acp->ac_start, acp->ac_end); 25675 } 25676 25677 /* 25678 * Called inside tcp_rput when a message built using 25679 * tcp_ioctl_abort_build_msg is put into a queue. 25680 * Note that when we get here there is no wildcard in acp any more. 25681 */ 25682 static void 25683 tcp_ioctl_abort_handler(tcp_t *tcp, mblk_t *mp) 25684 { 25685 tcp_ioc_abort_conn_t *acp; 25686 25687 acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t)); 25688 if (tcp->tcp_state <= acp->ac_end) { 25689 /* 25690 * If we get here, we are already on the correct 25691 * squeue. 
This ioctl follows the following path
		 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn ->
		 * tcp_ioctl_abort -> squeue_fill (if on a different squeue)
		 */
		int errcode;

		TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode);
		(void) tcp_clean_death(tcp, errcode, 26);
	}
	freemsg(mp);
}

/*
 * Abort all matching connections on a hash chain.
 */
static int
tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
    boolean_t exact, tcp_stack_t *tcps)
{
	int nmatch, err = 0;
	tcp_t *tcp;
	MBLKP mp, last, listhead = NULL;
	conn_t *tconnp;
	connf_t *connfp;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	connfp = &ipst->ips_ipcl_conn_fanout[index];

startover:
	nmatch = 0;

	mutex_enter(&connfp->connf_lock);
	for (tconnp = connfp->connf_head; tconnp != NULL;
	    tconnp = tconnp->conn_next) {
		tcp = tconnp->conn_tcp;
		if (TCP_AC_MATCH(acp, tcp)) {
			CONN_INC_REF(tcp->tcp_connp);
			mp = tcp_ioctl_abort_build_msg(acp, tcp);
			if (mp == NULL) {
				err = ENOMEM;
				CONN_DEC_REF(tcp->tcp_connp);
				break;
			}
			mp->b_prev = (mblk_t *)tcp;

			if (listhead == NULL) {
				listhead = mp;
				last = mp;
			} else {
				last->b_next = mp;
				last = mp;
			}
			nmatch++;
			if (exact)
				break;
		}

		/* Avoid holding lock for too long. */
		if (nmatch >= 500)
			break;
	}
	mutex_exit(&connfp->connf_lock);

	/* Pass mp into the correct tcp */
	while ((mp = listhead) != NULL) {
		listhead = listhead->b_next;
		tcp = (tcp_t *)mp->b_prev;
		mp->b_next = mp->b_prev = NULL;
		squeue_fill(tcp->tcp_connp->conn_sqp, mp,
		    tcp_input, tcp->tcp_connp, SQTAG_TCP_ABORT_BUCKET);
	}

	*count += nmatch;
	if (nmatch >= 500 && err == 0)
		goto startover;
	return (err);
}

/*
 * Abort all connections that match the attributes specified in acp.
 */
static int
tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp, tcp_stack_t *tcps)
{
	sa_family_t af;
	uint32_t  ports;
	uint16_t *pports;
	int err = 0, count = 0;
	boolean_t exact = B_FALSE; /* set when there is no wildcard */
	int index = -1;
	ushort_t logflags;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	af = acp->ac_local.ss_family;

	if (af == AF_INET) {
		if (TCP_AC_V4REMOTE(acp) != INADDR_ANY &&
		    TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V4LPORT(acp);
			pports[0] = TCP_AC_V4RPORT(acp);
			exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY);
			/*
			 * No wildcards in the 3-tuple; hash to the one
			 * conn fanout bucket that can hold a match.
			 */
			index = IPCL_CONN_HASH(TCP_AC_V4REMOTE(acp),
			    ports, ipst);
		}
	} else {
		if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) &&
		    TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V6LPORT(acp);
			pports[0] = TCP_AC_V6RPORT(acp);
			exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp));
			/*
			 * No wildcards in the 3-tuple; hash to the one
			 * conn fanout bucket that can hold a match.
			 */
			index = IPCL_CONN_HASH_V6(TCP_AC_V6REMOTE(acp),
			    ports, ipst);
		}
	}

	/*
	 * For cases where remote addr, local port, and remote port are
	 * non-wildcards, tcp_ioctl_abort_bucket will only be called once.
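	 *
	 * Wildcard semantics, for reference: INADDR_ANY (or the unspecified
	 * IPv6 address) matches any address, a zero port matches any port,
	 * and ac_zoneid == ALL_ZONES matches any zone; ac_start and ac_end
	 * bound the range of TCP states considered.  This is what
	 * TCP_AC_ADDR_MATCH() and TCP_AC_MATCH() encode above.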
25808 */ 25809 if (index != -1) { 25810 err = tcp_ioctl_abort_bucket(acp, index, 25811 &count, exact, tcps); 25812 } else { 25813 /* 25814 * loop through all entries for wildcard case 25815 */ 25816 for (index = 0; 25817 index < ipst->ips_ipcl_conn_fanout_size; 25818 index++) { 25819 err = tcp_ioctl_abort_bucket(acp, index, 25820 &count, exact, tcps); 25821 if (err != 0) 25822 break; 25823 } 25824 } 25825 25826 logflags = SL_TRACE | SL_NOTE; 25827 /* 25828 * Don't print this message to the console if the operation was done 25829 * to a non-global zone. 25830 */ 25831 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES) 25832 logflags |= SL_CONSOLE; 25833 (void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: " 25834 "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' ')); 25835 if (err == 0 && count == 0) 25836 err = ENOENT; 25837 return (err); 25838 } 25839 25840 /* 25841 * Process the TCP_IOC_ABORT_CONN ioctl request. 25842 */ 25843 static void 25844 tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp) 25845 { 25846 int err; 25847 IOCP iocp; 25848 MBLKP mp1; 25849 sa_family_t laf, raf; 25850 tcp_ioc_abort_conn_t *acp; 25851 zone_t *zptr; 25852 conn_t *connp = Q_TO_CONN(q); 25853 zoneid_t zoneid = connp->conn_zoneid; 25854 tcp_t *tcp = connp->conn_tcp; 25855 tcp_stack_t *tcps = tcp->tcp_tcps; 25856 25857 iocp = (IOCP)mp->b_rptr; 25858 25859 if ((mp1 = mp->b_cont) == NULL || 25860 iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) { 25861 err = EINVAL; 25862 goto out; 25863 } 25864 25865 /* check permissions */ 25866 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 25867 err = EPERM; 25868 goto out; 25869 } 25870 25871 if (mp1->b_cont != NULL) { 25872 freemsg(mp1->b_cont); 25873 mp1->b_cont = NULL; 25874 } 25875 25876 acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr; 25877 laf = acp->ac_local.ss_family; 25878 raf = acp->ac_remote.ss_family; 25879 25880 /* check that a zone with the supplied zoneid exists */ 25881 if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) { 25882 zptr = zone_find_by_id(zoneid); 25883 if (zptr != NULL) { 25884 zone_rele(zptr); 25885 } else { 25886 err = EINVAL; 25887 goto out; 25888 } 25889 } 25890 25891 /* 25892 * For exclusive stacks we set the zoneid to zero 25893 * to make TCP operate as if in the global zone. 25894 */ 25895 if (tcps->tcps_netstack->netstack_stackid != GLOBAL_NETSTACKID) 25896 acp->ac_zoneid = GLOBAL_ZONEID; 25897 25898 if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT || 25899 acp->ac_start > acp->ac_end || laf != raf || 25900 (laf != AF_INET && laf != AF_INET6)) { 25901 err = EINVAL; 25902 goto out; 25903 } 25904 25905 tcp_ioctl_abort_dump(acp); 25906 err = tcp_ioctl_abort(acp, tcps); 25907 25908 out: 25909 if (mp1 != NULL) { 25910 freemsg(mp1); 25911 mp->b_cont = NULL; 25912 } 25913 25914 if (err != 0) 25915 miocnak(q, mp, 0, err); 25916 else 25917 miocack(q, mp, 0, 0); 25918 } 25919 25920 /* 25921 * tcp_time_wait_processing() handles processing of incoming packets when 25922 * the tcp is in the TIME_WAIT state. 25923 * A TIME_WAIT tcp that has an associated open TCP stream is never put 25924 * on the time wait list. 
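 *
 * For reference, the sequence arithmetic used below is:
 *
 *	gap  = seg_seq - tcp_rnxt;		how far past (or before)
 *						the expected sequence the
 *						segment starts
 *	rgap = tcp_rwnd - (gap + seg_len);	how much of the segment
 *						fits in the receive window
 *
 * For example, with tcp_rnxt = 1000, tcp_rwnd = 500, seg_seq = 900 and
 * seg_len = 200, gap is -100 (the first 100 bytes are duplicates) and
 * rgap is 400 (all of the remaining data fits in the window).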
25925 */ 25926 void 25927 tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq, 25928 uint32_t seg_ack, int seg_len, tcph_t *tcph) 25929 { 25930 int32_t bytes_acked; 25931 int32_t gap; 25932 int32_t rgap; 25933 tcp_opt_t tcpopt; 25934 uint_t flags; 25935 uint32_t new_swnd = 0; 25936 conn_t *connp; 25937 tcp_stack_t *tcps = tcp->tcp_tcps; 25938 25939 BUMP_LOCAL(tcp->tcp_ibsegs); 25940 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 25941 25942 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 25943 new_swnd = BE16_TO_U16(tcph->th_win) << 25944 ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws); 25945 if (tcp->tcp_snd_ts_ok) { 25946 if (!tcp_paws_check(tcp, tcph, &tcpopt)) { 25947 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 25948 tcp->tcp_rnxt, TH_ACK); 25949 goto done; 25950 } 25951 } 25952 gap = seg_seq - tcp->tcp_rnxt; 25953 rgap = tcp->tcp_rwnd - (gap + seg_len); 25954 if (gap < 0) { 25955 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs); 25956 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, 25957 (seg_len > -gap ? -gap : seg_len)); 25958 seg_len += gap; 25959 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 25960 if (flags & TH_RST) { 25961 goto done; 25962 } 25963 if ((flags & TH_FIN) && seg_len == -1) { 25964 /* 25965 * When TCP receives a duplicate FIN in 25966 * TIME_WAIT state, restart the 2 MSL timer. 25967 * See page 73 in RFC 793. Make sure this TCP 25968 * is already on the TIME_WAIT list. If not, 25969 * just restart the timer. 25970 */ 25971 if (TCP_IS_DETACHED(tcp)) { 25972 if (tcp_time_wait_remove(tcp, NULL) == 25973 B_TRUE) { 25974 tcp_time_wait_append(tcp); 25975 TCP_DBGSTAT(tcps, 25976 tcp_rput_time_wait); 25977 } 25978 } else { 25979 ASSERT(tcp != NULL); 25980 TCP_TIMER_RESTART(tcp, 25981 tcps->tcps_time_wait_interval); 25982 } 25983 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 25984 tcp->tcp_rnxt, TH_ACK); 25985 goto done; 25986 } 25987 flags |= TH_ACK_NEEDED; 25988 seg_len = 0; 25989 goto process_ack; 25990 } 25991 25992 /* Fix seg_seq, and chew the gap off the front. */ 25993 seg_seq = tcp->tcp_rnxt; 25994 } 25995 25996 if ((flags & TH_SYN) && gap > 0 && rgap < 0) { 25997 /* 25998 * Make sure that when we accept the connection, pick 25999 * an ISS greater than (tcp_snxt + ISS_INCR/2) for the 26000 * old connection. 26001 * 26002 * The next ISS generated is equal to tcp_iss_incr_extra 26003 * + ISS_INCR/2 + other components depending on the 26004 * value of tcp_strong_iss. We pre-calculate the new 26005 * ISS here and compare with tcp_snxt to determine if 26006 * we need to make adjustment to tcp_iss_incr_extra. 26007 * 26008 * The above calculation is ugly and is a 26009 * waste of CPU cycles... 26010 */ 26011 uint32_t new_iss = tcps->tcps_iss_incr_extra; 26012 int32_t adj; 26013 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 26014 26015 switch (tcps->tcps_strong_iss) { 26016 case 2: { 26017 /* Add time and MD5 components. 
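			 *
			 * The cases below mirror the tcps_strong_iss
			 * handling in tcp_iss_init().  Note that case 1
			 * adds 1, the smallest value tcp_random() could
			 * contribute, since only a lower bound on the
			 * next ISS is needed to adjust
			 * tcps_iss_incr_extra.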
*/ 26018 uint32_t answer[4]; 26019 struct { 26020 uint32_t ports; 26021 in6_addr_t src; 26022 in6_addr_t dst; 26023 } arg; 26024 MD5_CTX context; 26025 26026 mutex_enter(&tcps->tcps_iss_key_lock); 26027 context = tcps->tcps_iss_key; 26028 mutex_exit(&tcps->tcps_iss_key_lock); 26029 arg.ports = tcp->tcp_ports; 26030 /* We use MAPPED addresses in tcp_iss_init */ 26031 arg.src = tcp->tcp_ip_src_v6; 26032 if (tcp->tcp_ipversion == IPV4_VERSION) { 26033 IN6_IPADDR_TO_V4MAPPED( 26034 tcp->tcp_ipha->ipha_dst, 26035 &arg.dst); 26036 } else { 26037 arg.dst = 26038 tcp->tcp_ip6h->ip6_dst; 26039 } 26040 MD5Update(&context, (uchar_t *)&arg, 26041 sizeof (arg)); 26042 MD5Final((uchar_t *)answer, &context); 26043 answer[0] ^= answer[1] ^ answer[2] ^ answer[3]; 26044 new_iss += (gethrtime() >> ISS_NSEC_SHT) + answer[0]; 26045 break; 26046 } 26047 case 1: 26048 /* Add time component and min random (i.e. 1). */ 26049 new_iss += (gethrtime() >> ISS_NSEC_SHT) + 1; 26050 break; 26051 default: 26052 /* Add only time component. */ 26053 new_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 26054 break; 26055 } 26056 if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) { 26057 /* 26058 * New ISS not guaranteed to be ISS_INCR/2 26059 * ahead of the current tcp_snxt, so add the 26060 * difference to tcp_iss_incr_extra. 26061 */ 26062 tcps->tcps_iss_incr_extra += adj; 26063 } 26064 /* 26065 * If tcp_clean_death() can not perform the task now, 26066 * drop the SYN packet and let the other side re-xmit. 26067 * Otherwise pass the SYN packet back in, since the 26068 * old tcp state has been cleaned up or freed. 26069 */ 26070 if (tcp_clean_death(tcp, 0, 27) == -1) 26071 goto done; 26072 /* 26073 * We will come back to tcp_rput_data 26074 * on the global queue. Packets destined 26075 * for the global queue will be checked 26076 * with global policy. But the policy for 26077 * this packet has already been checked as 26078 * this was destined for the detached 26079 * connection. We need to bypass policy 26080 * check this time by attaching a dummy 26081 * ipsec_in with ipsec_in_dont_check set. 26082 */ 26083 connp = ipcl_classify(mp, tcp->tcp_connp->conn_zoneid, ipst); 26084 if (connp != NULL) { 26085 TCP_STAT(tcps, tcp_time_wait_syn_success); 26086 tcp_reinput(connp, mp, tcp->tcp_connp->conn_sqp); 26087 return; 26088 } 26089 goto done; 26090 } 26091 26092 /* 26093 * rgap is the amount of stuff received out of window. A negative 26094 * value is the amount out of window. 26095 */ 26096 if (rgap < 0) { 26097 BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs); 26098 UPDATE_MIB(&tcps->tcps_mib, tcpInDataPastWinBytes, -rgap); 26099 /* Fix seg_len and make sure there is something left. */ 26100 seg_len += rgap; 26101 if (seg_len <= 0) { 26102 if (flags & TH_RST) { 26103 goto done; 26104 } 26105 flags |= TH_ACK_NEEDED; 26106 seg_len = 0; 26107 goto process_ack; 26108 } 26109 } 26110 /* 26111 * Check whether we can update tcp_ts_recent. This test is 26112 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP 26113 * Extensions for High Performance: An Update", Internet Draft. 
26114 */ 26115 if (tcp->tcp_snd_ts_ok && 26116 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 26117 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 26118 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 26119 tcp->tcp_last_rcv_lbolt = lbolt64; 26120 } 26121 26122 if (seg_seq != tcp->tcp_rnxt && seg_len > 0) { 26123 /* Always ack out of order packets */ 26124 flags |= TH_ACK_NEEDED; 26125 seg_len = 0; 26126 } else if (seg_len > 0) { 26127 BUMP_MIB(&tcps->tcps_mib, tcpInClosed); 26128 BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs); 26129 UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len); 26130 } 26131 if (flags & TH_RST) { 26132 (void) tcp_clean_death(tcp, 0, 28); 26133 goto done; 26134 } 26135 if (flags & TH_SYN) { 26136 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 26137 TH_RST|TH_ACK); 26138 /* 26139 * Do not delete the TCP structure if it is in 26140 * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13. 26141 */ 26142 goto done; 26143 } 26144 process_ack: 26145 if (flags & TH_ACK) { 26146 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 26147 if (bytes_acked <= 0) { 26148 if (bytes_acked == 0 && seg_len == 0 && 26149 new_swnd == tcp->tcp_swnd) 26150 BUMP_MIB(&tcps->tcps_mib, tcpInDupAck); 26151 } else { 26152 /* Acks something not sent */ 26153 flags |= TH_ACK_NEEDED; 26154 } 26155 } 26156 if (flags & TH_ACK_NEEDED) { 26157 /* 26158 * Time to send an ack for some reason. 26159 */ 26160 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 26161 tcp->tcp_rnxt, TH_ACK); 26162 } 26163 done: 26164 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 26165 DB_CKSUMSTART(mp) = 0; 26166 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 26167 TCP_STAT(tcps, tcp_time_wait_syn_fail); 26168 } 26169 freemsg(mp); 26170 } 26171 26172 /* 26173 * Allocate a T_SVR4_OPTMGMT_REQ. 26174 * The caller needs to increment tcp_drop_opt_ack_cnt when sending these so 26175 * that tcp_rput_other can drop the acks. 26176 */ 26177 static mblk_t * 26178 tcp_setsockopt_mp(int level, int cmd, char *opt, int optlen) 26179 { 26180 mblk_t *mp; 26181 struct T_optmgmt_req *tor; 26182 struct opthdr *oh; 26183 uint_t size; 26184 char *optptr; 26185 26186 size = sizeof (*tor) + sizeof (*oh) + optlen; 26187 mp = allocb(size, BPRI_MED); 26188 if (mp == NULL) 26189 return (NULL); 26190 26191 mp->b_wptr += size; 26192 mp->b_datap->db_type = M_PROTO; 26193 tor = (struct T_optmgmt_req *)mp->b_rptr; 26194 tor->PRIM_type = T_SVR4_OPTMGMT_REQ; 26195 tor->MGMT_flags = T_NEGOTIATE; 26196 tor->OPT_length = sizeof (*oh) + optlen; 26197 tor->OPT_offset = (t_scalar_t)sizeof (*tor); 26198 26199 oh = (struct opthdr *)&tor[1]; 26200 oh->level = level; 26201 oh->name = cmd; 26202 oh->len = optlen; 26203 if (optlen != 0) { 26204 optptr = (char *)&oh[1]; 26205 bcopy(opt, optptr, optlen); 26206 } 26207 return (mp); 26208 } 26209 26210 /* 26211 * TCP Timers Implementation. 
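 *
 * A timer event is an mblk carrying a tcp_timer_t.  timeout(9F) fires
 * tcp_timer_callback(), which funnels the event onto the connection's
 * squeue so the handler runs single-threaded with respect to everything
 * else on that connection.  A hypothetical user (sketch only;
 * tcp_foo_timer is not a handler in this file) arms and disarms a
 * timer like this:
 *
 *	timeout_id_t tid;
 *
 *	tid = tcp_timeout(connp, tcp_foo_timer,
 *	    MSEC_TO_TICK(tcp->tcp_rto));
 *	...
 *	if (tcp_timeout_cancel(connp, tid) >= 0) {
 *		... timer was canceled before the handler could run ...
 *	}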
 */
timeout_id_t
tcp_timeout(conn_t *connp, void (*f)(void *), clock_t tim)
{
	mblk_t *mp;
	tcp_timer_t *tcpt;
	tcp_t *tcp = connp->conn_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(connp->conn_sqp != NULL);

	TCP_DBGSTAT(tcps, tcp_timeout_calls);

	if (tcp->tcp_timercache == NULL) {
		mp = tcp_timermp_alloc(KM_NOSLEEP | KM_PANIC);
	} else {
		TCP_DBGSTAT(tcps, tcp_timeout_cached_alloc);
		mp = tcp->tcp_timercache;
		tcp->tcp_timercache = mp->b_next;
		mp->b_next = NULL;
		ASSERT(mp->b_wptr == NULL);
	}

	CONN_INC_REF(connp);
	tcpt = (tcp_timer_t *)mp->b_rptr;
	tcpt->connp = connp;
	tcpt->tcpt_proc = f;
	tcpt->tcpt_tid = timeout(tcp_timer_callback, mp, tim);
	return ((timeout_id_t)mp);
}

static void
tcp_timer_callback(void *arg)
{
	mblk_t *mp = (mblk_t *)arg;
	tcp_timer_t *tcpt;
	conn_t *connp;

	tcpt = (tcp_timer_t *)mp->b_rptr;
	connp = tcpt->connp;
	squeue_fill(connp->conn_sqp, mp,
	    tcp_timer_handler, connp, SQTAG_TCP_TIMER);
}

static void
tcp_timer_handler(void *arg, mblk_t *mp, void *arg2)
{
	tcp_timer_t *tcpt;
	conn_t *connp = (conn_t *)arg;
	tcp_t *tcp = connp->conn_tcp;

	tcpt = (tcp_timer_t *)mp->b_rptr;
	ASSERT(connp == tcpt->connp);
	ASSERT((squeue_t *)arg2 == connp->conn_sqp);

	/*
	 * If the TCP has reached the closed state, don't proceed any
	 * further.  This TCP logically does not exist on the system.
	 * tcpt_proc could, for example, access queues that have already
	 * been qprocsoff'ed.  See also the comments at the start of
	 * tcp_input().
	 */
	if (tcp->tcp_state != TCPS_CLOSED) {
		(*tcpt->tcpt_proc)(connp);
	} else {
		tcp->tcp_timer_tid = 0;
	}
	tcp_timer_free(connp->conn_tcp, mp);
}

/*
 * There is a potential race between untimeout() and the handler firing
 * at the same time.  The mblk may be freed by the handler while we are
 * trying to use it.  But since both should execute on the same squeue,
 * this race should not occur.
 */
clock_t
tcp_timeout_cancel(conn_t *connp, timeout_id_t id)
{
	mblk_t *mp = (mblk_t *)id;
	tcp_timer_t *tcpt;
	clock_t delta;
	tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps;

	TCP_DBGSTAT(tcps, tcp_timeout_cancel_reqs);

	if (mp == NULL)
		return (-1);

	tcpt = (tcp_timer_t *)mp->b_rptr;
	ASSERT(tcpt->connp == connp);

	delta = untimeout(tcpt->tcpt_tid);

	if (delta >= 0) {
		TCP_DBGSTAT(tcps, tcp_timeout_canceled);
		tcp_timer_free(connp->conn_tcp, mp);
		CONN_DEC_REF(connp);
	}

	return (delta);
}

/*
 * Allocate space for the timer event.  The allocation looks like an mblk,
 * but it is not a proper mblk.  To avoid confusion we set b_wptr to NULL.
 *
 * Dealing with failures: if we can't allocate from the timer cache we try
 * allocating from dblock caches using allocb_tryhard().  In this case b_wptr
 * points to b_rptr.
 * If we can't allocate anything using allocb_tryhard(), we perform a last
 * attempt and use kmem_alloc_tryhard().  In this case we set b_wptr to -1 and
 * save the actual allocation size in b_datap.
 */
mblk_t *
tcp_timermp_alloc(int kmflags)
{
	mblk_t *mp = (mblk_t *)kmem_cache_alloc(tcp_timercache,
	    kmflags & ~KM_PANIC);

	if (mp != NULL) {
		mp->b_next = mp->b_prev = NULL;
		mp->b_rptr = (uchar_t *)(&mp[1]);
		mp->b_wptr = NULL;
		mp->b_datap = NULL;
		mp->b_queue = NULL;
		mp->b_cont = NULL;
	} else if (kmflags & KM_PANIC) {
		/*
		 * Failed to allocate memory for the timer.  Try allocating
		 * from dblock caches.
		 */
		/* ipclassifier calls this from a constructor - hence no tcps */
		TCP_G_STAT(tcp_timermp_allocfail);
		mp = allocb_tryhard(sizeof (tcp_timer_t));
		if (mp == NULL) {
			size_t size = 0;
			/*
			 * Memory is really low.  Try tryhard allocation.
			 *
			 * ipclassifier calls this from a constructor -
			 * hence no tcps
			 */
			TCP_G_STAT(tcp_timermp_allocdblfail);
			mp = kmem_alloc_tryhard(sizeof (mblk_t) +
			    sizeof (tcp_timer_t), &size, kmflags);
			mp->b_rptr = (uchar_t *)(&mp[1]);
			mp->b_next = mp->b_prev = NULL;
			mp->b_wptr = (uchar_t *)-1;
			mp->b_datap = (dblk_t *)size;
			mp->b_queue = NULL;
			mp->b_cont = NULL;
		}
		ASSERT(mp->b_wptr != NULL);
	}
	/* ipclassifier calls this from a constructor - hence no tcps */
	TCP_G_DBGSTAT(tcp_timermp_alloced);

	return (mp);
}

/*
 * Free the per-tcp timer cache.
 * It can only contain entries from tcp_timercache.
 */
void
tcp_timermp_free(tcp_t *tcp)
{
	mblk_t *mp;

	while ((mp = tcp->tcp_timercache) != NULL) {
		ASSERT(mp->b_wptr == NULL);
		tcp->tcp_timercache = tcp->tcp_timercache->b_next;
		kmem_cache_free(tcp_timercache, mp);
	}
}

/*
 * Free a timer event.  Put it on the per-tcp timer cache if there are not
 * too many events there already (currently at most two events are cached).
 * If the event is not allocated from the timer cache, free it right away.
 */
static void
tcp_timer_free(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1 = tcp->tcp_timercache;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	if (mp->b_wptr != NULL) {
		/*
		 * This allocation is not from a timer cache, free it right
		 * away.
		 */
		if (mp->b_wptr != (uchar_t *)-1)
			freeb(mp);
		else
			kmem_free(mp, (size_t)mp->b_datap);
	} else if (mp1 == NULL || mp1->b_next == NULL) {
		/* Cache this timer block for future allocations */
		mp->b_rptr = (uchar_t *)(&mp[1]);
		mp->b_next = mp1;
		tcp->tcp_timercache = mp;
	} else {
		kmem_cache_free(tcp_timercache, mp);
		TCP_DBGSTAT(tcps, tcp_timermp_freed);
	}
}

/*
 * End of TCP Timers implementation.
 */

/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q.  Note, the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value which,
 * when checked outside the q's lock, is only an advisory check ...
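 *
 * The double-check below (test q_flag without QLOCK, retest with it
 * held) keeps the common no-op path cheap.  A typical send-side caller
 * (sketch only; tcp_xmit_hiwater is assumed here to be the send-side
 * high-water mark) looks like:
 *
 *	if (tcp->tcp_unsent >= tcp->tcp_xmit_hiwater &&
 *	    !tcp->tcp_flow_stopped)
 *		tcp_setqfull(tcp);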
26428 */ 26429 26430 void 26431 tcp_setqfull(tcp_t *tcp) 26432 { 26433 queue_t *q = tcp->tcp_wq; 26434 tcp_stack_t *tcps = tcp->tcp_tcps; 26435 26436 if (!(q->q_flag & QFULL)) { 26437 mutex_enter(QLOCK(q)); 26438 if (!(q->q_flag & QFULL)) { 26439 /* still need to set QFULL */ 26440 q->q_flag |= QFULL; 26441 tcp->tcp_flow_stopped = B_TRUE; 26442 mutex_exit(QLOCK(q)); 26443 TCP_STAT(tcps, tcp_flwctl_on); 26444 } else { 26445 mutex_exit(QLOCK(q)); 26446 } 26447 } 26448 } 26449 26450 void 26451 tcp_clrqfull(tcp_t *tcp) 26452 { 26453 queue_t *q = tcp->tcp_wq; 26454 26455 if (q->q_flag & QFULL) { 26456 mutex_enter(QLOCK(q)); 26457 if (q->q_flag & QFULL) { 26458 q->q_flag &= ~QFULL; 26459 tcp->tcp_flow_stopped = B_FALSE; 26460 mutex_exit(QLOCK(q)); 26461 if (q->q_flag & QWANTW) 26462 qbackenable(q, 0); 26463 } else { 26464 mutex_exit(QLOCK(q)); 26465 } 26466 } 26467 } 26468 26469 26470 /* 26471 * kstats related to squeues i.e. not per IP instance 26472 */ 26473 static void * 26474 tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp) 26475 { 26476 kstat_t *ksp; 26477 26478 tcp_g_stat_t template = { 26479 { "tcp_timermp_alloced", KSTAT_DATA_UINT64 }, 26480 { "tcp_timermp_allocfail", KSTAT_DATA_UINT64 }, 26481 { "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 }, 26482 { "tcp_freelist_cleanup", KSTAT_DATA_UINT64 }, 26483 }; 26484 26485 ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net", 26486 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 26487 KSTAT_FLAG_VIRTUAL); 26488 26489 if (ksp == NULL) 26490 return (NULL); 26491 26492 bcopy(&template, tcp_g_statp, sizeof (template)); 26493 ksp->ks_data = (void *)tcp_g_statp; 26494 26495 kstat_install(ksp); 26496 return (ksp); 26497 } 26498 26499 static void 26500 tcp_g_kstat_fini(kstat_t *ksp) 26501 { 26502 if (ksp != NULL) { 26503 kstat_delete(ksp); 26504 } 26505 } 26506 26507 26508 static void * 26509 tcp_kstat2_init(netstackid_t stackid, tcp_stat_t *tcps_statisticsp) 26510 { 26511 kstat_t *ksp; 26512 26513 tcp_stat_t template = { 26514 { "tcp_time_wait", KSTAT_DATA_UINT64 }, 26515 { "tcp_time_wait_syn", KSTAT_DATA_UINT64 }, 26516 { "tcp_time_wait_success", KSTAT_DATA_UINT64 }, 26517 { "tcp_time_wait_fail", KSTAT_DATA_UINT64 }, 26518 { "tcp_reinput_syn", KSTAT_DATA_UINT64 }, 26519 { "tcp_ip_output", KSTAT_DATA_UINT64 }, 26520 { "tcp_detach_non_time_wait", KSTAT_DATA_UINT64 }, 26521 { "tcp_detach_time_wait", KSTAT_DATA_UINT64 }, 26522 { "tcp_time_wait_reap", KSTAT_DATA_UINT64 }, 26523 { "tcp_clean_death_nondetached", KSTAT_DATA_UINT64 }, 26524 { "tcp_reinit_calls", KSTAT_DATA_UINT64 }, 26525 { "tcp_eager_err1", KSTAT_DATA_UINT64 }, 26526 { "tcp_eager_err2", KSTAT_DATA_UINT64 }, 26527 { "tcp_eager_blowoff_calls", KSTAT_DATA_UINT64 }, 26528 { "tcp_eager_blowoff_q", KSTAT_DATA_UINT64 }, 26529 { "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64 }, 26530 { "tcp_not_hard_bound", KSTAT_DATA_UINT64 }, 26531 { "tcp_no_listener", KSTAT_DATA_UINT64 }, 26532 { "tcp_found_eager", KSTAT_DATA_UINT64 }, 26533 { "tcp_wrong_queue", KSTAT_DATA_UINT64 }, 26534 { "tcp_found_eager_binding1", KSTAT_DATA_UINT64 }, 26535 { "tcp_found_eager_bound1", KSTAT_DATA_UINT64 }, 26536 { "tcp_eager_has_listener1", KSTAT_DATA_UINT64 }, 26537 { "tcp_open_alloc", KSTAT_DATA_UINT64 }, 26538 { "tcp_open_detached_alloc", KSTAT_DATA_UINT64 }, 26539 { "tcp_rput_time_wait", KSTAT_DATA_UINT64 }, 26540 { "tcp_listendrop", KSTAT_DATA_UINT64 }, 26541 { "tcp_listendropq0", KSTAT_DATA_UINT64 }, 26542 { "tcp_wrong_rq", KSTAT_DATA_UINT64 }, 26543 { "tcp_rsrv_calls", KSTAT_DATA_UINT64 }, 26544 { 
"tcp_eagerfree2", KSTAT_DATA_UINT64 }, 26545 { "tcp_eagerfree3", KSTAT_DATA_UINT64 }, 26546 { "tcp_eagerfree4", KSTAT_DATA_UINT64 }, 26547 { "tcp_eagerfree5", KSTAT_DATA_UINT64 }, 26548 { "tcp_timewait_syn_fail", KSTAT_DATA_UINT64 }, 26549 { "tcp_listen_badflags", KSTAT_DATA_UINT64 }, 26550 { "tcp_timeout_calls", KSTAT_DATA_UINT64 }, 26551 { "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64 }, 26552 { "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64 }, 26553 { "tcp_timeout_canceled", KSTAT_DATA_UINT64 }, 26554 { "tcp_timermp_freed", KSTAT_DATA_UINT64 }, 26555 { "tcp_push_timer_cnt", KSTAT_DATA_UINT64 }, 26556 { "tcp_ack_timer_cnt", KSTAT_DATA_UINT64 }, 26557 { "tcp_ire_null1", KSTAT_DATA_UINT64 }, 26558 { "tcp_ire_null", KSTAT_DATA_UINT64 }, 26559 { "tcp_ip_send", KSTAT_DATA_UINT64 }, 26560 { "tcp_ip_ire_send", KSTAT_DATA_UINT64 }, 26561 { "tcp_wsrv_called", KSTAT_DATA_UINT64 }, 26562 { "tcp_flwctl_on", KSTAT_DATA_UINT64 }, 26563 { "tcp_timer_fire_early", KSTAT_DATA_UINT64 }, 26564 { "tcp_timer_fire_miss", KSTAT_DATA_UINT64 }, 26565 { "tcp_rput_v6_error", KSTAT_DATA_UINT64 }, 26566 { "tcp_out_sw_cksum", KSTAT_DATA_UINT64 }, 26567 { "tcp_out_sw_cksum_bytes", KSTAT_DATA_UINT64 }, 26568 { "tcp_zcopy_on", KSTAT_DATA_UINT64 }, 26569 { "tcp_zcopy_off", KSTAT_DATA_UINT64 }, 26570 { "tcp_zcopy_backoff", KSTAT_DATA_UINT64 }, 26571 { "tcp_zcopy_disable", KSTAT_DATA_UINT64 }, 26572 { "tcp_mdt_pkt_out", KSTAT_DATA_UINT64 }, 26573 { "tcp_mdt_pkt_out_v4", KSTAT_DATA_UINT64 }, 26574 { "tcp_mdt_pkt_out_v6", KSTAT_DATA_UINT64 }, 26575 { "tcp_mdt_discarded", KSTAT_DATA_UINT64 }, 26576 { "tcp_mdt_conn_halted1", KSTAT_DATA_UINT64 }, 26577 { "tcp_mdt_conn_halted2", KSTAT_DATA_UINT64 }, 26578 { "tcp_mdt_conn_halted3", KSTAT_DATA_UINT64 }, 26579 { "tcp_mdt_conn_resumed1", KSTAT_DATA_UINT64 }, 26580 { "tcp_mdt_conn_resumed2", KSTAT_DATA_UINT64 }, 26581 { "tcp_mdt_legacy_small", KSTAT_DATA_UINT64 }, 26582 { "tcp_mdt_legacy_all", KSTAT_DATA_UINT64 }, 26583 { "tcp_mdt_legacy_ret", KSTAT_DATA_UINT64 }, 26584 { "tcp_mdt_allocfail", KSTAT_DATA_UINT64 }, 26585 { "tcp_mdt_addpdescfail", KSTAT_DATA_UINT64 }, 26586 { "tcp_mdt_allocd", KSTAT_DATA_UINT64 }, 26587 { "tcp_mdt_linked", KSTAT_DATA_UINT64 }, 26588 { "tcp_fusion_flowctl", KSTAT_DATA_UINT64 }, 26589 { "tcp_fusion_backenabled", KSTAT_DATA_UINT64 }, 26590 { "tcp_fusion_urg", KSTAT_DATA_UINT64 }, 26591 { "tcp_fusion_putnext", KSTAT_DATA_UINT64 }, 26592 { "tcp_fusion_unfusable", KSTAT_DATA_UINT64 }, 26593 { "tcp_fusion_aborted", KSTAT_DATA_UINT64 }, 26594 { "tcp_fusion_unqualified", KSTAT_DATA_UINT64 }, 26595 { "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64 }, 26596 { "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64 }, 26597 { "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64 }, 26598 { "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64 }, 26599 { "tcp_sock_fallback", KSTAT_DATA_UINT64 }, 26600 { "tcp_lso_enabled", KSTAT_DATA_UINT64 }, 26601 { "tcp_lso_disabled", KSTAT_DATA_UINT64 }, 26602 { "tcp_lso_times", KSTAT_DATA_UINT64 }, 26603 { "tcp_lso_pkt_out", KSTAT_DATA_UINT64 }, 26604 }; 26605 26606 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net", 26607 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 26608 KSTAT_FLAG_VIRTUAL, stackid); 26609 26610 if (ksp == NULL) 26611 return (NULL); 26612 26613 bcopy(&template, tcps_statisticsp, sizeof (template)); 26614 ksp->ks_data = (void *)tcps_statisticsp; 26615 ksp->ks_private = (void *)(uintptr_t)stackid; 26616 26617 kstat_install(ksp); 26618 return (ksp); 26619 } 26620 26621 static void 26622 tcp_kstat2_fini(netstackid_t stackid, 
kstat_t *ksp) 26623 { 26624 if (ksp != NULL) { 26625 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 26626 kstat_delete_netstack(ksp, stackid); 26627 } 26628 } 26629 26630 /* 26631 * TCP Kstats implementation 26632 */ 26633 static void * 26634 tcp_kstat_init(netstackid_t stackid, tcp_stack_t *tcps) 26635 { 26636 kstat_t *ksp; 26637 26638 tcp_named_kstat_t template = { 26639 { "rtoAlgorithm", KSTAT_DATA_INT32, 0 }, 26640 { "rtoMin", KSTAT_DATA_INT32, 0 }, 26641 { "rtoMax", KSTAT_DATA_INT32, 0 }, 26642 { "maxConn", KSTAT_DATA_INT32, 0 }, 26643 { "activeOpens", KSTAT_DATA_UINT32, 0 }, 26644 { "passiveOpens", KSTAT_DATA_UINT32, 0 }, 26645 { "attemptFails", KSTAT_DATA_UINT32, 0 }, 26646 { "estabResets", KSTAT_DATA_UINT32, 0 }, 26647 { "currEstab", KSTAT_DATA_UINT32, 0 }, 26648 { "inSegs", KSTAT_DATA_UINT64, 0 }, 26649 { "outSegs", KSTAT_DATA_UINT64, 0 }, 26650 { "retransSegs", KSTAT_DATA_UINT32, 0 }, 26651 { "connTableSize", KSTAT_DATA_INT32, 0 }, 26652 { "outRsts", KSTAT_DATA_UINT32, 0 }, 26653 { "outDataSegs", KSTAT_DATA_UINT32, 0 }, 26654 { "outDataBytes", KSTAT_DATA_UINT32, 0 }, 26655 { "retransBytes", KSTAT_DATA_UINT32, 0 }, 26656 { "outAck", KSTAT_DATA_UINT32, 0 }, 26657 { "outAckDelayed", KSTAT_DATA_UINT32, 0 }, 26658 { "outUrg", KSTAT_DATA_UINT32, 0 }, 26659 { "outWinUpdate", KSTAT_DATA_UINT32, 0 }, 26660 { "outWinProbe", KSTAT_DATA_UINT32, 0 }, 26661 { "outControl", KSTAT_DATA_UINT32, 0 }, 26662 { "outFastRetrans", KSTAT_DATA_UINT32, 0 }, 26663 { "inAckSegs", KSTAT_DATA_UINT32, 0 }, 26664 { "inAckBytes", KSTAT_DATA_UINT32, 0 }, 26665 { "inDupAck", KSTAT_DATA_UINT32, 0 }, 26666 { "inAckUnsent", KSTAT_DATA_UINT32, 0 }, 26667 { "inDataInorderSegs", KSTAT_DATA_UINT32, 0 }, 26668 { "inDataInorderBytes", KSTAT_DATA_UINT32, 0 }, 26669 { "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 }, 26670 { "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 }, 26671 { "inDataDupSegs", KSTAT_DATA_UINT32, 0 }, 26672 { "inDataDupBytes", KSTAT_DATA_UINT32, 0 }, 26673 { "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 }, 26674 { "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 }, 26675 { "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 }, 26676 { "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 }, 26677 { "inWinProbe", KSTAT_DATA_UINT32, 0 }, 26678 { "inWinUpdate", KSTAT_DATA_UINT32, 0 }, 26679 { "inClosed", KSTAT_DATA_UINT32, 0 }, 26680 { "rttUpdate", KSTAT_DATA_UINT32, 0 }, 26681 { "rttNoUpdate", KSTAT_DATA_UINT32, 0 }, 26682 { "timRetrans", KSTAT_DATA_UINT32, 0 }, 26683 { "timRetransDrop", KSTAT_DATA_UINT32, 0 }, 26684 { "timKeepalive", KSTAT_DATA_UINT32, 0 }, 26685 { "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 }, 26686 { "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 }, 26687 { "listenDrop", KSTAT_DATA_UINT32, 0 }, 26688 { "listenDropQ0", KSTAT_DATA_UINT32, 0 }, 26689 { "halfOpenDrop", KSTAT_DATA_UINT32, 0 }, 26690 { "outSackRetransSegs", KSTAT_DATA_UINT32, 0 }, 26691 { "connTableSize6", KSTAT_DATA_INT32, 0 } 26692 }; 26693 26694 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2", 26695 KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid); 26696 26697 if (ksp == NULL) 26698 return (NULL); 26699 26700 template.rtoAlgorithm.value.ui32 = 4; 26701 template.rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min; 26702 template.rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max; 26703 template.maxConn.value.i32 = -1; 26704 26705 bcopy(&template, ksp->ks_data, sizeof (template)); 26706 ksp->ks_update = tcp_kstat_update; 26707 ksp->ks_private = (void *)(uintptr_t)stackid; 26708 26709 kstat_install(ksp); 26710 return (ksp); 
26711 } 26712 26713 static void 26714 tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp) 26715 { 26716 if (ksp != NULL) { 26717 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 26718 kstat_delete_netstack(ksp, stackid); 26719 } 26720 } 26721 26722 static int 26723 tcp_kstat_update(kstat_t *kp, int rw) 26724 { 26725 tcp_named_kstat_t *tcpkp; 26726 tcp_t *tcp; 26727 connf_t *connfp; 26728 conn_t *connp; 26729 int i; 26730 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private; 26731 netstack_t *ns; 26732 tcp_stack_t *tcps; 26733 ip_stack_t *ipst; 26734 26735 if ((kp == NULL) || (kp->ks_data == NULL)) 26736 return (EIO); 26737 26738 if (rw == KSTAT_WRITE) 26739 return (EACCES); 26740 26741 ns = netstack_find_by_stackid(stackid); 26742 if (ns == NULL) 26743 return (-1); 26744 tcps = ns->netstack_tcp; 26745 if (tcps == NULL) { 26746 netstack_rele(ns); 26747 return (-1); 26748 } 26749 tcpkp = (tcp_named_kstat_t *)kp->ks_data; 26750 26751 tcpkp->currEstab.value.ui32 = 0; 26752 26753 ipst = ns->netstack_ip; 26754 26755 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 26756 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 26757 connp = NULL; 26758 while ((connp = 26759 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 26760 tcp = connp->conn_tcp; 26761 switch (tcp_snmp_state(tcp)) { 26762 case MIB2_TCP_established: 26763 case MIB2_TCP_closeWait: 26764 tcpkp->currEstab.value.ui32++; 26765 break; 26766 } 26767 } 26768 } 26769 26770 tcpkp->activeOpens.value.ui32 = tcps->tcps_mib.tcpActiveOpens; 26771 tcpkp->passiveOpens.value.ui32 = tcps->tcps_mib.tcpPassiveOpens; 26772 tcpkp->attemptFails.value.ui32 = tcps->tcps_mib.tcpAttemptFails; 26773 tcpkp->estabResets.value.ui32 = tcps->tcps_mib.tcpEstabResets; 26774 tcpkp->inSegs.value.ui64 = tcps->tcps_mib.tcpHCInSegs; 26775 tcpkp->outSegs.value.ui64 = tcps->tcps_mib.tcpHCOutSegs; 26776 tcpkp->retransSegs.value.ui32 = tcps->tcps_mib.tcpRetransSegs; 26777 tcpkp->connTableSize.value.i32 = tcps->tcps_mib.tcpConnTableSize; 26778 tcpkp->outRsts.value.ui32 = tcps->tcps_mib.tcpOutRsts; 26779 tcpkp->outDataSegs.value.ui32 = tcps->tcps_mib.tcpOutDataSegs; 26780 tcpkp->outDataBytes.value.ui32 = tcps->tcps_mib.tcpOutDataBytes; 26781 tcpkp->retransBytes.value.ui32 = tcps->tcps_mib.tcpRetransBytes; 26782 tcpkp->outAck.value.ui32 = tcps->tcps_mib.tcpOutAck; 26783 tcpkp->outAckDelayed.value.ui32 = tcps->tcps_mib.tcpOutAckDelayed; 26784 tcpkp->outUrg.value.ui32 = tcps->tcps_mib.tcpOutUrg; 26785 tcpkp->outWinUpdate.value.ui32 = tcps->tcps_mib.tcpOutWinUpdate; 26786 tcpkp->outWinProbe.value.ui32 = tcps->tcps_mib.tcpOutWinProbe; 26787 tcpkp->outControl.value.ui32 = tcps->tcps_mib.tcpOutControl; 26788 tcpkp->outFastRetrans.value.ui32 = tcps->tcps_mib.tcpOutFastRetrans; 26789 tcpkp->inAckSegs.value.ui32 = tcps->tcps_mib.tcpInAckSegs; 26790 tcpkp->inAckBytes.value.ui32 = tcps->tcps_mib.tcpInAckBytes; 26791 tcpkp->inDupAck.value.ui32 = tcps->tcps_mib.tcpInDupAck; 26792 tcpkp->inAckUnsent.value.ui32 = tcps->tcps_mib.tcpInAckUnsent; 26793 tcpkp->inDataInorderSegs.value.ui32 = 26794 tcps->tcps_mib.tcpInDataInorderSegs; 26795 tcpkp->inDataInorderBytes.value.ui32 = 26796 tcps->tcps_mib.tcpInDataInorderBytes; 26797 tcpkp->inDataUnorderSegs.value.ui32 = 26798 tcps->tcps_mib.tcpInDataUnorderSegs; 26799 tcpkp->inDataUnorderBytes.value.ui32 = 26800 tcps->tcps_mib.tcpInDataUnorderBytes; 26801 tcpkp->inDataDupSegs.value.ui32 = tcps->tcps_mib.tcpInDataDupSegs; 26802 tcpkp->inDataDupBytes.value.ui32 = tcps->tcps_mib.tcpInDataDupBytes; 26803 tcpkp->inDataPartDupSegs.value.ui32 = 
26804 tcps->tcps_mib.tcpInDataPartDupSegs; 26805 tcpkp->inDataPartDupBytes.value.ui32 = 26806 tcps->tcps_mib.tcpInDataPartDupBytes; 26807 tcpkp->inDataPastWinSegs.value.ui32 = 26808 tcps->tcps_mib.tcpInDataPastWinSegs; 26809 tcpkp->inDataPastWinBytes.value.ui32 = 26810 tcps->tcps_mib.tcpInDataPastWinBytes; 26811 tcpkp->inWinProbe.value.ui32 = tcps->tcps_mib.tcpInWinProbe; 26812 tcpkp->inWinUpdate.value.ui32 = tcps->tcps_mib.tcpInWinUpdate; 26813 tcpkp->inClosed.value.ui32 = tcps->tcps_mib.tcpInClosed; 26814 tcpkp->rttNoUpdate.value.ui32 = tcps->tcps_mib.tcpRttNoUpdate; 26815 tcpkp->rttUpdate.value.ui32 = tcps->tcps_mib.tcpRttUpdate; 26816 tcpkp->timRetrans.value.ui32 = tcps->tcps_mib.tcpTimRetrans; 26817 tcpkp->timRetransDrop.value.ui32 = tcps->tcps_mib.tcpTimRetransDrop; 26818 tcpkp->timKeepalive.value.ui32 = tcps->tcps_mib.tcpTimKeepalive; 26819 tcpkp->timKeepaliveProbe.value.ui32 = 26820 tcps->tcps_mib.tcpTimKeepaliveProbe; 26821 tcpkp->timKeepaliveDrop.value.ui32 = 26822 tcps->tcps_mib.tcpTimKeepaliveDrop; 26823 tcpkp->listenDrop.value.ui32 = tcps->tcps_mib.tcpListenDrop; 26824 tcpkp->listenDropQ0.value.ui32 = tcps->tcps_mib.tcpListenDropQ0; 26825 tcpkp->halfOpenDrop.value.ui32 = tcps->tcps_mib.tcpHalfOpenDrop; 26826 tcpkp->outSackRetransSegs.value.ui32 = 26827 tcps->tcps_mib.tcpOutSackRetransSegs; 26828 tcpkp->connTableSize6.value.i32 = tcps->tcps_mib.tcp6ConnTableSize; 26829 26830 netstack_rele(ns); 26831 return (0); 26832 } 26833 26834 void 26835 tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp) 26836 { 26837 uint16_t hdr_len; 26838 ipha_t *ipha; 26839 uint8_t *nexthdrp; 26840 tcph_t *tcph; 26841 tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps; 26842 26843 /* Already has an eager */ 26844 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 26845 TCP_STAT(tcps, tcp_reinput_syn); 26846 squeue_enter(connp->conn_sqp, mp, connp->conn_recv, 26847 connp, SQTAG_TCP_REINPUT_EAGER); 26848 return; 26849 } 26850 26851 switch (IPH_HDR_VERSION(mp->b_rptr)) { 26852 case IPV4_VERSION: 26853 ipha = (ipha_t *)mp->b_rptr; 26854 hdr_len = IPH_HDR_LENGTH(ipha); 26855 break; 26856 case IPV6_VERSION: 26857 if (!ip_hdr_length_nexthdr_v6(mp, (ip6_t *)mp->b_rptr, 26858 &hdr_len, &nexthdrp)) { 26859 CONN_DEC_REF(connp); 26860 freemsg(mp); 26861 return; 26862 } 26863 break; 26864 } 26865 26866 tcph = (tcph_t *)&mp->b_rptr[hdr_len]; 26867 if ((tcph->th_flags[0] & (TH_SYN|TH_ACK|TH_RST|TH_URG)) == TH_SYN) { 26868 mp->b_datap->db_struioflag |= STRUIO_EAGER; 26869 DB_CKSUMSTART(mp) = (intptr_t)sqp; 26870 } 26871 26872 squeue_fill(connp->conn_sqp, mp, connp->conn_recv, connp, 26873 SQTAG_TCP_REINPUT); 26874 } 26875 26876 static squeue_func_t 26877 tcp_squeue_switch(int val) 26878 { 26879 squeue_func_t rval = squeue_fill; 26880 26881 switch (val) { 26882 case 1: 26883 rval = squeue_enter_nodrain; 26884 break; 26885 case 2: 26886 rval = squeue_enter; 26887 break; 26888 default: 26889 break; 26890 } 26891 return (rval); 26892 } 26893 26894 /* 26895 * This is called once for each squeue - globally for all stack 26896 * instances. 26897 */ 26898 static void 26899 tcp_squeue_add(squeue_t *sqp) 26900 { 26901 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc( 26902 sizeof (tcp_squeue_priv_t), KM_SLEEP); 26903 26904 *squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait; 26905 tcp_time_wait->tcp_time_wait_tid = timeout(tcp_time_wait_collector, 26906 sqp, TCP_TIME_WAIT_DELAY); 26907 if (tcp_free_list_max_cnt == 0) { 26908 int tcp_ncpus = ((boot_max_ncpus == -1) ? 
		    max_ncpus : boot_max_ncpus);

		/*
		 * Limit the number of entries to 1% of available memory /
		 * tcp_ncpus.
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
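/*
 * For completeness, the kstats published above can be read from user
 * context with libkstat; a minimal sketch (not part of this file, error
 * handling elided):
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "tcp", 0, "tcpstat_g");
 *	kstat_named_t *kn;
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
 *	    (kn = kstat_data_lookup(ksp, "tcp_timermp_alloced")) != NULL)
 *		(void) printf("%llu\n", (u_longlong_t)kn->value.ui64);
 *	(void) kstat_close(kc);
 */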