/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
const char tcp_version[] = "%Z%%M%	%I%	%E% SMI";


#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/multidata.h>
#include <sys/multidata_impl.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/zone.h>
#include <sys/sunldi.h>

#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/ipdrop.h>
#include <inet/tcp_trace.h>

#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue.h>
#include <inet/kssl/ksslapi.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>

/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in the tcp_t and conn_t structures,
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_CONNTCP as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t.
 * The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time the
 * eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPsec checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners, and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind the squeue (via squeue_enter, squeue_enter_nodrain, or
 * squeue_fill). Since the squeues allow a direct function call, the caller
 * can pass any tcp function having the prototype of edesc_t as an argument
 * (different from the traditional STREAMS model where packets come in only
 * through designated entry points). The list of functions that can be
 * directly called via squeue is listed before the usual function prototypes.
 *
 * Referencing:
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear while it is needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state, i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on the write side or by doing a classify on the read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. For the
 * read side, the classifier itself puts the reference under the fanout lock
 * to make sure that tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on the application CPU, which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out, but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * transmitted.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_conn_request(). But briefly, the squeue is picked by
 * ip_tcp_input()/ip_fanout_tcp_v6() based on the interrupted CPU.
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps
 * its reference, but tcp_close() always drops IP's reference. So if the tcp
 * was not killed, it is sitting in the time_wait list with 2 references -
 * 1 for TCP and 1 because it is in the classifier's connected hash. This is
 * the condition we use to determine that it's OK to clean up the tcp outside
 * of the squeue when time wait expires (check the ref under fanout and
 * conn_lock and if it is 2, remove it from the fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provision for (AF_INET, SOCK_STREAM) sockets which
 * can't have 'ipv6_recvpktinfo' set and for these types of sockets, IP
 * will never send an M_CTL to TCP. As such, ip_tcp_input() which handles
 * all TCP packets from the wire makes an IPCL_IS_TCP4_CONNECTED_NO_POLICY
 * check to send packets directly to tcp_rput_data via squeue. Everyone
 * else comes through tcp_input() on the read side.
 *
 * We also make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in the acceptor hash since a sockfs listener can never
 * become an acceptor, and also avoid allocating a tcp_t for the acceptor
 * STREAM since the eager has already been allocated and the accept now
 * happens on the acceptor STREAM. There is a big blob of comment on top of
 * tcp_conn_request explaining the new accept. When a socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to the old
 * behaviour. Once tcp_issocket is unset, it is never set for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP including checking new
 * connections and setting IPsec policies for new connections. The
 * only exception is tcp_xmit_listeners_reset() which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 *
 * PFHooks notes :
 *
 * For the mdt case, one meta buffer contains multiple packets. Mblks for
 * every packet are assembled and passed to the hooks. When packets are
 * blocked, or the boundary of any packet is changed, the mdt processing is
 * stopped, and packets of the meta buffer are sent to the IP path one by one.
 */
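
/*
 * Illustrative sketch (not part of the original code): how a read-side
 * thread hands a packet to this connection's perimeter as described in the
 * comment above.  conn_t, CONN_INC_REF, squeue_enter and tcp_input are real
 * names in this codebase; the wrapper function and the SQTAG value used
 * here are assumptions for illustration only.
 */
#ifdef	TCP_PERIMETER_EXAMPLE
static void
tcp_example_dispatch(conn_t *connp, mblk_t *mp)
{
	/* Take a reference so the tcp can't disappear before processing. */
	CONN_INC_REF(connp);
	/*
	 * Enter the squeue with an edesc_t-style function (tcp_input).
	 * The squeue drops the reference for us after the call-back runs,
	 * so no explicit CONN_DEC_REF is needed here.
	 */
	squeue_enter(connp->conn_sqp, mp, tcp_input, connp, SQTAG_TCP_INPUT);
}
#endif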

/*
 * Values for squeue switch:
 * 1: squeue_enter_nodrain
 * 2: squeue_enter
 * 3: squeue_fill
 */
int tcp_squeue_close = 2;	/* Settable in /etc/system */
int tcp_squeue_wput = 2;

squeue_func_t tcp_squeue_close_proc;
squeue_func_t tcp_squeue_wput_proc;

/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
int tcp_tx_pull_len = 16;

/*
 * TCP Statistics.
 *
 * How TCP statistics work.
 *
 * There are two types of statistics invoked by two macros.
 *
 * TCP_STAT(name) does non-atomic increment of a named stat counter. It is
 * supposed to be used in non MT-hot paths of the code.
 *
 * TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
 * supposed to be used for DEBUG purposes and may be used on a hot path.
 *
 * Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
 * (use "kstat tcp" to get them).
 *
 * There is also an additional debugging facility that marks tcp_clean_death()
 * instances and saves them in the tcp_t structure. It is triggered by the
 * TCP_TAG_CLEAN_DEATH define. Also, there is a global array of counters for
 * tcp_clean_death() calls that counts the number of times each tag was hit.
 * It is triggered by the TCP_CLD_COUNTERS define.
 *
 * How to add new counters.
 *
 * 1) Add a field in the tcp_stat structure describing your counter.
 * 2) Add a line in the template in tcp_kstat2_init() with the name
 *    of the counter.
 *
 *    IMPORTANT!! - make sure that both are in sync !!
 * 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
 *
 * Please avoid using private counters which are not kstat-exported.
 *
 * TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death()
 * instances in the tcp_t structure.
 *
 * TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death tags.
 */

#ifndef TCP_DEBUG_COUNTER
#ifdef DEBUG
#define	TCP_DEBUG_COUNTER 1
#else
#define	TCP_DEBUG_COUNTER 0
#endif
#endif

#define	TCP_CLD_COUNTERS 0

#define	TCP_TAG_CLEAN_DEATH 1
#define	TCP_MAX_CLEAN_DEATH_TAG 32

#ifdef lint
static int _lint_dummy_;
#endif

#if TCP_CLD_COUNTERS
static uint_t tcp_clean_death_stat[TCP_MAX_CLEAN_DEATH_TAG];
#define	TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
#elif defined(lint)
#define	TCP_CLD_STAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_CLD_STAT(x)
#endif

#if TCP_DEBUG_COUNTER
#define	TCP_DBGSTAT(tcps, x)	\
	atomic_add_64(&((tcps)->tcps_statistics.x.value.ui64), 1)
#define	TCP_G_DBGSTAT(x)	\
	atomic_add_64(&(tcp_g_statistics.x.value.ui64), 1)
#elif defined(lint)
#define	TCP_DBGSTAT(tcps, x) ASSERT(_lint_dummy_ == 0);
#define	TCP_G_DBGSTAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_DBGSTAT(tcps, x)
#define	TCP_G_DBGSTAT(x)
#endif

#define	TCP_G_STAT(x)	(tcp_g_statistics.x.value.ui64++)

tcp_g_stat_t tcp_g_statistics;
kstat_t	*tcp_g_kstat;
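
/*
 * Illustrative sketch (not part of the original code) of the three steps
 * described above for adding a counter.  "tcp_example_events" is a
 * hypothetical counter name; the tcp_stat_t field and the matching kstat
 * template entry in tcp_kstat2_init() must be kept in sync.
 *
 * In tcp_stat_t (step 1):
 *	kstat_named_t	tcp_example_events;
 *
 * In the template in tcp_kstat2_init() (step 2):
 *	{ "tcp_example_events",	KSTAT_DATA_UINT64 },
 *
 * At the point of interest (step 3):
 *	TCP_DBGSTAT(tcps, tcp_example_events);
 */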

/*
 * Call either ip_output or ip_output_v6. This replaces putnext() calls on the
 * tcp write side.
 */
#define	CALL_IP_WPUT(connp, q, mp) {					\
	tcp_stack_t	*tcps;						\
									\
	tcps = connp->conn_netstack->netstack_tcp;			\
	ASSERT(((q)->q_flag & QREADR) == 0);				\
	TCP_DBGSTAT(tcps, tcp_ip_output);				\
	connp->conn_send(connp, (mp), (q), IP_WPUT);			\
}

/* Macros for timestamp comparisons */
#define	TSTMP_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)
#define	TSTMP_LT(a, b)	((int32_t)((a)-(b)) < 0)

/*
 * Parameters for TCP Initial Send Sequence number (ISS) generation. When
 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
 * by adding three components: a time component which grows by 1 every 4096
 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
 * a per-connection component which grows by 125000 for every new connection;
 * and an "extra" component that grows by a random amount centered
 * approximately on 64000.  This causes the ISS generator to cycle every
 * 4.89 hours if no TCP connections are made, and faster if connections are
 * made.
 *
 * When tcp_strong_iss is set to 0, ISS is calculated by adding two
 * components: a time component which grows by 250000 every second; and
 * a per-connection component which grows by 125000 for every new connection.
 *
 * A third method, when tcp_strong_iss is set to 2, for generating ISS is
 * prescribed by Steve Bellovin.  This involves adding time, the 125000 per
 * connection, and a one-way hash (MD5) of the connection ID <sport, dport,
 * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
 * password.
 */
#define	ISS_INCR	250000
#define	ISS_NSEC_SHT	12

static sin_t	sin_null;	/* Zero address for quick clears */
static sin6_t	sin6_null;	/* Zero address for quick clears */

/*
 * This implementation follows the 4.3BSD interpretation of the urgent
 * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
 * incompatible changes in protocols like telnet and rlogin.
 */
#define	TCP_OLD_URP_INTERPRETATION	1

#define	TCP_IS_DETACHED_NONEAGER(tcp)	\
	(TCP_IS_DETACHED(tcp) &&	\
	    (!(tcp)->tcp_hard_binding))

/*
 * TCP reassembly macros. We hide starting and ending sequence numbers in
 * b_next and b_prev of messages on the reassembly queue.  The messages are
 * chained using b_cont.  These macros are used in tcp_reass() so we don't
 * have to see the ugly casts and assignments.
 */
#define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
#define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
					(mblk_t *)(uintptr_t)(u))
#define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
#define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
					(mblk_t *)(uintptr_t)(u))
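
/*
 * Illustrative sketch (not part of the original code): tagging an
 * out-of-order segment with its sequence range before queueing it, the way
 * tcp_reass() uses the macros above.  The function name and its use are
 * hypothetical; only the macros are from this file.
 */
#ifdef	TCP_REASS_EXAMPLE
static void
tcp_example_tag_segment(mblk_t *mp, uint32_t start, uint32_t seg_len)
{
	/* Stash the starting and ending sequence numbers in the mblk. */
	TCP_REASS_SET_SEQ(mp, start);
	TCP_REASS_SET_END(mp, start + seg_len);
	ASSERT(TCP_REASS_SEQ(mp) == start);
}
#endif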

/*
 * Implementation of TCP Timers.
 * =============================
 *
 * INTERFACE:
 *
 * There are two basic functions dealing with tcp timers:
 *
 *	timeout_id_t	tcp_timeout(connp, func, time)
 *	clock_t		tcp_timeout_cancel(connp, timeout_id)
 *	TCP_TIMER_RESTART(tcp, intvl)
 *
 * tcp_timeout() starts a timer for the 'tcp' instance arranging to call
 * 'func' after 'time' ticks have passed. The function called by timeout()
 * must adhere to the same restrictions as a driver soft interrupt handler -
 * it must not sleep or call other functions that might sleep. The value
 * returned is the opaque non-zero timeout identifier that can be passed to
 * tcp_timeout_cancel() to cancel the request. The call to tcp_timeout() may
 * fail, in which case it returns zero. This is different from the
 * timeout(9F) function which never fails.
 *
 * The call-back function 'func' always receives 'connp' as its single
 * argument. It is always executed in the squeue corresponding to the tcp
 * structure. The tcp structure is guaranteed to be present at the time the
 * call-back is called.
 *
 * NOTE: The call-back function 'func' is never called if tcp is in
 * the TCPS_CLOSED state.
 *
 * tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
 * request. Locks acquired by the call-back routine should not be held across
 * the call to tcp_timeout_cancel() or a deadlock may result.
 *
 * tcp_timeout_cancel() returns -1 if it cannot cancel the timeout request.
 * Otherwise, it returns an integer value greater than or equal to 0. In
 * particular, if the call-back function is already placed on the squeue, it
 * cannot be canceled.
 *
 * NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
 *	within squeue context corresponding to the tcp instance. Since the
 *	call-back is also called via the same squeue, there are no race
 *	conditions described in the untimeout(9F) manual page since all calls
 *	are strictly serialized.
 *
 * TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
 *	stored in tcp_timer_tid and starts a new one using
 *	MSEC_TO_TICK(intvl). It always uses the tcp_timer() function as a
 *	call-back and stores the return value of tcp_timeout() in the
 *	tcp->tcp_timer_tid field.
 *
 * NOTE: since the timeout cancellation is not guaranteed, the cancelled
 *	call-back may still be called, so it is possible tcp_timer() will be
 *	called several times. This should not be a problem since tcp_timer()
 *	should always check the tcp instance state.
 *
 *
 * IMPLEMENTATION:
 *
 * TCP timers are implemented using a three-stage process. The call to
 * tcp_timeout() uses the timeout(9F) function to call the
 * tcp_timer_callback() function when the timer expires. The
 * tcp_timer_callback() arranges the call of the tcp_timer_handler()
 * function via squeue corresponding to the tcp instance. The
 * tcp_timer_handler() calls the actual requested timeout call-back and
 * passes the tcp instance as an argument to it. Information is passed
 * between stages using the tcp_timer_t structure which contains the connp
 * pointer, the tcp call-back to call and the timeout id returned by
 * timeout(9F).
 *
 * The tcp_timer_t structure is not used directly, it is embedded in an
 * mblk_t - like structure that is used to enter a squeue. The mp->b_rptr of
 * this pseudo mblk points to the beginning of the tcp_timer_t structure.
 * tcp_timeout() returns the pointer to this mblk.
 *
 * The pseudo mblk is allocated from a special tcp_timer_cache kmem cache. It
 * looks like a normal mblk without an actual dblk attached to it.
 *
 * To optimize performance each tcp instance holds a small cache of timer
 * mblocks. In the current implementation it caches up to two timer mblocks
 * per tcp instance. The cache is preserved over tcp frees and is only freed
 * when the whole tcp structure is destroyed by its kmem destructor. Since
 * all tcp timer processing happens on the corresponding squeue, the cache
 * manipulation does not require any locks. Experiments show that the
 * majority of timer mblock allocations are satisfied from the tcp cache
 * and do not involve kmem calls.
 *
 * tcp_timeout() places a refhold on the connp instance which guarantees
 * that it will be present at the time the call-back function fires. The
 * tcp_timer_handler() drops the reference after calling the call-back, so
 * the call-back function does not need to manipulate the references
 * explicitly.
 */

typedef struct tcp_timer_s {
	conn_t	*connp;
	void	(*tcpt_proc)(void *);
	timeout_id_t	tcpt_tid;
} tcp_timer_t;
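
/*
 * Illustrative sketch (not part of the original code): arming a timer from
 * within squeue context per the interface described above.
 * tcp_keepalive_killer() is a real call-back in this file and tcp_ka_tid /
 * tcp_ka_interval are real tcp_t fields; the wrapper function itself is
 * hypothetical.
 */
#ifdef	TCP_TIMER_EXAMPLE
static void
tcp_example_start_keepalive(tcp_t *tcp)
{
	/* Must be called from the squeue of this tcp instance. */
	tcp->tcp_ka_tid = tcp_timeout(tcp->tcp_connp, tcp_keepalive_killer,
	    MSEC_TO_TICK(tcp->tcp_ka_interval));
	/*
	 * Unlike timeout(9F), tcp_timeout() may fail; a zero return means
	 * the timer could not be started.
	 */
	ASSERT(tcp->tcp_ka_tid == 0 || tcp->tcp_ka_tid != 0);
}
#endif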

static kmem_cache_t *tcp_timercache;
kmem_cache_t	*tcp_sack_info_cache;
kmem_cache_t	*tcp_iphc_cache;

/*
 * For scalability, we must not run a timer for every TCP connection
 * in TIME_WAIT state.  To see why, consider (for time wait interval of
 * 4 minutes):
 *	1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
 *
 * This list is ordered by time, so you need only delete from the head
 * until you get to entries which aren't old enough to delete yet.
 * The list consists of only the detached TIME_WAIT connections.
 *
 * Note that the timer (tcp_time_wait_expire) is started when the tcp_t
 * becomes detached TIME_WAIT (either by changing the state and already
 * being detached or the other way around). This means that the TIME_WAIT
 * state can be extended (up to doubled) if the connection doesn't become
 * detached for a long time.
 *
 * The list manipulations (including tcp_time_wait_next/prev)
 * are protected by the tcp_time_wait_lock. The content of the
 * detached TIME_WAIT connections is protected by the normal perimeters.
 *
 * This list is per squeue and squeues are shared across the tcp_stack_t's.
 * Things on tcp_time_wait_head remain associated with the tcp_stack_t
 * and conn_netstack.
 * The tcp_t's that are added to tcp_free_list are disassociated and
 * have NULL tcp_tcps and conn_netstack pointers.
 */
typedef struct tcp_squeue_priv_s {
	kmutex_t	tcp_time_wait_lock;
	timeout_id_t	tcp_time_wait_tid;
	tcp_t		*tcp_time_wait_head;
	tcp_t		*tcp_time_wait_tail;
	tcp_t		*tcp_free_list;
	uint_t		tcp_free_list_cnt;
} tcp_squeue_priv_t;

/*
 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
 * Running it every 5 seconds seems to give the best results.
 */
#define	TCP_TIME_WAIT_DELAY drv_usectohz(5000000)

/*
 * To prevent memory hogging, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	49152

/*
 *  PAWS needs a timer for 24 days.  This is the number of ticks in 24 days
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Bind hash list size and hash function.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_BIND_FANOUT_SIZE	512
#define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
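
/*
 * Illustrative sketch (not part of the original code): because
 * TCP_BIND_FANOUT_SIZE is a power of 2, TCP_BIND_HASH is a simple mask of
 * the host-byte-order port.  The function below is hypothetical; the real
 * lookups index the stack's bind fanout table directly.
 */
#ifdef	TCP_BIND_HASH_EXAMPLE
static uint_t
tcp_example_bind_bucket(in_port_t lport_host_order)
{
	/* e.g. port 8080 hashes to 8080 & 511 == 400 */
	return (TCP_BIND_HASH(htons(lport_host_order)));
}
#endif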
/*
 * Size of listen and acceptor hash list.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_FANOUT_SIZE		256

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_FANOUT_SIZE - 1))
#endif	/* _ILP32 */

#define	IP_ADDR_CACHE_SIZE	2048
#define	IP_ADDR_CACHE_HASH(faddr)					\
	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))

/* Hash for HSPs uses all 32 bits, since both networks and hosts are in table */
#define	TCP_HSP_HASH_SIZE 256

#define	TCP_HSP_HASH(addr)					\
	(((addr>>24) ^ (addr >>16) ^			\
	    (addr>>8) ^ (addr)) % TCP_HSP_HASH_SIZE)

/*
 * TCP options struct returned from tcp_parse_options.
 */
typedef struct tcp_opt_s {
	uint32_t	tcp_opt_mss;
	uint32_t	tcp_opt_wscale;
	uint32_t	tcp_opt_ts_val;
	uint32_t	tcp_opt_ts_ecr;
	tcp_t		*tcp;
} tcp_opt_t;

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif

/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16

/* TCP option length */
#define	TCPOPT_NOP_LEN		1
#define	TCPOPT_MAXSEG_LEN	4
#define	TCPOPT_WS_LEN		3
#define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
#define	TCPOPT_TSTAMP_LEN	10
#define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
#define	TCPOPT_SACK_OK_LEN	2
#define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
#define	TCPOPT_REAL_SACK_LEN	4
#define	TCPOPT_MAX_SACK_LEN	36
#define	TCPOPT_HEADER_LEN	2

/* TCP cwnd burst factor. */
#define	TCP_CWND_INFINITE	65535
#define	TCP_CWND_SS		3
#define	TCP_CWND_NORMAL		5

/* Maximum TCP initial cwin (start/restart). */
#define	TCP_MAX_INIT_CWND	8

/*
 * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
 * either tcp_slow_start_initial or tcp_slow_start_after_idle
 * depending on the caller.  If the upper layer has not used the
 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
 * If the upper layer has set tcp_init_cwnd, just use it to
 * calculate the tcp_cwnd.
 */
#define	SET_TCP_INIT_CWND(tcp, mss, def_max_init_cwnd)			\
{									\
	if ((tcp)->tcp_init_cwnd == 0) {				\
		(tcp)->tcp_cwnd = MIN(def_max_init_cwnd * (mss),	\
		    MIN(4 * (mss), MAX(2 * (mss), 4380 / (mss) * (mss)))); \
	} else {							\
		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss);		\
	}								\
	tcp->tcp_cwnd_cnt = 0;						\
}
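
/*
 * Worked example (illustrative, not part of the original code) of the
 * RFC 3390 formula above, with tcp_init_cwnd == 0 and
 * def_max_init_cwnd == 4:
 *
 *	mss == 1460:	4380 / 1460 * 1460 = 4380 (integer division)
 *			MAX(2 * 1460, 4380) = 4380
 *			MIN(4 * 1460, 4380) = 4380
 *			MIN(4 * 1460, 4380) = 4380 -> cwnd of 3 segments
 *
 *	mss == 536:	4380 / 536 * 536 = 4288
 *			MAX(1072, 4288) = 4288
 *			MIN(2144, 4288) = 2144
 *			MIN(2144, 2144) = 2144 -> cwnd of 4 segments
 */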

/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;

/* Host Specific Parameter structure */
typedef struct tcp_hsp {
	struct tcp_hsp	*tcp_hsp_next;
	in6_addr_t	tcp_hsp_addr_v6;
	in6_addr_t	tcp_hsp_subnet_v6;
	uint_t		tcp_hsp_vers;	/* IPV4_VERSION | IPV6_VERSION */
	int32_t		tcp_hsp_sendspace;
	int32_t		tcp_hsp_recvspace;
	int32_t		tcp_hsp_tstamp;
} tcp_hsp_t;
#define	tcp_hsp_addr	V4_PART_OF_V6(tcp_hsp_addr_v6)
#define	tcp_hsp_subnet	V4_PART_OF_V6(tcp_hsp_subnet_v6)

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_conn_request(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2);
void		tcp_accept_finish(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_proto(void *arg, mblk_t *mp, void *arg2);
void		tcp_input(void *arg, mblk_t *mp, void *arg2);
void		tcp_rput_data(void *arg, mblk_t *mp, void *arg2);
static void	tcp_close_output(void *arg, mblk_t *mp, void *arg2);
void		tcp_output(void *arg, mblk_t *mp, void *arg2);
static void	tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2);
static void	tcp_timer_handler(void *arg, mblk_t *mp, void *arg2);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2);


/* Prototype for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static void	tcp_accept(tcp_t *tcp, mblk_t *mp);
static void	tcp_accept_swap(tcp_t *listener, tcp_t *acceptor,
		    tcp_t *eager);
static int	tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp);
static in_port_t tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr,
    int reuseaddr, boolean_t quick_connect, boolean_t bind_to_req_port_only,
    boolean_t user_specified);
static void	tcp_closei_local(tcp_t *tcp);
static void	tcp_close_detached(tcp_t *tcp);
static boolean_t tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph,
		    mblk_t *idmp, mblk_t **defermp);
static void	tcp_connect(tcp_t *tcp, mblk_t *mp);
static void	tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static void	tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo, uint_t srcid,
		    uint32_t scope_id);
static int	tcp_clean_death(tcp_t *tcp, int err, uint8_t tag);
static void	tcp_def_q_set(tcp_t *tcp, mblk_t *mp);
static void	tcp_disconnect(tcp_t *tcp, mblk_t *mp);
static char	*tcp_display(tcp_t *tcp, char *, char);
static boolean_t tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum);
static void	tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only);
static void	tcp_eager_unlink(tcp_t *tcp);
static void	tcp_err_ack(tcp_t *tcp, mblk_t *mp, int tlierr,
		    int unixerr);
static void	tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
		    int tlierr, int unixerr);
static int	tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_tpistate(tcp_t *tcp);
static void	tcp_bind_hash_insert(tf_t *tf, tcp_t *tcp,
    int caller_holds_lock);
static void	tcp_bind_hash_remove(tcp_t *tcp);
static tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *);
void		tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp);
static void	tcp_acceptor_hash_remove(tcp_t *tcp);
static void	tcp_capability_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_info_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *mp);
void		tcp_g_q_setup(tcp_stack_t *);
void		tcp_g_q_create(tcp_stack_t *);
void		tcp_g_q_destroy(tcp_stack_t *);
static int	tcp_header_init_ipv4(tcp_t *tcp);
static int	tcp_header_init_ipv6(tcp_t *tcp);
int		tcp_init(tcp_t *tcp, queue_t *q);
static int	tcp_init_values(tcp_t *tcp);
static mblk_t	*tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic);
static mblk_t	*tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim,
		    t_scalar_t addr_length);
static void	tcp_ip_ire_mark_advice(tcp_t *tcp);
static void	tcp_ip_notify(tcp_t *tcp);
static mblk_t	*tcp_ire_mp(mblk_t *mp);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_keepalive_killer(void *arg);
static int	tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt);
static void	tcp_mss_set(tcp_t *tcp, uint32_t size, boolean_t do_ss);
static int	tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp,
		    int *do_disconnectp, int *t_errorp, int *sys_errorp);
static boolean_t tcp_allow_connopt_set(int level, int name);
int		tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_set(queue_t *q, uint_t optset_context, int level,
		    int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
		    uchar_t *outvalp, void *thisdg_attrs, cred_t *cr,
		    mblk_t *mblk);
static void	tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha);
static int	tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly,
		    uchar_t *ptr, uint_t len);
static int	tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static boolean_t tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt,
    tcp_stack_t *);
static int	tcp_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *);
static int	tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_cnt);
static mblk_t	*tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start);
static void	tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval,
		    tcp_t *thisstream, cred_t *cr);

static uint_t	tcp_rcv_drain(queue_t *q, tcp_t *tcp);
static void	tcp_sack_rxmit(tcp_t *tcp, uint_t *flags);
static boolean_t tcp_send_rst_chk(tcp_stack_t *);
static void	tcp_ss_rexmit(tcp_t *tcp);
static mblk_t	*tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp,
		    ip6_pkt_t *ipp);
static void	tcp_process_options(tcp_t *, tcph_t *);
static void	tcp_rput_common(tcp_t *tcp, mblk_t *mp);
static void	tcp_rsrv(queue_t *q);
static int	tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd);
static int	tcp_snmp_state(tcp_t *tcp);
static int	tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_host_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static void	tcp_timer(void *arg);
static void	tcp_timer_callback(void *);
static in_port_t tcp_update_next_port(in_port_t port, const tcp_t *tcp,
    boolean_t random);
static in_port_t tcp_get_next_priv_port(const tcp_t *);
static void	tcp_wput_sock(queue_t *q, mblk_t *mp);
void		tcp_wput_accept(queue_t *q, mblk_t *mp);
static void	tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent);
static void	tcp_wput_flush(tcp_t *tcp, mblk_t *mp);
static void	tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int	tcp_send(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static int	tcp_multisend(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static void	tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now,
		    int num_sack_blk);
static void	tcp_wsrv(queue_t *q);
static int	tcp_xmit_end(tcp_t *tcp);
static void	tcp_ack_timer(void *arg);
static mblk_t	*tcp_ack_mp(tcp_t *tcp);
static void	tcp_xmit_early_reset(char *str, mblk_t *mp,
		    uint32_t seq, uint32_t ack, int ctl, uint_t ip_hdr_len,
		    zoneid_t zoneid, tcp_stack_t *, conn_t *connp);
static void	tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq,
		    uint32_t ack, int ctl);
static tcp_hsp_t *tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *);
static tcp_hsp_t *tcp_hsp_lookup_ipv6(in6_addr_t *addr, tcp_stack_t *);
static int	setmaxps(queue_t *q, int maxpsz);
static void	tcp_set_rto(tcp_t *, time_t);
static boolean_t tcp_check_policy(tcp_t *, mblk_t *, ipha_t *, ip6_t *,
		    boolean_t, boolean_t);
static void	tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp,
		    boolean_t ipsec_mctl);
static mblk_t	*tcp_setsockopt_mp(int level, int cmd,
		    char *opt, int optlen);
static int	tcp_build_hdrs(queue_t *, tcp_t *);
static void	tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp,
		    uint32_t seg_seq, uint32_t seg_ack, int seg_len,
		    tcph_t *tcph);
boolean_t	tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp);
boolean_t	tcp_reserved_port_add(int, in_port_t *, in_port_t *);
boolean_t	tcp_reserved_port_del(in_port_t, in_port_t);
boolean_t	tcp_reserved_port_check(in_port_t, tcp_stack_t *);
static tcp_t	*tcp_alloc_temp_tcp(in_port_t, tcp_stack_t *);
static int	tcp_reserved_port_list(queue_t *, mblk_t *, caddr_t, cred_t *);
static mblk_t	*tcp_mdt_info_mp(mblk_t *);
static void	tcp_mdt_update(tcp_t *, ill_mdt_capab_t *, boolean_t);
static int	tcp_mdt_add_attrs(multidata_t *, const mblk_t *,
		    const boolean_t, const uint32_t, const uint32_t,
		    const uint32_t, const uint32_t, tcp_stack_t *);
static void	tcp_multisend_data(tcp_t *, ire_t *, const ill_t *, mblk_t *,
		    const uint_t, const uint_t, boolean_t *);
static mblk_t	*tcp_lso_info_mp(mblk_t *);
static void	tcp_lso_update(tcp_t *, ill_lso_capab_t *);
static void	tcp_send_data(tcp_t *, queue_t *, mblk_t *);
extern mblk_t	*tcp_timermp_alloc(int);
extern void	tcp_timermp_free(tcp_t *);
static void	tcp_timer_free(tcp_t *tcp, mblk_t *mp);
static void	tcp_stop_lingering(tcp_t *tcp);
static void	tcp_close_linger_timeout(void *arg);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_shutdown(netstackid_t stackid, void *arg);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);
static void	*tcp_g_kstat_init(tcp_g_stat_t *);
static void	tcp_g_kstat_fini(kstat_t *);
static void	*tcp_kstat_init(netstackid_t, tcp_stack_t *);
static void	tcp_kstat_fini(netstackid_t, kstat_t *);
static void	*tcp_kstat2_init(netstackid_t, tcp_stat_t *);
static void	tcp_kstat2_fini(netstackid_t, kstat_t *);
static int	tcp_kstat_update(kstat_t *kp, int rw);
void		tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp);
static int	tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
		    tcph_t *tcph, uint_t ipvers, mblk_t *idmp);
static int	tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha,
		    tcph_t *tcph, mblk_t *idmp);
static squeue_func_t tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_close(queue_t *, int);
static int	tcpclose_accept(queue_t *);

static void	tcp_squeue_add(squeue_t *);
static boolean_t tcp_zcopy_check(tcp_t *);
static void	tcp_zcopy_notify(tcp_t *);
static mblk_t	*tcp_zcopy_disable(tcp_t *, mblk_t *);
static mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, int);
static void	tcp_ire_ill_check(tcp_t *, ire_t *, ill_t *, boolean_t);

extern void	tcp_kssl_input(tcp_t *, mblk_t *);

void tcp_eager_kill(void *arg, mblk_t *mp, void *arg2);
void tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2);

/*
 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
 *
 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
 * (defined in tcp.h) needs to be filled in and passed into the kernel
 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
 * structure contains the four-tuple of a TCP connection and a range of TCP
 * states (specified by ac_start and ac_end). The use of wildcard addresses
 * and ports is allowed. Connections with a matching four tuple and a state
 * within the specified range will be aborted. The valid states for the
 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to
 * TCPS_TIME_WAIT, inclusive.
 *
 * An application which has its connection aborted by this ioctl will receive
 * an error that is dependent on the connection state at the time of the
 * abort. If the connection state is < TCPS_TIME_WAIT, an application should
 * behave as though a RST packet has been received.  If the connection state
 * is equal to TCPS_TIME_WAIT, the 2MSL timeout will immediately be canceled
 * by the kernel and all resources associated with the connection will be
 * freed.
 */
static mblk_t	*tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
static void	tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
static void	tcp_ioctl_abort_handler(tcp_t *, mblk_t *);
static int	tcp_ioctl_abort(tcp_ioc_abort_conn_t *, tcp_stack_t *tcps);
static void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
static int	tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
    boolean_t, tcp_stack_t *);
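
/*
 * Illustrative userland sketch (not part of the original code) of invoking
 * TCP_IOC_ABORT_CONN as described above.  Only ac_start and ac_end are
 * named in that comment; consult tcp.h for the four-tuple fields of
 * tcp_ioc_abort_conn_t, which are left zeroed (wildcard) here.
 *
 *	tcp_ioc_abort_conn_t conn;
 *	struct strioctl ioc;
 *
 *	(void) memset(&conn, 0, sizeof (conn));	// wildcard addrs/ports
 *	conn.ac_start = TCPS_SYN_SENT;
 *	conn.ac_end = TCPS_TIME_WAIT;
 *	ioc.ic_cmd = TCP_IOC_ABORT_CONN;
 *	ioc.ic_timout = -1;
 *	ioc.ic_len = sizeof (conn);
 *	ioc.ic_dp = (char *)&conn;
 *	if (ioctl(fd, I_STR, &ioc) < 0)
 *		perror("TCP_IOC_ABORT_CONN");
 */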

static struct module_info tcp_rinfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since the eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcpclose_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_wput_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP loopback (read side only)
 * The open routine is only used for reopens, thus no need to
 * have a separate one for tcp_openv6.
 */
struct qinit tcp_loopback_rinit = {
	(pfi_t)0, (pfi_t)tcp_rsrv, tcp_openv4, tcp_close, (pfi_t)0,
	&tcp_rinfo, NULL, tcp_fuse_rrw, tcp_fuse_rinfop, STRUIOT_STANDARD
};

/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};

/*
 * Have to ensure that tcp_g_q_close is not done by an
 * interrupt thread.
 */
static taskq_t *tcp_taskq;

/*
 * TCP has a private interface for other kernel modules to reserve a
 * port range for them to use.  Once reserved, TCP will not use any ports
 * in the range.  This interface relies on the TCP_EXCLBIND feature.  If
 * the semantics of TCP_EXCLBIND is changed, implementation of this interface
 * has to be verified.
 *
 * There can be TCP_RESERVED_PORTS_ARRAY_MAX_SIZE port ranges.  Each port
 * range can cover at most TCP_RESERVED_PORTS_RANGE_MAX ports.  A port
 * range is [port a, port b] inclusive.  And each port range is between
 * TCP_SMALLEST_RESERVED_PORT and TCP_LARGEST_RESERVED_PORT inclusive.
 *
 * Note that the default anonymous port range starts from 32768.  There is
 * no port "collision" between that and the reserved port range.  If there
 * is port collision (because the default smallest anonymous port is lowered
 * or some apps specifically bind to ports in the reserved port range), the
 * system may not be able to reserve a port range even if there are enough
 * unbound ports, as a reserved port range must consist of consecutive ports.
 */
#define	TCP_RESERVED_PORTS_ARRAY_MAX_SIZE	5
#define	TCP_RESERVED_PORTS_RANGE_MAX		1000
#define	TCP_SMALLEST_RESERVED_PORT		10240
#define	TCP_LARGEST_RESERVED_PORT		20480

/* Structure to represent those reserved port ranges. */
typedef struct tcp_rport_s {
	in_port_t	lo_port;
	in_port_t	hi_port;
	tcp_t		**temp_tcp_array;
} tcp_rport_t;
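
/*
 * Illustrative sketch (not part of the original code): how another kernel
 * module might reserve and later release a range of ports through the
 * private interface described above.  The signatures match the prototypes
 * in this file; the assumption that the int argument is the range size and
 * that the chosen bounds come back via the out parameters is ours.
 */
#ifdef	TCP_RPORT_EXAMPLE
static boolean_t
tcp_example_reserve_ports(void)
{
	in_port_t lo_port, hi_port;

	/* Ask for 4 consecutive ports within the reserved port window. */
	if (!tcp_reserved_port_add(4, &lo_port, &hi_port))
		return (B_FALSE);
	/* ... use [lo_port, hi_port] ... then give them back. */
	return (tcp_reserved_port_del(lo_port, hi_port));
}
#endif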

/* Settable only in /etc/system.  Move to ndd? */
boolean_t tcp_icmp_source_quench = B_FALSE;

/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))

/* Template for response to info request. */
static struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

static struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

#define	MS	1L
#define	SECONDS	(1000 * MS)
#define	MINUTES	(60 * SECONDS)
#define	HOURS	(60 * MINUTES)
#define	DAYS	(24 * HOURS)

#define	PARAM_MAX (~(uint32_t)0)

/* Max size IP datagram is 64k - 1 */
#define	TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (ipha_t) + sizeof (tcph_t)))
#define	TCP_MSS_MAX_IPV6 (IP_MAXPACKET - (sizeof (ip6_t) + sizeof (tcph_t)))
/* Max of the above */
#define	TCP_MSS_MAX	TCP_MSS_MAX_IPV4

/* Largest TCP port number */
#define	TCP_MAX_PORT	(64 * 1024 - 1)

/*
 * tcp_wroff_xtra is the extra space in front of TCP/IP header for link
 * layer header.  It has to be a multiple of 4.
 */
static tcpparam_t lcl_tcp_wroff_xtra_param = { 0, 256, 32, "tcp_wroff_xtra" };
#define	tcps_wroff_xtra	tcps_wroff_xtra_param->tcp_param_val

/*
 * All of these are alterable, within the min/max values given, at run time.
 * Note that the default value of "tcp_time_wait_interval" is four minutes,
 * per the TCP spec.
 */
/* BEGIN CSTYLED */
static tcpparam_t	lcl_tcp_param_arr[] = {
 /*min		max		value		name */
 { 1*SECONDS,	10*MINUTES,	1*MINUTES,	"tcp_time_wait_interval"},
 { 1,		PARAM_MAX,	128,		"tcp_conn_req_max_q" },
 { 0,		PARAM_MAX,	1024,		"tcp_conn_req_max_q0" },
 { 1,		1024,		1,		"tcp_conn_req_min" },
 { 0*MS,	20*SECONDS,	0*MS,		"tcp_conn_grace_period" },
 { 128,		(1<<30),	1024*1024,	"tcp_cwnd_max" },
 { 0,		10,		0,		"tcp_debug" },
 { 1024,	(32*1024),	1024,		"tcp_smallest_nonpriv_port"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_cinterval"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_linterval"},
 { 500*MS,	PARAM_MAX,	8*MINUTES,	"tcp_ip_abort_interval"},
 { 1*SECONDS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_cinterval"},
 { 500*MS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_interval"},
 { 1,		255,		64,		"tcp_ipv4_ttl"},
 { 10*SECONDS,	10*DAYS,	2*HOURS,	"tcp_keepalive_interval"},
 { 0,		100,		10,		"tcp_maxpsz_multiplier" },
 { 1,		TCP_MSS_MAX_IPV4, 536,		"tcp_mss_def_ipv4"},
 { 1,		TCP_MSS_MAX_IPV4, TCP_MSS_MAX_IPV4, "tcp_mss_max_ipv4"},
 { 1,		TCP_MSS_MAX,	108,		"tcp_mss_min"},
 { 1,		(64*1024)-1,	(4*1024)-1,	"tcp_naglim_def"},
 { 1*MS,	20*SECONDS,	3*SECONDS,	"tcp_rexmit_interval_initial"},
 { 1*MS,	2*HOURS,	60*SECONDS,	"tcp_rexmit_interval_max"},
 { 1*MS,	2*HOURS,	400*MS,		"tcp_rexmit_interval_min"},
 { 1*MS,	1*MINUTES,	100*MS,		"tcp_deferred_ack_interval" },
 { 0,		16,		0,		"tcp_snd_lowat_fraction" },
 { 0,		128000,		0,		"tcp_sth_rcv_hiwat" },
 { 0,		128000,		0,		"tcp_sth_rcv_lowat" },
 { 1,		10000,		3,		"tcp_dupack_fast_retransmit" },
 { 0,		1,		0,		"tcp_ignore_path_mtu" },
 { 1024,	TCP_MAX_PORT,	32*1024,	"tcp_smallest_anon_port"},
 { 1024,	TCP_MAX_PORT,	TCP_MAX_PORT,	"tcp_largest_anon_port"},
 { TCP_XMIT_LOWATER, (1<<30), TCP_XMIT_HIWATER,"tcp_xmit_hiwat"},
 { TCP_XMIT_LOWATER, (1<<30), TCP_XMIT_LOWATER,"tcp_xmit_lowat"},
 { TCP_RECV_LOWATER, (1<<30), TCP_RECV_HIWATER,"tcp_recv_hiwat"},
 { 1,		65536,		4,		"tcp_recv_hiwat_minmss"},
 { 1*SECONDS,	PARAM_MAX,	675*SECONDS,	"tcp_fin_wait_2_flush_interval"},
 { 0,		TCP_MSS_MAX,	64,		"tcp_co_min"},
 { 8192,	(1<<30),	1024*1024,	"tcp_max_buf"},
/*
 * Question:  What default value should I set for tcp_strong_iss?
 */
 { 0,		2,		1,		"tcp_strong_iss"},
 { 0,		65536,		20,		"tcp_rtt_updates"},
 { 0,		1,		1,		"tcp_wscale_always"},
 { 0,		1,		0,		"tcp_tstamp_always"},
 { 0,		1,		1,		"tcp_tstamp_if_wscale"},
 { 0*MS,	2*HOURS,	0*MS,		"tcp_rexmit_interval_extra"},
 { 0,		16,		2,		"tcp_deferred_acks_max"},
 { 1,		16384,		4,		"tcp_slow_start_after_idle"},
 { 1,		4,		4,		"tcp_slow_start_initial"},
 { 10*MS,	50*MS,		20*MS,		"tcp_co_timer_interval"},
 { 0,		2,		2,		"tcp_sack_permitted"},
 { 0,		1,		0,		"tcp_trace"},
 { 0,		1,		1,		"tcp_compression_enabled"},
 { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS,	"tcp_ipv6_hoplimit"},
 { 1,		TCP_MSS_MAX_IPV6, 1220,		"tcp_mss_def_ipv6"},
 { 1,		TCP_MSS_MAX_IPV6, TCP_MSS_MAX_IPV6, "tcp_mss_max_ipv6"},
 { 0,		1,		0,		"tcp_rev_src_routes"},
 { 10*MS,	500*MS,		50*MS,		"tcp_local_dack_interval"},
 { 100*MS,	60*SECONDS,	1*SECONDS,	"tcp_ndd_get_info_interval"},
 { 0,		16,		8,		"tcp_local_dacks_max"},
 { 0,		2,		1,		"tcp_ecn_permitted"},
 { 0,		1,		1,		"tcp_rst_sent_rate_enabled"},
 { 0,		PARAM_MAX,	40,		"tcp_rst_sent_rate"},
 { 0,		100*MS,		50*MS,		"tcp_push_timer_interval"},
 { 0,		1,		0,		"tcp_use_smss_as_mss_opt"},
 { 0,		PARAM_MAX,	8*MINUTES,	"tcp_keepalive_abort_interval"},
};
/* END CSTYLED */

/*
 * tcp_mdt_hdr_{head,tail}_min are the leading and trailing spaces of
 * each header fragment in the header buffer.  Each parameter value has
 * to be a multiple of 4 (32-bit aligned).
 */
static tcpparam_t lcl_tcp_mdt_head_param =
	{ 32, 256, 32, "tcp_mdt_hdr_head_min" };
static tcpparam_t lcl_tcp_mdt_tail_param =
	{ 0, 256, 32, "tcp_mdt_hdr_tail_min" };
#define	tcps_mdt_hdr_head_min	tcps_mdt_head_param->tcp_param_val
#define	tcps_mdt_hdr_tail_min	tcps_mdt_tail_param->tcp_param_val

/*
 * tcp_mdt_max_pbufs is the upper limit value that tcp uses to figure out
 * the maximum number of payload buffers associated per Multidata.
 */
static tcpparam_t lcl_tcp_mdt_max_pbufs_param =
	{ 1, MULTIDATA_MAX_PBUFS, MULTIDATA_MAX_PBUFS, "tcp_mdt_max_pbufs" };
#define	tcps_mdt_max_pbufs	tcps_mdt_max_pbufs_param->tcp_param_val

/* Round up the value to the nearest mss. */
#define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))

/*
 * Set ECN capable transport (ECT) code point in IP header.
 *
 * Note that there are 2 ECT code points '01' and '10', which are called
 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
 * point ECT(0) for TCP as described in RFC 2481.
 */
#define	SET_ECT(tcp, iph) \
	if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
		/* We need to clear the code point first. */ \
		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
	} else { \
		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
	}
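
/*
 * Worked example (illustrative, not part of the original code):
 * MSS_ROUNDUP(5000, 1460) = ((5000 - 1) / 1460 + 1) * 1460
 *                         = (3 + 1) * 1460 = 5840,
 * i.e. 5000 bytes round up to 4 full segments.  Similarly, SET_ECT on
 * IPv4 clears the two low ECN bits of the TOS byte (& 0xFC) and then
 * sets ECT(0) ('10' binary, IPH_ECN_ECT0); the IPv6 arm does the same
 * to the traffic class bits inside ip6_vcf.
 */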

/*
 * The format argument to pass to tcp_display().
 * DISP_PORT_ONLY means that the returned string has only port info.
 * DISP_ADDR_AND_PORT means that the returned string also contains the
 * remote and local IP address.
 */
#define	DISP_PORT_ONLY		1
#define	DISP_ADDR_AND_PORT	2

#define	NDD_TOO_QUICK_MSG \
	"ndd get info rate too high for non-privileged users, try again " \
	"later.\n"
#define	NDD_OUT_OF_BUF_MSG	"<< Out of buffer >>\n"

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)


/* Enable or disable b_cont M_MULTIDATA chaining for MDT. */
boolean_t tcp_mdt_chain = B_TRUE;

/*
 * MDT threshold in the form of effective send MSS multiplier; we take
 * the MDT path if the amount of unsent data exceeds the threshold value
 * (default threshold is 1*SMSS).
 */
uint_t tcp_mdt_smss_threshold = 1;

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD.  Otherwise, the per-connection behavior is
 * determined dynamically during tcp_adapt_ire(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;

/* Settable in /etc/system */
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
uint32_t tcp_random_anon_port = 1;

/*
 * To reach an eager in Q0 which can be dropped due to an incoming
 * new SYN request when Q0 is full, a new doubly linked list is
 * introduced. This list makes it possible to select an eager from Q0 in
 * O(1) time. This is needed to avoid spending too much time walking
 * through the long list of eagers in Q0 when tcp_drop_q0() is called.
 * Each member of this new list has to be a member of Q0.
 * This list is headed by the listener's tcp_t. When the list is empty,
 * both the pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0,
 * of the listener's tcp_t point to the listener's tcp_t itself.
 *
 * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
 * in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
 * These macros do not affect the eager's membership in Q0.
 */


#define	MAKE_DROPPABLE(listener, eager)					\
	if ((eager)->tcp_eager_next_drop_q0 == NULL) {			\
		(listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
		    = (eager);						\
		(eager)->tcp_eager_prev_drop_q0 = (listener);		\
		(eager)->tcp_eager_next_drop_q0 =			\
		    (listener)->tcp_eager_next_drop_q0;			\
		(listener)->tcp_eager_next_drop_q0 = (eager);		\
	}

#define	MAKE_UNDROPPABLE(eager)						\
	if ((eager)->tcp_eager_next_drop_q0 != NULL) {			\
		(eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0	\
		    = (eager)->tcp_eager_prev_drop_q0;			\
		(eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0	\
		    = (eager)->tcp_eager_next_drop_q0;			\
		(eager)->tcp_eager_prev_drop_q0 = NULL;			\
		(eager)->tcp_eager_next_drop_q0 = NULL;			\
	}
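
/*
 * Illustrative sketch (not part of the original code): with the circular
 * list above, a victim eager can be picked from Q0 in O(1).  The function
 * below is a hypothetical condensation of that idea; the assumption that
 * the entry on the prev side of the listener is the preferred victim is
 * ours.
 */
#ifdef	TCP_DROP_Q0_EXAMPLE
static tcp_t *
tcp_example_pick_droppable(tcp_t *listener)
{
	tcp_t *eager = listener->tcp_eager_prev_drop_q0;

	/* List is empty when the pointers refer back to the listener. */
	if (eager == listener)
		return (NULL);
	MAKE_UNDROPPABLE(eager);
	return (eager);
}
#endif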
 */
uint32_t tcp_drop_ack_unsent_cnt = 10;

/*
 * Hook functions to enable cluster networking
 * On non-clustered systems these vectors must always be NULL.
 */

void (*cl_inet_listen)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp, in_port_t lport) = NULL;
void (*cl_inet_unlisten)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp, in_port_t lport) = NULL;
void (*cl_inet_connect)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp, in_port_t lport,
    uint8_t *faddrp, in_port_t fport) = NULL;
void (*cl_inet_disconnect)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp, in_port_t lport,
    uint8_t *faddrp, in_port_t fport) = NULL;

/*
 * The following are defined in ip.c
 */
extern int (*cl_inet_isclusterwide)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp);
extern uint32_t (*cl_inet_ipident)(uint8_t protocol, sa_family_t addr_family,
    uint8_t *laddrp, uint8_t *faddrp);

#define	CL_INET_CONNECT(tcp)	{			\
	if (cl_inet_connect != NULL) {			\
		/*					\
		 * Running in cluster mode - register active	\
		 * connection information			\
		 */					\
		if ((tcp)->tcp_ipversion == IPV4_VERSION) {	\
			if ((tcp)->tcp_ipha->ipha_src != 0) {	\
				(*cl_inet_connect)(IPPROTO_TCP, AF_INET,\
				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_src)),\
				    (in_port_t)(tcp)->tcp_lport,	\
				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_dst)),\
				    (in_port_t)(tcp)->tcp_fport);	\
			}					\
		} else {					\
			if (!IN6_IS_ADDR_UNSPECIFIED(		\
			    &(tcp)->tcp_ip6h->ip6_src)) {	\
				(*cl_inet_connect)(IPPROTO_TCP, AF_INET6,\
				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_src)),\
				    (in_port_t)(tcp)->tcp_lport,	\
				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_dst)),\
				    (in_port_t)(tcp)->tcp_fport);	\
			}					\
		}						\
	}							\
}

#define	CL_INET_DISCONNECT(tcp)	{				\
	if (cl_inet_disconnect != NULL) {			\
		/*						\
		 * Running in cluster mode - deregister active	\
		 * connection information			\
		 */						\
		if ((tcp)->tcp_ipversion == IPV4_VERSION) {	\
			if ((tcp)->tcp_ip_src != 0) {		\
				(*cl_inet_disconnect)(IPPROTO_TCP,	\
				    AF_INET,				\
				    (uint8_t *)(&((tcp)->tcp_ip_src)),\
				    (in_port_t)(tcp)->tcp_lport,	\
				    (uint8_t *)				\
				    (&((tcp)->tcp_ipha->ipha_dst)),	\
				    (in_port_t)(tcp)->tcp_fport);	\
			}						\
		} else {						\
			if (!IN6_IS_ADDR_UNSPECIFIED(			\
			    &(tcp)->tcp_ip_src_v6)) {			\
				(*cl_inet_disconnect)(IPPROTO_TCP, AF_INET6,\
				    (uint8_t *)(&((tcp)->tcp_ip_src_v6)),\
				    (in_port_t)(tcp)->tcp_lport,	\
				    (uint8_t *)				\
				    (&((tcp)->tcp_ip6h->ip6_dst)),	\
				    (in_port_t)(tcp)->tcp_fport);	\
			}						\
		}							\
	}								\
}

/*
 * Cluster networking hook for traversing current connection list.
 * This routine is used to extract the current list of live connections
 * which must continue to be dispatched to this node.
 */
int cl_tcp_walk_list(int (*callback)(cl_tcp_info_t *, void *), void *arg);

static int cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *),
    void *arg, tcp_stack_t *tcps);

/*
 * Figure out the value of the window scale option.  Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
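 *
 * Worked example: with TCP_MAXWIN == 65535, a receive window of
 * tcp_rwnd == 1048576 (1 MB) must be halved five times before it
 * fits (1048576 >> 5 == 32768 <= 65535), so the loop below leaves
 * tcp_rcv_ws at 5 and the advertised window is scaled by 2^5.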
1424 */ 1425 static void 1426 tcp_set_ws_value(tcp_t *tcp) 1427 { 1428 int i; 1429 uint32_t rwnd = tcp->tcp_rwnd; 1430 1431 for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT; 1432 i++, rwnd >>= 1) 1433 ; 1434 tcp->tcp_rcv_ws = i; 1435 } 1436 1437 /* 1438 * Remove a connection from the list of detached TIME_WAIT connections. 1439 * It returns B_FALSE if it can't remove the connection from the list 1440 * as the connection has already been removed from the list due to an 1441 * earlier call to tcp_time_wait_remove(); otherwise it returns B_TRUE. 1442 */ 1443 static boolean_t 1444 tcp_time_wait_remove(tcp_t *tcp, tcp_squeue_priv_t *tcp_time_wait) 1445 { 1446 boolean_t locked = B_FALSE; 1447 1448 if (tcp_time_wait == NULL) { 1449 tcp_time_wait = *((tcp_squeue_priv_t **) 1450 squeue_getprivate(tcp->tcp_connp->conn_sqp, SQPRIVATE_TCP)); 1451 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1452 locked = B_TRUE; 1453 } else { 1454 ASSERT(MUTEX_HELD(&tcp_time_wait->tcp_time_wait_lock)); 1455 } 1456 1457 if (tcp->tcp_time_wait_expire == 0) { 1458 ASSERT(tcp->tcp_time_wait_next == NULL); 1459 ASSERT(tcp->tcp_time_wait_prev == NULL); 1460 if (locked) 1461 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1462 return (B_FALSE); 1463 } 1464 ASSERT(TCP_IS_DETACHED(tcp)); 1465 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1466 1467 if (tcp == tcp_time_wait->tcp_time_wait_head) { 1468 ASSERT(tcp->tcp_time_wait_prev == NULL); 1469 tcp_time_wait->tcp_time_wait_head = tcp->tcp_time_wait_next; 1470 if (tcp_time_wait->tcp_time_wait_head != NULL) { 1471 tcp_time_wait->tcp_time_wait_head->tcp_time_wait_prev = 1472 NULL; 1473 } else { 1474 tcp_time_wait->tcp_time_wait_tail = NULL; 1475 } 1476 } else if (tcp == tcp_time_wait->tcp_time_wait_tail) { 1477 ASSERT(tcp != tcp_time_wait->tcp_time_wait_head); 1478 ASSERT(tcp->tcp_time_wait_next == NULL); 1479 tcp_time_wait->tcp_time_wait_tail = tcp->tcp_time_wait_prev; 1480 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1481 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = NULL; 1482 } else { 1483 ASSERT(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp); 1484 ASSERT(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp); 1485 tcp->tcp_time_wait_prev->tcp_time_wait_next = 1486 tcp->tcp_time_wait_next; 1487 tcp->tcp_time_wait_next->tcp_time_wait_prev = 1488 tcp->tcp_time_wait_prev; 1489 } 1490 tcp->tcp_time_wait_next = NULL; 1491 tcp->tcp_time_wait_prev = NULL; 1492 tcp->tcp_time_wait_expire = 0; 1493 1494 if (locked) 1495 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1496 return (B_TRUE); 1497 } 1498 1499 /* 1500 * Add a connection to the list of detached TIME_WAIT connections 1501 * and set its time to expire. 
1502 */ 1503 static void 1504 tcp_time_wait_append(tcp_t *tcp) 1505 { 1506 tcp_stack_t *tcps = tcp->tcp_tcps; 1507 tcp_squeue_priv_t *tcp_time_wait = 1508 *((tcp_squeue_priv_t **)squeue_getprivate(tcp->tcp_connp->conn_sqp, 1509 SQPRIVATE_TCP)); 1510 1511 tcp_timers_stop(tcp); 1512 1513 /* Freed above */ 1514 ASSERT(tcp->tcp_timer_tid == 0); 1515 ASSERT(tcp->tcp_ack_tid == 0); 1516 1517 /* must have happened at the time of detaching the tcp */ 1518 ASSERT(tcp->tcp_ptpahn == NULL); 1519 ASSERT(tcp->tcp_flow_stopped == 0); 1520 ASSERT(tcp->tcp_time_wait_next == NULL); 1521 ASSERT(tcp->tcp_time_wait_prev == NULL); 1522 ASSERT(tcp->tcp_time_wait_expire == NULL); 1523 ASSERT(tcp->tcp_listener == NULL); 1524 1525 tcp->tcp_time_wait_expire = ddi_get_lbolt(); 1526 /* 1527 * The value computed below in tcp->tcp_time_wait_expire may 1528 * appear negative or wrap around. That is ok since our 1529 * interest is only in the difference between the current lbolt 1530 * value and tcp->tcp_time_wait_expire. But the value should not 1531 * be zero, since it means the tcp is not in the TIME_WAIT list. 1532 * The corresponding comparison in tcp_time_wait_collector() uses 1533 * modular arithmetic. 1534 */ 1535 tcp->tcp_time_wait_expire += 1536 drv_usectohz(tcps->tcps_time_wait_interval * 1000); 1537 if (tcp->tcp_time_wait_expire == 0) 1538 tcp->tcp_time_wait_expire = 1; 1539 1540 ASSERT(TCP_IS_DETACHED(tcp)); 1541 ASSERT(tcp->tcp_state == TCPS_TIME_WAIT); 1542 ASSERT(tcp->tcp_time_wait_next == NULL); 1543 ASSERT(tcp->tcp_time_wait_prev == NULL); 1544 TCP_DBGSTAT(tcps, tcp_time_wait); 1545 1546 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1547 if (tcp_time_wait->tcp_time_wait_head == NULL) { 1548 ASSERT(tcp_time_wait->tcp_time_wait_tail == NULL); 1549 tcp_time_wait->tcp_time_wait_head = tcp; 1550 } else { 1551 ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL); 1552 ASSERT(tcp_time_wait->tcp_time_wait_tail->tcp_state == 1553 TCPS_TIME_WAIT); 1554 tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = tcp; 1555 tcp->tcp_time_wait_prev = tcp_time_wait->tcp_time_wait_tail; 1556 } 1557 tcp_time_wait->tcp_time_wait_tail = tcp; 1558 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1559 } 1560 1561 /* ARGSUSED */ 1562 void 1563 tcp_timewait_output(void *arg, mblk_t *mp, void *arg2) 1564 { 1565 conn_t *connp = (conn_t *)arg; 1566 tcp_t *tcp = connp->conn_tcp; 1567 tcp_stack_t *tcps = tcp->tcp_tcps; 1568 1569 ASSERT(tcp != NULL); 1570 if (tcp->tcp_state == TCPS_CLOSED) { 1571 return; 1572 } 1573 1574 ASSERT((tcp->tcp_family == AF_INET && 1575 tcp->tcp_ipversion == IPV4_VERSION) || 1576 (tcp->tcp_family == AF_INET6 && 1577 (tcp->tcp_ipversion == IPV4_VERSION || 1578 tcp->tcp_ipversion == IPV6_VERSION))); 1579 ASSERT(!tcp->tcp_listener); 1580 1581 TCP_STAT(tcps, tcp_time_wait_reap); 1582 ASSERT(TCP_IS_DETACHED(tcp)); 1583 1584 /* 1585 * Because they have no upstream client to rebind or tcp_close() 1586 * them later, we axe the connection here and now. 1587 */ 1588 tcp_close_detached(tcp); 1589 } 1590 1591 /* 1592 * Remove cached/latched IPsec references. 
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t *connp = tcp->tcp_connp;

	ASSERT(connp->conn_flags & IPCL_TCPCONN);

	if (connp->conn_latch != NULL) {
		IPLATCH_REFRELE(connp->conn_latch,
		    connp->conn_netstack);
		connp->conn_latch = NULL;
	}
	if (connp->conn_policy != NULL) {
		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
		connp->conn_policy = NULL;
	}
}

/*
 * Cleanup before placing on free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	char		*tcp_iphc;
	int		tcp_iphc_len;
	int		tcp_hdr_grown;
	tcp_sack_info_t	*tcp_sack_info;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);

	tcp_free(tcp);

	/* Release any SSL context */
	if (tcp->tcp_kssl_ent != NULL) {
		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
		tcp->tcp_kssl_ent = NULL;
	}

	if (tcp->tcp_kssl_ctx != NULL) {
		kssl_release_ctx(tcp->tcp_kssl_ctx);
		tcp->tcp_kssl_ctx = NULL;
	}
	tcp->tcp_kssl_pending = B_FALSE;

	conn_delete_ire(connp, NULL);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in the global hash list.  We
	 * know the walkers can't get to this conn because we set
	 * the CONDEMNED flag earlier and checked the reference
	 * under conn_lock, so a walker won't pick it; and by the
	 * time we do the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack and TCPS
	 * in which case it will cause the tcp_g_q_close and
	 * the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;
	TCPS_REFRELE(tcps);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_sack_info = tcp->tcp_sack_info;
	tcp_iphc = tcp->tcp_iphc;
	tcp_iphc_len = tcp->tcp_iphc_len;
	tcp_hdr_grown = tcp->tcp_hdr_grown;

	if (connp->conn_cred != NULL) {
		crfree(connp->conn_cred);
		connp->conn_cred = NULL;
	}
	if (connp->conn_peercred != NULL) {
		crfree(connp->conn_peercred);
		connp->conn_peercred = NULL;
	}
	ipcl_conn_cleanup(connp);
	connp->conn_flags = IPCL_TCPCONN;
	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_sack_info = tcp_sack_info;
	tcp->tcp_iphc = tcp_iphc;
	tcp->tcp_iphc_len = tcp_iphc_len;
	tcp->tcp_hdr_grown = tcp_hdr_grown;

	tcp->tcp_connp = connp;

	ASSERT(connp->conn_tcp == tcp);
	ASSERT(connp->conn_flags & IPCL_TCPCONN);
	connp->conn_state_flags = CONN_INCIPIENT;
	ASSERT(connp->conn_ulp == IPPROTO_TCP);
	ASSERT(connp->conn_ref == 1);
}

/*
 * Blows away all tcps whose TIME_WAIT has expired. List traversal
 * is done forwards from the head.
 * This walks all stack instances since
 * tcp_time_wait remains global across all stacks.
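 */

/*
 * Hedged, user-space sketch of the wrap-safe time comparison used by
 * tcp_time_wait_append() and tcp_time_wait_collector().  The kernel
 * compares signed clock_t values; this sketch uses unsigned arithmetic
 * so the wrap-around itself is well-defined C.  It is excluded from
 * the build (#if 0).
 */
#if 0
#include <assert.h>
#include <limits.h>

typedef unsigned long uclock_t;	/* stands in for lbolt ticks */

/* Modular comparison: valid while the stamps are within half the range. */
static int
expired(uclock_t now, uclock_t expire)
{
	return ((long)(now - expire) >= 0);
}

int
main(void)
{
	uclock_t expire = ULONG_MAX - 15;	/* shortly before wrap */
	uclock_t now = expire + 0x20;		/* wrapped past zero */

	assert(expired(now, expire));		/* deadline has passed */
	assert(!expired(expire, now));		/* and not vice versa */
	return (0);
}
#endif

/*
 * tcp_time_wait_collector() follows.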
 */
/* ARGSUSED */
void
tcp_time_wait_collector(void *arg)
{
	tcp_t *tcp;
	clock_t now;
	mblk_t *mp;
	conn_t *connp;
	kmutex_t *lock;
	boolean_t removed;

	squeue_t *sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp_time_wait->tcp_time_wait_tid = 0;

	if (tcp_time_wait->tcp_free_list != NULL &&
	    tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) {
		TCP_G_STAT(tcp_freelist_cleanup);
		while ((tcp = tcp_time_wait->tcp_free_list) != NULL) {
			tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
			tcp->tcp_time_wait_next = NULL;
			tcp_time_wait->tcp_free_list_cnt--;
			ASSERT(tcp->tcp_tcps == NULL);
			CONN_DEC_REF(tcp->tcp_connp);
		}
		ASSERT(tcp_time_wait->tcp_free_list_cnt == 0);
	}

	/*
	 * In order to reap time waits reliably, we should use a
	 * source of time that is not adjustable by the user -- hence
	 * the call to ddi_get_lbolt().
	 */
	now = ddi_get_lbolt();
	while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) {
		/*
		 * Compare times using modular arithmetic, since
		 * lbolt can wrap around.
		 */
		if ((now - tcp->tcp_time_wait_expire) < 0) {
			break;
		}

		removed = tcp_time_wait_remove(tcp, tcp_time_wait);
		ASSERT(removed);

		connp = tcp->tcp_connp;
		ASSERT(connp->conn_fanout != NULL);
		lock = &connp->conn_fanout->connf_lock;
		/*
		 * This is essentially a TW reclaim fast path optimization for
		 * performance where the timewait collector checks under the
		 * fanout lock (so that no one else can get access to the
		 * conn_t) that the refcnt is 2 i.e. one for TCP and one for
		 * the classifier hash list. If ref count is indeed 2, we can
		 * just remove the conn under the fanout lock and avoid
		 * cleaning up the conn under the squeue, provided that
		 * clustering callbacks are not enabled. If clustering is
		 * enabled, we need to make the clustering callback before
		 * setting the CONDEMNED flag and after dropping all locks and
		 * so we forego this optimization and fall back to the slow
		 * path. Also please see the comments in tcp_closei_local
		 * regarding the refcnt logic.
		 *
		 * Since we are holding the tcp_time_wait_lock, it's better
		 * not to block on the fanout_lock because other connections
		 * can't add themselves to the time_wait list. So we do a
		 * tryenter instead of mutex_enter.
		 */
		if (mutex_tryenter(lock)) {
			mutex_enter(&connp->conn_lock);
			if ((connp->conn_ref == 2) &&
			    (cl_inet_disconnect == NULL)) {
				ipcl_hash_remove_locked(connp,
				    connp->conn_fanout);
				/*
				 * Set the CONDEMNED flag now itself so that
				 * the refcnt cannot increase due to any
				 * walker. But we have still not cleaned up
				 * conn_ire_cache.
This is still ok since 1799 * we are going to clean it up in tcp_cleanup 1800 * immediately and any interface unplumb 1801 * thread will wait till the ire is blown away 1802 */ 1803 connp->conn_state_flags |= CONN_CONDEMNED; 1804 mutex_exit(lock); 1805 mutex_exit(&connp->conn_lock); 1806 if (tcp_time_wait->tcp_free_list_cnt < 1807 tcp_free_list_max_cnt) { 1808 /* Add to head of tcp_free_list */ 1809 mutex_exit( 1810 &tcp_time_wait->tcp_time_wait_lock); 1811 tcp_cleanup(tcp); 1812 ASSERT(connp->conn_latch == NULL); 1813 ASSERT(connp->conn_policy == NULL); 1814 ASSERT(tcp->tcp_tcps == NULL); 1815 ASSERT(connp->conn_netstack == NULL); 1816 1817 mutex_enter( 1818 &tcp_time_wait->tcp_time_wait_lock); 1819 tcp->tcp_time_wait_next = 1820 tcp_time_wait->tcp_free_list; 1821 tcp_time_wait->tcp_free_list = tcp; 1822 tcp_time_wait->tcp_free_list_cnt++; 1823 continue; 1824 } else { 1825 /* Do not add to tcp_free_list */ 1826 mutex_exit( 1827 &tcp_time_wait->tcp_time_wait_lock); 1828 tcp_bind_hash_remove(tcp); 1829 conn_delete_ire(tcp->tcp_connp, NULL); 1830 tcp_ipsec_cleanup(tcp); 1831 CONN_DEC_REF(tcp->tcp_connp); 1832 } 1833 } else { 1834 CONN_INC_REF_LOCKED(connp); 1835 mutex_exit(lock); 1836 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1837 mutex_exit(&connp->conn_lock); 1838 /* 1839 * We can reuse the closemp here since conn has 1840 * detached (otherwise we wouldn't even be in 1841 * time_wait list). tcp_closemp_used can safely 1842 * be changed without taking a lock as no other 1843 * thread can concurrently access it at this 1844 * point in the connection lifecycle. 1845 */ 1846 1847 if (tcp->tcp_closemp.b_prev == NULL) 1848 tcp->tcp_closemp_used = B_TRUE; 1849 else 1850 cmn_err(CE_PANIC, 1851 "tcp_timewait_collector: " 1852 "concurrent use of tcp_closemp: " 1853 "connp %p tcp %p\n", (void *)connp, 1854 (void *)tcp); 1855 1856 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1857 mp = &tcp->tcp_closemp; 1858 squeue_fill(connp->conn_sqp, mp, 1859 tcp_timewait_output, connp, 1860 SQTAG_TCP_TIMEWAIT); 1861 } 1862 } else { 1863 mutex_enter(&connp->conn_lock); 1864 CONN_INC_REF_LOCKED(connp); 1865 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1866 mutex_exit(&connp->conn_lock); 1867 /* 1868 * We can reuse the closemp here since conn has 1869 * detached (otherwise we wouldn't even be in 1870 * time_wait list). tcp_closemp_used can safely 1871 * be changed without taking a lock as no other 1872 * thread can concurrently access it at this 1873 * point in the connection lifecycle. 1874 */ 1875 1876 if (tcp->tcp_closemp.b_prev == NULL) 1877 tcp->tcp_closemp_used = B_TRUE; 1878 else 1879 cmn_err(CE_PANIC, "tcp_timewait_collector: " 1880 "concurrent use of tcp_closemp: " 1881 "connp %p tcp %p\n", (void *)connp, 1882 (void *)tcp); 1883 1884 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1885 mp = &tcp->tcp_closemp; 1886 squeue_fill(connp->conn_sqp, mp, 1887 tcp_timewait_output, connp, 0); 1888 } 1889 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1890 } 1891 1892 if (tcp_time_wait->tcp_free_list != NULL) 1893 tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE; 1894 1895 tcp_time_wait->tcp_time_wait_tid = 1896 timeout(tcp_time_wait_collector, sqp, TCP_TIME_WAIT_DELAY); 1897 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1898 } 1899 /* 1900 * Reply to a clients T_CONN_RES TPI message. This function 1901 * is used only for TLI/XTI listener. Sockfs sends T_CONN_RES 1902 * on the acceptor STREAM and processed in tcp_wput_accept(). 1903 * Read the block comment on top of tcp_conn_request(). 
 */
static void
tcp_accept(tcp_t *listener, mblk_t *mp)
{
	tcp_t	*acceptor;
	tcp_t	*eager;
	tcp_t	*tcp;
	struct T_conn_res	*tcr;
	t_uscalar_t	acceptor_id;
	t_scalar_t	seqnum;
	mblk_t	*opt_mp = NULL;	/* T_OPTMGMT_REQ messages */
	mblk_t	*ok_mp;
	mblk_t	*mp1;
	tcp_stack_t	*tcps = listener->tcp_tcps;

	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
		tcp_err_ack(listener, mp, TPROTO, 0);
		return;
	}
	tcr = (struct T_conn_res *)mp->b_rptr;

	/*
	 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the
	 * read side queue of the streams device underneath us i.e. the
	 * read side queue of 'ip'. Since we can't dereference QUEUE_ptr we
	 * look it up in the queue_hash.  Under LP64 it sends down the
	 * minor_t of the accepting endpoint.
	 *
	 * Once the acceptor/eager are modified (in tcp_accept_swap) the
	 * fanout hash lock is held.
	 * This prevents any thread from entering the acceptor queue from
	 * below (since it has not been hard bound yet i.e. any inbound
	 * packets will arrive on the listener or default tcp queue and
	 * go through tcp_lookup).
	 * The CONN_INC_REF will prevent the acceptor from closing.
	 *
	 * XXX It is still possible for a tli application to send down data
	 * on the accepting stream while another thread calls t_accept.
	 * This should not be a problem for well-behaved applications since
	 * the T_OK_ACK is sent after the queue swapping is completed.
	 *
	 * If the accepting fd is the same as the listening fd, avoid
	 * queue hash lookup since that will return an eager listener in an
	 * already established state.
	 */
	acceptor_id = tcr->ACCEPTOR_id;
	mutex_enter(&listener->tcp_eager_lock);
	if (listener->tcp_acceptor_id == acceptor_id) {
		eager = listener->tcp_eager_next_q;
		/* only count how many T_CONN_INDs so don't count q0 */
		if ((listener->tcp_conn_req_cnt_q != 1) ||
		    (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) {
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TBADF, 0);
			return;
		}
		if (listener->tcp_conn_req_cnt_q0 != 0) {
			/* Throw away all the eagers on q0. */
			tcp_eager_cleanup(listener, 1);
		}
		if (listener->tcp_syn_defense) {
			listener->tcp_syn_defense = B_FALSE;
			if (listener->tcp_ip_addr_cache != NULL) {
				kmem_free(listener->tcp_ip_addr_cache,
				    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
				listener->tcp_ip_addr_cache = NULL;
			}
		}
		/*
		 * Transfer tcp_conn_req_max to the eager so that when
		 * a disconnect occurs we can revert the endpoint to the
		 * listen state.
		 */
		eager->tcp_conn_req_max = listener->tcp_conn_req_max;
		ASSERT(listener->tcp_conn_req_cnt_q0 == 0);
		/*
		 * Get a reference on the acceptor just like the
		 * tcp_acceptor_hash_lookup below.
		 */
		acceptor = listener;
		CONN_INC_REF(acceptor->tcp_connp);
	} else {
		acceptor = tcp_acceptor_hash_lookup(acceptor_id, tcps);
		if (acceptor == NULL) {
			if (listener->tcp_debug) {
				(void) strlog(TCP_MOD_ID, 0, 1,
				    SL_ERROR|SL_TRACE,
				    "tcp_accept: did not find acceptor 0x%x\n",
				    acceptor_id);
			}
			mutex_exit(&listener->tcp_eager_lock);
			tcp_err_ack(listener, mp, TPROVMISMATCH, 0);
			return;
		}
		/*
		 * Verify acceptor state. The acceptable states for an
		 * acceptor include TCPS_IDLE and TCPS_BOUND.
2001 */ 2002 switch (acceptor->tcp_state) { 2003 case TCPS_IDLE: 2004 /* FALLTHRU */ 2005 case TCPS_BOUND: 2006 break; 2007 default: 2008 CONN_DEC_REF(acceptor->tcp_connp); 2009 mutex_exit(&listener->tcp_eager_lock); 2010 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2011 return; 2012 } 2013 } 2014 2015 /* The listener must be in TCPS_LISTEN */ 2016 if (listener->tcp_state != TCPS_LISTEN) { 2017 CONN_DEC_REF(acceptor->tcp_connp); 2018 mutex_exit(&listener->tcp_eager_lock); 2019 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2020 return; 2021 } 2022 2023 /* 2024 * Rendezvous with an eager connection request packet hanging off 2025 * 'tcp' that has the 'seqnum' tag. We tagged the detached open 2026 * tcp structure when the connection packet arrived in 2027 * tcp_conn_request(). 2028 */ 2029 seqnum = tcr->SEQ_number; 2030 eager = listener; 2031 do { 2032 eager = eager->tcp_eager_next_q; 2033 if (eager == NULL) { 2034 CONN_DEC_REF(acceptor->tcp_connp); 2035 mutex_exit(&listener->tcp_eager_lock); 2036 tcp_err_ack(listener, mp, TBADSEQ, 0); 2037 return; 2038 } 2039 } while (eager->tcp_conn_req_seqnum != seqnum); 2040 mutex_exit(&listener->tcp_eager_lock); 2041 2042 /* 2043 * At this point, both acceptor and listener have 2 ref 2044 * that they begin with. Acceptor has one additional ref 2045 * we placed in lookup while listener has 3 additional 2046 * ref for being behind the squeue (tcp_accept() is 2047 * done on listener's squeue); being in classifier hash; 2048 * and eager's ref on listener. 2049 */ 2050 ASSERT(listener->tcp_connp->conn_ref >= 5); 2051 ASSERT(acceptor->tcp_connp->conn_ref >= 3); 2052 2053 /* 2054 * The eager at this point is set in its own squeue and 2055 * could easily have been killed (tcp_accept_finish will 2056 * deal with that) because of a TH_RST so we can only 2057 * ASSERT for a single ref. 2058 */ 2059 ASSERT(eager->tcp_connp->conn_ref >= 1); 2060 2061 /* Pre allocate the stroptions mblk also */ 2062 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 2063 if (opt_mp == NULL) { 2064 CONN_DEC_REF(acceptor->tcp_connp); 2065 CONN_DEC_REF(eager->tcp_connp); 2066 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2067 return; 2068 } 2069 DB_TYPE(opt_mp) = M_SETOPTS; 2070 opt_mp->b_wptr += sizeof (struct stroptions); 2071 2072 /* 2073 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 2074 * from listener to acceptor. The message is chained on opt_mp 2075 * which will be sent onto eager's squeue. 2076 */ 2077 if (listener->tcp_bound_if != 0) { 2078 /* allocate optmgmt req */ 2079 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2080 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 2081 sizeof (int)); 2082 if (mp1 != NULL) 2083 linkb(opt_mp, mp1); 2084 } 2085 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 2086 uint_t on = 1; 2087 2088 /* allocate optmgmt req */ 2089 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2090 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 2091 if (mp1 != NULL) 2092 linkb(opt_mp, mp1); 2093 } 2094 2095 /* Re-use mp1 to hold a copy of mp, in case reallocb fails */ 2096 if ((mp1 = copymsg(mp)) == NULL) { 2097 CONN_DEC_REF(acceptor->tcp_connp); 2098 CONN_DEC_REF(eager->tcp_connp); 2099 freemsg(opt_mp); 2100 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2101 return; 2102 } 2103 2104 tcr = (struct T_conn_res *)mp1->b_rptr; 2105 2106 /* 2107 * This is an expanded version of mi_tpi_ok_ack_alloc() 2108 * which allocates a larger mblk and appends the new 2109 * local address to the ok_ack. The address is copied by 2110 * soaccept() for getsockname(). 
 */
	{
		int extra;

		extra = (eager->tcp_family == AF_INET) ?
		    sizeof (sin_t) : sizeof (sin6_t);

		/*
		 * Try to re-use mp, if possible.  Otherwise, allocate
		 * an mblk and return it as ok_mp.  In any case, mp
		 * is no longer usable upon return.
		 */
		if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) {
			CONN_DEC_REF(acceptor->tcp_connp);
			CONN_DEC_REF(eager->tcp_connp);
			freemsg(opt_mp);
			/* Original mp has been freed by now, so use mp1 */
			tcp_err_ack(listener, mp1, TSYSERR, ENOMEM);
			return;
		}

		mp = NULL;	/* We should never use mp after this point */

		switch (extra) {
		case sizeof (sin_t): {
			sin_t *sin = (sin_t *)ok_mp->b_wptr;

			ok_mp->b_wptr += extra;
			sin->sin_family = AF_INET;
			sin->sin_port = eager->tcp_lport;
			sin->sin_addr.s_addr =
			    eager->tcp_ipha->ipha_src;
			break;
		}
		case sizeof (sin6_t): {
			sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr;

			ok_mp->b_wptr += extra;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_port = eager->tcp_lport;
			if (eager->tcp_ipversion == IPV4_VERSION) {
				sin6->sin6_flowinfo = 0;
				IN6_IPADDR_TO_V4MAPPED(
				    eager->tcp_ipha->ipha_src,
				    &sin6->sin6_addr);
			} else {
				ASSERT(eager->tcp_ip6h != NULL);
				sin6->sin6_flowinfo =
				    eager->tcp_ip6h->ip6_vcf &
				    ~IPV6_VERS_AND_FLOW_MASK;
				sin6->sin6_addr =
				    eager->tcp_ip6h->ip6_src;
			}
			sin6->sin6_scope_id = 0;
			sin6->__sin6_src_id = 0;
			break;
		}
		default:
			break;
		}
		ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim);
	}

	/*
	 * If there are no options we know that the T_CONN_RES will
	 * succeed. However, we can't send the T_OK_ACK upstream until
	 * the tcp_accept_swap is done since it would be dangerous to
	 * let the application start using the new fd prior to the swap.
	 */
	tcp_accept_swap(listener, acceptor, eager);

	/*
	 * tcp_accept_swap unlinks eager from listener but does not drop
	 * the eager's reference on the listener.
	 */
	ASSERT(eager->tcp_listener == NULL);
	ASSERT(listener->tcp_connp->conn_ref >= 5);

	/*
	 * The eager is now associated with its own queue. Insert in
	 * the hash so that the connection can be reused for a future
	 * T_CONN_RES.
	 */
	tcp_acceptor_hash_insert(acceptor_id, eager);

	/*
	 * We now do the processing of options with T_CONN_RES.
	 * We delay till now since we wanted to have a queue to pass to
	 * the option processing routines that points back to the right
	 * instance structure, which does not happen until after
	 * tcp_accept_swap().
	 *
	 * Note:
	 * The sanity of the logic here assumes that whatever options
	 * are appropriate to inherit from listener=>eager are done
	 * before this point, and whatever was to be overridden (or not)
	 * was handled by the eager=>acceptor transfer logic in
	 * tcp_accept_swap().  [ Warning: the acceptor endpoint can have
	 * T_OPTMGMT_REQ done to it before its ACCEPTOR_id comes down in
	 * T_CONN_RES ]
	 * This may not be true at this point in time but can be fixed
	 * independently.  This option processing code starts with
	 * the instantiated acceptor instance and the final queue at
	 * this point.
2214 */ 2215 2216 if (tcr->OPT_length != 0) { 2217 /* Options to process */ 2218 int t_error = 0; 2219 int sys_error = 0; 2220 int do_disconnect = 0; 2221 2222 if (tcp_conprim_opt_process(eager, mp1, 2223 &do_disconnect, &t_error, &sys_error) < 0) { 2224 eager->tcp_accept_error = 1; 2225 if (do_disconnect) { 2226 /* 2227 * An option failed which does not allow 2228 * connection to be accepted. 2229 * 2230 * We allow T_CONN_RES to succeed and 2231 * put a T_DISCON_IND on the eager queue. 2232 */ 2233 ASSERT(t_error == 0 && sys_error == 0); 2234 eager->tcp_send_discon_ind = 1; 2235 } else { 2236 ASSERT(t_error != 0); 2237 freemsg(ok_mp); 2238 /* 2239 * Original mp was either freed or set 2240 * to ok_mp above, so use mp1 instead. 2241 */ 2242 tcp_err_ack(listener, mp1, t_error, sys_error); 2243 goto finish; 2244 } 2245 } 2246 /* 2247 * Most likely success in setting options (except if 2248 * eager->tcp_send_discon_ind set). 2249 * mp1 option buffer represented by OPT_length/offset 2250 * potentially modified and contains results of setting 2251 * options at this point 2252 */ 2253 } 2254 2255 /* We no longer need mp1, since all options processing has passed */ 2256 freemsg(mp1); 2257 2258 putnext(listener->tcp_rq, ok_mp); 2259 2260 mutex_enter(&listener->tcp_eager_lock); 2261 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 2262 tcp_t *tail; 2263 mblk_t *conn_ind; 2264 2265 /* 2266 * This path should not be executed if listener and 2267 * acceptor streams are the same. 2268 */ 2269 ASSERT(listener != acceptor); 2270 2271 tcp = listener->tcp_eager_prev_q0; 2272 /* 2273 * listener->tcp_eager_prev_q0 points to the TAIL of the 2274 * deferred T_conn_ind queue. We need to get to the head of 2275 * the queue in order to send up T_conn_ind the same order as 2276 * how the 3WHS is completed. 2277 */ 2278 while (tcp != listener) { 2279 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0) 2280 break; 2281 else 2282 tcp = tcp->tcp_eager_prev_q0; 2283 } 2284 ASSERT(tcp != listener); 2285 conn_ind = tcp->tcp_conn.tcp_eager_conn_ind; 2286 ASSERT(conn_ind != NULL); 2287 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 2288 2289 /* Move from q0 to q */ 2290 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 2291 listener->tcp_conn_req_cnt_q0--; 2292 listener->tcp_conn_req_cnt_q++; 2293 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 2294 tcp->tcp_eager_prev_q0; 2295 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 2296 tcp->tcp_eager_next_q0; 2297 tcp->tcp_eager_prev_q0 = NULL; 2298 tcp->tcp_eager_next_q0 = NULL; 2299 tcp->tcp_conn_def_q0 = B_FALSE; 2300 2301 /* Make sure the tcp isn't in the list of droppables */ 2302 ASSERT(tcp->tcp_eager_next_drop_q0 == NULL && 2303 tcp->tcp_eager_prev_drop_q0 == NULL); 2304 2305 /* 2306 * Insert at end of the queue because sockfs sends 2307 * down T_CONN_RES in chronological order. Leaving 2308 * the older conn indications at front of the queue 2309 * helps reducing search time. 2310 */ 2311 tail = listener->tcp_eager_last_q; 2312 if (tail != NULL) 2313 tail->tcp_eager_next_q = tcp; 2314 else 2315 listener->tcp_eager_next_q = tcp; 2316 listener->tcp_eager_last_q = tcp; 2317 tcp->tcp_eager_next_q = NULL; 2318 mutex_exit(&listener->tcp_eager_lock); 2319 putnext(tcp->tcp_rq, conn_ind); 2320 } else { 2321 mutex_exit(&listener->tcp_eager_lock); 2322 } 2323 2324 /* 2325 * Done with the acceptor - free it 2326 * 2327 * Note: from this point on, no access to listener should be made 2328 * as listener can be equal to acceptor. 
 */
finish:
	ASSERT(acceptor->tcp_detached);
	ASSERT(tcps->tcps_g_q != NULL);
	acceptor->tcp_rq = tcps->tcps_g_q;
	acceptor->tcp_wq = WR(tcps->tcps_g_q);
	(void) tcp_clean_death(acceptor, 0, 2);
	CONN_DEC_REF(acceptor->tcp_connp);

	/*
	 * In case we already received a FIN we have to make tcp_rput send
	 * the ordrel_ind. This will also send up a window update if the window
	 * has opened up.
	 *
	 * In the normal case of a successful connection acceptance
	 * we give the O_T_BIND_REQ to the read side put procedure as an
	 * indication that this was just accepted. This tells tcp_rput to
	 * pass up any data queued in tcp_rcv_list.
	 *
	 * In the fringe case where options sent with T_CONN_RES failed and
	 * a disconnect is required, we would be indicating a T_DISCON_IND to
	 * blow away this connection.
	 */

	/*
	 * XXX: we currently have a problem if an XTI application closes the
	 * acceptor stream in between. This problem exists in on10-gate also
	 * and is well known but nothing can be done short of a major rewrite
	 * to fix it. Now it is possible to take care of it by assigning the
	 * TLI/XTI eager the same squeue as the listener (we can distinguish
	 * non socket listeners at the time of handling a SYN in
	 * tcp_conn_request), doing most of the work that tcp_accept_finish
	 * does here itself, and then getting behind the acceptor squeue to
	 * access the acceptor queue.
	 */
	/*
	 * We already have a ref on tcp so no need to do one before squeue_fill
	 */
	squeue_fill(eager->tcp_connp->conn_sqp, opt_mp,
	    tcp_accept_finish, eager->tcp_connp, SQTAG_TCP_ACCEPT_FINISH);
}

/*
 * Swap information between the eager and acceptor for a TLI/XTI client.
 * The sockfs accept is done on the acceptor stream and control goes
 * through tcp_wput_accept() and tcp_accept()/tcp_accept_swap() is not
 * called. In either case, both the eager and listener are in their own
 * perimeter (squeue) and the code has to deal with potential race.
 *
 * See the block comment on top of tcp_accept() and tcp_wput_accept().
 */
static void
tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager)
{
	conn_t	*econnp, *aconnp;

	ASSERT(eager->tcp_rq == listener->tcp_rq);
	ASSERT(eager->tcp_detached && !acceptor->tcp_detached);
	ASSERT(!eager->tcp_hard_bound);
	ASSERT(!TCP_IS_SOCKET(acceptor));
	ASSERT(!TCP_IS_SOCKET(eager));
	ASSERT(!TCP_IS_SOCKET(listener));

	acceptor->tcp_detached = B_TRUE;
	/*
	 * To permit stream re-use by TLI/XTI, the eager needs a copy of
	 * the acceptor id.
	 */
	eager->tcp_acceptor_id = acceptor->tcp_acceptor_id;

	/* remove eager from listen list... */
	mutex_enter(&listener->tcp_eager_lock);
	tcp_eager_unlink(eager);
	ASSERT(eager->tcp_eager_next_q == NULL &&
	    eager->tcp_eager_last_q == NULL);
	ASSERT(eager->tcp_eager_next_q0 == NULL &&
	    eager->tcp_eager_prev_q0 == NULL);
	mutex_exit(&listener->tcp_eager_lock);
	eager->tcp_rq = acceptor->tcp_rq;
	eager->tcp_wq = acceptor->tcp_wq;

	econnp = eager->tcp_connp;
	aconnp = acceptor->tcp_connp;

	eager->tcp_rq->q_ptr = econnp;
	eager->tcp_wq->q_ptr = econnp;

	/*
	 * In the TLI/XTI loopback case, we are inside the listener's squeue,
	 * which might be a different squeue from our peer TCP instance.
2419 * For TCP Fusion, the peer expects that whenever tcp_detached is 2420 * clear, our TCP queues point to the acceptor's queues. Thus, use 2421 * membar_producer() to ensure that the assignments of tcp_rq/tcp_wq 2422 * above reach global visibility prior to the clearing of tcp_detached. 2423 */ 2424 membar_producer(); 2425 eager->tcp_detached = B_FALSE; 2426 2427 ASSERT(eager->tcp_ack_tid == 0); 2428 2429 econnp->conn_dev = aconnp->conn_dev; 2430 econnp->conn_minor_arena = aconnp->conn_minor_arena; 2431 ASSERT(econnp->conn_minor_arena != NULL); 2432 if (eager->tcp_cred != NULL) 2433 crfree(eager->tcp_cred); 2434 eager->tcp_cred = econnp->conn_cred = aconnp->conn_cred; 2435 ASSERT(econnp->conn_netstack == aconnp->conn_netstack); 2436 ASSERT(eager->tcp_tcps == acceptor->tcp_tcps); 2437 2438 aconnp->conn_cred = NULL; 2439 2440 econnp->conn_zoneid = aconnp->conn_zoneid; 2441 econnp->conn_allzones = aconnp->conn_allzones; 2442 2443 econnp->conn_mac_exempt = aconnp->conn_mac_exempt; 2444 aconnp->conn_mac_exempt = B_FALSE; 2445 2446 ASSERT(aconnp->conn_peercred == NULL); 2447 2448 /* Do the IPC initialization */ 2449 CONN_INC_REF(econnp); 2450 2451 econnp->conn_multicast_loop = aconnp->conn_multicast_loop; 2452 econnp->conn_af_isv6 = aconnp->conn_af_isv6; 2453 econnp->conn_pkt_isv6 = aconnp->conn_pkt_isv6; 2454 2455 /* Done with old IPC. Drop its ref on its connp */ 2456 CONN_DEC_REF(aconnp); 2457 } 2458 2459 2460 /* 2461 * Adapt to the information, such as rtt and rtt_sd, provided from the 2462 * ire cached in conn_cache_ire. If no ire cached, do a ire lookup. 2463 * 2464 * Checks for multicast and broadcast destination address. 2465 * Returns zero on failure; non-zero if ok. 2466 * 2467 * Note that the MSS calculation here is based on the info given in 2468 * the IRE. We do not do any calculation based on TCP options. They 2469 * will be handled in tcp_rput_other() and tcp_rput_data() when TCP 2470 * knows which options to use. 2471 * 2472 * Note on how TCP gets its parameters for a connection. 2473 * 2474 * When a tcp_t structure is allocated, it gets all the default parameters. 2475 * In tcp_adapt_ire(), it gets those metric parameters, like rtt, rtt_sd, 2476 * spipe, rpipe, ... from the route metrics. Route metric overrides the 2477 * default. But if there is an associated tcp_host_param, it will override 2478 * the metrics. 2479 * 2480 * An incoming SYN with a multicast or broadcast destination address, is dropped 2481 * in 1 of 2 places. 2482 * 2483 * 1. If the packet was received over the wire it is dropped in 2484 * ip_rput_process_broadcast() 2485 * 2486 * 2. If the packet was received through internal IP loopback, i.e. the packet 2487 * was generated and received on the same machine, it is dropped in 2488 * ip_wput_local() 2489 * 2490 * An incoming SYN with a multicast or broadcast source address is always 2491 * dropped in tcp_adapt_ire. The same logic in tcp_adapt_ire also serves to 2492 * reject an attempt to connect to a broadcast or multicast (destination) 2493 * address. 
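 */

/*
 * Hedged, user-space sketch of the RTO seeding that tcp_adapt_ire()
 * performs from cached route metrics (see the iulp_rtt handling
 * below).  The shifts mirror the computation in the function: the
 * stored smoothed RTT is kept scaled by 8, so (sa >> 3) recovers it
 * and (sa >> 5) adds a quarter of it; the clamp bounds stand in for
 * the tcps_rexmit_interval_{min,max} tunables.  Excluded from the
 * build (#if 0).
 */
#if 0
static unsigned long
seed_rto(unsigned long rtt_sa, unsigned long rtt_sd,
    unsigned long extra, unsigned long rto_min, unsigned long rto_max)
{
	unsigned long rto;

	rto = (rtt_sa >> 3) + rtt_sd + extra + (rtt_sa >> 5);
	if (rto > rto_max)
		return (rto_max);
	if (rto < rto_min)
		return (rto_min);
	return (rto);
}
#endif

/*
 * tcp_adapt_ire() follows.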
2494 */ 2495 static int 2496 tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp) 2497 { 2498 tcp_hsp_t *hsp; 2499 ire_t *ire; 2500 ire_t *sire = NULL; 2501 iulp_t *ire_uinfo = NULL; 2502 uint32_t mss_max; 2503 uint32_t mss; 2504 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 2505 conn_t *connp = tcp->tcp_connp; 2506 boolean_t ire_cacheable = B_FALSE; 2507 zoneid_t zoneid = connp->conn_zoneid; 2508 int match_flags = MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | 2509 MATCH_IRE_SECATTR; 2510 ts_label_t *tsl = crgetlabel(CONN_CRED(connp)); 2511 ill_t *ill = NULL; 2512 boolean_t incoming = (ire_mp == NULL); 2513 tcp_stack_t *tcps = tcp->tcp_tcps; 2514 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2515 2516 ASSERT(connp->conn_ire_cache == NULL); 2517 2518 if (tcp->tcp_ipversion == IPV4_VERSION) { 2519 2520 if (CLASSD(tcp->tcp_connp->conn_rem)) { 2521 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 2522 return (0); 2523 } 2524 /* 2525 * If IP_NEXTHOP is set, then look for an IRE_CACHE 2526 * for the destination with the nexthop as gateway. 2527 * ire_ctable_lookup() is used because this particular 2528 * ire, if it exists, will be marked private. 2529 * If that is not available, use the interface ire 2530 * for the nexthop. 2531 * 2532 * TSol: tcp_update_label will detect label mismatches based 2533 * only on the destination's label, but that would not 2534 * detect label mismatches based on the security attributes 2535 * of routes or next hop gateway. Hence we need to pass the 2536 * label to ire_ftable_lookup below in order to locate the 2537 * right prefix (and/or) ire cache. Similarly we also need 2538 * pass the label to the ire_cache_lookup below to locate 2539 * the right ire that also matches on the label. 2540 */ 2541 if (tcp->tcp_connp->conn_nexthop_set) { 2542 ire = ire_ctable_lookup(tcp->tcp_connp->conn_rem, 2543 tcp->tcp_connp->conn_nexthop_v4, 0, NULL, zoneid, 2544 tsl, MATCH_IRE_MARK_PRIVATE_ADDR | MATCH_IRE_GW, 2545 ipst); 2546 if (ire == NULL) { 2547 ire = ire_ftable_lookup( 2548 tcp->tcp_connp->conn_nexthop_v4, 2549 0, 0, IRE_INTERFACE, NULL, NULL, zoneid, 0, 2550 tsl, match_flags, ipst); 2551 if (ire == NULL) 2552 return (0); 2553 } else { 2554 ire_uinfo = &ire->ire_uinfo; 2555 } 2556 } else { 2557 ire = ire_cache_lookup(tcp->tcp_connp->conn_rem, 2558 zoneid, tsl, ipst); 2559 if (ire != NULL) { 2560 ire_cacheable = B_TRUE; 2561 ire_uinfo = (ire_mp != NULL) ? 2562 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2563 &ire->ire_uinfo; 2564 2565 } else { 2566 if (ire_mp == NULL) { 2567 ire = ire_ftable_lookup( 2568 tcp->tcp_connp->conn_rem, 2569 0, 0, 0, NULL, &sire, zoneid, 0, 2570 tsl, (MATCH_IRE_RECURSIVE | 2571 MATCH_IRE_DEFAULT), ipst); 2572 if (ire == NULL) 2573 return (0); 2574 ire_uinfo = (sire != NULL) ? 2575 &sire->ire_uinfo : 2576 &ire->ire_uinfo; 2577 } else { 2578 ire = (ire_t *)ire_mp->b_rptr; 2579 ire_uinfo = 2580 &((ire_t *) 2581 ire_mp->b_rptr)->ire_uinfo; 2582 } 2583 } 2584 } 2585 ASSERT(ire != NULL); 2586 2587 if ((ire->ire_src_addr == INADDR_ANY) || 2588 (ire->ire_type & IRE_BROADCAST)) { 2589 /* 2590 * ire->ire_mp is non null when ire_mp passed in is used 2591 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2592 */ 2593 if (ire->ire_mp == NULL) 2594 ire_refrele(ire); 2595 if (sire != NULL) 2596 ire_refrele(sire); 2597 return (0); 2598 } 2599 2600 if (tcp->tcp_ipha->ipha_src == INADDR_ANY) { 2601 ipaddr_t src_addr; 2602 2603 /* 2604 * ip_bind_connected() has stored the correct source 2605 * address in conn_src. 
2606 */ 2607 src_addr = tcp->tcp_connp->conn_src; 2608 tcp->tcp_ipha->ipha_src = src_addr; 2609 /* 2610 * Copy of the src addr. in tcp_t is needed 2611 * for the lookup funcs. 2612 */ 2613 IN6_IPADDR_TO_V4MAPPED(src_addr, &tcp->tcp_ip_src_v6); 2614 } 2615 /* 2616 * Set the fragment bit so that IP will tell us if the MTU 2617 * should change. IP tells us the latest setting of 2618 * ip_path_mtu_discovery through ire_frag_flag. 2619 */ 2620 if (ipst->ips_ip_path_mtu_discovery) { 2621 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 2622 htons(IPH_DF); 2623 } 2624 /* 2625 * If ire_uinfo is NULL, this is the IRE_INTERFACE case 2626 * for IP_NEXTHOP. No cache ire has been found for the 2627 * destination and we are working with the nexthop's 2628 * interface ire. Since we need to forward all packets 2629 * to the nexthop first, we "blindly" set tcp_localnet 2630 * to false, eventhough the destination may also be 2631 * onlink. 2632 */ 2633 if (ire_uinfo == NULL) 2634 tcp->tcp_localnet = 0; 2635 else 2636 tcp->tcp_localnet = (ire->ire_gateway_addr == 0); 2637 } else { 2638 /* 2639 * For incoming connection ire_mp = NULL 2640 * For outgoing connection ire_mp != NULL 2641 * Technically we should check conn_incoming_ill 2642 * when ire_mp is NULL and conn_outgoing_ill when 2643 * ire_mp is non-NULL. But this is performance 2644 * critical path and for IPV*_BOUND_IF, outgoing 2645 * and incoming ill are always set to the same value. 2646 */ 2647 ill_t *dst_ill = NULL; 2648 ipif_t *dst_ipif = NULL; 2649 2650 ASSERT(connp->conn_outgoing_ill == connp->conn_incoming_ill); 2651 2652 if (connp->conn_outgoing_ill != NULL) { 2653 /* Outgoing or incoming path */ 2654 int err; 2655 2656 dst_ill = conn_get_held_ill(connp, 2657 &connp->conn_outgoing_ill, &err); 2658 if (err == ILL_LOOKUP_FAILED || dst_ill == NULL) { 2659 ip1dbg(("tcp_adapt_ire: ill_lookup failed\n")); 2660 return (0); 2661 } 2662 match_flags |= MATCH_IRE_ILL; 2663 dst_ipif = dst_ill->ill_ipif; 2664 } 2665 ire = ire_ctable_lookup_v6(&tcp->tcp_connp->conn_remv6, 2666 0, 0, dst_ipif, zoneid, tsl, match_flags, ipst); 2667 2668 if (ire != NULL) { 2669 ire_cacheable = B_TRUE; 2670 ire_uinfo = (ire_mp != NULL) ? 2671 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2672 &ire->ire_uinfo; 2673 } else { 2674 if (ire_mp == NULL) { 2675 ire = ire_ftable_lookup_v6( 2676 &tcp->tcp_connp->conn_remv6, 2677 0, 0, 0, dst_ipif, &sire, zoneid, 2678 0, tsl, match_flags, ipst); 2679 if (ire == NULL) { 2680 if (dst_ill != NULL) 2681 ill_refrele(dst_ill); 2682 return (0); 2683 } 2684 ire_uinfo = (sire != NULL) ? &sire->ire_uinfo : 2685 &ire->ire_uinfo; 2686 } else { 2687 ire = (ire_t *)ire_mp->b_rptr; 2688 ire_uinfo = 2689 &((ire_t *)ire_mp->b_rptr)->ire_uinfo; 2690 } 2691 } 2692 if (dst_ill != NULL) 2693 ill_refrele(dst_ill); 2694 2695 ASSERT(ire != NULL); 2696 ASSERT(ire_uinfo != NULL); 2697 2698 if (IN6_IS_ADDR_UNSPECIFIED(&ire->ire_src_addr_v6) || 2699 IN6_IS_ADDR_MULTICAST(&ire->ire_addr_v6)) { 2700 /* 2701 * ire->ire_mp is non null when ire_mp passed in is used 2702 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2703 */ 2704 if (ire->ire_mp == NULL) 2705 ire_refrele(ire); 2706 if (sire != NULL) 2707 ire_refrele(sire); 2708 return (0); 2709 } 2710 2711 if (IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 2712 in6_addr_t src_addr; 2713 2714 /* 2715 * ip_bind_connected_v6() has stored the correct source 2716 * address per IPv6 addr. selection policy in 2717 * conn_src_v6. 
2718 */ 2719 src_addr = tcp->tcp_connp->conn_srcv6; 2720 2721 tcp->tcp_ip6h->ip6_src = src_addr; 2722 /* 2723 * Copy of the src addr. in tcp_t is needed 2724 * for the lookup funcs. 2725 */ 2726 tcp->tcp_ip_src_v6 = src_addr; 2727 ASSERT(IN6_ARE_ADDR_EQUAL(&tcp->tcp_ip6h->ip6_src, 2728 &connp->conn_srcv6)); 2729 } 2730 tcp->tcp_localnet = 2731 IN6_IS_ADDR_UNSPECIFIED(&ire->ire_gateway_addr_v6); 2732 } 2733 2734 /* 2735 * This allows applications to fail quickly when connections are made 2736 * to dead hosts. Hosts can be labeled dead by adding a reject route 2737 * with both the RTF_REJECT and RTF_PRIVATE flags set. 2738 */ 2739 if ((ire->ire_flags & RTF_REJECT) && 2740 (ire->ire_flags & RTF_PRIVATE)) 2741 goto error; 2742 2743 /* 2744 * Make use of the cached rtt and rtt_sd values to calculate the 2745 * initial RTO. Note that they are already initialized in 2746 * tcp_init_values(). 2747 * If ire_uinfo is NULL, i.e., we do not have a cache ire for 2748 * IP_NEXTHOP, but instead are using the interface ire for the 2749 * nexthop, then we do not use the ire_uinfo from that ire to 2750 * do any initializations. 2751 */ 2752 if (ire_uinfo != NULL) { 2753 if (ire_uinfo->iulp_rtt != 0) { 2754 clock_t rto; 2755 2756 tcp->tcp_rtt_sa = ire_uinfo->iulp_rtt; 2757 tcp->tcp_rtt_sd = ire_uinfo->iulp_rtt_sd; 2758 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 2759 tcps->tcps_rexmit_interval_extra + 2760 (tcp->tcp_rtt_sa >> 5); 2761 2762 if (rto > tcps->tcps_rexmit_interval_max) { 2763 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 2764 } else if (rto < tcps->tcps_rexmit_interval_min) { 2765 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 2766 } else { 2767 tcp->tcp_rto = rto; 2768 } 2769 } 2770 if (ire_uinfo->iulp_ssthresh != 0) 2771 tcp->tcp_cwnd_ssthresh = ire_uinfo->iulp_ssthresh; 2772 else 2773 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 2774 if (ire_uinfo->iulp_spipe > 0) { 2775 tcp->tcp_xmit_hiwater = MIN(ire_uinfo->iulp_spipe, 2776 tcps->tcps_max_buf); 2777 if (tcps->tcps_snd_lowat_fraction != 0) 2778 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2779 tcps->tcps_snd_lowat_fraction; 2780 (void) tcp_maxpsz_set(tcp, B_TRUE); 2781 } 2782 /* 2783 * Note that up till now, acceptor always inherits receive 2784 * window from the listener. But if there is a metrics 2785 * associated with a host, we should use that instead of 2786 * inheriting it from listener. Thus we need to pass this 2787 * info back to the caller. 2788 */ 2789 if (ire_uinfo->iulp_rpipe > 0) { 2790 tcp->tcp_rwnd = MIN(ire_uinfo->iulp_rpipe, 2791 tcps->tcps_max_buf); 2792 } 2793 2794 if (ire_uinfo->iulp_rtomax > 0) { 2795 tcp->tcp_second_timer_threshold = 2796 ire_uinfo->iulp_rtomax; 2797 } 2798 2799 /* 2800 * Use the metric option settings, iulp_tstamp_ok and 2801 * iulp_wscale_ok, only for active open. What this means 2802 * is that if the other side uses timestamp or window 2803 * scale option, TCP will also use those options. That 2804 * is for passive open. If the application sets a 2805 * large window, window scale is enabled regardless of 2806 * the value in iulp_wscale_ok. This is the behavior 2807 * since 2.6. So we keep it. 2808 * The only case left in passive open processing is the 2809 * check for SACK. 2810 * For ECN, it should probably be like SACK. But the 2811 * current value is binary, so we treat it like the other 2812 * cases. The metric only controls active open.For passive 2813 * open, the ndd param, tcp_ecn_permitted, controls the 2814 * behavior. 
 */
		if (!tcp_detached) {
			/*
			 * The if check means that the following can only
			 * be turned on by the metrics only IRE, but not off.
			 */
			if (ire_uinfo->iulp_tstamp_ok)
				tcp->tcp_snd_ts_ok = B_TRUE;
			if (ire_uinfo->iulp_wscale_ok)
				tcp->tcp_snd_ws_ok = B_TRUE;
			if (ire_uinfo->iulp_sack == 2)
				tcp->tcp_snd_sack_ok = B_TRUE;
			if (ire_uinfo->iulp_ecn_ok)
				tcp->tcp_ecn_ok = B_TRUE;
		} else {
			/*
			 * Passive open.
			 *
			 * As above, the if check means that SACK can only be
			 * turned on by the metric only IRE.
			 */
			if (ire_uinfo->iulp_sack > 0) {
				tcp->tcp_snd_sack_ok = B_TRUE;
			}
		}
	}

	/*
	 * XXX: Note that currently, ire_max_frag can be as small as 68
	 * because of PMTUd. So tcp_mss may go to negative if combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(ire->ire_max_frag != 0);
	mss = tcp->tcp_if_mtu = ire->ire_max_frag;
	if (tcp->tcp_ipp_fields & IPPF_USE_MIN_MTU) {
		if (tcp->tcp_ipp_use_min_mtu == IPV6_USE_MIN_MTU_NEVER) {
			mss = MIN(mss, IPV6_MIN_MTU);
		}
	}

	/* Sanity check for MSS value. */
	if (tcp->tcp_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipversion == IPV6_VERSION &&
	    (ire->ire_frag_flag & IPH_FRAG_HDR)) {
		/*
		 * After receiving an ICMPv6 "packet too big" message with a
		 * MTU < 1280, and for multirouted IPv6 packets, the IP layer
		 * will insert an 8-byte fragment header in every packet; we
		 * reduce the MSS by that amount here.
		 */
		mss -= sizeof (ip6_frag_t);
	}

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
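 *
 * RFC 1948 computes ISS = M + F(localhost, localport, remotehost,
 * remoteport, secret), where M is a clock-driven component and F is a
 * cryptographic hash; the hash input is the reason the full 4-tuple
 * must be known before the ISS can be chosen.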
2899 */ 2900 2901 tcp_iss_init(tcp); 2902 2903 if (ire->ire_type & (IRE_LOOPBACK | IRE_LOCAL)) 2904 tcp->tcp_loopback = B_TRUE; 2905 2906 if (tcp->tcp_ipversion == IPV4_VERSION) { 2907 hsp = tcp_hsp_lookup(tcp->tcp_remote, tcps); 2908 } else { 2909 hsp = tcp_hsp_lookup_ipv6(&tcp->tcp_remote_v6, tcps); 2910 } 2911 2912 if (hsp != NULL) { 2913 /* Only modify if we're going to make them bigger */ 2914 if (hsp->tcp_hsp_sendspace > tcp->tcp_xmit_hiwater) { 2915 tcp->tcp_xmit_hiwater = hsp->tcp_hsp_sendspace; 2916 if (tcps->tcps_snd_lowat_fraction != 0) 2917 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2918 tcps->tcps_snd_lowat_fraction; 2919 } 2920 2921 if (hsp->tcp_hsp_recvspace > tcp->tcp_rwnd) { 2922 tcp->tcp_rwnd = hsp->tcp_hsp_recvspace; 2923 } 2924 2925 /* Copy timestamp flag only for active open */ 2926 if (!tcp_detached) 2927 tcp->tcp_snd_ts_ok = hsp->tcp_hsp_tstamp; 2928 } 2929 2930 if (sire != NULL) 2931 IRE_REFRELE(sire); 2932 2933 /* 2934 * If we got an IRE_CACHE and an ILL, go through their properties; 2935 * otherwise, this is deferred until later when we have an IRE_CACHE. 2936 */ 2937 if (tcp->tcp_loopback || 2938 (ire_cacheable && (ill = ire_to_ill(ire)) != NULL)) { 2939 /* 2940 * For incoming, see if this tcp may be MDT-capable. For 2941 * outgoing, this process has been taken care of through 2942 * tcp_rput_other. 2943 */ 2944 tcp_ire_ill_check(tcp, ire, ill, incoming); 2945 tcp->tcp_ire_ill_check_done = B_TRUE; 2946 } 2947 2948 mutex_enter(&connp->conn_lock); 2949 /* 2950 * Make sure that conn is not marked incipient 2951 * for incoming connections. A blind 2952 * removal of incipient flag is cheaper than 2953 * check and removal. 2954 */ 2955 connp->conn_state_flags &= ~CONN_INCIPIENT; 2956 2957 /* 2958 * Must not cache forwarding table routes 2959 * or recache an IRE after the conn_t has 2960 * had conn_ire_cache cleared and is flagged 2961 * unusable, (see the CONN_CACHE_IRE() macro). 2962 */ 2963 if (ire_cacheable && CONN_CACHE_IRE(connp)) { 2964 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 2965 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 2966 connp->conn_ire_cache = ire; 2967 IRE_UNTRACE_REF(ire); 2968 rw_exit(&ire->ire_bucket->irb_lock); 2969 mutex_exit(&connp->conn_lock); 2970 return (1); 2971 } 2972 rw_exit(&ire->ire_bucket->irb_lock); 2973 } 2974 mutex_exit(&connp->conn_lock); 2975 2976 if (ire->ire_mp == NULL) 2977 ire_refrele(ire); 2978 return (1); 2979 2980 error: 2981 if (ire->ire_mp == NULL) 2982 ire_refrele(ire); 2983 if (sire != NULL) 2984 ire_refrele(sire); 2985 return (0); 2986 } 2987 2988 /* 2989 * tcp_bind is called (holding the writer lock) by tcp_wput_proto to process a 2990 * O_T_BIND_REQ/T_BIND_REQ message. 
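 */

/*
 * Hedged, user-space sketch of RFC 1948 style ISS generation as
 * described above; mix32() is a purely hypothetical stand-in for the
 * MD5-based F() the real tcp_iss_init() uses, and clock4us stands in
 * for the timer component M.  Excluded from the build (#if 0).
 */
#if 0
#include <stdint.h>

static uint32_t
mix32(uint32_t a, uint32_t b, uint32_t c)	/* hypothetical hash */
{
	uint32_t h = 0x9e3779b9;

	h ^= a + (h << 6) + (h >> 2);
	h ^= b + (h << 6) + (h >> 2);
	h ^= c + (h << 6) + (h >> 2);
	return (h);
}

static uint32_t
iss_1948(uint32_t laddr, uint32_t faddr, uint32_t ports,
    uint32_t secret, uint32_t clock4us)
{
	/* ISS = M + F(connection id, secret); M advances with time. */
	return (clock4us + mix32(laddr ^ faddr, ports, secret));
}
#endif

/*
 * tcp_bind() follows.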
2991 */ 2992 static void 2993 tcp_bind(tcp_t *tcp, mblk_t *mp) 2994 { 2995 sin_t *sin; 2996 sin6_t *sin6; 2997 mblk_t *mp1; 2998 in_port_t requested_port; 2999 in_port_t allocated_port; 3000 struct T_bind_req *tbr; 3001 boolean_t bind_to_req_port_only; 3002 boolean_t backlog_update = B_FALSE; 3003 boolean_t user_specified; 3004 in6_addr_t v6addr; 3005 ipaddr_t v4addr; 3006 uint_t origipversion; 3007 int err; 3008 queue_t *q = tcp->tcp_wq; 3009 conn_t *connp = tcp->tcp_connp; 3010 mlp_type_t addrtype, mlptype; 3011 zone_t *zone; 3012 cred_t *cr; 3013 in_port_t mlp_port; 3014 tcp_stack_t *tcps = tcp->tcp_tcps; 3015 3016 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 3017 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) { 3018 if (tcp->tcp_debug) { 3019 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3020 "tcp_bind: bad req, len %u", 3021 (uint_t)(mp->b_wptr - mp->b_rptr)); 3022 } 3023 tcp_err_ack(tcp, mp, TPROTO, 0); 3024 return; 3025 } 3026 /* Make sure the largest address fits */ 3027 mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1); 3028 if (mp1 == NULL) { 3029 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3030 return; 3031 } 3032 mp = mp1; 3033 tbr = (struct T_bind_req *)mp->b_rptr; 3034 if (tcp->tcp_state >= TCPS_BOUND) { 3035 if ((tcp->tcp_state == TCPS_BOUND || 3036 tcp->tcp_state == TCPS_LISTEN) && 3037 tcp->tcp_conn_req_max != tbr->CONIND_number && 3038 tbr->CONIND_number > 0) { 3039 /* 3040 * Handle listen() increasing CONIND_number. 3041 * This is more "liberal" then what the TPI spec 3042 * requires but is needed to avoid a t_unbind 3043 * when handling listen() since the port number 3044 * might be "stolen" between the unbind and bind. 3045 */ 3046 backlog_update = B_TRUE; 3047 goto do_bind; 3048 } 3049 if (tcp->tcp_debug) { 3050 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3051 "tcp_bind: bad state, %d", tcp->tcp_state); 3052 } 3053 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 3054 return; 3055 } 3056 origipversion = tcp->tcp_ipversion; 3057 3058 switch (tbr->ADDR_length) { 3059 case 0: /* request for a generic port */ 3060 tbr->ADDR_offset = sizeof (struct T_bind_req); 3061 if (tcp->tcp_family == AF_INET) { 3062 tbr->ADDR_length = sizeof (sin_t); 3063 sin = (sin_t *)&tbr[1]; 3064 *sin = sin_null; 3065 sin->sin_family = AF_INET; 3066 mp->b_wptr = (uchar_t *)&sin[1]; 3067 tcp->tcp_ipversion = IPV4_VERSION; 3068 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &v6addr); 3069 } else { 3070 ASSERT(tcp->tcp_family == AF_INET6); 3071 tbr->ADDR_length = sizeof (sin6_t); 3072 sin6 = (sin6_t *)&tbr[1]; 3073 *sin6 = sin6_null; 3074 sin6->sin6_family = AF_INET6; 3075 mp->b_wptr = (uchar_t *)&sin6[1]; 3076 tcp->tcp_ipversion = IPV6_VERSION; 3077 V6_SET_ZERO(v6addr); 3078 } 3079 requested_port = 0; 3080 break; 3081 3082 case sizeof (sin_t): /* Complete IPv4 address */ 3083 sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset, 3084 sizeof (sin_t)); 3085 if (sin == NULL || !OK_32PTR((char *)sin)) { 3086 if (tcp->tcp_debug) { 3087 (void) strlog(TCP_MOD_ID, 0, 1, 3088 SL_ERROR|SL_TRACE, 3089 "tcp_bind: bad address parameter, " 3090 "offset %d, len %d", 3091 tbr->ADDR_offset, tbr->ADDR_length); 3092 } 3093 tcp_err_ack(tcp, mp, TPROTO, 0); 3094 return; 3095 } 3096 /* 3097 * With sockets sockfs will accept bogus sin_family in 3098 * bind() and replace it with the family used in the socket 3099 * call. 
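 *
 * For example, a bind() on an AF_INET socket that passes down a
 * zeroed sin_family still arrives here as AF_INET, so for socket
 * endpoints the check below only rejects a genuine family mismatch.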
3100 */ 3101 if (sin->sin_family != AF_INET || 3102 tcp->tcp_family != AF_INET) { 3103 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3104 return; 3105 } 3106 requested_port = ntohs(sin->sin_port); 3107 tcp->tcp_ipversion = IPV4_VERSION; 3108 v4addr = sin->sin_addr.s_addr; 3109 IN6_IPADDR_TO_V4MAPPED(v4addr, &v6addr); 3110 break; 3111 3112 case sizeof (sin6_t): /* Complete IPv6 address */ 3113 sin6 = (sin6_t *)mi_offset_param(mp, 3114 tbr->ADDR_offset, sizeof (sin6_t)); 3115 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 3116 if (tcp->tcp_debug) { 3117 (void) strlog(TCP_MOD_ID, 0, 1, 3118 SL_ERROR|SL_TRACE, 3119 "tcp_bind: bad IPv6 address parameter, " 3120 "offset %d, len %d", tbr->ADDR_offset, 3121 tbr->ADDR_length); 3122 } 3123 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 3124 return; 3125 } 3126 if (sin6->sin6_family != AF_INET6 || 3127 tcp->tcp_family != AF_INET6) { 3128 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3129 return; 3130 } 3131 requested_port = ntohs(sin6->sin6_port); 3132 tcp->tcp_ipversion = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) ? 3133 IPV4_VERSION : IPV6_VERSION; 3134 v6addr = sin6->sin6_addr; 3135 break; 3136 3137 default: 3138 if (tcp->tcp_debug) { 3139 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3140 "tcp_bind: bad address length, %d", 3141 tbr->ADDR_length); 3142 } 3143 tcp_err_ack(tcp, mp, TBADADDR, 0); 3144 return; 3145 } 3146 tcp->tcp_bound_source_v6 = v6addr; 3147 3148 /* Check for change in ipversion */ 3149 if (origipversion != tcp->tcp_ipversion) { 3150 ASSERT(tcp->tcp_family == AF_INET6); 3151 err = tcp->tcp_ipversion == IPV6_VERSION ? 3152 tcp_header_init_ipv6(tcp) : tcp_header_init_ipv4(tcp); 3153 if (err) { 3154 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3155 return; 3156 } 3157 } 3158 3159 /* 3160 * Initialize family specific fields. A copy of the src addr 3161 * in tcp_t is needed for the lookup funcs. 3162 */ 3163 if (tcp->tcp_ipversion == IPV6_VERSION) { 3164 tcp->tcp_ip6h->ip6_src = v6addr; 3165 } else { 3166 IN6_V4MAPPED_TO_IPADDR(&v6addr, tcp->tcp_ipha->ipha_src); 3167 } 3168 tcp->tcp_ip_src_v6 = v6addr; 3169 3170 /* 3171 * For O_T_BIND_REQ: 3172 * Verify that the target port/addr is available, or choose 3173 * another. 3174 * For T_BIND_REQ: 3175 * Verify that the target port/addr is available or fail. 3176 * In both cases, when it succeeds the tcp is inserted in the 3177 * bind hash table. This ensures that the operation is atomic 3178 * under the lock on the hash bucket. 3179 */ 3180 bind_to_req_port_only = requested_port != 0 && 3181 tbr->PRIM_type != O_T_BIND_REQ; 3182 /* 3183 * Get a valid port (within the anonymous range and not 3184 * a privileged one) to use if the user has not given a port. 3185 * If multiple threads are here, they may all start 3186 * with the same initial port. But it should be fine as long as 3187 * tcp_bindi will ensure that no two threads will be assigned 3188 * the same port. 3189 * 3190 * NOTE: XXX If a privileged process asks for an anonymous port, we 3191 * still check for ports only in the range > tcp_smallest_non_priv_port, 3192 * unless the TCP_ANONPRIVBIND option is set. 3193 */ 3194 mlptype = mlptSingle; 3195 mlp_port = requested_port; 3196 if (requested_port == 0) { 3197 requested_port = tcp->tcp_anon_priv_bind ?
3198 tcp_get_next_priv_port(tcp) : 3199 tcp_update_next_port(tcps->tcps_next_port_to_try, 3200 tcp, B_TRUE); 3201 if (requested_port == 0) { 3202 tcp_err_ack(tcp, mp, TNOADDR, 0); 3203 return; 3204 } 3205 user_specified = B_FALSE; 3206 3207 /* 3208 * If the user went through one of the RPC interfaces to create 3209 * this socket and RPC is MLP in this zone, then give him an 3210 * anonymous MLP. 3211 */ 3212 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3213 if (connp->conn_anon_mlp && is_system_labeled()) { 3214 zone = crgetzone(cr); 3215 addrtype = tsol_mlp_addr_type(zone->zone_id, 3216 IPV6_VERSION, &v6addr, 3217 tcps->tcps_netstack->netstack_ip); 3218 if (addrtype == mlptSingle) { 3219 tcp_err_ack(tcp, mp, TNOADDR, 0); 3220 return; 3221 } 3222 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3223 PMAPPORT, addrtype); 3224 mlp_port = PMAPPORT; 3225 } 3226 } else { 3227 int i; 3228 boolean_t priv = B_FALSE; 3229 3230 /* 3231 * If the requested_port is in the well-known privileged range, 3232 * verify that the stream was opened by a privileged user. 3233 * Note: No locks are held when inspecting tcp_g_*epriv_ports 3234 * but instead the code relies on: 3235 * - the fact that the address of the array and its size never 3236 * changes 3237 * - the atomic assignment of the elements of the array 3238 */ 3239 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3240 if (requested_port < tcps->tcps_smallest_nonpriv_port) { 3241 priv = B_TRUE; 3242 } else { 3243 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 3244 if (requested_port == 3245 tcps->tcps_g_epriv_ports[i]) { 3246 priv = B_TRUE; 3247 break; 3248 } 3249 } 3250 } 3251 if (priv) { 3252 if (secpolicy_net_privaddr(cr, requested_port, 3253 IPPROTO_TCP) != 0) { 3254 if (tcp->tcp_debug) { 3255 (void) strlog(TCP_MOD_ID, 0, 1, 3256 SL_ERROR|SL_TRACE, 3257 "tcp_bind: no priv for port %d", 3258 requested_port); 3259 } 3260 tcp_err_ack(tcp, mp, TACCES, 0); 3261 return; 3262 } 3263 } 3264 user_specified = B_TRUE; 3265 3266 if (is_system_labeled()) { 3267 zone = crgetzone(cr); 3268 addrtype = tsol_mlp_addr_type(zone->zone_id, 3269 IPV6_VERSION, &v6addr, 3270 tcps->tcps_netstack->netstack_ip); 3271 if (addrtype == mlptSingle) { 3272 tcp_err_ack(tcp, mp, TNOADDR, 0); 3273 return; 3274 } 3275 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3276 requested_port, addrtype); 3277 } 3278 } 3279 3280 if (mlptype != mlptSingle) { 3281 if (secpolicy_net_bindmlp(cr) != 0) { 3282 if (tcp->tcp_debug) { 3283 (void) strlog(TCP_MOD_ID, 0, 1, 3284 SL_ERROR|SL_TRACE, 3285 "tcp_bind: no priv for multilevel port %d", 3286 requested_port); 3287 } 3288 tcp_err_ack(tcp, mp, TACCES, 0); 3289 return; 3290 } 3291 3292 /* 3293 * If we're specifically binding a shared IP address and the 3294 * port is MLP on shared addresses, then check to see if this 3295 * zone actually owns the MLP. Reject if not. 3296 */ 3297 if (mlptype == mlptShared && addrtype == mlptShared) { 3298 /* 3299 * No need to handle exclusive-stack zones since 3300 * ALL_ZONES only applies to the shared stack. 
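 * (tsol_mlp_findzone() below reports which zone owns the MLP for
 * this port; the bind is refused with TACCES unless that is the
 * binder's own zone.)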
3301 */ 3302 zoneid_t mlpzone; 3303 3304 mlpzone = tsol_mlp_findzone(IPPROTO_TCP, 3305 htons(mlp_port)); 3306 if (connp->conn_zoneid != mlpzone) { 3307 if (tcp->tcp_debug) { 3308 (void) strlog(TCP_MOD_ID, 0, 1, 3309 SL_ERROR|SL_TRACE, 3310 "tcp_bind: attempt to bind port " 3311 "%d on shared addr in zone %d " 3312 "(should be %d)", 3313 mlp_port, connp->conn_zoneid, 3314 mlpzone); 3315 } 3316 tcp_err_ack(tcp, mp, TACCES, 0); 3317 return; 3318 } 3319 } 3320 3321 if (!user_specified) { 3322 err = tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3323 requested_port, B_TRUE); 3324 if (err != 0) { 3325 if (tcp->tcp_debug) { 3326 (void) strlog(TCP_MOD_ID, 0, 1, 3327 SL_ERROR|SL_TRACE, 3328 "tcp_bind: cannot establish anon " 3329 "MLP for port %d", 3330 requested_port); 3331 } 3332 tcp_err_ack(tcp, mp, TSYSERR, err); 3333 return; 3334 } 3335 connp->conn_anon_port = B_TRUE; 3336 } 3337 connp->conn_mlp_type = mlptype; 3338 } 3339 3340 allocated_port = tcp_bindi(tcp, requested_port, &v6addr, 3341 tcp->tcp_reuseaddr, B_FALSE, bind_to_req_port_only, user_specified); 3342 3343 if (allocated_port == 0) { 3344 connp->conn_mlp_type = mlptSingle; 3345 if (connp->conn_anon_port) { 3346 connp->conn_anon_port = B_FALSE; 3347 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3348 requested_port, B_FALSE); 3349 } 3350 if (bind_to_req_port_only) { 3351 if (tcp->tcp_debug) { 3352 (void) strlog(TCP_MOD_ID, 0, 1, 3353 SL_ERROR|SL_TRACE, 3354 "tcp_bind: requested addr busy"); 3355 } 3356 tcp_err_ack(tcp, mp, TADDRBUSY, 0); 3357 } else { 3358 /* If we are out of ports, fail the bind. */ 3359 if (tcp->tcp_debug) { 3360 (void) strlog(TCP_MOD_ID, 0, 1, 3361 SL_ERROR|SL_TRACE, 3362 "tcp_bind: out of ports?"); 3363 } 3364 tcp_err_ack(tcp, mp, TNOADDR, 0); 3365 } 3366 return; 3367 } 3368 ASSERT(tcp->tcp_state == TCPS_BOUND); 3369 do_bind: 3370 if (!backlog_update) { 3371 if (tcp->tcp_family == AF_INET) 3372 sin->sin_port = htons(allocated_port); 3373 else 3374 sin6->sin6_port = htons(allocated_port); 3375 } 3376 if (tcp->tcp_family == AF_INET) { 3377 if (tbr->CONIND_number != 0) { 3378 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3379 sizeof (sin_t)); 3380 } else { 3381 /* Just verify the local IP address */ 3382 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, IP_ADDR_LEN); 3383 } 3384 } else { 3385 if (tbr->CONIND_number != 0) { 3386 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3387 sizeof (sin6_t)); 3388 } else { 3389 /* Just verify the local IP address */ 3390 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3391 IPV6_ADDR_LEN); 3392 } 3393 } 3394 if (mp1 == NULL) { 3395 if (connp->conn_anon_port) { 3396 connp->conn_anon_port = B_FALSE; 3397 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3398 requested_port, B_FALSE); 3399 } 3400 connp->conn_mlp_type = mlptSingle; 3401 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3402 return; 3403 } 3404 3405 tbr->PRIM_type = T_BIND_ACK; 3406 mp->b_datap->db_type = M_PCPROTO; 3407 3408 /* Chain in the reply mp for tcp_rput() */ 3409 mp1->b_cont = mp; 3410 mp = mp1; 3411 3412 tcp->tcp_conn_req_max = tbr->CONIND_number; 3413 if (tcp->tcp_conn_req_max) { 3414 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min) 3415 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min; 3416 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q) 3417 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q; 3418 /* 3419 * If this is a listener, do not reset the eager list 3420 * and other stuffs. Note that we don't check if the 3421 * existing eager list meets the new tcp_conn_req_max 3422 * requirement. 
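 * For example, an application that calls listen(fd, 5) and later
 * listen(fd, 50) re-enters tcp_bind() the second time with a larger
 * CONIND_number while already in TCPS_LISTEN; only tcp_conn_req_max
 * is updated (the backlog_update path above) and eagers already
 * queued are left untouched.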
3423 */ 3424 if (tcp->tcp_state != TCPS_LISTEN) { 3425 tcp->tcp_state = TCPS_LISTEN; 3426 /* Initialize the chain. Don't need the eager_lock */ 3427 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 3428 tcp->tcp_eager_next_drop_q0 = tcp; 3429 tcp->tcp_eager_prev_drop_q0 = tcp; 3430 tcp->tcp_second_ctimer_threshold = 3431 tcps->tcps_ip_abort_linterval; 3432 } 3433 } 3434 3435 /* 3436 * We can call ip_bind directly, which returns a T_BIND_ACK mp. The 3437 * processing continues in tcp_rput_other(). 3438 * 3439 * We need to make sure that the conn_recv is set to a non-null 3440 * value before we insert the conn into the classifier table. 3441 * This is to avoid a race with an incoming packet which does an 3442 * ipcl_classify(). 3443 */ 3444 connp->conn_recv = tcp_conn_request; 3445 if (tcp->tcp_family == AF_INET6) { 3446 ASSERT(tcp->tcp_connp->conn_af_isv6); 3447 mp = ip_bind_v6(q, mp, tcp->tcp_connp, &tcp->tcp_sticky_ipp); 3448 } else { 3449 ASSERT(!tcp->tcp_connp->conn_af_isv6); 3450 mp = ip_bind_v4(q, mp, tcp->tcp_connp); 3451 } 3452 /* 3453 * If the bind cannot complete immediately, 3454 * IP will arrange to call tcp_rput_other 3455 * when the bind completes. 3456 */ 3457 if (mp != NULL) { 3458 tcp_rput_other(tcp, mp); 3459 } else { 3460 /* 3461 * Bind will be resumed later. Need to ensure 3462 * that conn doesn't disappear when that happens. 3463 * This will be decremented in ip_resume_tcp_bind(). 3464 */ 3465 CONN_INC_REF(tcp->tcp_connp); 3466 } 3467 } 3468 3469 3470 /* 3471 * If the "bind_to_req_port_only" parameter is set and the requested port 3472 * number is available, return it. If not, return 0. 3473 * 3474 * If the "bind_to_req_port_only" parameter is not set and 3475 * the requested port number is available, return it. If not, return 3476 * the first anonymous port we happen across. If no anonymous ports are 3477 * available, return 0. addr is the requested local address, if any. 3478 * 3479 * In either case, on success update the tcp_t to record the port number 3480 * and insert it in the bind hash table. 3481 * 3482 * Note that TCP over IPv4 and IPv6 sockets can use the same port number 3483 * without setting SO_REUSEADDR. This is needed so that they 3484 * can be viewed as two independent transport protocols. 3485 */ 3486 static in_port_t 3487 tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr, 3488 int reuseaddr, boolean_t quick_connect, 3489 boolean_t bind_to_req_port_only, boolean_t user_specified) 3490 { 3491 /* number of times we have run around the loop */ 3492 int count = 0; 3493 /* maximum number of times to run around the loop */ 3494 int loopmax; 3495 conn_t *connp = tcp->tcp_connp; 3496 zoneid_t zoneid = connp->conn_zoneid; 3497 tcp_stack_t *tcps = tcp->tcp_tcps; 3498 3499 /* 3500 * The lookup for free addresses is done in a loop, and "loopmax" 3501 * influences how long we spin in the loop. 3502 */ 3503 if (bind_to_req_port_only) { 3504 /* 3505 * If the requested port is busy, don't bother to look 3506 * for a new one. Setting the loop maximum count to 1 has 3507 * that effect. 3508 */ 3509 loopmax = 1; 3510 } else { 3511 /* 3512 * If the requested port is busy, look for a free one 3513 * in the anonymous port range. 3514 * Set loopmax appropriately so that one does not look 3515 * forever in the case where all of the anonymous ports are in use.
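 * For example, with the stock anonymous range of 32768..65535
 * (tcp_smallest_anon_port and tcp_largest_anon_port), loopmax works
 * out to 65535 - 32768 + 1 = 32768 candidate ports before the
 * search is abandoned.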
3516 */ 3517 if (tcp->tcp_anon_priv_bind) { 3518 /* 3519 * loopmax = 3520 * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1 3521 */ 3522 loopmax = IPPORT_RESERVED - 3523 tcps->tcps_min_anonpriv_port; 3524 } else { 3525 loopmax = (tcps->tcps_largest_anon_port - 3526 tcps->tcps_smallest_anon_port + 1); 3527 } 3528 } 3529 do { 3530 uint16_t lport; 3531 tf_t *tbf; 3532 tcp_t *ltcp; 3533 conn_t *lconnp; 3534 3535 lport = htons(port); 3536 3537 /* 3538 * Ensure that the tcp_t is not currently in the bind hash. 3539 * Hold the lock on the hash bucket to ensure that 3540 * the duplicate check plus the insertion is an atomic 3541 * operation. 3542 * 3543 * This function does an inline lookup on the bind hash list. 3544 * Make sure that we access only members of tcp_t 3545 * and that we don't look at tcp_tcp, since we are not 3546 * doing a CONN_INC_REF. 3547 */ 3548 tcp_bind_hash_remove(tcp); 3549 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(lport)]; 3550 mutex_enter(&tbf->tf_lock); 3551 for (ltcp = tbf->tf_tcp; ltcp != NULL; 3552 ltcp = ltcp->tcp_bind_hash) { 3553 boolean_t not_socket; 3554 boolean_t exclbind; 3555 3556 if (lport != ltcp->tcp_lport) 3557 continue; 3558 3559 lconnp = ltcp->tcp_connp; 3560 3561 /* 3562 * On a labeled system, we must treat bindings to ports 3563 * on shared IP addresses by sockets with MAC exemption 3564 * privilege as being in all zones, as there's 3565 * otherwise no way to identify the right receiver. 3566 */ 3567 if (!(IPCL_ZONE_MATCH(ltcp->tcp_connp, zoneid) || 3568 IPCL_ZONE_MATCH(connp, 3569 ltcp->tcp_connp->conn_zoneid)) && 3570 !lconnp->conn_mac_exempt && 3571 !connp->conn_mac_exempt) 3572 continue; 3573 3574 /* 3575 * If TCP_EXCLBIND is set for either the bound or 3576 * binding endpoint, the semantics of bind 3577 * are changed according to the following. 3578 * 3579 * spec = specified address (v4 or v6) 3580 * unspec = unspecified address (v4 or v6) 3581 * A = specified addresses are different for endpoints 3582 * 3583 * bound bind to allowed 3584 * ------------------------------------- 3585 * unspec unspec no 3586 * unspec spec no 3587 * spec unspec no 3588 * spec spec yes if A 3589 * 3590 * For labeled systems, SO_MAC_EXEMPT behaves the same 3591 * as TCP_EXCLBIND, except that zoneid is ignored. 3592 * 3593 * Note: 3594 * 3595 * 1. Because of TLI semantics, an endpoint can go 3596 * back from, say, TCPS_ESTABLISHED to TCPS_LISTEN or 3597 * TCPS_BOUND, depending on whether it is originally 3598 * a listener or not. That is why we need to check 3599 * for states greater than or equal to TCPS_BOUND 3600 * here. 3601 * 3602 * 2. Ideally, we should only check for state equal 3603 * to TCPS_LISTEN. And the following check should be 3604 * added. 3605 * 3606 * if (ltcp->tcp_state == TCPS_LISTEN || 3607 * !reuseaddr || !ltcp->tcp_reuseaddr) { 3608 * ... 3609 * } 3610 * 3611 * The semantics will then be changed to this: if the 3612 * endpoint on the list is in a state not equal to 3613 * TCPS_LISTEN and both endpoints have SO_REUSEADDR 3614 * set, let the bind succeed. 3615 * 3616 * Because of (1), we cannot do that for TLI 3617 * endpoints. But we can do that for socket endpoints. 3618 * If, in the future, we can change this going-back 3619 * semantics, we can use the above check for TLI also.
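 *
 * As a concrete reading of the table above: with *_EXCLBIND set,
 * two binds to port 8080 can coexist only if both name specific and
 * distinct local addresses, e.g. 10.0.0.1:8080 and 10.0.0.2:8080;
 * any combination involving the unspecified address is refused.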
3620 */ 3621 not_socket = !(TCP_IS_SOCKET(ltcp) && 3622 TCP_IS_SOCKET(tcp)); 3623 exclbind = ltcp->tcp_exclbind || tcp->tcp_exclbind; 3624 3625 if (lconnp->conn_mac_exempt || connp->conn_mac_exempt || 3626 (exclbind && (not_socket || 3627 ltcp->tcp_state <= TCPS_ESTABLISHED))) { 3628 if (V6_OR_V4_INADDR_ANY( 3629 ltcp->tcp_bound_source_v6) || 3630 V6_OR_V4_INADDR_ANY(*laddr) || 3631 IN6_ARE_ADDR_EQUAL(laddr, 3632 &ltcp->tcp_bound_source_v6)) { 3633 break; 3634 } 3635 continue; 3636 } 3637 3638 /* 3639 * Check ipversion to allow IPv4 and IPv6 sockets to 3640 * have disjoint port number spaces, if *_EXCLBIND 3641 * is not set and only if the application binds to a 3642 * specific port. We use the same autoassigned port 3643 * number space for IPv4 and IPv6 sockets. 3644 */ 3645 if (tcp->tcp_ipversion != ltcp->tcp_ipversion && 3646 bind_to_req_port_only) 3647 continue; 3648 3649 /* 3650 * Ideally, we should make sure that the source 3651 * address, remote address, and remote port in the 3652 * four tuple for this tcp-connection are unique. 3653 * However, trying to find out the local source 3654 * address would require too much code duplication 3655 * with IP, since IP needs to have that code 3656 * to support userland TCP implementations. 3657 */ 3658 if (quick_connect && 3659 (ltcp->tcp_state > TCPS_LISTEN) && 3660 ((tcp->tcp_fport != ltcp->tcp_fport) || 3661 !IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6, 3662 &ltcp->tcp_remote_v6))) 3663 continue; 3664 3665 if (!reuseaddr) { 3666 /* 3667 * No socket option SO_REUSEADDR. 3668 * If existing port is bound to 3669 * a non-wildcard IP address 3670 * and the requesting stream is 3671 * bound to a distinct 3672 * (also non-wildcard) 3673 * IP address, keep 3674 * going. 3675 */ 3676 if (!V6_OR_V4_INADDR_ANY(*laddr) && 3677 !V6_OR_V4_INADDR_ANY( 3678 ltcp->tcp_bound_source_v6) && 3679 !IN6_ARE_ADDR_EQUAL(laddr, 3680 &ltcp->tcp_bound_source_v6)) 3681 continue; 3682 if (ltcp->tcp_state >= TCPS_BOUND) { 3683 /* 3684 * This port is being used and 3685 * its state is >= TCPS_BOUND, 3686 * so we can't bind to it. 3687 */ 3688 break; 3689 } 3690 } else { 3691 /* 3692 * socket option SO_REUSEADDR is set on the 3693 * binding tcp_t. 3694 * 3695 * If two streams are bound to 3696 * the same IP address or both addr 3697 * and bound source are wildcards 3698 * (INADDR_ANY), we want to stop 3699 * searching. 3700 * We have found a match of IP source 3701 * address and source port, which is 3702 * refused regardless of the 3703 * SO_REUSEADDR setting, so we break. 3704 */ 3705 if (IN6_ARE_ADDR_EQUAL(laddr, 3706 &ltcp->tcp_bound_source_v6) && 3707 (ltcp->tcp_state == TCPS_LISTEN || 3708 ltcp->tcp_state == TCPS_BOUND)) 3709 break; 3710 } 3711 } 3712 if (ltcp != NULL) { 3713 /* The port number is busy */ 3714 mutex_exit(&tbf->tf_lock); 3715 } else { 3716 /* 3717 * This port is ours. Insert in fanout and mark as 3718 * bound to prevent others from getting the port 3719 * number. 3720 */ 3721 tcp->tcp_state = TCPS_BOUND; 3722 tcp->tcp_lport = htons(port); 3723 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 3724 3725 ASSERT(&tcps->tcps_bind_fanout[TCP_BIND_HASH( 3726 tcp->tcp_lport)] == tbf); 3727 tcp_bind_hash_insert(tbf, tcp, 1); 3728 3729 mutex_exit(&tbf->tf_lock); 3730 3731 /* 3732 * We don't want tcp_next_port_to_try to "inherit" 3733 * a port number supplied by the user in a bind. 3734 */ 3735 if (user_specified) 3736 return (port); 3737 3738 /* 3739 * This is the only place where tcp_next_port_to_try 3740 * is updated.
After the update, it may or may not 3741 * be in the valid range. 3742 */ 3743 if (!tcp->tcp_anon_priv_bind) 3744 tcps->tcps_next_port_to_try = port + 1; 3745 return (port); 3746 } 3747 3748 if (tcp->tcp_anon_priv_bind) { 3749 port = tcp_get_next_priv_port(tcp); 3750 } else { 3751 if (count == 0 && user_specified) { 3752 /* 3753 * We may have to return an anonymous port. So 3754 * get one to start with. 3755 */ 3756 port = 3757 tcp_update_next_port( 3758 tcps->tcps_next_port_to_try, 3759 tcp, B_TRUE); 3760 user_specified = B_FALSE; 3761 } else { 3762 port = tcp_update_next_port(port + 1, tcp, 3763 B_FALSE); 3764 } 3765 } 3766 if (port == 0) 3767 break; 3768 3769 /* 3770 * Don't let this loop run forever in the case where 3771 * all of the anonymous ports are in use. 3772 */ 3773 } while (++count < loopmax); 3774 return (0); 3775 } 3776 3777 /* 3778 * tcp_clean_death / tcp_close_detached must not be called more than once 3779 * on a tcp. Thus every function that potentially calls tcp_clean_death 3780 * must check for the tcp state before calling tcp_clean_death. 3781 * E.g. tcp_input, tcp_rput_data, tcp_eager_kill, tcp_clean_death_wrapper, 3782 * tcp_timer_handler, all check for the tcp state. 3783 */ 3784 /* ARGSUSED */ 3785 void 3786 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2) 3787 { 3788 tcp_t *tcp = ((conn_t *)arg)->conn_tcp; 3789 3790 freemsg(mp); 3791 if (tcp->tcp_state > TCPS_BOUND) 3792 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, 3793 ETIMEDOUT, 5); 3794 } 3795 3796 /* 3797 * We are dying for some reason. Try to do it gracefully. (May be called 3798 * as writer.) 3799 * 3800 * Return -1 if the structure was not cleaned up (if the cleanup had to be 3801 * done by a service procedure). 3802 * TBD - Should the return value distinguish between the tcp_t being 3803 * freed and it being reinitialized? 3804 */ 3805 static int 3806 tcp_clean_death(tcp_t *tcp, int err, uint8_t tag) 3807 { 3808 mblk_t *mp; 3809 queue_t *q; 3810 tcp_stack_t *tcps = tcp->tcp_tcps; 3811 3812 TCP_CLD_STAT(tag); 3813 3814 #if TCP_TAG_CLEAN_DEATH 3815 tcp->tcp_cleandeathtag = tag; 3816 #endif 3817 3818 if (tcp->tcp_fused) 3819 tcp_unfuse(tcp); 3820 3821 if (tcp->tcp_linger_tid != 0 && 3822 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 3823 tcp_stop_lingering(tcp); 3824 } 3825 3826 ASSERT(tcp != NULL); 3827 ASSERT((tcp->tcp_family == AF_INET && 3828 tcp->tcp_ipversion == IPV4_VERSION) || 3829 (tcp->tcp_family == AF_INET6 && 3830 (tcp->tcp_ipversion == IPV4_VERSION || 3831 tcp->tcp_ipversion == IPV6_VERSION))); 3832 3833 if (TCP_IS_DETACHED(tcp)) { 3834 if (tcp->tcp_hard_binding) { 3835 /* 3836 * It's an eager that we are dealing with. We close the 3837 * eager but in case a conn_ind has already gone to the 3838 * listener, let tcp_accept_finish() send a discon_ind 3839 * to the listener and drop the last reference. If the 3840 * listener doesn't even know about the eager, i.e. the 3841 * conn_ind hasn't gone up, blow away the eager and drop 3842 * the last reference as well. If the conn_ind has gone 3843 * up, state should be BOUND. tcp_accept_finish 3844 * will figure out that the connection has received a 3845 * RST and will send a DISCON_IND to the application.
3846 */ 3847 tcp_closei_local(tcp); 3848 if (!tcp->tcp_tconnind_started) { 3849 CONN_DEC_REF(tcp->tcp_connp); 3850 } else { 3851 tcp->tcp_state = TCPS_BOUND; 3852 } 3853 } else { 3854 tcp_close_detached(tcp); 3855 } 3856 return (0); 3857 } 3858 3859 TCP_STAT(tcps, tcp_clean_death_nondetached); 3860 3861 /* 3862 * If T_ORDREL_IND has not been sent yet (done when service routine 3863 * is run) postpone cleaning up the endpoint until service routine 3864 * has sent up the T_ORDREL_IND. Avoid clearing out an existing 3865 * client_errno since tcp_close uses the client_errno field. 3866 */ 3867 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 3868 if (err != 0) 3869 tcp->tcp_client_errno = err; 3870 3871 tcp->tcp_deferred_clean_death = B_TRUE; 3872 return (-1); 3873 } 3874 3875 q = tcp->tcp_rq; 3876 3877 /* Trash all inbound data */ 3878 flushq(q, FLUSHALL); 3879 3880 /* 3881 * If we are at least part way open and there is error 3882 * (err==0 implies no error) 3883 * notify our client by a T_DISCON_IND. 3884 */ 3885 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) { 3886 if (tcp->tcp_state >= TCPS_ESTABLISHED && 3887 !TCP_IS_SOCKET(tcp)) { 3888 /* 3889 * Send M_FLUSH according to TPI. Because sockets will 3890 * (and must) ignore FLUSHR we do that only for TPI 3891 * endpoints and sockets in STREAMS mode. 3892 */ 3893 (void) putnextctl1(q, M_FLUSH, FLUSHR); 3894 } 3895 if (tcp->tcp_debug) { 3896 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 3897 "tcp_clean_death: discon err %d", err); 3898 } 3899 mp = mi_tpi_discon_ind(NULL, err, 0); 3900 if (mp != NULL) { 3901 putnext(q, mp); 3902 } else { 3903 if (tcp->tcp_debug) { 3904 (void) strlog(TCP_MOD_ID, 0, 1, 3905 SL_ERROR|SL_TRACE, 3906 "tcp_clean_death, sending M_ERROR"); 3907 } 3908 (void) putnextctl1(q, M_ERROR, EPROTO); 3909 } 3910 if (tcp->tcp_state <= TCPS_SYN_RCVD) { 3911 /* SYN_SENT or SYN_RCVD */ 3912 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 3913 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) { 3914 /* ESTABLISHED or CLOSE_WAIT */ 3915 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 3916 } 3917 } 3918 3919 tcp_reinit(tcp); 3920 return (-1); 3921 } 3922 3923 /* 3924 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout 3925 * to expire, stop the wait and finish the close. 3926 */ 3927 static void 3928 tcp_stop_lingering(tcp_t *tcp) 3929 { 3930 clock_t delta = 0; 3931 tcp_stack_t *tcps = tcp->tcp_tcps; 3932 3933 tcp->tcp_linger_tid = 0; 3934 if (tcp->tcp_state > TCPS_LISTEN) { 3935 tcp_acceptor_hash_remove(tcp); 3936 mutex_enter(&tcp->tcp_non_sq_lock); 3937 if (tcp->tcp_flow_stopped) { 3938 tcp_clrqfull(tcp); 3939 } 3940 mutex_exit(&tcp->tcp_non_sq_lock); 3941 3942 if (tcp->tcp_timer_tid != 0) { 3943 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 3944 tcp->tcp_timer_tid = 0; 3945 } 3946 /* 3947 * Need to cancel those timers which will not be used when 3948 * TCP is detached. This has to be done before the tcp_wq 3949 * is set to the global queue. 3950 */ 3951 tcp_timers_stop(tcp); 3952 3953 3954 tcp->tcp_detached = B_TRUE; 3955 ASSERT(tcps->tcps_g_q != NULL); 3956 tcp->tcp_rq = tcps->tcps_g_q; 3957 tcp->tcp_wq = WR(tcps->tcps_g_q); 3958 3959 if (tcp->tcp_state == TCPS_TIME_WAIT) { 3960 tcp_time_wait_append(tcp); 3961 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 3962 goto finish; 3963 } 3964 3965 /* 3966 * If delta is zero the timer event wasn't executed and was 3967 * successfully canceled. In this case we need to restart it 3968 * with the minimal delta possible. 
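 * (A negative return from TCP_TIMER_CANCEL() means the timeout had
 * already fired or was never pending, so there is nothing to
 * restart; hence the delta >= 0 test below.)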
3969 */ 3970 if (delta >= 0) { 3971 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 3972 delta ? delta : 1); 3973 } 3974 } else { 3975 tcp_closei_local(tcp); 3976 CONN_DEC_REF(tcp->tcp_connp); 3977 } 3978 finish: 3979 /* Signal closing thread that it can complete close */ 3980 mutex_enter(&tcp->tcp_closelock); 3981 tcp->tcp_detached = B_TRUE; 3982 ASSERT(tcps->tcps_g_q != NULL); 3983 tcp->tcp_rq = tcps->tcps_g_q; 3984 tcp->tcp_wq = WR(tcps->tcps_g_q); 3985 tcp->tcp_closed = 1; 3986 cv_signal(&tcp->tcp_closecv); 3987 mutex_exit(&tcp->tcp_closelock); 3988 } 3989 3990 /* 3991 * Handle lingering timeouts. This function is called when the SO_LINGER timeout 3992 * expires. 3993 */ 3994 static void 3995 tcp_close_linger_timeout(void *arg) 3996 { 3997 conn_t *connp = (conn_t *)arg; 3998 tcp_t *tcp = connp->conn_tcp; 3999 4000 tcp->tcp_client_errno = ETIMEDOUT; 4001 tcp_stop_lingering(tcp); 4002 } 4003 4004 static int 4005 tcp_close(queue_t *q, int flags) 4006 { 4007 conn_t *connp = Q_TO_CONN(q); 4008 tcp_t *tcp = connp->conn_tcp; 4009 mblk_t *mp = &tcp->tcp_closemp; 4010 boolean_t conn_ioctl_cleanup_reqd = B_FALSE; 4011 mblk_t *bp; 4012 4013 ASSERT(WR(q)->q_next == NULL); 4014 ASSERT(connp->conn_ref >= 2); 4015 4016 /* 4017 * We are being closed as /dev/tcp or /dev/tcp6. 4018 * 4019 * Mark the conn as closing. ill_pending_mp_add will not 4020 * add any mp to the pending mp list, after this conn has 4021 * started closing. Same for sq_pending_mp_add 4022 */ 4023 mutex_enter(&connp->conn_lock); 4024 connp->conn_state_flags |= CONN_CLOSING; 4025 if (connp->conn_oper_pending_ill != NULL) 4026 conn_ioctl_cleanup_reqd = B_TRUE; 4027 CONN_INC_REF_LOCKED(connp); 4028 mutex_exit(&connp->conn_lock); 4029 tcp->tcp_closeflags = (uint8_t)flags; 4030 ASSERT(connp->conn_ref >= 3); 4031 4032 /* 4033 * tcp_closemp_used is used below without any protection of a lock 4034 * as we don't expect any one else to use it concurrently at this 4035 * point otherwise it would be a major defect. 4036 */ 4037 4038 if (mp->b_prev == NULL) 4039 tcp->tcp_closemp_used = B_TRUE; 4040 else 4041 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: " 4042 "connp %p tcp %p\n", (void *)connp, (void *)tcp); 4043 4044 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 4045 4046 (*tcp_squeue_close_proc)(connp->conn_sqp, mp, 4047 tcp_close_output, connp, SQTAG_IP_TCP_CLOSE); 4048 4049 mutex_enter(&tcp->tcp_closelock); 4050 while (!tcp->tcp_closed) { 4051 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) { 4052 /* 4053 * The cv_wait_sig() was interrupted. We now do the 4054 * following: 4055 * 4056 * 1) If the endpoint was lingering, we allow this 4057 * to be interrupted by cancelling the linger timeout 4058 * and closing normally. 4059 * 4060 * 2) Revert to calling cv_wait() 4061 * 4062 * We revert to using cv_wait() to avoid an 4063 * infinite loop which can occur if the calling 4064 * thread is higher priority than the squeue worker 4065 * thread and is bound to the same cpu. 4066 */ 4067 if (tcp->tcp_linger && tcp->tcp_lingertime > 0) { 4068 mutex_exit(&tcp->tcp_closelock); 4069 /* Entering squeue, bump ref count. 
*/ 4070 CONN_INC_REF(connp); 4071 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL); 4072 squeue_enter(connp->conn_sqp, bp, 4073 tcp_linger_interrupted, connp, 4074 SQTAG_IP_TCP_CLOSE); 4075 mutex_enter(&tcp->tcp_closelock); 4076 } 4077 break; 4078 } 4079 } 4080 while (!tcp->tcp_closed) 4081 cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock); 4082 mutex_exit(&tcp->tcp_closelock); 4083 4084 /* 4085 * In the case of listener streams that have eagers in the q or q0 4086 * we wait for the eagers to drop their reference to us. tcp_rq and 4087 * tcp_wq of the eagers point to our queues. By waiting for the 4088 * refcnt to drop to 1, we are sure that the eagers have cleaned 4089 * up their queue pointers and also dropped their references to us. 4090 */ 4091 if (tcp->tcp_wait_for_eagers) { 4092 mutex_enter(&connp->conn_lock); 4093 while (connp->conn_ref != 1) { 4094 cv_wait(&connp->conn_cv, &connp->conn_lock); 4095 } 4096 mutex_exit(&connp->conn_lock); 4097 } 4098 /* 4099 * ioctl cleanup. The mp is queued in the 4100 * ill_pending_mp or in the sq_pending_mp. 4101 */ 4102 if (conn_ioctl_cleanup_reqd) 4103 conn_ioctl_cleanup(connp); 4104 4105 qprocsoff(q); 4106 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 4107 4108 tcp->tcp_cpid = -1; 4109 4110 /* 4111 * Drop IP's reference on the conn. This is the last reference 4112 * on the connp if the state was less than established. If the 4113 * connection has gone into timewait state, then we will have 4114 * one ref for the TCP and one more ref (total of two) for the 4115 * classifier connected hash list (a timewait connections stays 4116 * in connected hash till closed). 4117 * 4118 * We can't assert the references because there might be other 4119 * transient reference places because of some walkers or queued 4120 * packets in squeue for the timewait state. 4121 */ 4122 CONN_DEC_REF(connp); 4123 q->q_ptr = WR(q)->q_ptr = NULL; 4124 return (0); 4125 } 4126 4127 static int 4128 tcpclose_accept(queue_t *q) 4129 { 4130 vmem_t *minor_arena; 4131 dev_t conn_dev; 4132 4133 ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit); 4134 4135 /* 4136 * We had opened an acceptor STREAM for sockfs which is 4137 * now being closed due to some error. 4138 */ 4139 qprocsoff(q); 4140 4141 minor_arena = (vmem_t *)WR(q)->q_ptr; 4142 conn_dev = (dev_t)RD(q)->q_ptr; 4143 ASSERT(minor_arena != NULL); 4144 ASSERT(conn_dev != 0); 4145 inet_minor_free(minor_arena, conn_dev); 4146 q->q_ptr = WR(q)->q_ptr = NULL; 4147 return (0); 4148 } 4149 4150 /* 4151 * Called by tcp_close() routine via squeue when lingering is 4152 * interrupted by a signal. 4153 */ 4154 4155 /* ARGSUSED */ 4156 static void 4157 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2) 4158 { 4159 conn_t *connp = (conn_t *)arg; 4160 tcp_t *tcp = connp->conn_tcp; 4161 4162 freeb(mp); 4163 if (tcp->tcp_linger_tid != 0 && 4164 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 4165 tcp_stop_lingering(tcp); 4166 tcp->tcp_client_errno = EINTR; 4167 } 4168 } 4169 4170 /* 4171 * Called by streams close routine via squeues when our client blows off her 4172 * descriptor, we take this to mean: "close the stream state NOW, close the tcp 4173 * connection politely" When SO_LINGER is set (with a non-zero linger time and 4174 * it is not a nonblocking socket) then this routine sleeps until the FIN is 4175 * acked. 4176 * 4177 * NOTE: tcp_close potentially returns error when lingering. 4178 * However, the stream head currently does not pass these errors 4179 * to the application. 
4.4BSD only returns EINTR and EWOULDBLOCK 4180 * errors to the application (from tsleep()) and not errors 4181 * like ECONNRESET caused by receiving a reset packet. 4182 */ 4183 4184 /* ARGSUSED */ 4185 static void 4186 tcp_close_output(void *arg, mblk_t *mp, void *arg2) 4187 { 4188 char *msg; 4189 conn_t *connp = (conn_t *)arg; 4190 tcp_t *tcp = connp->conn_tcp; 4191 clock_t delta = 0; 4192 tcp_stack_t *tcps = tcp->tcp_tcps; 4193 4194 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 4195 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 4196 4197 /* Cancel any pending timeout */ 4198 if (tcp->tcp_ordrelid != 0) { 4199 if (tcp->tcp_timeout) { 4200 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ordrelid); 4201 } 4202 tcp->tcp_ordrelid = 0; 4203 tcp->tcp_timeout = B_FALSE; 4204 } 4205 4206 mutex_enter(&tcp->tcp_eager_lock); 4207 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 4208 /* Cleanup for listener */ 4209 tcp_eager_cleanup(tcp, 0); 4210 tcp->tcp_wait_for_eagers = 1; 4211 } 4212 mutex_exit(&tcp->tcp_eager_lock); 4213 4214 connp->conn_mdt_ok = B_FALSE; 4215 tcp->tcp_mdt = B_FALSE; 4216 4217 connp->conn_lso_ok = B_FALSE; 4218 tcp->tcp_lso = B_FALSE; 4219 4220 msg = NULL; 4221 switch (tcp->tcp_state) { 4222 case TCPS_CLOSED: 4223 case TCPS_IDLE: 4224 case TCPS_BOUND: 4225 case TCPS_LISTEN: 4226 break; 4227 case TCPS_SYN_SENT: 4228 msg = "tcp_close, during connect"; 4229 break; 4230 case TCPS_SYN_RCVD: 4231 /* 4232 * Close during the connect 3-way handshake 4233 * but here there may or may not be pending data 4234 * already on the queue. Process almost the same as in 4235 * the ESTABLISHED state. 4236 */ 4237 /* FALLTHRU */ 4238 default: 4239 if (tcp->tcp_fused) 4240 tcp_unfuse(tcp); 4241 4242 /* 4243 * If SO_LINGER has set a zero linger time, abort the 4244 * connection with a reset. 4245 */ 4246 if (tcp->tcp_linger && tcp->tcp_lingertime == 0) { 4247 msg = "tcp_close, zero lingertime"; 4248 break; 4249 } 4250 4251 ASSERT(tcp->tcp_hard_bound || tcp->tcp_hard_binding); 4252 /* 4253 * Abort connection if there is unread data queued. 4254 */ 4255 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) { 4256 msg = "tcp_close, unread data"; 4257 break; 4258 } 4259 /* 4260 * tcp_hard_bound is now cleared, thus all packets go through 4261 * tcp_lookup. This fact is used by tcp_detach below. 4262 * 4263 * We have done a qwait() above which could have possibly 4264 * drained more messages, in turn causing a transition to a 4265 * different state. Check whether we have to do the rest 4266 * of the processing or not. 4267 */ 4268 if (tcp->tcp_state <= TCPS_LISTEN) 4269 break; 4270 4271 /* 4272 * Transmit the FIN before detaching the tcp_t. 4273 * After tcp_detach returns, this queue/perimeter 4274 * no longer owns the tcp_t, thus others can modify it. 4275 */ 4276 (void) tcp_xmit_end(tcp); 4277 4278 /* 4279 * If lingering on close, then wait until the FIN is acked, 4280 * the SO_LINGER time passes, or a reset is sent/received.
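 *
 * For reference, an application arms this path through the standard
 * sockets interface; a minimal user-level sketch (fd being a
 * connected TCP socket):
 *
 *	struct linger l;
 *	l.l_onoff = 1;		-- linger on close
 *	l.l_linger = 10;	-- for at most 10 seconds
 *	(void) setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof (l));
 *
 * tcp_lingertime holds l_linger in seconds, which is why the timer
 * below is armed with tcp_lingertime * hz.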
4281 */ 4282 if (tcp->tcp_linger && tcp->tcp_lingertime > 0 && 4283 !(tcp->tcp_fin_acked) && 4284 tcp->tcp_state >= TCPS_ESTABLISHED) { 4285 if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) { 4286 tcp->tcp_client_errno = EWOULDBLOCK; 4287 } else if (tcp->tcp_client_errno == 0) { 4288 4289 ASSERT(tcp->tcp_linger_tid == 0); 4290 4291 tcp->tcp_linger_tid = TCP_TIMER(tcp, 4292 tcp_close_linger_timeout, 4293 tcp->tcp_lingertime * hz); 4294 4295 /* tcp_close_linger_timeout will finish close */ 4296 if (tcp->tcp_linger_tid == 0) 4297 tcp->tcp_client_errno = ENOSR; 4298 else 4299 return; 4300 } 4301 4302 /* 4303 * Check if we need to detach or just close 4304 * the instance. 4305 */ 4306 if (tcp->tcp_state <= TCPS_LISTEN) 4307 break; 4308 } 4309 4310 /* 4311 * Make sure that no other thread will access the tcp_rq of 4312 * this instance (through lookups etc.) as tcp_rq will go 4313 * away shortly. 4314 */ 4315 tcp_acceptor_hash_remove(tcp); 4316 4317 mutex_enter(&tcp->tcp_non_sq_lock); 4318 if (tcp->tcp_flow_stopped) { 4319 tcp_clrqfull(tcp); 4320 } 4321 mutex_exit(&tcp->tcp_non_sq_lock); 4322 4323 if (tcp->tcp_timer_tid != 0) { 4324 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4325 tcp->tcp_timer_tid = 0; 4326 } 4327 /* 4328 * Need to cancel those timers which will not be used when 4329 * TCP is detached. This has to be done before the tcp_wq 4330 * is set to the global queue. 4331 */ 4332 tcp_timers_stop(tcp); 4333 4334 tcp->tcp_detached = B_TRUE; 4335 if (tcp->tcp_state == TCPS_TIME_WAIT) { 4336 tcp_time_wait_append(tcp); 4337 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 4338 ASSERT(connp->conn_ref >= 3); 4339 goto finish; 4340 } 4341 4342 /* 4343 * If delta is zero the timer event wasn't executed and was 4344 * successfully canceled. In this case we need to restart it 4345 * with the minimal delta possible. 4346 */ 4347 if (delta >= 0) 4348 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 4349 delta ? delta : 1); 4350 4351 ASSERT(connp->conn_ref >= 3); 4352 goto finish; 4353 } 4354 4355 /* Detach did not complete. Still need to remove q from stream. */ 4356 if (msg) { 4357 if (tcp->tcp_state == TCPS_ESTABLISHED || 4358 tcp->tcp_state == TCPS_CLOSE_WAIT) 4359 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 4360 if (tcp->tcp_state == TCPS_SYN_SENT || 4361 tcp->tcp_state == TCPS_SYN_RCVD) 4362 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 4363 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST); 4364 } 4365 4366 tcp_closei_local(tcp); 4367 CONN_DEC_REF(connp); 4368 ASSERT(connp->conn_ref >= 2); 4369 4370 finish: 4371 /* 4372 * Although packets are always processed on the correct 4373 * tcp's perimeter and access is serialized via squeue's, 4374 * IP still needs a queue when sending packets in time_wait 4375 * state so use WR(tcps_g_q) till ip_output() can be 4376 * changed to deal with just connp. For read side, we 4377 * could have set tcp_rq to NULL but there are some cases 4378 * in tcp_rput_data() from early days of this code which 4379 * do a putnext without checking if tcp is closed. Those 4380 * need to be identified before both tcp_rq and tcp_wq 4381 * can be set to NULL and tcps_g_q can disappear forever. 4382 */ 4383 mutex_enter(&tcp->tcp_closelock); 4384 /* 4385 * Don't change the queues in the case of a listener that has 4386 * eagers in its q or q0. It could surprise the eagers. 4387 * Instead wait for the eagers outside the squeue. 
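 * (tcp_close() performs that wait: it blocks on conn_cv until
 * conn_ref drops to 1, i.e. until every eager has cleared its queue
 * pointers and released its reference.)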
4388 */ 4389 if (!tcp->tcp_wait_for_eagers) { 4390 tcp->tcp_detached = B_TRUE; 4391 /* 4392 * When default queue is closing we set tcps_g_q to NULL 4393 * after the close is done. 4394 */ 4395 ASSERT(tcps->tcps_g_q != NULL); 4396 tcp->tcp_rq = tcps->tcps_g_q; 4397 tcp->tcp_wq = WR(tcps->tcps_g_q); 4398 } 4399 4400 /* Signal tcp_close() to finish closing. */ 4401 tcp->tcp_closed = 1; 4402 cv_signal(&tcp->tcp_closecv); 4403 mutex_exit(&tcp->tcp_closelock); 4404 } 4405 4406 4407 /* 4408 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp. 4409 * Some stream heads get upset if they see these later on as anything but NULL. 4410 */ 4411 static void 4412 tcp_close_mpp(mblk_t **mpp) 4413 { 4414 mblk_t *mp; 4415 4416 if ((mp = *mpp) != NULL) { 4417 do { 4418 mp->b_next = NULL; 4419 mp->b_prev = NULL; 4420 } while ((mp = mp->b_cont) != NULL); 4421 4422 mp = *mpp; 4423 *mpp = NULL; 4424 freemsg(mp); 4425 } 4426 } 4427 4428 /* Do detached close. */ 4429 static void 4430 tcp_close_detached(tcp_t *tcp) 4431 { 4432 if (tcp->tcp_fused) 4433 tcp_unfuse(tcp); 4434 4435 /* 4436 * Clustering code serializes TCP disconnect callbacks and 4437 * cluster tcp list walks by blocking a TCP disconnect callback 4438 * if a cluster tcp list walk is in progress. This ensures 4439 * accurate accounting of TCPs in the cluster code even though 4440 * the TCP list walk itself is not atomic. 4441 */ 4442 tcp_closei_local(tcp); 4443 CONN_DEC_REF(tcp->tcp_connp); 4444 } 4445 4446 /* 4447 * Stop all TCP timers, and free the timer mblks if requested. 4448 */ 4449 void 4450 tcp_timers_stop(tcp_t *tcp) 4451 { 4452 if (tcp->tcp_timer_tid != 0) { 4453 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4454 tcp->tcp_timer_tid = 0; 4455 } 4456 if (tcp->tcp_ka_tid != 0) { 4457 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ka_tid); 4458 tcp->tcp_ka_tid = 0; 4459 } 4460 if (tcp->tcp_ack_tid != 0) { 4461 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4462 tcp->tcp_ack_tid = 0; 4463 } 4464 if (tcp->tcp_push_tid != 0) { 4465 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 4466 tcp->tcp_push_tid = 0; 4467 } 4468 } 4469 4470 /* 4471 * The tcp_t is going away. Remove it from all lists and set it 4472 * to TCPS_CLOSED. The freeing up of memory is deferred until 4473 * tcp_inactive. This is needed since a thread in tcp_rput might have 4474 * done a CONN_INC_REF on this structure before it was removed from the 4475 * hashes. 4476 */ 4477 static void 4478 tcp_closei_local(tcp_t *tcp) 4479 { 4480 ire_t *ire; 4481 conn_t *connp = tcp->tcp_connp; 4482 tcp_stack_t *tcps = tcp->tcp_tcps; 4483 4484 if (!TCP_IS_SOCKET(tcp)) 4485 tcp_acceptor_hash_remove(tcp); 4486 4487 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 4488 tcp->tcp_ibsegs = 0; 4489 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 4490 tcp->tcp_obsegs = 0; 4491 4492 /* 4493 * If we are an eager connection hanging off a listener that 4494 * hasn't formally accepted the connection yet, get off his 4495 * list and blow off any data that we have accumulated. 4496 */ 4497 if (tcp->tcp_listener != NULL) { 4498 tcp_t *listener = tcp->tcp_listener; 4499 mutex_enter(&listener->tcp_eager_lock); 4500 /* 4501 * tcp_tconnind_started == B_TRUE means that the 4502 * conn_ind has already gone to listener. At 4503 * this point, eager will be closed but we 4504 * leave it in listeners eager list so that 4505 * if listener decides to close without doing 4506 * accept, we can clean this up. In tcp_wput_accept 4507 * we take care of the case of accept on closed 4508 * eager. 
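 * (Roughly speaking, an eager sits on the listener's q0 while its
 * three-way handshake is incomplete and is moved to q once it is
 * established and a conn_ind has been queued for the listener.)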
4509 */ 4510 if (!tcp->tcp_tconnind_started) { 4511 tcp_eager_unlink(tcp); 4512 mutex_exit(&listener->tcp_eager_lock); 4513 /* 4514 * We don't want to have any pointers to the 4515 * listener queue, after we have released our 4516 * reference on the listener 4517 */ 4518 ASSERT(tcps->tcps_g_q != NULL); 4519 tcp->tcp_rq = tcps->tcps_g_q; 4520 tcp->tcp_wq = WR(tcps->tcps_g_q); 4521 CONN_DEC_REF(listener->tcp_connp); 4522 } else { 4523 mutex_exit(&listener->tcp_eager_lock); 4524 } 4525 } 4526 4527 /* Stop all the timers */ 4528 tcp_timers_stop(tcp); 4529 4530 if (tcp->tcp_state == TCPS_LISTEN) { 4531 if (tcp->tcp_ip_addr_cache) { 4532 kmem_free((void *)tcp->tcp_ip_addr_cache, 4533 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 4534 tcp->tcp_ip_addr_cache = NULL; 4535 } 4536 } 4537 mutex_enter(&tcp->tcp_non_sq_lock); 4538 if (tcp->tcp_flow_stopped) 4539 tcp_clrqfull(tcp); 4540 mutex_exit(&tcp->tcp_non_sq_lock); 4541 4542 tcp_bind_hash_remove(tcp); 4543 /* 4544 * If the tcp_time_wait_collector (which runs outside the squeue) 4545 * is trying to remove this tcp from the time wait list, we will 4546 * block in tcp_time_wait_remove while trying to acquire the 4547 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also 4548 * requires the ipcl_hash_remove to be ordered after the 4549 * tcp_time_wait_remove for the refcnt checks to work correctly. 4550 */ 4551 if (tcp->tcp_state == TCPS_TIME_WAIT) 4552 (void) tcp_time_wait_remove(tcp, NULL); 4553 CL_INET_DISCONNECT(tcp); 4554 ipcl_hash_remove(connp); 4555 4556 /* 4557 * Delete the cached ire in conn_ire_cache and also mark 4558 * the conn as CONDEMNED 4559 */ 4560 mutex_enter(&connp->conn_lock); 4561 connp->conn_state_flags |= CONN_CONDEMNED; 4562 ire = connp->conn_ire_cache; 4563 connp->conn_ire_cache = NULL; 4564 mutex_exit(&connp->conn_lock); 4565 if (ire != NULL) 4566 IRE_REFRELE_NOTR(ire); 4567 4568 /* Need to cleanup any pending ioctls */ 4569 ASSERT(tcp->tcp_time_wait_next == NULL); 4570 ASSERT(tcp->tcp_time_wait_prev == NULL); 4571 ASSERT(tcp->tcp_time_wait_expire == 0); 4572 tcp->tcp_state = TCPS_CLOSED; 4573 4574 /* Release any SSL context */ 4575 if (tcp->tcp_kssl_ent != NULL) { 4576 kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY); 4577 tcp->tcp_kssl_ent = NULL; 4578 } 4579 if (tcp->tcp_kssl_ctx != NULL) { 4580 kssl_release_ctx(tcp->tcp_kssl_ctx); 4581 tcp->tcp_kssl_ctx = NULL; 4582 } 4583 tcp->tcp_kssl_pending = B_FALSE; 4584 4585 tcp_ipsec_cleanup(tcp); 4586 } 4587 4588 /* 4589 * tcp is dying (called from ipcl_conn_destroy and error cases). 4590 * Free the tcp_t in either case. 
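 * Note that tcp_free() only releases what hangs off the tcp_t --
 * queued message chains, sticky IPv6 options, SACK bookkeeping and
 * the trace buffer; the tcp_t itself is embedded in the conn_t and
 * is reclaimed along with it.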
4591 */ 4592 void 4593 tcp_free(tcp_t *tcp) 4594 { 4595 mblk_t *mp; 4596 ip6_pkt_t *ipp; 4597 4598 ASSERT(tcp != NULL); 4599 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL); 4600 4601 tcp->tcp_rq = NULL; 4602 tcp->tcp_wq = NULL; 4603 4604 tcp_close_mpp(&tcp->tcp_xmit_head); 4605 tcp_close_mpp(&tcp->tcp_reass_head); 4606 if (tcp->tcp_rcv_list != NULL) { 4607 /* Free b_next chain */ 4608 tcp_close_mpp(&tcp->tcp_rcv_list); 4609 } 4610 if ((mp = tcp->tcp_urp_mp) != NULL) { 4611 freemsg(mp); 4612 } 4613 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 4614 freemsg(mp); 4615 } 4616 4617 if (tcp->tcp_fused_sigurg_mp != NULL) { 4618 freeb(tcp->tcp_fused_sigurg_mp); 4619 tcp->tcp_fused_sigurg_mp = NULL; 4620 } 4621 4622 if (tcp->tcp_sack_info != NULL) { 4623 if (tcp->tcp_notsack_list != NULL) { 4624 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 4625 } 4626 bzero(tcp->tcp_sack_info, sizeof (tcp_sack_info_t)); 4627 } 4628 4629 if (tcp->tcp_hopopts != NULL) { 4630 mi_free(tcp->tcp_hopopts); 4631 tcp->tcp_hopopts = NULL; 4632 tcp->tcp_hopoptslen = 0; 4633 } 4634 ASSERT(tcp->tcp_hopoptslen == 0); 4635 if (tcp->tcp_dstopts != NULL) { 4636 mi_free(tcp->tcp_dstopts); 4637 tcp->tcp_dstopts = NULL; 4638 tcp->tcp_dstoptslen = 0; 4639 } 4640 ASSERT(tcp->tcp_dstoptslen == 0); 4641 if (tcp->tcp_rtdstopts != NULL) { 4642 mi_free(tcp->tcp_rtdstopts); 4643 tcp->tcp_rtdstopts = NULL; 4644 tcp->tcp_rtdstoptslen = 0; 4645 } 4646 ASSERT(tcp->tcp_rtdstoptslen == 0); 4647 if (tcp->tcp_rthdr != NULL) { 4648 mi_free(tcp->tcp_rthdr); 4649 tcp->tcp_rthdr = NULL; 4650 tcp->tcp_rthdrlen = 0; 4651 } 4652 ASSERT(tcp->tcp_rthdrlen == 0); 4653 4654 ipp = &tcp->tcp_sticky_ipp; 4655 if (ipp->ipp_fields & (IPPF_HOPOPTS | IPPF_RTDSTOPTS | IPPF_DSTOPTS | 4656 IPPF_RTHDR)) 4657 ip6_pkt_free(ipp); 4658 4659 /* 4660 * Free memory associated with the tcp/ip header template. 4661 */ 4662 4663 if (tcp->tcp_iphc != NULL) 4664 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4665 4666 /* 4667 * The following really blows away a union. 4668 * Since it happens to have exactly two members of identical size, 4669 * the following code is enough. 4670 */ 4671 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 4672 4673 if (tcp->tcp_tracebuf != NULL) { 4674 kmem_free(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 4675 tcp->tcp_tracebuf = NULL; 4676 } 4677 } 4678 4679 4680 /* 4681 * Put a connection confirmation message upstream built from the 4682 * address information within 'iphdr' and 'tcph'. Report our success or failure. 4683 */ 4684 static boolean_t 4685 tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph, mblk_t *idmp, 4686 mblk_t **defermp) 4687 { 4688 sin_t sin; 4689 sin6_t sin6; 4690 mblk_t *mp; 4691 char *optp = NULL; 4692 int optlen = 0; 4693 cred_t *cr; 4694 4695 if (defermp != NULL) 4696 *defermp = NULL; 4697 4698 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) { 4699 /* 4700 * Return in T_CONN_CON the results of option negotiation through 4701 * the T_CONN_REQ. Note: If there were a real end-to-end option 4702 * negotiation, then what is received from the remote end would need 4703 * to be taken into account, but there is no such thing (yet?) 4704 * in our TCP/IP. 4705 * Note: We do not use mi_offset_param() here as 4706 * tcp_opts_conn_req contents do not directly come from 4707 * an application and are either generated in the kernel or 4708 * from user input that was already verified.
4709 */ 4710 mp = tcp->tcp_conn.tcp_opts_conn_req; 4711 optp = (char *)(mp->b_rptr + 4712 ((struct T_conn_req *)mp->b_rptr)->OPT_offset); 4713 optlen = (int) 4714 ((struct T_conn_req *)mp->b_rptr)->OPT_length; 4715 } 4716 4717 if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) { 4718 ipha_t *ipha = (ipha_t *)iphdr; 4719 4720 /* packet is IPv4 */ 4721 if (tcp->tcp_family == AF_INET) { 4722 sin = sin_null; 4723 sin.sin_addr.s_addr = ipha->ipha_src; 4724 sin.sin_port = *(uint16_t *)tcph->th_lport; 4725 sin.sin_family = AF_INET; 4726 mp = mi_tpi_conn_con(NULL, (char *)&sin, 4727 (int)sizeof (sin_t), optp, optlen); 4728 } else { 4729 sin6 = sin6_null; 4730 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4731 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4732 sin6.sin6_family = AF_INET6; 4733 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4734 (int)sizeof (sin6_t), optp, optlen); 4735 4736 } 4737 } else { 4738 ip6_t *ip6h = (ip6_t *)iphdr; 4739 4740 ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION); 4741 ASSERT(tcp->tcp_family == AF_INET6); 4742 sin6 = sin6_null; 4743 sin6.sin6_addr = ip6h->ip6_src; 4744 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4745 sin6.sin6_family = AF_INET6; 4746 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4747 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4748 (int)sizeof (sin6_t), optp, optlen); 4749 } 4750 4751 if (!mp) 4752 return (B_FALSE); 4753 4754 if ((cr = DB_CRED(idmp)) != NULL) { 4755 mblk_setcred(mp, cr); 4756 DB_CPID(mp) = DB_CPID(idmp); 4757 } 4758 4759 if (defermp == NULL) 4760 putnext(tcp->tcp_rq, mp); 4761 else 4762 *defermp = mp; 4763 4764 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 4765 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 4766 return (B_TRUE); 4767 } 4768 4769 /* 4770 * Defense for the SYN attack - 4771 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest 4772 * one from the list of droppable eagers. This list is a subset of q0. 4773 * See comments before the definition of MAKE_DROPPABLE(). 4774 * 2. Don't drop a SYN request before its first timeout. This gives every 4775 * request at least until the first timeout to complete its 3-way handshake. 4776 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many 4777 * requests currently on the queue have timed out. This will be used 4778 * as an indicator of whether an attack is under way, so that appropriate 4779 * actions can be taken. (It's incremented in tcp_timer() and decremented 4780 * either when the eager goes into ESTABLISHED, or gets freed up.) 4781 * 4. The current thresholds are: # of timeouts > q0len/4 => SYN alert on; 4782 * # of timeouts drops back to <= q0len/32 => SYN alert off 4783 */ 4784 static boolean_t 4785 tcp_drop_q0(tcp_t *tcp) 4786 { 4787 tcp_t *eager; 4788 mblk_t *mp; 4789 tcp_stack_t *tcps = tcp->tcp_tcps; 4790 4791 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock)); 4792 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 4793 4794 /* Pick the oldest eager from the list of droppable eagers */ 4795 eager = tcp->tcp_eager_prev_drop_q0; 4796 4797 /* If the list is empty, return B_FALSE */ 4798 if (eager == tcp) { 4799 return (B_FALSE); 4800 } 4801 4802 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */ 4803 if ((mp = allocb(0, BPRI_HI)) == NULL) 4804 return (B_FALSE); 4805 4806 /* 4807 * Take this eager out from the list of droppable eagers since we are 4808 * going to drop it.
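 * The actual teardown happens asynchronously: tcp_clean_death_wrapper()
 * runs on the eager's squeue (see the squeue_fill() below), so the
 * eager is unlinked now to keep a later tcp_drop_q0() call from
 * picking the same victim twice.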
4809 */ 4810 MAKE_UNDROPPABLE(eager); 4811 4812 if (tcp->tcp_debug) { 4813 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 4814 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 4815 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0, 4816 tcp->tcp_conn_req_cnt_q0, 4817 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4818 } 4819 4820 BUMP_MIB(&tcps->tcps_mib, tcpHalfOpenDrop); 4821 4822 /* Put a reference on the conn as we are enqueueing it in the squeue */ 4823 CONN_INC_REF(eager->tcp_connp); 4824 4825 /* Mark the IRE created for this SYN request temporary */ 4826 tcp_ip_ire_mark_advice(eager); 4827 squeue_fill(eager->tcp_connp->conn_sqp, mp, 4828 tcp_clean_death_wrapper, eager->tcp_connp, SQTAG_TCP_DROP_Q0); 4829 4830 return (B_TRUE); 4831 } 4832 4833 int 4834 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 4835 tcph_t *tcph, uint_t ipvers, mblk_t *idmp) 4836 { 4837 tcp_t *ltcp = lconnp->conn_tcp; 4838 tcp_t *tcp = connp->conn_tcp; 4839 mblk_t *tpi_mp; 4840 ipha_t *ipha; 4841 ip6_t *ip6h; 4842 sin6_t sin6; 4843 in6_addr_t v6dst; 4844 int err; 4845 int ifindex = 0; 4846 cred_t *cr; 4847 tcp_stack_t *tcps = tcp->tcp_tcps; 4848 4849 if (ipvers == IPV4_VERSION) { 4850 ipha = (ipha_t *)mp->b_rptr; 4851 4852 connp->conn_send = ip_output; 4853 connp->conn_recv = tcp_input; 4854 4855 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 4856 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 4857 4858 sin6 = sin6_null; 4859 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4860 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &v6dst); 4861 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4862 sin6.sin6_family = AF_INET6; 4863 sin6.__sin6_src_id = ip_srcid_find_addr(&v6dst, 4864 lconnp->conn_zoneid, tcps->tcps_netstack); 4865 if (tcp->tcp_recvdstaddr) { 4866 sin6_t sin6d; 4867 4868 sin6d = sin6_null; 4869 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, 4870 &sin6d.sin6_addr); 4871 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4872 sin6d.sin6_family = AF_INET; 4873 tpi_mp = mi_tpi_extconn_ind(NULL, 4874 (char *)&sin6d, sizeof (sin6_t), 4875 (char *)&tcp, 4876 (t_scalar_t)sizeof (intptr_t), 4877 (char *)&sin6d, sizeof (sin6_t), 4878 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4879 } else { 4880 tpi_mp = mi_tpi_conn_ind(NULL, 4881 (char *)&sin6, sizeof (sin6_t), 4882 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4883 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4884 } 4885 } else { 4886 ip6h = (ip6_t *)mp->b_rptr; 4887 4888 connp->conn_send = ip_output_v6; 4889 connp->conn_recv = tcp_input; 4890 4891 connp->conn_srcv6 = ip6h->ip6_dst; 4892 connp->conn_remv6 = ip6h->ip6_src; 4893 4894 /* db_cksumstuff is set at ip_fanout_tcp_v6 */ 4895 ifindex = (int)DB_CKSUMSTUFF(mp); 4896 DB_CKSUMSTUFF(mp) = 0; 4897 4898 sin6 = sin6_null; 4899 sin6.sin6_addr = ip6h->ip6_src; 4900 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4901 sin6.sin6_family = AF_INET6; 4902 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4903 sin6.__sin6_src_id = ip_srcid_find_addr(&ip6h->ip6_dst, 4904 lconnp->conn_zoneid, tcps->tcps_netstack); 4905 4906 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4907 /* Pass up the scope_id of the remote addr */ 4908 sin6.sin6_scope_id = ifindex; 4909 } else { 4910 sin6.sin6_scope_id = 0; 4911 } 4912 if (tcp->tcp_recvdstaddr) { 4913 sin6_t sin6d; 4914 4915 sin6d = sin6_null; 4916 sin6d.sin6_addr = ip6h->ip6_dst; 4917 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4918 sin6d.sin6_family = AF_INET; 4919 tpi_mp = mi_tpi_extconn_ind(NULL, 4920 (char *)&sin6d, sizeof (sin6_t), 4921 (char *)&tcp,
(t_scalar_t)sizeof (intptr_t), 4922 (char *)&sin6d, sizeof (sin6_t), 4923 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4924 } else { 4925 tpi_mp = mi_tpi_conn_ind(NULL, 4926 (char *)&sin6, sizeof (sin6_t), 4927 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4928 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4929 } 4930 } 4931 4932 if (tpi_mp == NULL) 4933 return (ENOMEM); 4934 4935 connp->conn_fport = *(uint16_t *)tcph->th_lport; 4936 connp->conn_lport = *(uint16_t *)tcph->th_fport; 4937 connp->conn_flags |= (IPCL_TCP6|IPCL_EAGER); 4938 connp->conn_fully_bound = B_FALSE; 4939 4940 if (tcps->tcps_trace) 4941 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 4942 4943 /* Inherit information from the "parent" */ 4944 tcp->tcp_ipversion = ltcp->tcp_ipversion; 4945 tcp->tcp_family = ltcp->tcp_family; 4946 tcp->tcp_wq = ltcp->tcp_wq; 4947 tcp->tcp_rq = ltcp->tcp_rq; 4948 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 4949 tcp->tcp_detached = B_TRUE; 4950 if ((err = tcp_init_values(tcp)) != 0) { 4951 freemsg(tpi_mp); 4952 return (err); 4953 } 4954 4955 if (ipvers == IPV4_VERSION) { 4956 if ((err = tcp_header_init_ipv4(tcp)) != 0) { 4957 freemsg(tpi_mp); 4958 return (err); 4959 } 4960 ASSERT(tcp->tcp_ipha != NULL); 4961 } else { 4962 /* ifindex must be already set */ 4963 ASSERT(ifindex != 0); 4964 4965 if (ltcp->tcp_bound_if != 0) { 4966 /* 4967 * Set newtcp's bound_if equal to 4968 * listener's value. If ifindex is 4969 * not the same as ltcp->tcp_bound_if, 4970 * it must be a packet for the ipmp group 4971 * of interfaces 4972 */ 4973 tcp->tcp_bound_if = ltcp->tcp_bound_if; 4974 } else if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4975 tcp->tcp_bound_if = ifindex; 4976 } 4977 4978 tcp->tcp_ipv6_recvancillary = ltcp->tcp_ipv6_recvancillary; 4979 tcp->tcp_recvifindex = 0; 4980 tcp->tcp_recvhops = 0xffffffffU; 4981 ASSERT(tcp->tcp_ip6h != NULL); 4982 } 4983 4984 tcp->tcp_lport = ltcp->tcp_lport; 4985 4986 if (ltcp->tcp_ipversion == tcp->tcp_ipversion) { 4987 if (tcp->tcp_iphc_len != ltcp->tcp_iphc_len) { 4988 /* 4989 * Listener had options of some sort; eager inherits. 4990 * Free up the eager template and allocate one 4991 * of the right size. 
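 * (tcp_hdr_grown records which allocator owns tcp_iphc: B_TRUE means
 * it came from kmem_zalloc() and must be kmem_free()d, B_FALSE means
 * it still belongs to the fixed-size tcp_iphc_cache.)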
4992 */ 4993 if (tcp->tcp_hdr_grown) { 4994 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 4995 } else { 4996 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4997 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 4998 } 4999 tcp->tcp_iphc = kmem_zalloc(ltcp->tcp_iphc_len, 5000 KM_NOSLEEP); 5001 if (tcp->tcp_iphc == NULL) { 5002 tcp->tcp_iphc_len = 0; 5003 freemsg(tpi_mp); 5004 return (ENOMEM); 5005 } 5006 tcp->tcp_iphc_len = ltcp->tcp_iphc_len; 5007 tcp->tcp_hdr_grown = B_TRUE; 5008 } 5009 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 5010 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 5011 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5012 tcp->tcp_ip6_hops = ltcp->tcp_ip6_hops; 5013 tcp->tcp_ip6_vcf = ltcp->tcp_ip6_vcf; 5014 5015 /* 5016 * Copy the IP+TCP header template from listener to eager 5017 */ 5018 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 5019 if (tcp->tcp_ipversion == IPV6_VERSION) { 5020 if (((ip6i_t *)(tcp->tcp_iphc))->ip6i_nxt == 5021 IPPROTO_RAW) { 5022 tcp->tcp_ip6h = 5023 (ip6_t *)(tcp->tcp_iphc + 5024 sizeof (ip6i_t)); 5025 } else { 5026 tcp->tcp_ip6h = 5027 (ip6_t *)(tcp->tcp_iphc); 5028 } 5029 tcp->tcp_ipha = NULL; 5030 } else { 5031 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5032 tcp->tcp_ip6h = NULL; 5033 } 5034 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5035 tcp->tcp_ip_hdr_len); 5036 } else { 5037 /* 5038 * only valid case when ipversion of listener and 5039 * eager differ is when listener is IPv6 and 5040 * eager is IPv4. 5041 * Eager header template has been initialized to the 5042 * maximum v4 header sizes, which includes space for 5043 * TCP and IP options. 5044 */ 5045 ASSERT((ltcp->tcp_ipversion == IPV6_VERSION) && 5046 (tcp->tcp_ipversion == IPV4_VERSION)); 5047 ASSERT(tcp->tcp_iphc_len >= 5048 TCP_MAX_COMBINED_HEADER_LENGTH); 5049 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5050 /* copy IP header fields individually */ 5051 tcp->tcp_ipha->ipha_ttl = 5052 ltcp->tcp_ip6h->ip6_hops; 5053 bcopy(ltcp->tcp_tcph->th_lport, 5054 tcp->tcp_tcph->th_lport, sizeof (ushort_t)); 5055 } 5056 5057 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5058 bcopy(tcp->tcp_tcph->th_fport, &tcp->tcp_fport, 5059 sizeof (in_port_t)); 5060 5061 if (ltcp->tcp_lport == 0) { 5062 tcp->tcp_lport = *(in_port_t *)tcph->th_fport; 5063 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, 5064 sizeof (in_port_t)); 5065 } 5066 5067 if (tcp->tcp_ipversion == IPV4_VERSION) { 5068 ASSERT(ipha != NULL); 5069 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5070 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5071 5072 /* Source routing option copyover (reverse it) */ 5073 if (tcps->tcps_rev_src_routes) 5074 tcp_opt_reverse(tcp, ipha); 5075 } else { 5076 ASSERT(ip6h != NULL); 5077 tcp->tcp_ip6h->ip6_dst = ip6h->ip6_src; 5078 tcp->tcp_ip6h->ip6_src = ip6h->ip6_dst; 5079 } 5080 5081 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5082 ASSERT(!tcp->tcp_tconnind_started); 5083 /* 5084 * If the SYN contains a credential, it's a loopback packet; attach 5085 * the credential to the TPI message. 
5086 */ 5087 if ((cr = DB_CRED(idmp)) != NULL) { 5088 mblk_setcred(tpi_mp, cr); 5089 DB_CPID(tpi_mp) = DB_CPID(idmp); 5090 } 5091 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 5092 5093 /* Inherit the listener's SSL protection state */ 5094 5095 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 5096 kssl_hold_ent(tcp->tcp_kssl_ent); 5097 tcp->tcp_kssl_pending = B_TRUE; 5098 } 5099 5100 return (0); 5101 } 5102 5103 5104 int 5105 tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha, 5106 tcph_t *tcph, mblk_t *idmp) 5107 { 5108 tcp_t *ltcp = lconnp->conn_tcp; 5109 tcp_t *tcp = connp->conn_tcp; 5110 sin_t sin; 5111 mblk_t *tpi_mp = NULL; 5112 int err; 5113 cred_t *cr; 5114 tcp_stack_t *tcps = tcp->tcp_tcps; 5115 5116 sin = sin_null; 5117 sin.sin_addr.s_addr = ipha->ipha_src; 5118 sin.sin_port = *(uint16_t *)tcph->th_lport; 5119 sin.sin_family = AF_INET; 5120 if (ltcp->tcp_recvdstaddr) { 5121 sin_t sind; 5122 5123 sind = sin_null; 5124 sind.sin_addr.s_addr = ipha->ipha_dst; 5125 sind.sin_port = *(uint16_t *)tcph->th_fport; 5126 sind.sin_family = AF_INET; 5127 tpi_mp = mi_tpi_extconn_ind(NULL, 5128 (char *)&sind, sizeof (sin_t), (char *)&tcp, 5129 (t_scalar_t)sizeof (intptr_t), (char *)&sind, 5130 sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum); 5131 } else { 5132 tpi_mp = mi_tpi_conn_ind(NULL, 5133 (char *)&sin, sizeof (sin_t), 5134 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 5135 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 5136 } 5137 5138 if (tpi_mp == NULL) { 5139 return (ENOMEM); 5140 } 5141 5142 connp->conn_flags |= (IPCL_TCP4|IPCL_EAGER); 5143 connp->conn_send = ip_output; 5144 connp->conn_recv = tcp_input; 5145 connp->conn_fully_bound = B_FALSE; 5146 5147 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 5148 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 5149 connp->conn_fport = *(uint16_t *)tcph->th_lport; 5150 connp->conn_lport = *(uint16_t *)tcph->th_fport; 5151 5152 if (tcps->tcps_trace) { 5153 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 5154 } 5155 5156 /* Inherit information from the "parent" */ 5157 tcp->tcp_ipversion = ltcp->tcp_ipversion; 5158 tcp->tcp_family = ltcp->tcp_family; 5159 tcp->tcp_wq = ltcp->tcp_wq; 5160 tcp->tcp_rq = ltcp->tcp_rq; 5161 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 5162 tcp->tcp_detached = B_TRUE; 5163 if ((err = tcp_init_values(tcp)) != 0) { 5164 freemsg(tpi_mp); 5165 return (err); 5166 } 5167 5168 /* 5169 * Let's make sure that the eager tcp template has enough space to 5170 * copy the IPv4 listener's tcp template. Since the conn_t structure is 5171 * preserved and tcp_iphc_len is also preserved, an eager conn_t may 5172 * have a tcp_template of total len TCP_MAX_COMBINED_HEADER_LENGTH or 5173 * more (in case of re-allocation of conn_t with tcp-IPv6 template with 5174 * extension headers or with ip6i_t struct). Note that bcopy() below 5175 * copies listener tcp's hdr_len which cannot be greater than TCP_MAX_ 5176 * COMBINED_HEADER_LENGTH as this listener must be an IPv4 listener.
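 *
 * (For scale: a maximal IPv4 header is 60 bytes -- 20 fixed plus up
 * to 40 bytes of options -- and a maximal TCP header is likewise 60,
 * so 120 bytes is the most the v4 template copied below can ever
 * need; that worst case is what TCP_MAX_COMBINED_HEADER_LENGTH is
 * meant to cover.)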
5177 */ 5178 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 5179 ASSERT(ltcp->tcp_hdr_len <= TCP_MAX_COMBINED_HEADER_LENGTH); 5180 5181 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 5182 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 5183 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5184 tcp->tcp_ttl = ltcp->tcp_ttl; 5185 tcp->tcp_tos = ltcp->tcp_tos; 5186 5187 /* Copy the IP+TCP header template from listener to eager */ 5188 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 5189 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5190 tcp->tcp_ip6h = NULL; 5191 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5192 tcp->tcp_ip_hdr_len); 5193 5194 /* Initialize the IP addresses and Ports */ 5195 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5196 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5197 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5198 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, sizeof (in_port_t)); 5199 5200 /* Source routing option copyover (reverse it) */ 5201 if (tcps->tcps_rev_src_routes) 5202 tcp_opt_reverse(tcp, ipha); 5203 5204 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5205 ASSERT(!tcp->tcp_tconnind_started); 5206 5207 /* 5208 * If the SYN contains a credential, it's a loopback packet; attach 5209 * the credential to the TPI message. 5210 */ 5211 if ((cr = DB_CRED(idmp)) != NULL) { 5212 mblk_setcred(tpi_mp, cr); 5213 DB_CPID(tpi_mp) = DB_CPID(idmp); 5214 } 5215 tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp; 5216 5217 /* Inherit the listener's SSL protection state */ 5218 if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) { 5219 kssl_hold_ent(tcp->tcp_kssl_ent); 5220 tcp->tcp_kssl_pending = B_TRUE; 5221 } 5222 5223 return (0); 5224 } 5225 5226 /* 5227 * sets up conn for ipsec. 5228 * if the first mblk is M_CTL it is consumed and mpp is updated. 5229 * in case of error mpp is freed. 
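 *
 * A minimal caller-side sketch of this contract (illustrative only;
 * the real caller is tcp_conn_request() below):
 *
 *	mblk_t *mp = first_mp;			(may be M_CTL + M_DATA)
 *	econnp = tcp_get_ipsec_conn(tcp, sqp, &mp);
 *	if (econnp == NULL)
 *		return;				(mp was already freed)
 *	... on success with an M_CTL present, mp now points at the
 *	M_DATA block and the M_CTL has been consumed ...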
5230 */ 5231 conn_t * 5232 tcp_get_ipsec_conn(tcp_t *tcp, squeue_t *sqp, mblk_t **mpp) 5233 { 5234 conn_t *connp = tcp->tcp_connp; 5235 conn_t *econnp; 5236 squeue_t *new_sqp; 5237 mblk_t *first_mp = *mpp; 5238 mblk_t *mp = *mpp; 5239 boolean_t mctl_present = B_FALSE; 5240 uint_t ipvers; 5241 5242 econnp = tcp_get_conn(sqp, tcp->tcp_tcps); 5243 if (econnp == NULL) { 5244 freemsg(first_mp); 5245 return (NULL); 5246 } 5247 if (DB_TYPE(mp) == M_CTL) { 5248 if (mp->b_cont == NULL || 5249 mp->b_cont->b_datap->db_type != M_DATA) { 5250 freemsg(first_mp); 5251 return (NULL); 5252 } 5253 mp = mp->b_cont; 5254 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) == 0) { 5255 freemsg(first_mp); 5256 return (NULL); 5257 } 5258 5259 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 5260 first_mp->b_datap->db_struioflag &= ~STRUIO_POLICY; 5261 mctl_present = B_TRUE; 5262 } else { 5263 ASSERT(mp->b_datap->db_struioflag & STRUIO_POLICY); 5264 mp->b_datap->db_struioflag &= ~STRUIO_POLICY; 5265 } 5266 5267 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5268 DB_CKSUMSTART(mp) = 0; 5269 5270 ASSERT(OK_32PTR(mp->b_rptr)); 5271 ipvers = IPH_HDR_VERSION(mp->b_rptr); 5272 if (ipvers == IPV4_VERSION) { 5273 uint16_t *up; 5274 uint32_t ports; 5275 ipha_t *ipha; 5276 5277 ipha = (ipha_t *)mp->b_rptr; 5278 up = (uint16_t *)((uchar_t *)ipha + 5279 IPH_HDR_LENGTH(ipha) + TCP_PORTS_OFFSET); 5280 ports = *(uint32_t *)up; 5281 IPCL_TCP_EAGER_INIT(econnp, IPPROTO_TCP, 5282 ipha->ipha_dst, ipha->ipha_src, ports); 5283 } else { 5284 uint16_t *up; 5285 uint32_t ports; 5286 uint16_t ip_hdr_len; 5287 uint8_t *nexthdrp; 5288 ip6_t *ip6h; 5289 tcph_t *tcph; 5290 5291 ip6h = (ip6_t *)mp->b_rptr; 5292 if (ip6h->ip6_nxt == IPPROTO_TCP) { 5293 ip_hdr_len = IPV6_HDR_LEN; 5294 } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip_hdr_len, 5295 &nexthdrp) || *nexthdrp != IPPROTO_TCP) { 5296 CONN_DEC_REF(econnp); 5297 freemsg(first_mp); 5298 return (NULL); 5299 } 5300 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5301 up = (uint16_t *)tcph->th_lport; 5302 ports = *(uint32_t *)up; 5303 IPCL_TCP_EAGER_INIT_V6(econnp, IPPROTO_TCP, 5304 ip6h->ip6_dst, ip6h->ip6_src, ports); 5305 } 5306 5307 /* 5308 * The caller already ensured that there is a sqp present. 5309 */ 5310 econnp->conn_sqp = new_sqp; 5311 5312 if (connp->conn_policy != NULL) { 5313 ipsec_in_t *ii; 5314 ii = (ipsec_in_t *)(first_mp->b_rptr); 5315 ASSERT(ii->ipsec_in_policy == NULL); 5316 IPPH_REFHOLD(connp->conn_policy); 5317 ii->ipsec_in_policy = connp->conn_policy; 5318 5319 first_mp->b_datap->db_type = IPSEC_POLICY_SET; 5320 if (!ip_bind_ipsec_policy_set(econnp, first_mp)) { 5321 CONN_DEC_REF(econnp); 5322 freemsg(first_mp); 5323 return (NULL); 5324 } 5325 } 5326 5327 if (ipsec_conn_cache_policy(econnp, ipvers == IPV4_VERSION) != 0) { 5328 CONN_DEC_REF(econnp); 5329 freemsg(first_mp); 5330 return (NULL); 5331 } 5332 5333 /* 5334 * If we know we have some policy, pass the "IPSEC" 5335 * options size; TCP uses this to adjust the MSS. 5336 */ 5337 econnp->conn_tcp->tcp_ipsec_overhead = conn_ipsec_length(econnp); 5338 if (mctl_present) { 5339 freeb(first_mp); 5340 *mpp = mp; 5341 } 5342 5343 return (econnp); 5344 } 5345 5346 /* 5347 * tcp_get_conn/tcp_free_conn 5348 * 5349 * tcp_get_conn is used to get a clean tcp connection structure. 5350 * It tries to reuse the connections put on the freelist by the 5351 * time_wait_collector, failing which it goes to kmem_cache. This 5352 * way has two benefits compared to just allocating from and 5353 * freeing to kmem_cache.
5354 * 1) The time_wait_collector can free (which includes the cleanup) 5355 * outside the squeue. So when the interrupt comes, we have a clean 5356 * connection sitting in the freelist. Obviously, this buys us 5357 * performance. 5358 * 5359 * 2) Defense against DOS attacks. Allocating a tcp/conn in tcp_conn_request 5360 * has multiple disadvantages - tying up the squeue during alloc, and the 5361 * fact that IPSec policy initialization has to happen here which 5362 * requires us to send an M_CTL and check for it, i.e. real ugliness. 5363 * But allocating the conn/tcp in IP land is also not the best since 5364 * we can't check the 'q' and 'q0' which are protected by squeue and 5365 * blindly allocate memory which might have to be freed here if we are 5366 * not allowed to accept the connection. By using the freelist and 5367 * putting the conn/tcp back in freelist, we don't pay a penalty for 5368 * allocating memory without checking 'q/q0' and freeing it if we can't 5369 * accept the connection. 5370 * 5371 * Care should be taken to put the conn back in the same squeue's freelist 5372 * from which it was allocated. Best results are obtained if conn is 5373 * allocated from listener's squeue and freed to the same. The time wait 5374 * collector will free up the freelist if the connection ends up sitting 5375 * there for too long. 5376 */ 5377 void * 5378 tcp_get_conn(void *arg, tcp_stack_t *tcps) 5379 { 5380 tcp_t *tcp = NULL; 5381 conn_t *connp = NULL; 5382 squeue_t *sqp = (squeue_t *)arg; 5383 tcp_squeue_priv_t *tcp_time_wait; 5384 netstack_t *ns; 5385 5386 tcp_time_wait = 5387 *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP)); 5388 5389 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 5390 tcp = tcp_time_wait->tcp_free_list; 5391 ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0)); 5392 if (tcp != NULL) { 5393 tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next; 5394 tcp_time_wait->tcp_free_list_cnt--; 5395 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 5396 tcp->tcp_time_wait_next = NULL; 5397 connp = tcp->tcp_connp; 5398 connp->conn_flags |= IPCL_REUSED; 5399 5400 ASSERT(tcp->tcp_tcps == NULL); 5401 ASSERT(connp->conn_netstack == NULL); 5402 ns = tcps->tcps_netstack; 5403 netstack_hold(ns); 5404 connp->conn_netstack = ns; 5405 tcp->tcp_tcps = tcps; 5406 TCPS_REFHOLD(tcps); 5407 ipcl_globalhash_insert(connp); 5408 return ((void *)connp); 5409 } 5410 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 5411 if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP, 5412 tcps->tcps_netstack)) == NULL) 5413 return (NULL); 5414 tcp = connp->conn_tcp; 5415 tcp->tcp_tcps = tcps; 5416 TCPS_REFHOLD(tcps); 5417 return ((void *)connp); 5418 } 5419 5420 /* 5421 * Update the cached label for the given tcp_t. This should be called once per 5422 * connection, and before any packets are sent or tcp_process_options is 5423 * invoked. Returns B_FALSE if the correct label could not be constructed.
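 *
 * The expected call pattern, sketched from the use in
 * tcp_conn_request() (shown only for illustration):
 *
 *	if (is_system_labeled() && !tcp_update_label(eager, cr))
 *		goto error;		(tear the eager down, bad label)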
5424 */ 5425 static boolean_t 5426 tcp_update_label(tcp_t *tcp, const cred_t *cr) 5427 { 5428 conn_t *connp = tcp->tcp_connp; 5429 5430 if (tcp->tcp_ipversion == IPV4_VERSION) { 5431 uchar_t optbuf[IP_MAX_OPT_LENGTH]; 5432 int added; 5433 5434 if (tsol_compute_label(cr, tcp->tcp_remote, optbuf, 5435 connp->conn_mac_exempt, 5436 tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0) 5437 return (B_FALSE); 5438 5439 added = tsol_remove_secopt(tcp->tcp_ipha, tcp->tcp_hdr_len); 5440 if (added == -1) 5441 return (B_FALSE); 5442 tcp->tcp_hdr_len += added; 5443 tcp->tcp_tcph = (tcph_t *)((uchar_t *)tcp->tcp_tcph + added); 5444 tcp->tcp_ip_hdr_len += added; 5445 if ((tcp->tcp_label_len = optbuf[IPOPT_OLEN]) != 0) { 5446 tcp->tcp_label_len = (tcp->tcp_label_len + 3) & ~3; 5447 added = tsol_prepend_option(optbuf, tcp->tcp_ipha, 5448 tcp->tcp_hdr_len); 5449 if (added == -1) 5450 return (B_FALSE); 5451 tcp->tcp_hdr_len += added; 5452 tcp->tcp_tcph = (tcph_t *) 5453 ((uchar_t *)tcp->tcp_tcph + added); 5454 tcp->tcp_ip_hdr_len += added; 5455 } 5456 } else { 5457 uchar_t optbuf[TSOL_MAX_IPV6_OPTION]; 5458 5459 if (tsol_compute_label_v6(cr, &tcp->tcp_remote_v6, optbuf, 5460 connp->conn_mac_exempt, 5461 tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0) 5462 return (B_FALSE); 5463 if (tsol_update_sticky(&tcp->tcp_sticky_ipp, 5464 &tcp->tcp_label_len, optbuf) != 0) 5465 return (B_FALSE); 5466 if (tcp_build_hdrs(tcp->tcp_rq, tcp) != 0) 5467 return (B_FALSE); 5468 } 5469 5470 connp->conn_ulp_labeled = 1; 5471 5472 return (B_TRUE); 5473 } 5474 5475 /* BEGIN CSTYLED */ 5476 /* 5477 * 5478 * The sockfs ACCEPT path: 5479 * ======================= 5480 * 5481 * The eager is now established in its own perimeter as soon as SYN is 5482 * received in tcp_conn_request(). When sockfs receives conn_ind, it 5483 * completes the accept processing on the acceptor STREAM. The sending 5484 * of the conn_ind part is common for both a sockfs listener and a TLI/XTI 5485 * listener but a TLI/XTI listener completes the accept processing 5486 * on the listener perimeter. 5487 * 5488 * Common control flow for 3 way handshake: 5489 * ---------------------------------------- 5490 * 5491 * incoming SYN (listener perimeter) -> tcp_rput_data() 5492 * -> tcp_conn_request() 5493 * 5494 * incoming SYN-ACK-ACK (eager perim) -> tcp_rput_data() 5495 * send T_CONN_IND (listener perim) -> tcp_send_conn_ind() 5496 * 5497 * Sockfs ACCEPT Path: 5498 * ------------------- 5499 * 5500 * open acceptor stream (tcp_open allocates tcp_wput_accept() 5501 * as STREAM entry point) 5502 * 5503 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_wput_accept() 5504 * 5505 * tcp_wput_accept() extracts the eager and makes the q->q_ptr <-> eager 5506 * association (we are not behind eager's squeue but sockfs is protecting us 5507 * and no one knows about this stream yet). The STREAMS entry point q->q_info 5508 * is changed to point at tcp_wput(). 5509 * 5510 * tcp_wput_accept() sends any deferred eagers via tcp_send_pending() to 5511 * listener (done on listener's perimeter). 5512 * 5513 * tcp_wput_accept() calls tcp_accept_finish() on the eager's perimeter to 5514 * finish accept. 5515 * 5516 * TLI/XTI client ACCEPT path: 5517 * --------------------------- 5518 * 5519 * soaccept() sends T_CONN_RES on the listener STREAM. 5520 * 5521 * tcp_accept() -> tcp_accept_swap() complete the processing and send 5522 * the bind_mp to the eager's perimeter to finish accept (tcp_rput_other()).
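 *
 * Condensed (illustrative) view of the sockfs hand-off described above:
 *
 *	soaccept()                        acceptor STREAM
 *	  -> tcp_wput_accept()            swap q->q_ptr to the eager
 *	    -> tcp_send_pending()         listener's perimeter
 *	    -> tcp_accept_finish()        eager's perimeter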
5523 * 5524 * Locks: 5525 * ====== 5526 * 5527 * listener->tcp_eager_lock protects the listeners->tcp_eager_next_q0 5528 * and listeners->tcp_eager_next_q. 5529 * 5530 * Referencing: 5531 * ============ 5532 * 5533 * 1) We start out in tcp_conn_request by the eager placing a ref on the 5534 * listener and the listener adding the eager to listeners->tcp_eager_next_q0. 5535 * 5536 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to the listener. Before 5537 * doing so we place a ref on the eager. This ref is finally dropped at the 5538 * end of tcp_accept_finish() while unwinding from the squeue, i.e. the 5539 * reference is dropped by the squeue framework. 5540 * 5541 * 3) The ref on the listener placed in 1 above is dropped in 5542 * tcp_accept_finish. 5543 * 5544 * The reference must be released by the same entity that added the reference. 5545 * In the above scheme, the eager is the entity that adds and releases the 5546 * references. Note that tcp_accept_finish executes in the squeue of the eager 5547 * (albeit after it is attached to the acceptor stream). Though 1. executes 5548 * in the listener's squeue, the eager is nascent at this point and the 5549 * reference can be considered to have been added on behalf of the eager. 5550 * 5551 * Eager getting a Reset or listener closing: 5552 * ========================================== 5553 * 5554 * Once the listener and eager are linked, the listener never does the unlink. 5555 * If the listener needs to close, tcp_eager_cleanup() is called which queues 5556 * a message on each eager's perimeter. The eager then does the unlink, clears 5557 * any pointers to the listener's queue and drops the reference to the 5558 * listener. The listener waits in tcp_close outside the squeue until its 5559 * refcount has dropped to 1. This ensures that the listener has waited for 5560 * all eagers to clear their association with the listener. 5561 * 5562 * Similarly, if the eager decides to go away, it can unlink itself and close. 5563 * When the T_CONN_RES comes down, we check if the eager has closed. Note that 5564 * the reference to the eager is still valid because of the extra ref we put 5565 * in tcp_send_conn_ind. 5566 * 5567 * The listener can always locate the eager under the protection 5568 * of the listener->tcp_eager_lock, and then do a refhold 5569 * on the eager during the accept processing. 5570 * 5571 * The acceptor stream accesses the eager in the accept processing 5572 * based on the ref placed on the eager before sending T_conn_ind. 5573 * The only entity that can negate this refhold is a listener close 5574 * which is mutually exclusive with an active acceptor stream. 5575 * 5576 * Eager's reference on the listener 5577 * =================================== 5578 * 5579 * If the accept happens (even on a closed eager) the eager drops its 5580 * reference on the listener at the start of tcp_accept_finish. If the 5581 * eager is killed due to an incoming RST before the T_conn_ind is sent up, 5582 * the reference is dropped in tcp_closei_local. If the listener closes, 5583 * the reference is dropped in tcp_eager_kill. In all cases the reference 5584 * is dropped while executing in the eager's context (squeue). 5585 */ 5586 /* END CSTYLED */ 5587 5588 /* Process the SYN packet, mp, directed at the listener 'tcp' */ 5589 5590 /* 5591 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN. 5592 * tcp_rput_data will not see any SYN packets.
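 *
 * For illustration, the direct hand-off from IP looks roughly like
 * (the tag argument is hypothetical):
 *
 *	squeue_fill(connp->conn_sqp, mp, tcp_conn_request, connp, tag);
 *
 * so we run with arg = the listener's conn_t, mp = the SYN segment
 * and arg2 = the squeue, and never pass through tcp_rput_data().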
5592 */ 5593 /* ARGSUSED */ 5594 void 5595 tcp_conn_request(void *arg, mblk_t *mp, void *arg2) 5596 { 5597 tcph_t *tcph; 5598 uint32_t seg_seq; 5599 tcp_t *eager; 5600 uint_t ipvers; 5601 ipha_t *ipha; 5602 ip6_t *ip6h; 5603 int err; 5604 conn_t *econnp = NULL; 5605 squeue_t *new_sqp; 5606 mblk_t *mp1; 5607 uint_t ip_hdr_len; 5608 conn_t *connp = (conn_t *)arg; 5609 tcp_t *tcp = connp->conn_tcp; 5610 cred_t *credp; 5611 tcp_stack_t *tcps = tcp->tcp_tcps; 5612 ip_stack_t *ipst; 5613 5614 if (tcp->tcp_state != TCPS_LISTEN) 5615 goto error2; 5616 5617 ASSERT((tcp->tcp_connp->conn_flags & IPCL_BOUND) != 0); 5618 5619 mutex_enter(&tcp->tcp_eager_lock); 5620 if (tcp->tcp_conn_req_cnt_q >= tcp->tcp_conn_req_max) { 5621 mutex_exit(&tcp->tcp_eager_lock); 5622 TCP_STAT(tcps, tcp_listendrop); 5623 BUMP_MIB(&tcps->tcps_mib, tcpListenDrop); 5624 if (tcp->tcp_debug) { 5625 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 5626 "tcp_conn_request: listen backlog (max=%d) " 5627 "overflow (%d pending) on %s", 5628 tcp->tcp_conn_req_max, tcp->tcp_conn_req_cnt_q, 5629 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 5630 } 5631 goto error2; 5632 } 5633 5634 if (tcp->tcp_conn_req_cnt_q0 >= 5635 tcp->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) { 5636 /* 5637 * Q0 is full. Drop a pending half-open req from the queue 5638 * to make room for the new SYN req. Also mark the time we 5639 * drop a SYN. 5640 * 5641 * A more aggressive defense against SYN attack will 5642 * be to set the "tcp_syn_defense" flag now. 5643 */ 5644 TCP_STAT(tcps, tcp_listendropq0); 5645 tcp->tcp_last_rcv_lbolt = lbolt64; 5646 if (!tcp_drop_q0(tcp)) { 5647 mutex_exit(&tcp->tcp_eager_lock); 5648 BUMP_MIB(&tcps->tcps_mib, tcpListenDropQ0); 5649 if (tcp->tcp_debug) { 5650 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 5651 "tcp_conn_request: listen half-open queue " 5652 "(max=%d) full (%d pending) on %s", 5653 tcps->tcps_conn_req_max_q0, 5654 tcp->tcp_conn_req_cnt_q0, 5655 tcp_display(tcp, NULL, 5656 DISP_PORT_ONLY)); 5657 } 5658 goto error2; 5659 } 5660 } 5661 mutex_exit(&tcp->tcp_eager_lock); 5662 5663 /* 5664 * IP adds STRUIO_EAGER and ensures that the received packet is 5665 * M_DATA even if conn_ipv6_recvpktinfo is enabled or for ip6 5666 * link local address. If IPSec is enabled, db_struioflag has 5667 * STRUIO_POLICY set (mutually exclusive from STRUIO_EAGER); 5668 * otherwise an error case if neither of them is set. 5669 */ 5670 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 5671 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 5672 DB_CKSUMSTART(mp) = 0; 5673 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 5674 econnp = (conn_t *)tcp_get_conn(arg2, tcps); 5675 if (econnp == NULL) 5676 goto error2; 5677 ASSERT(econnp->conn_netstack == connp->conn_netstack); 5678 econnp->conn_sqp = new_sqp; 5679 } else if ((mp->b_datap->db_struioflag & STRUIO_POLICY) != 0) { 5680 /* 5681 * mp is updated in tcp_get_ipsec_conn(). 5682 */ 5683 econnp = tcp_get_ipsec_conn(tcp, arg2, &mp); 5684 if (econnp == NULL) { 5685 /* 5686 * mp freed by tcp_get_ipsec_conn. 
5687 */ 5688 return; 5689 } 5690 ASSERT(econnp->conn_netstack == connp->conn_netstack); 5691 } else { 5692 goto error2; 5693 } 5694 5695 ASSERT(DB_TYPE(mp) == M_DATA); 5696 5697 ipvers = IPH_HDR_VERSION(mp->b_rptr); 5698 ASSERT(ipvers == IPV6_VERSION || ipvers == IPV4_VERSION); 5699 ASSERT(OK_32PTR(mp->b_rptr)); 5700 if (ipvers == IPV4_VERSION) { 5701 ipha = (ipha_t *)mp->b_rptr; 5702 ip_hdr_len = IPH_HDR_LENGTH(ipha); 5703 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5704 } else { 5705 ip6h = (ip6_t *)mp->b_rptr; 5706 ip_hdr_len = ip_hdr_length_v6(mp, ip6h); 5707 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 5708 } 5709 5710 if (tcp->tcp_family == AF_INET) { 5711 ASSERT(ipvers == IPV4_VERSION); 5712 err = tcp_conn_create_v4(connp, econnp, ipha, tcph, mp); 5713 } else { 5714 err = tcp_conn_create_v6(connp, econnp, mp, tcph, ipvers, mp); 5715 } 5716 5717 if (err) 5718 goto error3; 5719 5720 eager = econnp->conn_tcp; 5721 5722 /* Inherit various TCP parameters from the listener */ 5723 eager->tcp_naglim = tcp->tcp_naglim; 5724 eager->tcp_first_timer_threshold = 5725 tcp->tcp_first_timer_threshold; 5726 eager->tcp_second_timer_threshold = 5727 tcp->tcp_second_timer_threshold; 5728 5729 eager->tcp_first_ctimer_threshold = 5730 tcp->tcp_first_ctimer_threshold; 5731 eager->tcp_second_ctimer_threshold = 5732 tcp->tcp_second_ctimer_threshold; 5733 5734 /* 5735 * tcp_adapt_ire() may change tcp_rwnd according to the ire metrics. 5736 * If it does not, the eager's receive window will be set to the 5737 * listener's receive window later in this function. 5738 */ 5739 eager->tcp_rwnd = 0; 5740 5741 /* 5742 * Inherit listener's tcp_init_cwnd. Need to do this before 5743 * calling tcp_process_options() where tcp_mss_set() is called 5744 * to set the initial cwnd. 5745 */ 5746 eager->tcp_init_cwnd = tcp->tcp_init_cwnd; 5747 5748 /* 5749 * Zones: tcp_adapt_ire() and tcp_send_data() both need the 5750 * zone id before the accept is completed in tcp_wput_accept(). 5751 */ 5752 econnp->conn_zoneid = connp->conn_zoneid; 5753 econnp->conn_allzones = connp->conn_allzones; 5754 5755 /* Copy nexthop information from listener to eager */ 5756 if (connp->conn_nexthop_set) { 5757 econnp->conn_nexthop_set = connp->conn_nexthop_set; 5758 econnp->conn_nexthop_v4 = connp->conn_nexthop_v4; 5759 } 5760 5761 /* 5762 * TSOL: tsol_input_proc() needs the eager's cred before the 5763 * eager is accepted 5764 */ 5765 econnp->conn_cred = eager->tcp_cred = credp = connp->conn_cred; 5766 crhold(credp); 5767 5768 /* 5769 * If the caller has the process-wide flag set, then default to MAC 5770 * exempt mode. This allows read-down to unlabeled hosts. 
5771 */ 5772 if (getpflags(NET_MAC_AWARE, credp) != 0) 5773 econnp->conn_mac_exempt = B_TRUE; 5774 5775 if (is_system_labeled()) { 5776 cred_t *cr; 5777 5778 if (connp->conn_mlp_type != mlptSingle) { 5779 cr = econnp->conn_peercred = DB_CRED(mp); 5780 if (cr != NULL) 5781 crhold(cr); 5782 else 5783 cr = econnp->conn_cred; 5784 DTRACE_PROBE2(mlp_syn_accept, conn_t *, 5785 econnp, cred_t *, cr) 5786 } else { 5787 cr = econnp->conn_cred; 5788 DTRACE_PROBE2(syn_accept, conn_t *, 5789 econnp, cred_t *, cr) 5790 } 5791 5792 if (!tcp_update_label(eager, cr)) { 5793 DTRACE_PROBE3( 5794 tx__ip__log__error__connrequest__tcp, 5795 char *, "eager connp(1) label on SYN mp(2) failed", 5796 conn_t *, econnp, mblk_t *, mp); 5797 goto error3; 5798 } 5799 } 5800 5801 eager->tcp_hard_binding = B_TRUE; 5802 5803 tcp_bind_hash_insert(&tcps->tcps_bind_fanout[ 5804 TCP_BIND_HASH(eager->tcp_lport)], eager, 0); 5805 5806 CL_INET_CONNECT(eager); 5807 5808 /* 5809 * No need to check for multicast destination since ip will only pass 5810 * up multicasts to those that have expressed interest 5811 * TODO: what about rejecting broadcasts? 5812 * Also check that source is not a multicast or broadcast address. 5813 */ 5814 eager->tcp_state = TCPS_SYN_RCVD; 5815 5816 5817 /* 5818 * There should be no ire in the mp as we are being called after 5819 * receiving the SYN. 5820 */ 5821 ASSERT(tcp_ire_mp(mp) == NULL); 5822 5823 /* 5824 * Adapt our mss, ttl, ... according to information provided in IRE. 5825 */ 5826 5827 if (tcp_adapt_ire(eager, NULL) == 0) { 5828 /* Undo the bind_hash_insert */ 5829 tcp_bind_hash_remove(eager); 5830 goto error3; 5831 } 5832 5833 /* Process all TCP options. */ 5834 tcp_process_options(eager, tcph); 5835 5836 /* Is the other end ECN capable? */ 5837 if (tcps->tcps_ecn_permitted >= 1 && 5838 (tcph->th_flags[0] & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) { 5839 eager->tcp_ecn_ok = B_TRUE; 5840 } 5841 5842 /* 5843 * listener->tcp_rq->q_hiwat should be the default window size or a 5844 * window size changed via SO_RCVBUF option. First round up the 5845 * eager's tcp_rwnd to the nearest MSS. Then find out the window 5846 * scale option value if needed. Call tcp_rwnd_set() to finish the 5847 * setting. 5848 * 5849 * Note if there is a rpipe metric associated with the remote host, 5850 * we should not inherit receive window size from listener. 5851 */ 5852 eager->tcp_rwnd = MSS_ROUNDUP( 5853 (eager->tcp_rwnd == 0 ? tcp->tcp_rq->q_hiwat : 5854 eager->tcp_rwnd), eager->tcp_mss); 5855 if (eager->tcp_snd_ws_ok) 5856 tcp_set_ws_value(eager); 5857 /* 5858 * Note that this is the only place tcp_rwnd_set() is called for 5859 * accepting a connection. We need to call it here instead of 5860 * after the 3-way handshake because we need to tell the other 5861 * side our rwnd in the SYN-ACK segment. 5862 */ 5863 (void) tcp_rwnd_set(eager, eager->tcp_rwnd); 5864 5865 /* 5866 * We eliminate the need for sockfs to send down a T_SVR4_OPTMGMT_REQ 5867 * via soaccept()->soinheritoptions() which essentially applies 5868 * all the listener options to the new STREAM. The options that we 5869 * need to take care of are: 5870 * SO_DEBUG, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE, SO_BROADCAST, 5871 * SO_USELOOPBACK, SO_OOBINLINE, SO_DGRAM_ERRIND, SO_LINGER, 5872 * SO_SNDBUF, SO_RCVBUF. 5873 * 5874 * SO_RCVBUF: tcp_rwnd_set() above takes care of it. 5875 * SO_SNDBUF: Set the tcp_xmit_hiwater for the eager. When 5876 * tcp_maxpsz_set() gets called later from 5877 * tcp_accept_finish(), the option takes effect. 
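 *
 * Worked example (illustrative values only): with a listener q_hiwat
 * of 49152 bytes and an eager MSS of 1460, MSS_ROUNDUP() above yields
 * 34 * 1460 = 49640 bytes, which tcp_rwnd_set() then advertises
 * (window-scaled if tcp_snd_ws_ok) in the SYN-ACK.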
5878 * 5879 */ 5880 /* Set the TCP options */ 5881 eager->tcp_xmit_hiwater = tcp->tcp_xmit_hiwater; 5882 eager->tcp_dgram_errind = tcp->tcp_dgram_errind; 5883 eager->tcp_oobinline = tcp->tcp_oobinline; 5884 eager->tcp_reuseaddr = tcp->tcp_reuseaddr; 5885 eager->tcp_broadcast = tcp->tcp_broadcast; 5886 eager->tcp_useloopback = tcp->tcp_useloopback; 5887 eager->tcp_dontroute = tcp->tcp_dontroute; 5888 eager->tcp_linger = tcp->tcp_linger; 5889 eager->tcp_lingertime = tcp->tcp_lingertime; 5890 if (tcp->tcp_ka_enabled) 5891 eager->tcp_ka_enabled = 1; 5892 5893 /* Set the IP options */ 5894 econnp->conn_broadcast = connp->conn_broadcast; 5895 econnp->conn_loopback = connp->conn_loopback; 5896 econnp->conn_dontroute = connp->conn_dontroute; 5897 econnp->conn_reuseaddr = connp->conn_reuseaddr; 5898 5899 /* Put a ref on the listener for the eager. */ 5900 CONN_INC_REF(connp); 5901 mutex_enter(&tcp->tcp_eager_lock); 5902 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = eager; 5903 eager->tcp_eager_next_q0 = tcp->tcp_eager_next_q0; 5904 tcp->tcp_eager_next_q0 = eager; 5905 eager->tcp_eager_prev_q0 = tcp; 5906 5907 /* Set tcp_listener before adding it to tcp_conn_fanout */ 5908 eager->tcp_listener = tcp; 5909 eager->tcp_saved_listener = tcp; 5910 5911 /* 5912 * Tag this detached tcp vector for later retrieval 5913 * by our listener client in tcp_accept(). 5914 */ 5915 eager->tcp_conn_req_seqnum = tcp->tcp_conn_req_seqnum; 5916 tcp->tcp_conn_req_cnt_q0++; 5917 if (++tcp->tcp_conn_req_seqnum == -1) { 5918 /* 5919 * -1 is "special" and defined in TPI as something 5920 * that should never be used in T_CONN_IND 5921 */ 5922 ++tcp->tcp_conn_req_seqnum; 5923 } 5924 mutex_exit(&tcp->tcp_eager_lock); 5925 5926 if (tcp->tcp_syn_defense) { 5927 /* Don't drop the SYN that comes from a good IP source */ 5928 ipaddr_t *addr_cache = (ipaddr_t *)(tcp->tcp_ip_addr_cache); 5929 if (addr_cache != NULL && eager->tcp_remote == 5930 addr_cache[IP_ADDR_CACHE_HASH(eager->tcp_remote)]) { 5931 eager->tcp_dontdrop = B_TRUE; 5932 } 5933 } 5934 5935 /* 5936 * We need to insert the eager in its own perimeter but as soon 5937 * as we do that, we expose the eager to the classifier and 5938 * should not touch any field outside the eager's perimeter. 5939 * So do all the work necessary before inserting the eager 5940 * in its own perimeter. Be optimistic that ipcl_conn_insert() 5941 * will succeed but undo everything if it fails. 5942 */ 5943 seg_seq = ABE32_TO_U32(tcph->th_seq); 5944 eager->tcp_irs = seg_seq; 5945 eager->tcp_rack = seg_seq; 5946 eager->tcp_rnxt = seg_seq + 1; 5947 U32_TO_ABE32(eager->tcp_rnxt, eager->tcp_tcph->th_ack); 5948 BUMP_MIB(&tcps->tcps_mib, tcpPassiveOpens); 5949 eager->tcp_state = TCPS_SYN_RCVD; 5950 mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss, 5951 NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE); 5952 if (mp1 == NULL) { 5953 /* 5954 * Increment the ref count as we are going to 5955 * enqueue an mp in the squeue 5956 */ 5957 CONN_INC_REF(econnp); 5958 goto error; 5959 } 5960 DB_CPID(mp1) = tcp->tcp_cpid; 5961 eager->tcp_cpid = tcp->tcp_cpid; 5962 eager->tcp_open_time = lbolt64; 5963 5964 /* 5965 * We need to start the rto timer. In the normal case, we start 5966 * the timer after sending the packet on the wire (or at 5967 * least believing that packet was sent by waiting for 5968 * CALL_IP_WPUT() to return).
Since this is the first packet 5969 * being sent on the wire for the eager, our initial tcp_rto 5970 * is at least tcp_rexmit_interval_min which is a fairly 5971 * large value to allow the algorithm to adjust slowly to large 5972 * fluctuations of RTT during the first few transmissions. 5973 * 5974 * Starting the timer first and then sending the packet in this 5975 * case shouldn't make much difference since tcp_rexmit_interval_min 5976 * is of the order of several 100ms and starting the timer 5977 * first and then sending the packet will result in a difference 5978 * of a few microseconds. 5979 * 5980 * Without this optimization, we are forced to hold the fanout 5981 * lock across the ipcl_bind_insert() and sending the packet 5982 * so that we don't race against an incoming packet (maybe RST) 5983 * for this eager. 5984 * 5985 * It is necessary to acquire an extra reference on the eager 5986 * at this point and hold it until after tcp_send_data() to 5987 * ensure against an eager close race. 5988 */ 5989 5990 CONN_INC_REF(eager->tcp_connp); 5991 5992 TCP_RECORD_TRACE(eager, mp1, TCP_TRACE_SEND_PKT); 5993 TCP_TIMER_RESTART(eager, eager->tcp_rto); 5994 5995 5996 /* 5997 * Insert the eager in its own perimeter now. We are ready to deal 5998 * with any packets on eager. 5999 */ 6000 if (eager->tcp_ipversion == IPV4_VERSION) { 6001 if (ipcl_conn_insert(econnp, IPPROTO_TCP, 0, 0, 0) != 0) { 6002 goto error; 6003 } 6004 } else { 6005 if (ipcl_conn_insert_v6(econnp, IPPROTO_TCP, 0, 0, 0, 0) != 0) { 6006 goto error; 6007 } 6008 } 6009 6010 /* mark conn as fully-bound */ 6011 econnp->conn_fully_bound = B_TRUE; 6012 6013 /* Send the SYN-ACK */ 6014 tcp_send_data(eager, eager->tcp_wq, mp1); 6015 CONN_DEC_REF(eager->tcp_connp); 6016 freemsg(mp); 6017 6018 return; 6019 error: 6020 freemsg(mp1); 6021 eager->tcp_closemp_used = B_TRUE; 6022 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 6023 squeue_fill(econnp->conn_sqp, &eager->tcp_closemp, tcp_eager_kill, 6024 econnp, SQTAG_TCP_CONN_REQ_2); 6025 6026 /* 6027 * If a connection already exists, send the mp to that connection so 6028 * that it can be appropriately dealt with. 6029 */ 6030 ipst = tcps->tcps_netstack->netstack_ip; 6031 6032 if ((econnp = ipcl_classify(mp, connp->conn_zoneid, ipst)) != NULL) { 6033 if (!IPCL_IS_CONNECTED(econnp)) { 6034 /* 6035 * Something bad happened. ipcl_conn_insert() 6036 * failed because a connection already existed 6037 * in connected hash but we can't find it 6038 * anymore (someone blew it away). Just 6039 * free this message and hopefully remote 6040 * will retransmit at which time the SYN can be 6041 * treated as a new connection or dealt with 6042 * via a TH_RST if a connection already exists. 6043 */ 6044 CONN_DEC_REF(econnp); 6045 freemsg(mp); 6046 } else { 6047 squeue_fill(econnp->conn_sqp, mp, tcp_input, 6048 econnp, SQTAG_TCP_CONN_REQ_1); 6049 } 6050 } else { 6051 /* Nobody wants this packet */ 6052 freemsg(mp); 6053 } 6054 return; 6055 error3: 6056 CONN_DEC_REF(econnp); 6057 error2: 6058 freemsg(mp); 6059 } 6060 6061 /* 6062 * In an ideal case of vertical partition in NUMA architecture, it's 6063 * beneficial to have the listener and all the incoming connections 6064 * tied to the same squeue. The other constraint is that incoming 6065 * connections should be tied to the squeue attached to the interrupted 6066 * CPU for obvious locality reasons so this leaves the listener to 6067 * be tied to the same squeue.
Our only problem is that when the listener 6068 * is binding, the CPU that will get interrupted by the NIC whose 6069 * IP address the listener is binding to is not even known. So 6070 * the code below allows us to change that binding at the time the 6071 * CPU is interrupted by virtue of incoming connection's squeue. 6072 * 6073 * This is useful only in case of a listener bound to a specific IP 6074 * address. For other kinds of listeners, they get bound the 6075 * very first time and there is no attempt to rebind them. 6076 */ 6077 void 6078 tcp_conn_request_unbound(void *arg, mblk_t *mp, void *arg2) 6079 { 6080 conn_t *connp = (conn_t *)arg; 6081 squeue_t *sqp = (squeue_t *)arg2; 6082 squeue_t *new_sqp; 6083 uint32_t conn_flags; 6084 6085 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 6086 new_sqp = (squeue_t *)DB_CKSUMSTART(mp); 6087 } else { 6088 goto done; 6089 } 6090 6091 if (connp->conn_fanout == NULL) 6092 goto done; 6093 6094 if (!(connp->conn_flags & IPCL_FULLY_BOUND)) { 6095 mutex_enter(&connp->conn_fanout->connf_lock); 6096 mutex_enter(&connp->conn_lock); 6097 /* 6098 * No one from read or write side can access us now 6099 * except for already queued packets on this squeue. 6100 * But since we haven't changed the squeue yet, they 6101 * can't execute. If they are processed after we have 6102 * changed the squeue, they are sent back to the 6103 * correct squeue down below. 6104 * But a listener close can race with processing of 6105 * incoming SYN. If incoming SYN processing changes 6106 * the squeue then the listener close which is waiting 6107 * to enter the squeue would operate on the wrong 6108 * squeue. Hence we don't change the squeue here unless 6109 * the refcount is exactly the minimum refcount. The 6110 * minimum refcount of 4 is counted as: 1 each for 6111 * TCP and IP, 1 for being in the classifier hash, and 6112 * 1 for the mblk being processed. 6113 */ 6114 6115 if (connp->conn_ref != 4 || 6116 connp->conn_tcp->tcp_state != TCPS_LISTEN) { 6117 mutex_exit(&connp->conn_lock); 6118 mutex_exit(&connp->conn_fanout->connf_lock); 6119 goto done; 6120 } 6121 if (connp->conn_sqp != new_sqp) { 6122 while (connp->conn_sqp != new_sqp) 6123 (void) casptr(&connp->conn_sqp, sqp, new_sqp); 6124 } 6125 6126 do { 6127 conn_flags = connp->conn_flags; 6128 conn_flags |= IPCL_FULLY_BOUND; 6129 (void) cas32(&connp->conn_flags, connp->conn_flags, 6130 conn_flags); 6131 } while (!(connp->conn_flags & IPCL_FULLY_BOUND)); 6132 6133 mutex_exit(&connp->conn_fanout->connf_lock); 6134 mutex_exit(&connp->conn_lock); 6135 } 6136 6137 done: 6138 if (connp->conn_sqp != sqp) { 6139 CONN_INC_REF(connp); 6140 squeue_fill(connp->conn_sqp, mp, 6141 connp->conn_recv, connp, SQTAG_TCP_CONN_REQ_UNBOUND); 6142 } else { 6143 tcp_conn_request(connp, mp, sqp); 6144 } 6145 } 6146 6147 /* 6148 * Successful connect request processing begins when our client passes 6149 * a T_CONN_REQ message into tcp_wput() and ends when tcp_rput() passes 6150 * our T_OK_ACK reply message upstream.
The control flow looks like this: 6151 * upstream -> tcp_wput() -> tcp_wput_proto() -> tcp_connect() -> IP 6152 * upstream <- tcp_rput() <- IP 6153 * After various error checks are completed, tcp_connect() lays 6154 * the target address and port into the composite header template, 6155 * preallocates the T_OK_ACK reply message, constructs a full 12 byte bind 6156 * request followed by an IRE request, and passes the three mblk message 6157 * down to IP looking like this: 6158 * O_T_BIND_REQ for IP --> IRE req --> T_OK_ACK for our client 6159 * Processing continues in tcp_rput() when we receive the following message: 6160 * T_BIND_ACK from IP --> IRE ack --> T_OK_ACK for our client 6161 * After consuming the first two mblks, tcp_rput() calls tcp_timer(), 6162 * to fire off the connection request, and then passes the T_OK_ACK mblk 6163 * upstream that we filled in below. There are, of course, numerous 6164 * error conditions along the way which truncate the processing described 6165 * above. 6166 */ 6167 static void 6168 tcp_connect(tcp_t *tcp, mblk_t *mp) 6169 { 6170 sin_t *sin; 6171 sin6_t *sin6; 6172 queue_t *q = tcp->tcp_wq; 6173 struct T_conn_req *tcr; 6174 ipaddr_t *dstaddrp; 6175 in_port_t dstport; 6176 uint_t srcid; 6177 6178 tcr = (struct T_conn_req *)mp->b_rptr; 6179 6180 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 6181 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) { 6182 tcp_err_ack(tcp, mp, TPROTO, 0); 6183 return; 6184 } 6185 6186 /* 6187 * Determine packet type based on the type of address passed in; 6188 * the request should contain an IPv4 or IPv6 address. 6189 * Make sure that the address family matches the 6190 * family of the address passed down. 6191 */ 6192 switch (tcr->DEST_length) { 6193 default: 6194 tcp_err_ack(tcp, mp, TBADADDR, 0); 6195 return; 6196 6197 case (sizeof (sin_t) - sizeof (sin->sin_zero)): { 6198 /* 6199 * XXX: The check for valid DEST_length was not there 6200 * in earlier releases and some buggy 6201 * TLI apps (e.g. Sybase) got away with not feeding 6202 * in sin_zero part of address. 6203 * We allow that bug to keep those buggy apps humming. 6204 * Test suites require the check on DEST_length. 6205 * We construct a new mblk with valid DEST_length and 6206 * free the original so the rest of the code does 6207 * not have to keep track of this special shorter 6208 * length address case.
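 *
 * Concretely: sizeof (sin_t) is 16 bytes (2 bytes family, 2 port,
 * 4 address, 8 sin_zero), so such apps pass only the first 8 bytes
 * and the code below rebuilds the missing sin_zero.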
6209 */ 6210 mblk_t *nmp; 6211 struct T_conn_req *ntcr; 6212 sin_t *nsin; 6213 6214 nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) + 6215 tcr->OPT_length, BPRI_HI); 6216 if (nmp == NULL) { 6217 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 6218 return; 6219 } 6220 ntcr = (struct T_conn_req *)nmp->b_rptr; 6221 bzero(ntcr, sizeof (struct T_conn_req)); /* zero fill */ 6222 ntcr->PRIM_type = T_CONN_REQ; 6223 ntcr->DEST_length = sizeof (sin_t); 6224 ntcr->DEST_offset = sizeof (struct T_conn_req); 6225 6226 nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset); 6227 *nsin = sin_null; 6228 /* Get pointer to shorter address to copy from original mp */ 6229 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6230 tcr->DEST_length); /* extract DEST_length worth of sin_t */ 6231 if (sin == NULL || !OK_32PTR((char *)sin)) { 6232 freemsg(nmp); 6233 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6234 return; 6235 } 6236 nsin->sin_family = sin->sin_family; 6237 nsin->sin_port = sin->sin_port; 6238 nsin->sin_addr = sin->sin_addr; 6239 /* Note:nsin->sin_zero zero-fill with sin_null assign above */ 6240 nmp->b_wptr = (uchar_t *)&nsin[1]; 6241 if (tcr->OPT_length != 0) { 6242 ntcr->OPT_length = tcr->OPT_length; 6243 ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr; 6244 bcopy((uchar_t *)tcr + tcr->OPT_offset, 6245 (uchar_t *)ntcr + ntcr->OPT_offset, 6246 tcr->OPT_length); 6247 nmp->b_wptr += tcr->OPT_length; 6248 } 6249 freemsg(mp); /* original mp freed */ 6250 mp = nmp; /* re-initialize original variables */ 6251 tcr = ntcr; 6252 } 6253 /* FALLTHRU */ 6254 6255 case sizeof (sin_t): 6256 sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset, 6257 sizeof (sin_t)); 6258 if (sin == NULL || !OK_32PTR((char *)sin)) { 6259 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6260 return; 6261 } 6262 if (tcp->tcp_family != AF_INET || 6263 sin->sin_family != AF_INET) { 6264 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6265 return; 6266 } 6267 if (sin->sin_port == 0) { 6268 tcp_err_ack(tcp, mp, TBADADDR, 0); 6269 return; 6270 } 6271 if (tcp->tcp_connp && tcp->tcp_connp->conn_ipv6_v6only) { 6272 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6273 return; 6274 } 6275 6276 break; 6277 6278 case sizeof (sin6_t): 6279 sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset, 6280 sizeof (sin6_t)); 6281 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 6282 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 6283 return; 6284 } 6285 if (tcp->tcp_family != AF_INET6 || 6286 sin6->sin6_family != AF_INET6) { 6287 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 6288 return; 6289 } 6290 if (sin6->sin6_port == 0) { 6291 tcp_err_ack(tcp, mp, TBADADDR, 0); 6292 return; 6293 } 6294 break; 6295 } 6296 /* 6297 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we 6298 * should key on their sequence number and cut them loose. 
6299 */ 6300 6301 /* 6302 * If options were passed in, feed them in for verification and handling 6303 */ 6304 if (tcr->OPT_length != 0) { 6305 mblk_t *ok_mp; 6306 mblk_t *discon_mp; 6307 mblk_t *conn_opts_mp; 6308 int t_error, sys_error, do_disconnect; 6309 6310 conn_opts_mp = NULL; 6311 6312 if (tcp_conprim_opt_process(tcp, mp, 6313 &do_disconnect, &t_error, &sys_error) < 0) { 6314 if (do_disconnect) { 6315 ASSERT(t_error == 0 && sys_error == 0); 6316 discon_mp = mi_tpi_discon_ind(NULL, 6317 ECONNREFUSED, 0); 6318 if (!discon_mp) { 6319 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, 6320 TSYSERR, ENOMEM); 6321 return; 6322 } 6323 ok_mp = mi_tpi_ok_ack_alloc(mp); 6324 if (!ok_mp) { 6325 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6326 TSYSERR, ENOMEM); 6327 return; 6328 } 6329 qreply(q, ok_mp); 6330 qreply(q, discon_mp); /* no flush! */ 6331 } else { 6332 ASSERT(t_error != 0); 6333 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error, 6334 sys_error); 6335 } 6336 return; 6337 } 6338 /* 6339 * Success in setting options, the mp option buffer represented 6340 * by OPT_length/offset has been potentially modified and 6341 * contains results of option processing. We copy it to 6342 * another mp and save it, since it can potentially influence 6343 * the T_CONN_CON returned. 6344 */ 6345 if (tcr->OPT_length != 0) { /* there are resulting options */ 6346 conn_opts_mp = copyb(mp); 6347 if (!conn_opts_mp) { 6348 tcp_err_ack_prim(tcp, mp, T_CONN_REQ, 6349 TSYSERR, ENOMEM); 6350 return; 6351 } 6352 ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL); 6353 tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp; 6354 /* 6355 * Note: 6356 * This resulting option negotiation can include any 6357 * end-to-end negotiation options but there is no such 6358 * thing (yet?) in our TCP/IP. 6359 */ 6360 } 6361 } 6362 6363 /* 6364 * If we're connecting to an IPv4-mapped IPv6 address, we need to 6365 * make sure that the template IP header in the tcp structure is an 6366 * IPv4 header, and that the tcp_ipversion is IPV4_VERSION. We 6367 * need to do this before we call tcp_bindi() so that the port lookup 6368 * code will look for ports in the correct port space (IPv4 and 6369 * IPv6 have separate port spaces). 6370 */ 6371 if (tcp->tcp_family == AF_INET6 && tcp->tcp_ipversion == IPV6_VERSION && 6372 IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6373 int err = 0; 6374 6375 err = tcp_header_init_ipv4(tcp); 6376 if (err != 0) { 6377 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6378 goto connect_failed; 6379 } 6380 if (tcp->tcp_lport != 0) 6381 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 6382 } 6383 6384 switch (tcp->tcp_state) { 6385 case TCPS_IDLE: 6386 /* 6387 * We support quick connect, refer to comments in 6388 * tcp_connect_*() 6389 */ 6390 /* FALLTHRU */ 6391 case TCPS_BOUND: 6392 case TCPS_LISTEN: 6393 if (tcp->tcp_family == AF_INET6) { 6394 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 6395 tcp_connect_ipv6(tcp, mp, 6396 &sin6->sin6_addr, 6397 sin6->sin6_port, sin6->sin6_flowinfo, 6398 sin6->__sin6_src_id, sin6->sin6_scope_id); 6399 return; 6400 } 6401 /* 6402 * Destination address is a mapped IPv6 address. 6403 * Source bound address should be unspecified or 6404 * an IPv6 mapped address as well.
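 *
 * (For example, a connect on an AF_INET6 socket to the v4-mapped
 * destination ::ffff:192.0.2.1 takes this path, while a connect
 * to a native destination such as 2001:db8::1 was already handed
 * to tcp_connect_ipv6() above.)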
6405 */ 6406 if (!IN6_IS_ADDR_UNSPECIFIED( 6407 &tcp->tcp_bound_source_v6) && 6408 !IN6_IS_ADDR_V4MAPPED(&tcp->tcp_bound_source_v6)) { 6409 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, 6410 EADDRNOTAVAIL); 6411 break; 6412 } 6413 dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr)); 6414 dstport = sin6->sin6_port; 6415 srcid = sin6->__sin6_src_id; 6416 } else { 6417 dstaddrp = &sin->sin_addr.s_addr; 6418 dstport = sin->sin_port; 6419 srcid = 0; 6420 } 6421 6422 tcp_connect_ipv4(tcp, mp, dstaddrp, dstport, srcid); 6423 return; 6424 default: 6425 mp = mi_tpi_err_ack_alloc(mp, TOUTSTATE, 0); 6426 break; 6427 } 6428 /* 6429 * Note: Code below is the "failure" case 6430 */ 6431 /* return error ack and blow away saved option results if any */ 6432 connect_failed: 6433 if (mp != NULL) 6434 putnext(tcp->tcp_rq, mp); 6435 else { 6436 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6437 TSYSERR, ENOMEM); 6438 } 6439 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6440 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6441 } 6442 6443 /* 6444 * Handle connect to IPv4 destinations, including connections for AF_INET6 6445 * sockets connecting to IPv4 mapped IPv6 destinations. 6446 */ 6447 static void 6448 tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp, in_port_t dstport, 6449 uint_t srcid) 6450 { 6451 tcph_t *tcph; 6452 mblk_t *mp1; 6453 ipaddr_t dstaddr = *dstaddrp; 6454 int32_t oldstate; 6455 uint16_t lport; 6456 tcp_stack_t *tcps = tcp->tcp_tcps; 6457 6458 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 6459 6460 /* Check for attempt to connect to INADDR_ANY */ 6461 if (dstaddr == INADDR_ANY) { 6462 /* 6463 * SunOS 4.x and 4.3 BSD allow an application 6464 * to connect a TCP socket to INADDR_ANY. 6465 * When they do this, the kernel picks the 6466 * address of one interface and uses it 6467 * instead. The kernel usually ends up 6468 * picking the address of the loopback 6469 * interface. This is an undocumented feature. 6470 * However, we provide the same thing here 6471 * in order to have source and binary 6472 * compatibility with SunOS 4.x. 6473 * Update the T_CONN_REQ (sin/sin6) since it is used to 6474 * generate the T_CONN_CON. 6475 */ 6476 dstaddr = htonl(INADDR_LOOPBACK); 6477 *dstaddrp = dstaddr; 6478 } 6479 6480 /* Handle __sin6_src_id if socket not bound to an IP address */ 6481 if (srcid != 0 && tcp->tcp_ipha->ipha_src == INADDR_ANY) { 6482 ip_srcid_find_id(srcid, &tcp->tcp_ip_src_v6, 6483 tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack); 6484 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_ip_src_v6, 6485 tcp->tcp_ipha->ipha_src); 6486 } 6487 6488 /* 6489 * Don't let an endpoint connect to itself. Note that 6490 * the test here does not catch the case where the 6491 * source IP addr was left unspecified by the user. In 6492 * this case, the source addr is set in tcp_adapt_ire() 6493 * using the reply to the T_BIND message that we send 6494 * down to IP here and the check is repeated in tcp_rput_other. 6495 */ 6496 if (dstaddr == tcp->tcp_ipha->ipha_src && 6497 dstport == tcp->tcp_lport) { 6498 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6499 goto failed; 6500 } 6501 6502 tcp->tcp_ipha->ipha_dst = dstaddr; 6503 IN6_IPADDR_TO_V4MAPPED(dstaddr, &tcp->tcp_remote_v6); 6504 6505 /* 6506 * Massage a source route if any putting the first hop 6507 * in iph_dst. Compute a starting value for the checksum which 6508 * takes into account that the original iph_dst should be 6509 * included in the checksum but that ip will include the 6510 * first hop in the source route in the tcp checksum. 
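 *
 * Illustrative arithmetic for the code below: each
 * (sum & 0xFFFF) + (sum >> 16) step is the end-around-carry fold of
 * one's complement addition, e.g. a 32-bit sum of 0x2ABCD folds to
 * 0xABCD + 0x2 = 0xABCF. Subtracting the two 16-bit halves of the
 * rewritten ipha_dst removes the first hop that IP will itself add
 * to the pseudo-header, and the extra decrement on a negative
 * result is the one's complement borrow.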
6511 */ 6512 tcp->tcp_sum = ip_massage_options(tcp->tcp_ipha, tcps->tcps_netstack); 6513 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 6514 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 6515 (tcp->tcp_ipha->ipha_dst & 0xffff)); 6516 if ((int)tcp->tcp_sum < 0) 6517 tcp->tcp_sum--; 6518 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 6519 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 6520 (tcp->tcp_sum >> 16)); 6521 tcph = tcp->tcp_tcph; 6522 *(uint16_t *)tcph->th_fport = dstport; 6523 tcp->tcp_fport = dstport; 6524 6525 oldstate = tcp->tcp_state; 6526 /* 6527 * At this point the remote destination address and remote port fields 6528 * in the tcp-four-tuple have been filled in the tcp structure. Now we 6529 * have to see which state tcp was in so we can take appropriate action. 6530 */ 6531 if (oldstate == TCPS_IDLE) { 6532 /* 6533 * We support a quick connect capability here, allowing 6534 * clients to transition directly from IDLE to SYN_SENT. 6535 * tcp_bindi will pick an unused port, insert the connection 6536 * in the bind hash and transition to BOUND state. 6537 */ 6538 lport = tcp_update_next_port(tcps->tcps_next_port_to_try, 6539 tcp, B_TRUE); 6540 lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE, 6541 B_FALSE, B_FALSE); 6542 if (lport == 0) { 6543 mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0); 6544 goto failed; 6545 } 6546 } 6547 tcp->tcp_state = TCPS_SYN_SENT; 6548 6549 /* 6550 * TODO: allow data with connect requests 6551 * by unlinking M_DATA trailers here and 6552 * linking them in behind the T_OK_ACK mblk. 6553 * The tcp_rput() bind ack handler would then 6554 * feed them to tcp_wput_data() rather than call 6555 * tcp_timer(). 6556 */ 6557 mp = mi_tpi_ok_ack_alloc(mp); 6558 if (!mp) { 6559 tcp->tcp_state = oldstate; 6560 goto failed; 6561 } 6562 if (tcp->tcp_family == AF_INET) { 6563 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 6564 sizeof (ipa_conn_t)); 6565 } else { 6566 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 6567 sizeof (ipa6_conn_t)); 6568 } 6569 if (mp1) { 6570 /* 6571 * We need to make sure that the conn_recv is set to a non-null 6572 * value before we insert the conn_t into the classifier table. 6573 * This is to avoid a race with an incoming packet which does 6574 * an ipcl_classify(). 6575 */ 6576 tcp->tcp_connp->conn_recv = tcp_input; 6577 6578 /* Hang onto the T_OK_ACK for later. */ 6579 linkb(mp1, mp); 6580 mblk_setcred(mp1, tcp->tcp_cred); 6581 if (tcp->tcp_family == AF_INET) 6582 mp1 = ip_bind_v4(tcp->tcp_wq, mp1, tcp->tcp_connp); 6583 else { 6584 mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp, 6585 &tcp->tcp_sticky_ipp); 6586 } 6587 BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens); 6588 tcp->tcp_active_open = 1; 6589 /* 6590 * If the bind cannot complete immediately 6591 * IP will arrange to call tcp_rput_other 6592 * when the bind completes. 6593 */ 6594 if (mp1 != NULL) 6595 tcp_rput_other(tcp, mp1); 6596 return; 6597 } 6598 /* Error case */ 6599 tcp->tcp_state = oldstate; 6600 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6601 6602 failed: 6603 /* return error ack and blow away saved option results if any */ 6604 if (mp != NULL) 6605 putnext(tcp->tcp_rq, mp); 6606 else { 6607 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6608 TSYSERR, ENOMEM); 6609 } 6610 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6611 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6612 6613 } 6614 6615 /* 6616 * Handle connect to IPv6 destinations.
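 *
 * (For illustration: a connect to a link-local destination such as
 * fe80::1 with sin6_scope_id = 2 arrives here with scope_id == 2,
 * which is recorded below in ipp_ifindex / ip6i_ifindex so IP can
 * select the right interface.)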
6617 */ 6618 static void 6619 tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp, 6620 in_port_t dstport, uint32_t flowinfo, uint_t srcid, uint32_t scope_id) 6621 { 6622 tcph_t *tcph; 6623 mblk_t *mp1; 6624 ip6_rthdr_t *rth; 6625 int32_t oldstate; 6626 uint16_t lport; 6627 tcp_stack_t *tcps = tcp->tcp_tcps; 6628 6629 ASSERT(tcp->tcp_family == AF_INET6); 6630 6631 /* 6632 * If we're here, it means that the destination address is a native 6633 * IPv6 address. Return an error if tcp_ipversion is not IPv6. A 6634 * reason why it might not be IPv6 is if the socket was bound to an 6635 * IPv4-mapped IPv6 address. 6636 */ 6637 if (tcp->tcp_ipversion != IPV6_VERSION) { 6638 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6639 goto failed; 6640 } 6641 6642 /* 6643 * Interpret a zero destination to mean loopback. 6644 * Update the T_CONN_REQ (sin/sin6) since it is used to 6645 * generate the T_CONN_CON. 6646 */ 6647 if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp)) { 6648 *dstaddrp = ipv6_loopback; 6649 } 6650 6651 /* Handle __sin6_src_id if socket not bound to an IP address */ 6652 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 6653 ip_srcid_find_id(srcid, &tcp->tcp_ip6h->ip6_src, 6654 tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack); 6655 tcp->tcp_ip_src_v6 = tcp->tcp_ip6h->ip6_src; 6656 } 6657 6658 /* 6659 * Take care of the scope_id now and add ip6i_t 6660 * if ip6i_t is not already allocated through TCP 6661 * sticky options. At this point tcp_ip6h does not 6662 * have dst info, thus use dstaddrp. 6663 */ 6664 if (scope_id != 0 && 6665 IN6_IS_ADDR_LINKSCOPE(dstaddrp)) { 6666 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 6667 ip6i_t *ip6i; 6668 6669 ipp->ipp_ifindex = scope_id; 6670 ip6i = (ip6i_t *)tcp->tcp_iphc; 6671 6672 if ((ipp->ipp_fields & IPPF_HAS_IP6I) && 6673 ip6i != NULL && (ip6i->ip6i_nxt == IPPROTO_RAW)) { 6674 /* Already allocated */ 6675 ip6i->ip6i_flags |= IP6I_IFINDEX; 6676 ip6i->ip6i_ifindex = ipp->ipp_ifindex; 6677 ipp->ipp_fields |= IPPF_SCOPE_ID; 6678 } else { 6679 int reterr; 6680 6681 ipp->ipp_fields |= IPPF_SCOPE_ID; 6682 if (ipp->ipp_fields & IPPF_HAS_IP6I) 6683 ip2dbg(("tcp_connect_v6: SCOPE_ID set\n")); 6684 reterr = tcp_build_hdrs(tcp->tcp_rq, tcp); 6685 if (reterr != 0) 6686 goto failed; 6687 ip1dbg(("tcp_connect_ipv6: tcp_bld_hdrs returned\n")); 6688 } 6689 } 6690 6691 /* 6692 * Don't let an endpoint connect to itself. Note that 6693 * the test here does not catch the case where the 6694 * source IP addr was left unspecified by the user. In 6695 * this case, the source addr is set in tcp_adapt_ire() 6696 * using the reply to the T_BIND message that we send 6697 * down to IP here and the check is repeated in tcp_rput_other. 6698 */ 6699 if (IN6_ARE_ADDR_EQUAL(dstaddrp, &tcp->tcp_ip6h->ip6_src) && 6700 (dstport == tcp->tcp_lport)) { 6701 mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0); 6702 goto failed; 6703 } 6704 6705 tcp->tcp_ip6h->ip6_dst = *dstaddrp; 6706 tcp->tcp_remote_v6 = *dstaddrp; 6707 tcp->tcp_ip6h->ip6_vcf = 6708 (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) | 6709 (flowinfo & ~IPV6_VERS_AND_FLOW_MASK); 6710 6711 6712 /* 6713 * Massage a routing header (if present) putting the first hop 6714 * in ip6_dst. Compute a starting value for the checksum which 6715 * takes into account that the original ip6_dst should be 6716 * included in the checksum but that ip will include the 6717 * first hop in the source route in the tcp checksum. 
6718 */ 6719 rth = ip_find_rthdr_v6(tcp->tcp_ip6h, (uint8_t *)tcp->tcp_tcph); 6720 if (rth != NULL) { 6721 tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, rth, 6722 tcps->tcps_netstack); 6723 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 6724 (tcp->tcp_sum >> 16)); 6725 } else { 6726 tcp->tcp_sum = 0; 6727 } 6728 6729 tcph = tcp->tcp_tcph; 6730 *(uint16_t *)tcph->th_fport = dstport; 6731 tcp->tcp_fport = dstport; 6732 6733 oldstate = tcp->tcp_state; 6734 /* 6735 * At this point the remote destination address and remote port fields 6736 * in the tcp-four-tuple have been filled in the tcp structure. Now we 6737 * have to see which state tcp was in so we can take apropriate action. 6738 */ 6739 if (oldstate == TCPS_IDLE) { 6740 /* 6741 * We support a quick connect capability here, allowing 6742 * clients to transition directly from IDLE to SYN_SENT 6743 * tcp_bindi will pick an unused port, insert the connection 6744 * in the bind hash and transition to BOUND state. 6745 */ 6746 lport = tcp_update_next_port(tcps->tcps_next_port_to_try, 6747 tcp, B_TRUE); 6748 lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE, 6749 B_FALSE, B_FALSE); 6750 if (lport == 0) { 6751 mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0); 6752 goto failed; 6753 } 6754 } 6755 tcp->tcp_state = TCPS_SYN_SENT; 6756 /* 6757 * TODO: allow data with connect requests 6758 * by unlinking M_DATA trailers here and 6759 * linking them in behind the T_OK_ACK mblk. 6760 * The tcp_rput() bind ack handler would then 6761 * feed them to tcp_wput_data() rather than call 6762 * tcp_timer(). 6763 */ 6764 mp = mi_tpi_ok_ack_alloc(mp); 6765 if (!mp) { 6766 tcp->tcp_state = oldstate; 6767 goto failed; 6768 } 6769 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, sizeof (ipa6_conn_t)); 6770 if (mp1) { 6771 /* 6772 * We need to make sure that the conn_recv is set to a non-null 6773 * value before we insert the conn_t into the classifier table. 6774 * This is to avoid a race with an incoming packet which does 6775 * an ipcl_classify(). 6776 */ 6777 tcp->tcp_connp->conn_recv = tcp_input; 6778 6779 /* Hang onto the T_OK_ACK for later. */ 6780 linkb(mp1, mp); 6781 mblk_setcred(mp1, tcp->tcp_cred); 6782 mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp, 6783 &tcp->tcp_sticky_ipp); 6784 BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens); 6785 tcp->tcp_active_open = 1; 6786 /* ip_bind_v6() may return ACK or ERROR */ 6787 if (mp1 != NULL) 6788 tcp_rput_other(tcp, mp1); 6789 return; 6790 } 6791 /* Error case */ 6792 tcp->tcp_state = oldstate; 6793 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM); 6794 6795 failed: 6796 /* return error ack and blow away saved option results if any */ 6797 if (mp != NULL) 6798 putnext(tcp->tcp_rq, mp); 6799 else { 6800 tcp_err_ack_prim(tcp, NULL, T_CONN_REQ, 6801 TSYSERR, ENOMEM); 6802 } 6803 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 6804 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 6805 } 6806 6807 /* 6808 * We need a stream q for detached closing tcp connections 6809 * to use. Our client hereby indicates that this q is the 6810 * one to use. 
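 *
 * The body below follows a set-once pattern; a sketch (not part of
 * the original source):
 *
 *	mutex_enter(&tcps->tcps_g_q_lock);
 *	if (tcps->tcps_g_q != NULL)
 *		iocp->ioc_error = EALREADY;	   already chosen
 *	else
 *		tcps->tcps_g_q = tcp->tcp_rq;	   first caller wins
 *	mutex_exit(&tcps->tcps_g_q_lock);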
6811 */ 6812 static void 6813 tcp_def_q_set(tcp_t *tcp, mblk_t *mp) 6814 { 6815 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 6816 queue_t *q = tcp->tcp_wq; 6817 tcp_stack_t *tcps = tcp->tcp_tcps; 6818 6819 #ifdef NS_DEBUG 6820 (void) printf("TCP_IOC_DEFAULT_Q for stack %d\n", 6821 tcps->tcps_netstack->netstack_stackid); 6822 #endif 6823 mp->b_datap->db_type = M_IOCACK; 6824 iocp->ioc_count = 0; 6825 mutex_enter(&tcps->tcps_g_q_lock); 6826 if (tcps->tcps_g_q != NULL) { 6827 mutex_exit(&tcps->tcps_g_q_lock); 6828 iocp->ioc_error = EALREADY; 6829 } else { 6830 mblk_t *mp1; 6831 6832 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 0); 6833 if (mp1 == NULL) { 6834 mutex_exit(&tcps->tcps_g_q_lock); 6835 iocp->ioc_error = ENOMEM; 6836 } else { 6837 tcps->tcps_g_q = tcp->tcp_rq; 6838 mutex_exit(&tcps->tcps_g_q_lock); 6839 iocp->ioc_error = 0; 6840 iocp->ioc_rval = 0; 6841 /* 6842 * We are passing tcp_sticky_ipp as NULL 6843 * as it is not useful for tcp_default queue 6844 * 6845 * Set conn_recv just in case. 6846 */ 6847 tcp->tcp_connp->conn_recv = tcp_conn_request; 6848 6849 mp1 = ip_bind_v6(q, mp1, tcp->tcp_connp, NULL); 6850 if (mp1 != NULL) 6851 tcp_rput_other(tcp, mp1); 6852 } 6853 } 6854 qreply(q, mp); 6855 } 6856 6857 /* 6858 * Our client hereby directs us to reject the connection request 6859 * that tcp_conn_request() marked with 'seqnum'. Rejection consists 6860 * of sending the appropriate RST, not an ICMP error. 6861 */ 6862 static void 6863 tcp_disconnect(tcp_t *tcp, mblk_t *mp) 6864 { 6865 tcp_t *ltcp = NULL; 6866 t_scalar_t seqnum; 6867 conn_t *connp; 6868 tcp_stack_t *tcps = tcp->tcp_tcps; 6869 6870 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 6871 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) { 6872 tcp_err_ack(tcp, mp, TPROTO, 0); 6873 return; 6874 } 6875 6876 /* 6877 * Right now, upper modules pass down a T_DISCON_REQ to TCP, 6878 * when the stream is in BOUND state. Do not send a reset, 6879 * since the destination IP address is not valid, and it can 6880 * be the initialized value of all zeros (broadcast address). 6881 * 6882 * If TCP has sent down a bind request to IP and has not 6883 * received the reply, reject the request. Otherwise, TCP 6884 * will be confused. 6885 */ 6886 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_hard_binding) { 6887 if (tcp->tcp_debug) { 6888 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 6889 "tcp_disconnect: bad state, %d", tcp->tcp_state); 6890 } 6891 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 6892 return; 6893 } 6894 6895 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number; 6896 6897 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) { 6898 6899 /* 6900 * According to TPI, for non-listeners, ignore seqnum 6901 * and disconnect. 6902 * Following interpretation of -1 seqnum is historical 6903 * and implied TPI ? (TPI only states that for T_CONN_IND, 6904 * a valid seqnum should not be -1). 6905 * 6906 * -1 means disconnect everything 6907 * regardless even on a listener. 6908 */ 6909 6910 int old_state = tcp->tcp_state; 6911 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 6912 6913 /* 6914 * The connection can't be on the tcp_time_wait_head list 6915 * since it is not detached. 6916 */ 6917 ASSERT(tcp->tcp_time_wait_next == NULL); 6918 ASSERT(tcp->tcp_time_wait_prev == NULL); 6919 ASSERT(tcp->tcp_time_wait_expire == 0); 6920 ltcp = NULL; 6921 /* 6922 * If it used to be a listener, check to make sure no one else 6923 * has taken the port before switching back to LISTEN state. 
6924 */ 6925 if (tcp->tcp_ipversion == IPV4_VERSION) { 6926 connp = ipcl_lookup_listener_v4(tcp->tcp_lport, 6927 tcp->tcp_ipha->ipha_src, 6928 tcp->tcp_connp->conn_zoneid, ipst); 6929 if (connp != NULL) 6930 ltcp = connp->conn_tcp; 6931 } else { 6932 /* Allow tcp_bound_if listeners? */ 6933 connp = ipcl_lookup_listener_v6(tcp->tcp_lport, 6934 &tcp->tcp_ip6h->ip6_src, 0, 6935 tcp->tcp_connp->conn_zoneid, ipst); 6936 if (connp != NULL) 6937 ltcp = connp->conn_tcp; 6938 } 6939 if (tcp->tcp_conn_req_max && ltcp == NULL) { 6940 tcp->tcp_state = TCPS_LISTEN; 6941 } else if (old_state > TCPS_BOUND) { 6942 tcp->tcp_conn_req_max = 0; 6943 tcp->tcp_state = TCPS_BOUND; 6944 } 6945 if (ltcp != NULL) 6946 CONN_DEC_REF(ltcp->tcp_connp); 6947 if (old_state == TCPS_SYN_SENT || old_state == TCPS_SYN_RCVD) { 6948 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 6949 } else if (old_state == TCPS_ESTABLISHED || 6950 old_state == TCPS_CLOSE_WAIT) { 6951 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 6952 } 6953 6954 if (tcp->tcp_fused) 6955 tcp_unfuse(tcp); 6956 6957 mutex_enter(&tcp->tcp_eager_lock); 6958 if ((tcp->tcp_conn_req_cnt_q0 != 0) || 6959 (tcp->tcp_conn_req_cnt_q != 0)) { 6960 tcp_eager_cleanup(tcp, 0); 6961 } 6962 mutex_exit(&tcp->tcp_eager_lock); 6963 6964 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt, 6965 tcp->tcp_rnxt, TH_RST | TH_ACK); 6966 6967 tcp_reinit(tcp); 6968 6969 if (old_state >= TCPS_ESTABLISHED) { 6970 /* Send M_FLUSH according to TPI */ 6971 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6972 } 6973 mp = mi_tpi_ok_ack_alloc(mp); 6974 if (mp) 6975 putnext(tcp->tcp_rq, mp); 6976 return; 6977 } else if (!tcp_eager_blowoff(tcp, seqnum)) { 6978 tcp_err_ack(tcp, mp, TBADSEQ, 0); 6979 return; 6980 } 6981 if (tcp->tcp_state >= TCPS_ESTABLISHED) { 6982 /* Send M_FLUSH according to TPI */ 6983 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6984 } 6985 mp = mi_tpi_ok_ack_alloc(mp); 6986 if (mp) 6987 putnext(tcp->tcp_rq, mp); 6988 } 6989 6990 /* 6991 * Diagnostic routine used to return a string associated with the tcp state. 6992 * Note that if the caller does not supply a buffer, it will use an internal 6993 * static string. This means that if multiple threads call this function at 6994 * the same time, output can be corrupted... Note also that this function 6995 * does not check the size of the supplied buffer. The caller has to make 6996 * sure that it is big enough. 
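 *
 * A hedged usage sketch (not part of the original source): a
 * caller-owned buffer sized like the internal one avoids the shared
 * static buffer entirely:
 *
 *	char dbuf[INET6_ADDRSTRLEN * 2 + 80];
 *	cp = tcp_display(tcp, dbuf, DISP_ADDR_AND_PORT);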
6997  */
6998 static char *
6999 tcp_display(tcp_t *tcp, char *sup_buf, char format)
7000 {
7001 	char		buf1[30];
7002 	static char	priv_buf[INET6_ADDRSTRLEN * 2 + 80];
7003 	char		*buf;
7004 	char		*cp;
7005 	in6_addr_t	local, remote;
7006 	char		local_addrbuf[INET6_ADDRSTRLEN];
7007 	char		remote_addrbuf[INET6_ADDRSTRLEN];
7008 
7009 	if (sup_buf != NULL)
7010 		buf = sup_buf;
7011 	else
7012 		buf = priv_buf;
7013 
7014 	if (tcp == NULL)
7015 		return ("NULL_TCP");
7016 	switch (tcp->tcp_state) {
7017 	case TCPS_CLOSED:
7018 		cp = "TCP_CLOSED";
7019 		break;
7020 	case TCPS_IDLE:
7021 		cp = "TCP_IDLE";
7022 		break;
7023 	case TCPS_BOUND:
7024 		cp = "TCP_BOUND";
7025 		break;
7026 	case TCPS_LISTEN:
7027 		cp = "TCP_LISTEN";
7028 		break;
7029 	case TCPS_SYN_SENT:
7030 		cp = "TCP_SYN_SENT";
7031 		break;
7032 	case TCPS_SYN_RCVD:
7033 		cp = "TCP_SYN_RCVD";
7034 		break;
7035 	case TCPS_ESTABLISHED:
7036 		cp = "TCP_ESTABLISHED";
7037 		break;
7038 	case TCPS_CLOSE_WAIT:
7039 		cp = "TCP_CLOSE_WAIT";
7040 		break;
7041 	case TCPS_FIN_WAIT_1:
7042 		cp = "TCP_FIN_WAIT_1";
7043 		break;
7044 	case TCPS_CLOSING:
7045 		cp = "TCP_CLOSING";
7046 		break;
7047 	case TCPS_LAST_ACK:
7048 		cp = "TCP_LAST_ACK";
7049 		break;
7050 	case TCPS_FIN_WAIT_2:
7051 		cp = "TCP_FIN_WAIT_2";
7052 		break;
7053 	case TCPS_TIME_WAIT:
7054 		cp = "TCP_TIME_WAIT";
7055 		break;
7056 	default:
7057 		(void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state);
7058 		cp = buf1;
7059 		break;
7060 	}
7061 	switch (format) {
7062 	case DISP_ADDR_AND_PORT:
7063 		if (tcp->tcp_ipversion == IPV4_VERSION) {
7064 			/*
7065 			 * Note that we use the remote address in the tcp_b
7066 			 * structure. This means that it will print out
7067 			 * the real destination address, not the next hop's
7068 			 * address if source routing is used.
7069 			 */
7070 			IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ip_src, &local);
7071 			IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &remote);
7072 
7073 		} else {
7074 			local = tcp->tcp_ip_src_v6;
7075 			remote = tcp->tcp_remote_v6;
7076 		}
7077 		(void) inet_ntop(AF_INET6, &local, local_addrbuf,
7078 		    sizeof (local_addrbuf));
7079 		(void) inet_ntop(AF_INET6, &remote, remote_addrbuf,
7080 		    sizeof (remote_addrbuf));
7081 		(void) mi_sprintf(buf, "[%s.%u, %s.%u] %s",
7082 		    local_addrbuf, ntohs(tcp->tcp_lport), remote_addrbuf,
7083 		    ntohs(tcp->tcp_fport), cp);
7084 		break;
7085 	case DISP_PORT_ONLY:
7086 	default:
7087 		(void) mi_sprintf(buf, "[%u, %u] %s",
7088 		    ntohs(tcp->tcp_lport), ntohs(tcp->tcp_fport), cp);
7089 		break;
7090 	}
7091 
7092 	return (buf);
7093 }
7094 
7095 /*
7096  * Called via squeue to get on to eager's perimeter. It sends a
7097  * TH_RST if eager is in the fanout table. The listener wants the
7098  * eager to disappear either by means of tcp_eager_blowoff() or
7099  * tcp_eager_cleanup() being called. tcp_eager_kill() can also be
7100  * called (via squeue) if the eager cannot be inserted in the
7101  * fanout table in tcp_conn_request().
7102  */
7103 /* ARGSUSED */
7104 void
7105 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2)
7106 {
7107 	conn_t *econnp = (conn_t *)arg;
7108 	tcp_t	*eager = econnp->conn_tcp;
7109 	tcp_t	*listener = eager->tcp_listener;
7110 	tcp_stack_t	*tcps = eager->tcp_tcps;
7111 
7112 	/*
7113 	 * We could be called because the listener is closing. Since
7114 	 * the eager is using the listener's queues, it's not safe.
7115 	 * Better use the default queue just to send the TH_RST
7116 	 * out.
7117 	 */
7118 	ASSERT(tcps->tcps_g_q != NULL);
7119 	eager->tcp_rq = tcps->tcps_g_q;
7120 	eager->tcp_wq = WR(tcps->tcps_g_q);
7121 
7122 	/*
7123 	 * An eager's conn_fanout will be NULL if it's a duplicate
7124 	 * of an existing 4-tuple in the conn fanout table.
7125 	 * We don't want to send an RST out in such a case.
7126 	 */
7127 	if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) {
7128 		tcp_xmit_ctl("tcp_eager_kill, can't wait",
7129 		    eager, eager->tcp_snxt, 0, TH_RST);
7130 	}
7131 
7132 	/* We are here because the listener wants this eager gone */
7133 	if (listener != NULL) {
7134 		mutex_enter(&listener->tcp_eager_lock);
7135 		tcp_eager_unlink(eager);
7136 		if (eager->tcp_tconnind_started) {
7137 			/*
7138 			 * The eager has sent a conn_ind up to the
7139 			 * listener but the listener decides to close
7140 			 * instead. We need to drop the extra ref
7141 			 * placed on the eager in tcp_rput_data() before
7142 			 * sending the conn_ind to the listener.
7143 			 */
7144 			CONN_DEC_REF(econnp);
7145 		}
7146 		mutex_exit(&listener->tcp_eager_lock);
7147 		CONN_DEC_REF(listener->tcp_connp);
7148 	}
7149 
7150 	if (eager->tcp_state > TCPS_BOUND)
7151 		tcp_close_detached(eager);
7152 }
7153 
7154 /*
7155  * Reset any eager connection hanging off this listener marked
7156  * with 'seqnum' and then reclaim its resources.
7157  */
7158 static boolean_t
7159 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum)
7160 {
7161 	tcp_t	*eager;
7162 	mblk_t	*mp;
7163 	tcp_stack_t	*tcps = listener->tcp_tcps;
7164 
7165 	TCP_STAT(tcps, tcp_eager_blowoff_calls);
7166 	eager = listener;
7167 	mutex_enter(&listener->tcp_eager_lock);
7168 	do {
7169 		eager = eager->tcp_eager_next_q;
7170 		if (eager == NULL) {
7171 			mutex_exit(&listener->tcp_eager_lock);
7172 			return (B_FALSE);
7173 		}
7174 	} while (eager->tcp_conn_req_seqnum != seqnum);
7175 
7176 	if (eager->tcp_closemp_used) {
7177 		mutex_exit(&listener->tcp_eager_lock);
7178 		return (B_TRUE);
7179 	}
7180 	eager->tcp_closemp_used = B_TRUE;
7181 	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
7182 	CONN_INC_REF(eager->tcp_connp);
7183 	mutex_exit(&listener->tcp_eager_lock);
7184 	mp = &eager->tcp_closemp;
7185 	squeue_fill(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill,
7186 	    eager->tcp_connp, SQTAG_TCP_EAGER_BLOWOFF);
7187 	return (B_TRUE);
7188 }
7189 
7190 /*
7191  * Reset any eager connection hanging off this listener
7192  * and then reclaim its resources.
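 *
 * Each eager is torn down with the same hand-off used by
 * tcp_eager_blowoff() above; a sketch (not part of the original
 * source):
 *
 *	if (!eager->tcp_closemp_used) {		   embedded mblk free?
 *		eager->tcp_closemp_used = B_TRUE;
 *		CONN_INC_REF(eager->tcp_connp);	   pin eager across the hop
 *		squeue_fill(eager->tcp_connp->conn_sqp, &eager->tcp_closemp,
 *		    tcp_eager_kill, eager->tcp_connp, SQTAG_TCP_EAGER_CLEANUP);
 *	}
 *
 * tcp_closemp_used guards against queueing the one preallocated close
 * mblk twice.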
7193 */ 7194 static void 7195 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 7196 { 7197 tcp_t *eager; 7198 mblk_t *mp; 7199 tcp_stack_t *tcps = listener->tcp_tcps; 7200 7201 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7202 7203 if (!q0_only) { 7204 /* First cleanup q */ 7205 TCP_STAT(tcps, tcp_eager_blowoff_q); 7206 eager = listener->tcp_eager_next_q; 7207 while (eager != NULL) { 7208 if (!eager->tcp_closemp_used) { 7209 eager->tcp_closemp_used = B_TRUE; 7210 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7211 CONN_INC_REF(eager->tcp_connp); 7212 mp = &eager->tcp_closemp; 7213 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7214 tcp_eager_kill, eager->tcp_connp, 7215 SQTAG_TCP_EAGER_CLEANUP); 7216 } 7217 eager = eager->tcp_eager_next_q; 7218 } 7219 } 7220 /* Then cleanup q0 */ 7221 TCP_STAT(tcps, tcp_eager_blowoff_q0); 7222 eager = listener->tcp_eager_next_q0; 7223 while (eager != listener) { 7224 if (!eager->tcp_closemp_used) { 7225 eager->tcp_closemp_used = B_TRUE; 7226 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7227 CONN_INC_REF(eager->tcp_connp); 7228 mp = &eager->tcp_closemp; 7229 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7230 tcp_eager_kill, eager->tcp_connp, 7231 SQTAG_TCP_EAGER_CLEANUP_Q0); 7232 } 7233 eager = eager->tcp_eager_next_q0; 7234 } 7235 } 7236 7237 /* 7238 * If we are an eager connection hanging off a listener that hasn't 7239 * formally accepted the connection yet, get off his list and blow off 7240 * any data that we have accumulated. 7241 */ 7242 static void 7243 tcp_eager_unlink(tcp_t *tcp) 7244 { 7245 tcp_t *listener = tcp->tcp_listener; 7246 7247 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7248 ASSERT(listener != NULL); 7249 if (tcp->tcp_eager_next_q0 != NULL) { 7250 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 7251 7252 /* Remove the eager tcp from q0 */ 7253 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 7254 tcp->tcp_eager_prev_q0; 7255 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 7256 tcp->tcp_eager_next_q0; 7257 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 7258 listener->tcp_conn_req_cnt_q0--; 7259 7260 tcp->tcp_eager_next_q0 = NULL; 7261 tcp->tcp_eager_prev_q0 = NULL; 7262 7263 /* 7264 * Take the eager out, if it is in the list of droppable 7265 * eagers. 7266 */ 7267 MAKE_UNDROPPABLE(tcp); 7268 7269 if (tcp->tcp_syn_rcvd_timeout != 0) { 7270 /* we have timed out before */ 7271 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 7272 listener->tcp_syn_rcvd_timeout--; 7273 } 7274 } else { 7275 tcp_t **tcpp = &listener->tcp_eager_next_q; 7276 tcp_t *prev = NULL; 7277 7278 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 7279 if (tcpp[0] == tcp) { 7280 if (listener->tcp_eager_last_q == tcp) { 7281 /* 7282 * If we are unlinking the last 7283 * element on the list, adjust 7284 * tail pointer. Set tail pointer 7285 * to nil when list is empty. 7286 */ 7287 ASSERT(tcp->tcp_eager_next_q == NULL); 7288 if (listener->tcp_eager_last_q == 7289 listener->tcp_eager_next_q) { 7290 listener->tcp_eager_last_q = 7291 NULL; 7292 } else { 7293 /* 7294 * We won't get here if there 7295 * is only one eager in the 7296 * list. 
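 *
 * (With exactly one eager, tcp_eager_last_q equals
 * listener->tcp_eager_next_q, so the branch above already set the
 * tail to NULL and prev is never consulted.)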
7297 */ 7298 ASSERT(prev != NULL); 7299 listener->tcp_eager_last_q = 7300 prev; 7301 } 7302 } 7303 tcpp[0] = tcp->tcp_eager_next_q; 7304 tcp->tcp_eager_next_q = NULL; 7305 tcp->tcp_eager_last_q = NULL; 7306 ASSERT(listener->tcp_conn_req_cnt_q > 0); 7307 listener->tcp_conn_req_cnt_q--; 7308 break; 7309 } 7310 prev = tcpp[0]; 7311 } 7312 } 7313 tcp->tcp_listener = NULL; 7314 } 7315 7316 /* Shorthand to generate and send TPI error acks to our client */ 7317 static void 7318 tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error) 7319 { 7320 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 7321 putnext(tcp->tcp_rq, mp); 7322 } 7323 7324 /* Shorthand to generate and send TPI error acks to our client */ 7325 static void 7326 tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive, 7327 int t_error, int sys_error) 7328 { 7329 struct T_error_ack *teackp; 7330 7331 if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 7332 M_PCPROTO, T_ERROR_ACK)) != NULL) { 7333 teackp = (struct T_error_ack *)mp->b_rptr; 7334 teackp->ERROR_prim = primitive; 7335 teackp->TLI_error = t_error; 7336 teackp->UNIX_error = sys_error; 7337 putnext(tcp->tcp_rq, mp); 7338 } 7339 } 7340 7341 /* 7342 * Note: No locks are held when inspecting tcp_g_*epriv_ports 7343 * but instead the code relies on: 7344 * - the fact that the address of the array and its size never changes 7345 * - the atomic assignment of the elements of the array 7346 */ 7347 /* ARGSUSED */ 7348 static int 7349 tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 7350 { 7351 int i; 7352 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7353 7354 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7355 if (tcps->tcps_g_epriv_ports[i] != 0) 7356 (void) mi_mpprintf(mp, "%d ", 7357 tcps->tcps_g_epriv_ports[i]); 7358 } 7359 return (0); 7360 } 7361 7362 /* 7363 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7364 * threads from changing it at the same time. 7365 */ 7366 /* ARGSUSED */ 7367 static int 7368 tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7369 cred_t *cr) 7370 { 7371 long new_value; 7372 int i; 7373 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7374 7375 /* 7376 * Fail the request if the new value does not lie within the 7377 * port number limits. 7378 */ 7379 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 7380 new_value <= 0 || new_value >= 65536) { 7381 return (EINVAL); 7382 } 7383 7384 mutex_enter(&tcps->tcps_epriv_port_lock); 7385 /* Check if the value is already in the list */ 7386 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7387 if (new_value == tcps->tcps_g_epriv_ports[i]) { 7388 mutex_exit(&tcps->tcps_epriv_port_lock); 7389 return (EEXIST); 7390 } 7391 } 7392 /* Find an empty slot */ 7393 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7394 if (tcps->tcps_g_epriv_ports[i] == 0) 7395 break; 7396 } 7397 if (i == tcps->tcps_g_num_epriv_ports) { 7398 mutex_exit(&tcps->tcps_epriv_port_lock); 7399 return (EOVERFLOW); 7400 } 7401 /* Set the new value */ 7402 tcps->tcps_g_epriv_ports[i] = (uint16_t)new_value; 7403 mutex_exit(&tcps->tcps_epriv_port_lock); 7404 return (0); 7405 } 7406 7407 /* 7408 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7409 * threads from changing it at the same time. 
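 *
 * A hedged usage sketch (not part of the original source): these
 * handlers back ndd(1M)-style tunables, so the expected
 * administrative interface is along the lines of
 *
 *	ndd -set /dev/tcp tcp_extra_priv_ports_add 2049
 *	ndd -set /dev/tcp tcp_extra_priv_ports_del 2049
 *
 * with the value required to parse as a decimal port in [1, 65535].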
7410  */
7411 /* ARGSUSED */
7412 static int
7413 tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
7414     cred_t *cr)
7415 {
7416 	long	new_value;
7417 	int	i;
7418 	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;
7419 
7420 	/*
7421 	 * Fail the request if the new value does not lie within the
7422 	 * port number limits.
7423 	 */
7424 	if (ddi_strtol(value, NULL, 10, &new_value) != 0 || new_value <= 0 ||
7425 	    new_value >= 65536) {
7426 		return (EINVAL);
7427 	}
7428 
7429 	mutex_enter(&tcps->tcps_epriv_port_lock);
7430 	/* Check that the value is already in the list */
7431 	for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
7432 		if (tcps->tcps_g_epriv_ports[i] == new_value)
7433 			break;
7434 	}
7435 	if (i == tcps->tcps_g_num_epriv_ports) {
7436 		mutex_exit(&tcps->tcps_epriv_port_lock);
7437 		return (ESRCH);
7438 	}
7439 	/* Clear the value */
7440 	tcps->tcps_g_epriv_ports[i] = 0;
7441 	mutex_exit(&tcps->tcps_epriv_port_lock);
7442 	return (0);
7443 }
7444 
7445 /* Return the TPI/TLI equivalent of our current tcp_state */
7446 static int
7447 tcp_tpistate(tcp_t *tcp)
7448 {
7449 	switch (tcp->tcp_state) {
7450 	case TCPS_IDLE:
7451 		return (TS_UNBND);
7452 	case TCPS_LISTEN:
7453 		/*
7454 		 * Return whether there are outstanding T_CONN_IND waiting
7455 		 * for the matching T_CONN_RES. Therefore don't count q0.
7456 		 */
7457 		if (tcp->tcp_conn_req_cnt_q > 0)
7458 			return (TS_WRES_CIND);
7459 		else
7460 			return (TS_IDLE);
7461 	case TCPS_BOUND:
7462 		return (TS_IDLE);
7463 	case TCPS_SYN_SENT:
7464 		return (TS_WCON_CREQ);
7465 	case TCPS_SYN_RCVD:
7466 		/*
7467 		 * Note: assumption: this has to be the active open SYN_RCVD.
7468 		 * The passive instance is detached in SYN_RCVD stage of
7469 		 * incoming connection processing so we cannot get a request
7470 		 * for T_info_ack on it.
7471 		 */
7472 		return (TS_WACK_CRES);
7473 	case TCPS_ESTABLISHED:
7474 		return (TS_DATA_XFER);
7475 	case TCPS_CLOSE_WAIT:
7476 		return (TS_WREQ_ORDREL);
7477 	case TCPS_FIN_WAIT_1:
7478 		return (TS_WIND_ORDREL);
7479 	case TCPS_FIN_WAIT_2:
7480 		return (TS_WIND_ORDREL);
7481 
7482 	case TCPS_CLOSING:
7483 	case TCPS_LAST_ACK:
7484 	case TCPS_TIME_WAIT:
7485 	case TCPS_CLOSED:
7486 		/*
7487 		 * The following TS_WACK_DREQ7 is a rendition of the "not
7488 		 * yet TS_IDLE" TPI state. There is no best match to any
7489 		 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT} but we
7490 		 * choose a value that will map to the TLI/XTI level
7491 		 * state of TSTATECHNG (state is in the process of changing),
7492 		 * which captures what this dummy state represents.
7493 		 */
7494 		return (TS_WACK_DREQ7);
7495 	default:
7496 		cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s",
7497 		    tcp->tcp_state, tcp_display(tcp, NULL,
7498 		    DISP_PORT_ONLY));
7499 		return (TS_UNBND);
7500 	}
7501 }
7502 
7503 static void
7504 tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp)
7505 {
7506 	tcp_stack_t	*tcps = tcp->tcp_tcps;
7507 
7508 	if (tcp->tcp_family == AF_INET6)
7509 		*tia = tcp_g_t_info_ack_v6;
7510 	else
7511 		*tia = tcp_g_t_info_ack;
7512 	tia->CURRENT_state = tcp_tpistate(tcp);
7513 	tia->OPT_size = tcp_max_optsize;
7514 	if (tcp->tcp_mss == 0) {
7515 		/* Not yet set - tcp_open does not set mss */
7516 		if (tcp->tcp_ipversion == IPV4_VERSION)
7517 			tia->TIDU_size = tcps->tcps_mss_def_ipv4;
7518 		else
7519 			tia->TIDU_size = tcps->tcps_mss_def_ipv6;
7520 	} else {
7521 		tia->TIDU_size = tcp->tcp_mss;
7522 	}
7523 	/* TODO: Default ETSDU is 1. Is that correct for tcp? */
7524 }
7525 
7526 /*
7527  * This routine responds to T_CAPABILITY_REQ messages. It is called by
7528  * tcp_wput.
Much of the T_CAPABILITY_ACK information is copied from 7529 * tcp_g_t_info_ack. The current state of the stream is copied from 7530 * tcp_state. 7531 */ 7532 static void 7533 tcp_capability_req(tcp_t *tcp, mblk_t *mp) 7534 { 7535 t_uscalar_t cap_bits1; 7536 struct T_capability_ack *tcap; 7537 7538 if (MBLKL(mp) < sizeof (struct T_capability_req)) { 7539 freemsg(mp); 7540 return; 7541 } 7542 7543 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 7544 7545 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 7546 mp->b_datap->db_type, T_CAPABILITY_ACK); 7547 if (mp == NULL) 7548 return; 7549 7550 tcap = (struct T_capability_ack *)mp->b_rptr; 7551 tcap->CAP_bits1 = 0; 7552 7553 if (cap_bits1 & TC1_INFO) { 7554 tcp_copy_info(&tcap->INFO_ack, tcp); 7555 tcap->CAP_bits1 |= TC1_INFO; 7556 } 7557 7558 if (cap_bits1 & TC1_ACCEPTOR_ID) { 7559 tcap->ACCEPTOR_id = tcp->tcp_acceptor_id; 7560 tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 7561 } 7562 7563 putnext(tcp->tcp_rq, mp); 7564 } 7565 7566 /* 7567 * This routine responds to T_INFO_REQ messages. It is called by tcp_wput. 7568 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack. 7569 * The current state of the stream is copied from tcp_state. 7570 */ 7571 static void 7572 tcp_info_req(tcp_t *tcp, mblk_t *mp) 7573 { 7574 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 7575 T_INFO_ACK); 7576 if (!mp) { 7577 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7578 return; 7579 } 7580 tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp); 7581 putnext(tcp->tcp_rq, mp); 7582 } 7583 7584 /* Respond to the TPI addr request */ 7585 static void 7586 tcp_addr_req(tcp_t *tcp, mblk_t *mp) 7587 { 7588 sin_t *sin; 7589 mblk_t *ackmp; 7590 struct T_addr_ack *taa; 7591 7592 /* Make it large enough for worst case */ 7593 ackmp = reallocb(mp, sizeof (struct T_addr_ack) + 7594 2 * sizeof (sin6_t), 1); 7595 if (ackmp == NULL) { 7596 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7597 return; 7598 } 7599 7600 if (tcp->tcp_ipversion == IPV6_VERSION) { 7601 tcp_addr_req_ipv6(tcp, ackmp); 7602 return; 7603 } 7604 taa = (struct T_addr_ack *)ackmp->b_rptr; 7605 7606 bzero(taa, sizeof (struct T_addr_ack)); 7607 ackmp->b_wptr = (uchar_t *)&taa[1]; 7608 7609 taa->PRIM_type = T_ADDR_ACK; 7610 ackmp->b_datap->db_type = M_PCPROTO; 7611 7612 /* 7613 * Note: Following code assumes 32 bit alignment of basic 7614 * data structures like sin_t and struct T_addr_ack. 
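 *
 * The ack built below is laid out as follows (a sketch, not part of
 * the original source):
 *
 *	offset 0		struct T_addr_ack (taa)
 *	LOCADDR_offset		sizeof (*taa), local sin_t
 *	REMADDR_offset		ROUNDUP32(LOCADDR_offset + LOCADDR_length),
 *				remote sin_t
 *
 * ROUNDUP32() keeps the remote address 32-bit aligned, which is the
 * alignment assumption noted above.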
7615 */ 7616 if (tcp->tcp_state >= TCPS_BOUND) { 7617 /* 7618 * Fill in local address 7619 */ 7620 taa->LOCADDR_length = sizeof (sin_t); 7621 taa->LOCADDR_offset = sizeof (*taa); 7622 7623 sin = (sin_t *)&taa[1]; 7624 7625 /* Fill zeroes and then intialize non-zero fields */ 7626 *sin = sin_null; 7627 7628 sin->sin_family = AF_INET; 7629 7630 sin->sin_addr.s_addr = tcp->tcp_ipha->ipha_src; 7631 sin->sin_port = *(uint16_t *)tcp->tcp_tcph->th_lport; 7632 7633 ackmp->b_wptr = (uchar_t *)&sin[1]; 7634 7635 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7636 /* 7637 * Fill in Remote address 7638 */ 7639 taa->REMADDR_length = sizeof (sin_t); 7640 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7641 taa->LOCADDR_length); 7642 7643 sin = (sin_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7644 *sin = sin_null; 7645 sin->sin_family = AF_INET; 7646 sin->sin_addr.s_addr = tcp->tcp_remote; 7647 sin->sin_port = tcp->tcp_fport; 7648 7649 ackmp->b_wptr = (uchar_t *)&sin[1]; 7650 } 7651 } 7652 putnext(tcp->tcp_rq, ackmp); 7653 } 7654 7655 /* Assumes that tcp_addr_req gets enough space and alignment */ 7656 static void 7657 tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *ackmp) 7658 { 7659 sin6_t *sin6; 7660 struct T_addr_ack *taa; 7661 7662 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 7663 ASSERT(OK_32PTR(ackmp->b_rptr)); 7664 ASSERT(ackmp->b_wptr - ackmp->b_rptr >= sizeof (struct T_addr_ack) + 7665 2 * sizeof (sin6_t)); 7666 7667 taa = (struct T_addr_ack *)ackmp->b_rptr; 7668 7669 bzero(taa, sizeof (struct T_addr_ack)); 7670 ackmp->b_wptr = (uchar_t *)&taa[1]; 7671 7672 taa->PRIM_type = T_ADDR_ACK; 7673 ackmp->b_datap->db_type = M_PCPROTO; 7674 7675 /* 7676 * Note: Following code assumes 32 bit alignment of basic 7677 * data structures like sin6_t and struct T_addr_ack. 7678 */ 7679 if (tcp->tcp_state >= TCPS_BOUND) { 7680 /* 7681 * Fill in local address 7682 */ 7683 taa->LOCADDR_length = sizeof (sin6_t); 7684 taa->LOCADDR_offset = sizeof (*taa); 7685 7686 sin6 = (sin6_t *)&taa[1]; 7687 *sin6 = sin6_null; 7688 7689 sin6->sin6_family = AF_INET6; 7690 sin6->sin6_addr = tcp->tcp_ip6h->ip6_src; 7691 sin6->sin6_port = tcp->tcp_lport; 7692 7693 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7694 7695 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7696 /* 7697 * Fill in Remote address 7698 */ 7699 taa->REMADDR_length = sizeof (sin6_t); 7700 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7701 taa->LOCADDR_length); 7702 7703 sin6 = (sin6_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7704 *sin6 = sin6_null; 7705 sin6->sin6_family = AF_INET6; 7706 sin6->sin6_flowinfo = 7707 tcp->tcp_ip6h->ip6_vcf & 7708 ~IPV6_VERS_AND_FLOW_MASK; 7709 sin6->sin6_addr = tcp->tcp_remote_v6; 7710 sin6->sin6_port = tcp->tcp_fport; 7711 7712 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7713 } 7714 } 7715 putnext(tcp->tcp_rq, ackmp); 7716 } 7717 7718 /* 7719 * Handle reinitialization of a tcp structure. 7720 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE. 
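 *
 * For the cases handled below, the resulting state is (a sketch, not
 * part of the original source):
 *
 *	tcp_conn_req_max != 0	-> TCPS_LISTEN	(TLI listener reused)
 *	otherwise		-> TCPS_BOUND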
7721 */ 7722 static void 7723 tcp_reinit(tcp_t *tcp) 7724 { 7725 mblk_t *mp; 7726 int err; 7727 tcp_stack_t *tcps = tcp->tcp_tcps; 7728 7729 TCP_STAT(tcps, tcp_reinit_calls); 7730 7731 /* tcp_reinit should never be called for detached tcp_t's */ 7732 ASSERT(tcp->tcp_listener == NULL); 7733 ASSERT((tcp->tcp_family == AF_INET && 7734 tcp->tcp_ipversion == IPV4_VERSION) || 7735 (tcp->tcp_family == AF_INET6 && 7736 (tcp->tcp_ipversion == IPV4_VERSION || 7737 tcp->tcp_ipversion == IPV6_VERSION))); 7738 7739 /* Cancel outstanding timers */ 7740 tcp_timers_stop(tcp); 7741 7742 /* 7743 * Reset everything in the state vector, after updating global 7744 * MIB data from instance counters. 7745 */ 7746 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 7747 tcp->tcp_ibsegs = 0; 7748 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 7749 tcp->tcp_obsegs = 0; 7750 7751 tcp_close_mpp(&tcp->tcp_xmit_head); 7752 if (tcp->tcp_snd_zcopy_aware) 7753 tcp_zcopy_notify(tcp); 7754 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL; 7755 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0; 7756 mutex_enter(&tcp->tcp_non_sq_lock); 7757 if (tcp->tcp_flow_stopped && 7758 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 7759 tcp_clrqfull(tcp); 7760 } 7761 mutex_exit(&tcp->tcp_non_sq_lock); 7762 tcp_close_mpp(&tcp->tcp_reass_head); 7763 tcp->tcp_reass_tail = NULL; 7764 if (tcp->tcp_rcv_list != NULL) { 7765 /* Free b_next chain */ 7766 tcp_close_mpp(&tcp->tcp_rcv_list); 7767 tcp->tcp_rcv_last_head = NULL; 7768 tcp->tcp_rcv_last_tail = NULL; 7769 tcp->tcp_rcv_cnt = 0; 7770 } 7771 tcp->tcp_rcv_last_tail = NULL; 7772 7773 if ((mp = tcp->tcp_urp_mp) != NULL) { 7774 freemsg(mp); 7775 tcp->tcp_urp_mp = NULL; 7776 } 7777 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 7778 freemsg(mp); 7779 tcp->tcp_urp_mark_mp = NULL; 7780 } 7781 if (tcp->tcp_fused_sigurg_mp != NULL) { 7782 freeb(tcp->tcp_fused_sigurg_mp); 7783 tcp->tcp_fused_sigurg_mp = NULL; 7784 } 7785 7786 /* 7787 * Following is a union with two members which are 7788 * identical types and size so the following cleanup 7789 * is enough. 7790 */ 7791 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 7792 7793 CL_INET_DISCONNECT(tcp); 7794 7795 /* 7796 * The connection can't be on the tcp_time_wait_head list 7797 * since it is not detached. 7798 */ 7799 ASSERT(tcp->tcp_time_wait_next == NULL); 7800 ASSERT(tcp->tcp_time_wait_prev == NULL); 7801 ASSERT(tcp->tcp_time_wait_expire == 0); 7802 7803 if (tcp->tcp_kssl_pending) { 7804 tcp->tcp_kssl_pending = B_FALSE; 7805 7806 /* Don't reset if the initialized by bind. */ 7807 if (tcp->tcp_kssl_ent != NULL) { 7808 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 7809 KSSL_NO_PROXY); 7810 } 7811 } 7812 if (tcp->tcp_kssl_ctx != NULL) { 7813 kssl_release_ctx(tcp->tcp_kssl_ctx); 7814 tcp->tcp_kssl_ctx = NULL; 7815 } 7816 7817 /* 7818 * Reset/preserve other values 7819 */ 7820 tcp_reinit_values(tcp); 7821 ipcl_hash_remove(tcp->tcp_connp); 7822 conn_delete_ire(tcp->tcp_connp, NULL); 7823 tcp_ipsec_cleanup(tcp); 7824 7825 if (tcp->tcp_conn_req_max != 0) { 7826 /* 7827 * This is the case when a TLI program uses the same 7828 * transport end point to accept a connection. This 7829 * makes the TCP both a listener and acceptor. When 7830 * this connection is closed, we need to set the state 7831 * back to TCPS_LISTEN. Make sure that the eager list 7832 * is reinitialized. 7833 * 7834 * Note that this stream is still bound to the four 7835 * tuples of the previous connection in IP. 
If a new
7836 	 * SYN with a different foreign address comes in, IP will
7837 	 * not find it and will send it to the global queue. In
7838 	 * the global queue, TCP will do a tcp_lookup_listener()
7839 	 * to find this stream. This works because this stream
7840 	 * is only removed from the connected hash.
7841 	 *
7842 	 */
7843 	tcp->tcp_state = TCPS_LISTEN;
7844 	tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
7845 	tcp->tcp_eager_next_drop_q0 = tcp;
7846 	tcp->tcp_eager_prev_drop_q0 = tcp;
7847 	tcp->tcp_connp->conn_recv = tcp_conn_request;
7848 	if (tcp->tcp_family == AF_INET6) {
7849 		ASSERT(tcp->tcp_connp->conn_af_isv6);
7850 		(void) ipcl_bind_insert_v6(tcp->tcp_connp, IPPROTO_TCP,
7851 		    &tcp->tcp_ip6h->ip6_src, tcp->tcp_lport);
7852 	} else {
7853 		ASSERT(!tcp->tcp_connp->conn_af_isv6);
7854 		(void) ipcl_bind_insert(tcp->tcp_connp, IPPROTO_TCP,
7855 		    tcp->tcp_ipha->ipha_src, tcp->tcp_lport);
7856 	}
7857 	} else {
7858 		tcp->tcp_state = TCPS_BOUND;
7859 	}
7860 
7861 	/*
7862 	 * Initialize to default values
7863 	 * Can't fail since enough header template space already allocated
7864 	 * at open().
7865 	 */
7866 	err = tcp_init_values(tcp);
7867 	ASSERT(err == 0);
7868 	/* Restore state in tcp_tcph */
7869 	bcopy(&tcp->tcp_lport, tcp->tcp_tcph->th_lport, TCP_PORT_LEN);
7870 	if (tcp->tcp_ipversion == IPV4_VERSION)
7871 		tcp->tcp_ipha->ipha_src = tcp->tcp_bound_source;
7872 	else
7873 		tcp->tcp_ip6h->ip6_src = tcp->tcp_bound_source_v6;
7874 	/*
7875 	 * A copy of the src addr is needed in tcp_t
7876 	 * since the lookup funcs can only look up on tcp_t
7877 	 */
7878 	tcp->tcp_ip_src_v6 = tcp->tcp_bound_source_v6;
7879 
7880 	ASSERT(tcp->tcp_ptpbhn != NULL);
7881 	tcp->tcp_rq->q_hiwat = tcps->tcps_recv_hiwat;
7882 	tcp->tcp_rwnd = tcps->tcps_recv_hiwat;
7883 	tcp->tcp_mss = tcp->tcp_ipversion != IPV4_VERSION ?
7884 	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
7885 }
7886 
7887 /*
7888  * Force values to zero that need be zero.
7889  * Do not touch values associated with the BOUND or LISTEN state
7890  * since the connection will end up in that state after the reinit.
7891  * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
7892  * structure!
7893  */
7894 static void
7895 tcp_reinit_values(tcp)
7896 	tcp_t *tcp;
7897 {
7898 	tcp_stack_t *tcps = tcp->tcp_tcps;
7899 
7900 #ifndef	lint
7901 #define	DONTCARE(x)
7902 #define	PRESERVE(x)
7903 #else
7904 #define	DONTCARE(x)	((x) = (x))
7905 #define	PRESERVE(x)	((x) = (x))
7906 #endif	/* lint */
7907 
7908 	PRESERVE(tcp->tcp_bind_hash);
7909 	PRESERVE(tcp->tcp_ptpbhn);
7910 	PRESERVE(tcp->tcp_acceptor_hash);
7911 	PRESERVE(tcp->tcp_ptpahn);
7912 
7913 	/* Should be ASSERT NULL on these with new code!
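 *
 * (DONTCARE()/PRESERVE() above are documentation-only no-ops; under
 * lint they expand to a self-assignment. They record, field by
 * field, which tcp_t members this routine deliberately skips or
 * leaves intact.)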
*/ 7914 ASSERT(tcp->tcp_time_wait_next == NULL); 7915 ASSERT(tcp->tcp_time_wait_prev == NULL); 7916 ASSERT(tcp->tcp_time_wait_expire == 0); 7917 PRESERVE(tcp->tcp_state); 7918 PRESERVE(tcp->tcp_rq); 7919 PRESERVE(tcp->tcp_wq); 7920 7921 ASSERT(tcp->tcp_xmit_head == NULL); 7922 ASSERT(tcp->tcp_xmit_last == NULL); 7923 ASSERT(tcp->tcp_unsent == 0); 7924 ASSERT(tcp->tcp_xmit_tail == NULL); 7925 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 7926 7927 tcp->tcp_snxt = 0; /* Displayed in mib */ 7928 tcp->tcp_suna = 0; /* Displayed in mib */ 7929 tcp->tcp_swnd = 0; 7930 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_mss_set */ 7931 7932 ASSERT(tcp->tcp_ibsegs == 0); 7933 ASSERT(tcp->tcp_obsegs == 0); 7934 7935 if (tcp->tcp_iphc != NULL) { 7936 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 7937 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 7938 } 7939 7940 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */ 7941 DONTCARE(tcp->tcp_hdr_len); /* Init in tcp_init_values */ 7942 DONTCARE(tcp->tcp_ipha); 7943 DONTCARE(tcp->tcp_ip6h); 7944 DONTCARE(tcp->tcp_ip_hdr_len); 7945 DONTCARE(tcp->tcp_tcph); 7946 DONTCARE(tcp->tcp_tcp_hdr_len); /* Init in tcp_init_values */ 7947 tcp->tcp_valid_bits = 0; 7948 7949 DONTCARE(tcp->tcp_xmit_hiwater); /* Init in tcp_init_values */ 7950 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */ 7951 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */ 7952 tcp->tcp_last_rcv_lbolt = 0; 7953 7954 tcp->tcp_init_cwnd = 0; 7955 7956 tcp->tcp_urp_last_valid = 0; 7957 tcp->tcp_hard_binding = 0; 7958 tcp->tcp_hard_bound = 0; 7959 PRESERVE(tcp->tcp_cred); 7960 PRESERVE(tcp->tcp_cpid); 7961 PRESERVE(tcp->tcp_open_time); 7962 PRESERVE(tcp->tcp_exclbind); 7963 7964 tcp->tcp_fin_acked = 0; 7965 tcp->tcp_fin_rcvd = 0; 7966 tcp->tcp_fin_sent = 0; 7967 tcp->tcp_ordrel_done = 0; 7968 7969 tcp->tcp_debug = 0; 7970 tcp->tcp_dontroute = 0; 7971 tcp->tcp_broadcast = 0; 7972 7973 tcp->tcp_useloopback = 0; 7974 tcp->tcp_reuseaddr = 0; 7975 tcp->tcp_oobinline = 0; 7976 tcp->tcp_dgram_errind = 0; 7977 7978 tcp->tcp_detached = 0; 7979 tcp->tcp_bind_pending = 0; 7980 tcp->tcp_unbind_pending = 0; 7981 tcp->tcp_deferred_clean_death = 0; 7982 7983 tcp->tcp_snd_ws_ok = B_FALSE; 7984 tcp->tcp_snd_ts_ok = B_FALSE; 7985 tcp->tcp_linger = 0; 7986 tcp->tcp_ka_enabled = 0; 7987 tcp->tcp_zero_win_probe = 0; 7988 7989 tcp->tcp_loopback = 0; 7990 tcp->tcp_localnet = 0; 7991 tcp->tcp_syn_defense = 0; 7992 tcp->tcp_set_timer = 0; 7993 7994 tcp->tcp_active_open = 0; 7995 ASSERT(tcp->tcp_timeout == B_FALSE); 7996 tcp->tcp_rexmit = B_FALSE; 7997 tcp->tcp_xmit_zc_clean = B_FALSE; 7998 7999 tcp->tcp_snd_sack_ok = B_FALSE; 8000 PRESERVE(tcp->tcp_recvdstaddr); 8001 tcp->tcp_hwcksum = B_FALSE; 8002 8003 tcp->tcp_ire_ill_check_done = B_FALSE; 8004 DONTCARE(tcp->tcp_maxpsz); /* Init in tcp_init_values */ 8005 8006 tcp->tcp_mdt = B_FALSE; 8007 tcp->tcp_mdt_hdr_head = 0; 8008 tcp->tcp_mdt_hdr_tail = 0; 8009 8010 tcp->tcp_conn_def_q0 = 0; 8011 tcp->tcp_ip_forward_progress = B_FALSE; 8012 tcp->tcp_anon_priv_bind = 0; 8013 tcp->tcp_ecn_ok = B_FALSE; 8014 8015 tcp->tcp_cwr = B_FALSE; 8016 tcp->tcp_ecn_echo_on = B_FALSE; 8017 8018 if (tcp->tcp_sack_info != NULL) { 8019 if (tcp->tcp_notsack_list != NULL) { 8020 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 8021 } 8022 kmem_cache_free(tcp_sack_info_cache, tcp->tcp_sack_info); 8023 tcp->tcp_sack_info = NULL; 8024 } 8025 8026 tcp->tcp_rcv_ws = 0; 8027 tcp->tcp_snd_ws = 0; 8028 tcp->tcp_ts_recent = 0; 8029 tcp->tcp_rnxt = 0; /* Displayed in mib */ 8030 
DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */ 8031 tcp->tcp_if_mtu = 0; 8032 8033 ASSERT(tcp->tcp_reass_head == NULL); 8034 ASSERT(tcp->tcp_reass_tail == NULL); 8035 8036 tcp->tcp_cwnd_cnt = 0; 8037 8038 ASSERT(tcp->tcp_rcv_list == NULL); 8039 ASSERT(tcp->tcp_rcv_last_head == NULL); 8040 ASSERT(tcp->tcp_rcv_last_tail == NULL); 8041 ASSERT(tcp->tcp_rcv_cnt == 0); 8042 8043 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_adapt_ire */ 8044 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */ 8045 tcp->tcp_csuna = 0; 8046 8047 tcp->tcp_rto = 0; /* Displayed in MIB */ 8048 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */ 8049 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */ 8050 tcp->tcp_rtt_update = 0; 8051 8052 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8053 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8054 8055 tcp->tcp_rack = 0; /* Displayed in mib */ 8056 tcp->tcp_rack_cnt = 0; 8057 tcp->tcp_rack_cur_max = 0; 8058 tcp->tcp_rack_abs_max = 0; 8059 8060 tcp->tcp_max_swnd = 0; 8061 8062 ASSERT(tcp->tcp_listener == NULL); 8063 8064 DONTCARE(tcp->tcp_xmit_lowater); /* Init in tcp_init_values */ 8065 8066 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */ 8067 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */ 8068 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */ 8069 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */ 8070 8071 ASSERT(tcp->tcp_conn_req_cnt_q == 0); 8072 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0); 8073 PRESERVE(tcp->tcp_conn_req_max); 8074 PRESERVE(tcp->tcp_conn_req_seqnum); 8075 8076 DONTCARE(tcp->tcp_ip_hdr_len); /* Init in tcp_init_values */ 8077 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */ 8078 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */ 8079 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */ 8080 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */ 8081 8082 tcp->tcp_lingertime = 0; 8083 8084 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */ 8085 ASSERT(tcp->tcp_urp_mp == NULL); 8086 ASSERT(tcp->tcp_urp_mark_mp == NULL); 8087 ASSERT(tcp->tcp_fused_sigurg_mp == NULL); 8088 8089 ASSERT(tcp->tcp_eager_next_q == NULL); 8090 ASSERT(tcp->tcp_eager_last_q == NULL); 8091 ASSERT((tcp->tcp_eager_next_q0 == NULL && 8092 tcp->tcp_eager_prev_q0 == NULL) || 8093 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0); 8094 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 8095 8096 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL && 8097 tcp->tcp_eager_prev_drop_q0 == NULL) || 8098 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0); 8099 8100 tcp->tcp_client_errno = 0; 8101 8102 DONTCARE(tcp->tcp_sum); /* Init in tcp_init_values */ 8103 8104 tcp->tcp_remote_v6 = ipv6_all_zeros; /* Displayed in MIB */ 8105 8106 PRESERVE(tcp->tcp_bound_source_v6); 8107 tcp->tcp_last_sent_len = 0; 8108 tcp->tcp_dupack_cnt = 0; 8109 8110 tcp->tcp_fport = 0; /* Displayed in MIB */ 8111 PRESERVE(tcp->tcp_lport); 8112 8113 PRESERVE(tcp->tcp_acceptor_lockp); 8114 8115 ASSERT(tcp->tcp_ordrelid == 0); 8116 PRESERVE(tcp->tcp_acceptor_id); 8117 DONTCARE(tcp->tcp_ipsec_overhead); 8118 8119 /* 8120 * If tcp_tracing flag is ON (i.e. We have a trace buffer 8121 * in tcp structure and now tracing), Re-initialize all 8122 * members of tcp_traceinfo. 
8123 */ 8124 if (tcp->tcp_tracebuf != NULL) { 8125 bzero(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 8126 } 8127 8128 PRESERVE(tcp->tcp_family); 8129 if (tcp->tcp_family == AF_INET6) { 8130 tcp->tcp_ipversion = IPV6_VERSION; 8131 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 8132 } else { 8133 tcp->tcp_ipversion = IPV4_VERSION; 8134 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 8135 } 8136 8137 tcp->tcp_bound_if = 0; 8138 tcp->tcp_ipv6_recvancillary = 0; 8139 tcp->tcp_recvifindex = 0; 8140 tcp->tcp_recvhops = 0; 8141 tcp->tcp_closed = 0; 8142 tcp->tcp_cleandeathtag = 0; 8143 if (tcp->tcp_hopopts != NULL) { 8144 mi_free(tcp->tcp_hopopts); 8145 tcp->tcp_hopopts = NULL; 8146 tcp->tcp_hopoptslen = 0; 8147 } 8148 ASSERT(tcp->tcp_hopoptslen == 0); 8149 if (tcp->tcp_dstopts != NULL) { 8150 mi_free(tcp->tcp_dstopts); 8151 tcp->tcp_dstopts = NULL; 8152 tcp->tcp_dstoptslen = 0; 8153 } 8154 ASSERT(tcp->tcp_dstoptslen == 0); 8155 if (tcp->tcp_rtdstopts != NULL) { 8156 mi_free(tcp->tcp_rtdstopts); 8157 tcp->tcp_rtdstopts = NULL; 8158 tcp->tcp_rtdstoptslen = 0; 8159 } 8160 ASSERT(tcp->tcp_rtdstoptslen == 0); 8161 if (tcp->tcp_rthdr != NULL) { 8162 mi_free(tcp->tcp_rthdr); 8163 tcp->tcp_rthdr = NULL; 8164 tcp->tcp_rthdrlen = 0; 8165 } 8166 ASSERT(tcp->tcp_rthdrlen == 0); 8167 PRESERVE(tcp->tcp_drop_opt_ack_cnt); 8168 8169 /* Reset fusion-related fields */ 8170 tcp->tcp_fused = B_FALSE; 8171 tcp->tcp_unfusable = B_FALSE; 8172 tcp->tcp_fused_sigurg = B_FALSE; 8173 tcp->tcp_direct_sockfs = B_FALSE; 8174 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8175 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8176 tcp->tcp_loopback_peer = NULL; 8177 tcp->tcp_fuse_rcv_hiwater = 0; 8178 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8179 tcp->tcp_fuse_rcv_unread_cnt = 0; 8180 8181 tcp->tcp_lso = B_FALSE; 8182 8183 tcp->tcp_in_ack_unsent = 0; 8184 tcp->tcp_cork = B_FALSE; 8185 tcp->tcp_tconnind_started = B_FALSE; 8186 8187 PRESERVE(tcp->tcp_squeue_bytes); 8188 8189 ASSERT(tcp->tcp_kssl_ctx == NULL); 8190 ASSERT(!tcp->tcp_kssl_pending); 8191 PRESERVE(tcp->tcp_kssl_ent); 8192 8193 tcp->tcp_closemp_used = B_FALSE; 8194 8195 #ifdef DEBUG 8196 DONTCARE(tcp->tcmp_stk[0]); 8197 #endif 8198 8199 8200 #undef DONTCARE 8201 #undef PRESERVE 8202 } 8203 8204 /* 8205 * Allocate necessary resources and initialize state vector. 8206 * Guaranteed not to fail so that when an error is returned, 8207 * the caller doesn't need to do any additional cleanup. 8208 */ 8209 int 8210 tcp_init(tcp_t *tcp, queue_t *q) 8211 { 8212 int err; 8213 8214 tcp->tcp_rq = q; 8215 tcp->tcp_wq = WR(q); 8216 tcp->tcp_state = TCPS_IDLE; 8217 if ((err = tcp_init_values(tcp)) != 0) 8218 tcp_timers_stop(tcp); 8219 return (err); 8220 } 8221 8222 static int 8223 tcp_init_values(tcp_t *tcp) 8224 { 8225 int err; 8226 tcp_stack_t *tcps = tcp->tcp_tcps; 8227 8228 ASSERT((tcp->tcp_family == AF_INET && 8229 tcp->tcp_ipversion == IPV4_VERSION) || 8230 (tcp->tcp_family == AF_INET6 && 8231 (tcp->tcp_ipversion == IPV4_VERSION || 8232 tcp->tcp_ipversion == IPV6_VERSION))); 8233 8234 /* 8235 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO 8236 * will be close to tcp_rexmit_interval_initial. By doing this, we 8237 * allow the algorithm to adjust slowly to large fluctuations of RTT 8238 * during first few transmissions of a connection as seen in slow 8239 * links. 
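 *
 * Worked arithmetic (not part of the original source): with
 * I = tcps_rexmit_interval_initial, the assignments below yield
 *
 *	tcp_rtt_sa = I << 2 = 4I
 *	tcp_rtt_sd = I >> 1 = I/2
 *	tcp_rto    = (4I >> 3) + I/2 + extra + (4I >> 5) + grace
 *	           = (9/8)I + extra + grace
 *
 * i.e. just above the configured initial interval, before clamping
 * against tcps_rexmit_interval_min.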
8240 */ 8241 tcp->tcp_rtt_sa = tcps->tcps_rexmit_interval_initial << 2; 8242 tcp->tcp_rtt_sd = tcps->tcps_rexmit_interval_initial >> 1; 8243 tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 8244 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) + 8245 tcps->tcps_conn_grace_period; 8246 if (tcp->tcp_rto < tcps->tcps_rexmit_interval_min) 8247 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 8248 tcp->tcp_timer_backoff = 0; 8249 tcp->tcp_ms_we_have_waited = 0; 8250 tcp->tcp_last_recv_time = lbolt; 8251 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_; 8252 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 8253 tcp->tcp_snd_burst = TCP_CWND_INFINITE; 8254 8255 tcp->tcp_maxpsz = tcps->tcps_maxpsz_multiplier; 8256 8257 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval; 8258 tcp->tcp_first_ctimer_threshold = tcps->tcps_ip_notify_cinterval; 8259 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval; 8260 /* 8261 * Fix it to tcp_ip_abort_linterval later if it turns out to be a 8262 * passive open. 8263 */ 8264 tcp->tcp_second_ctimer_threshold = tcps->tcps_ip_abort_cinterval; 8265 8266 tcp->tcp_naglim = tcps->tcps_naglim_def; 8267 8268 /* NOTE: ISS is now set in tcp_adapt_ire(). */ 8269 8270 tcp->tcp_mdt_hdr_head = 0; 8271 tcp->tcp_mdt_hdr_tail = 0; 8272 8273 /* Reset fusion-related fields */ 8274 tcp->tcp_fused = B_FALSE; 8275 tcp->tcp_unfusable = B_FALSE; 8276 tcp->tcp_fused_sigurg = B_FALSE; 8277 tcp->tcp_direct_sockfs = B_FALSE; 8278 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8279 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8280 tcp->tcp_loopback_peer = NULL; 8281 tcp->tcp_fuse_rcv_hiwater = 0; 8282 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8283 tcp->tcp_fuse_rcv_unread_cnt = 0; 8284 8285 /* Initialize the header template */ 8286 if (tcp->tcp_ipversion == IPV4_VERSION) { 8287 err = tcp_header_init_ipv4(tcp); 8288 } else { 8289 err = tcp_header_init_ipv6(tcp); 8290 } 8291 if (err) 8292 return (err); 8293 8294 /* 8295 * Init the window scale to the max so tcp_rwnd_set() won't pare 8296 * down tcp_rwnd. tcp_adapt_ire() will set the right value later. 8297 */ 8298 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT; 8299 tcp->tcp_xmit_lowater = tcps->tcps_xmit_lowat; 8300 tcp->tcp_xmit_hiwater = tcps->tcps_xmit_hiwat; 8301 8302 tcp->tcp_cork = B_FALSE; 8303 /* 8304 * Init the tcp_debug option. This value determines whether TCP 8305 * calls strlog() to print out debug messages. Doing this 8306 * initialization here means that this value is not inherited thru 8307 * tcp_reinit(). 8308 */ 8309 tcp->tcp_debug = tcps->tcps_dbg; 8310 8311 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval; 8312 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval; 8313 8314 return (0); 8315 } 8316 8317 /* 8318 * Initialize the IPv4 header. Loses any record of any IP options. 8319 */ 8320 static int 8321 tcp_header_init_ipv4(tcp_t *tcp) 8322 { 8323 tcph_t *tcph; 8324 uint32_t sum; 8325 conn_t *connp; 8326 tcp_stack_t *tcps = tcp->tcp_tcps; 8327 8328 /* 8329 * This is a simple initialization. If there's 8330 * already a template, it should never be too small, 8331 * so reuse it. Otherwise, allocate space for the new one. 
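 *
 * Note (not part of the original source): the th_sum written at the
 * end of this routine is not a checksum yet; it caches the folded
 * partial sum sizeof (tcph_t) + tcp_sum so IP can later complete the
 * pseudo-header checksum in a single pass.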
8332 */ 8333 if (tcp->tcp_iphc == NULL) { 8334 ASSERT(tcp->tcp_iphc_len == 0); 8335 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8336 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8337 if (tcp->tcp_iphc == NULL) { 8338 tcp->tcp_iphc_len = 0; 8339 return (ENOMEM); 8340 } 8341 } 8342 8343 /* options are gone; may need a new label */ 8344 connp = tcp->tcp_connp; 8345 connp->conn_mlp_type = mlptSingle; 8346 connp->conn_ulp_labeled = !is_system_labeled(); 8347 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8348 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 8349 tcp->tcp_ip6h = NULL; 8350 tcp->tcp_ipversion = IPV4_VERSION; 8351 tcp->tcp_hdr_len = sizeof (ipha_t) + sizeof (tcph_t); 8352 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8353 tcp->tcp_ip_hdr_len = sizeof (ipha_t); 8354 tcp->tcp_ipha->ipha_length = htons(sizeof (ipha_t) + sizeof (tcph_t)); 8355 tcp->tcp_ipha->ipha_version_and_hdr_length 8356 = (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS; 8357 tcp->tcp_ipha->ipha_ident = 0; 8358 8359 tcp->tcp_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8360 tcp->tcp_tos = 0; 8361 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0; 8362 tcp->tcp_ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8363 tcp->tcp_ipha->ipha_protocol = IPPROTO_TCP; 8364 8365 tcph = (tcph_t *)(tcp->tcp_iphc + sizeof (ipha_t)); 8366 tcp->tcp_tcph = tcph; 8367 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8368 /* 8369 * IP wants our header length in the checksum field to 8370 * allow it to perform a single pseudo-header+checksum 8371 * calculation on behalf of TCP. 8372 * Include the adjustment for a source route once IP_OPTIONS is set. 8373 */ 8374 sum = sizeof (tcph_t) + tcp->tcp_sum; 8375 sum = (sum >> 16) + (sum & 0xFFFF); 8376 U16_TO_ABE16(sum, tcph->th_sum); 8377 return (0); 8378 } 8379 8380 /* 8381 * Initialize the IPv6 header. Loses any record of any IPv6 extension headers. 8382 */ 8383 static int 8384 tcp_header_init_ipv6(tcp_t *tcp) 8385 { 8386 tcph_t *tcph; 8387 uint32_t sum; 8388 conn_t *connp; 8389 tcp_stack_t *tcps = tcp->tcp_tcps; 8390 8391 /* 8392 * This is a simple initialization. If there's 8393 * already a template, it should never be too small, 8394 * so reuse it. Otherwise, allocate space for the new one. 8395 * Ensure that there is enough space to "downgrade" the tcp_t 8396 * to an IPv4 tcp_t. This requires having space for a full load 8397 * of IPv4 options, as well as a full load of TCP options 8398 * (TCP_MAX_COMBINED_HEADER_LENGTH, 120 bytes); this is more space 8399 * than a v6 header and a TCP header with a full load of TCP options 8400 * (IPV6_HDR_LEN is 40 bytes; TCP_MAX_HDR_LENGTH is 60 bytes). 8401 * We want to avoid reallocation in the "downgraded" case when 8402 * processing outbound IPv4 options. 
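 *
 * The sizes involved, spelled out (not part of the original source):
 *
 *	TCP_MAX_COMBINED_HEADER_LENGTH	= 120	(60-byte maximal IPv4
 *						header + 60-byte maximal
 *						TCP header)
 *	IPV6_HDR_LEN + TCP_MAX_HDR_LENGTH = 40 + 60 = 100
 *
 * so the 120-byte template always leaves room for either family.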
8403  */
8404 	if (tcp->tcp_iphc == NULL) {
8405 		ASSERT(tcp->tcp_iphc_len == 0);
8406 		tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH;
8407 		tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP);
8408 		if (tcp->tcp_iphc == NULL) {
8409 			tcp->tcp_iphc_len = 0;
8410 			return (ENOMEM);
8411 		}
8412 	}
8413 
8414 	/* options are gone; may need a new label */
8415 	connp = tcp->tcp_connp;
8416 	connp->conn_mlp_type = mlptSingle;
8417 	connp->conn_ulp_labeled = !is_system_labeled();
8418 
8419 	ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH);
8420 	tcp->tcp_ipversion = IPV6_VERSION;
8421 	tcp->tcp_hdr_len = IPV6_HDR_LEN + sizeof (tcph_t);
8422 	tcp->tcp_tcp_hdr_len = sizeof (tcph_t);
8423 	tcp->tcp_ip_hdr_len = IPV6_HDR_LEN;
8424 	tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc;
8425 	tcp->tcp_ipha = NULL;
8426 
8427 	/* Initialize the header template */
8428 
8429 	tcp->tcp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
8430 	tcp->tcp_ip6h->ip6_plen = ntohs(sizeof (tcph_t));
8431 	tcp->tcp_ip6h->ip6_nxt = IPPROTO_TCP;
8432 	tcp->tcp_ip6h->ip6_hops = (uint8_t)tcps->tcps_ipv6_hoplimit;
8433 
8434 	tcph = (tcph_t *)(tcp->tcp_iphc + IPV6_HDR_LEN);
8435 	tcp->tcp_tcph = tcph;
8436 	tcph->th_offset_and_rsrvd[0] = (5 << 4);
8437 	/*
8438 	 * IP wants our header length in the checksum field to
8439 	 * allow it to perform a single pseudo-header+checksum
8440 	 * calculation on behalf of TCP.
8441 	 * Include the adjustment for a source route when IPV6_RTHDR is set.
8442 	 */
8443 	sum = sizeof (tcph_t) + tcp->tcp_sum;
8444 	sum = (sum >> 16) + (sum & 0xFFFF);
8445 	U16_TO_ABE16(sum, tcph->th_sum);
8446 	return (0);
8447 }
8448 
8449 /* At minimum we need 8 bytes in the TCP header for the lookup */
8450 #define	ICMP_MIN_TCP_HDR	8
8451 
8452 /*
8453  * tcp_icmp_error is called by tcp_rput_other to process ICMP error messages
8454  * passed up by IP. The message is always received on the correct tcp_t.
8455  * Assumes that IP has pulled up everything up to and including the ICMP
8456  * header.
8457  */
8457 void
8458 tcp_icmp_error(tcp_t *tcp, mblk_t *mp)
8459 {
8460 	icmph_t *icmph;
8461 	ipha_t	*ipha;
8462 	int	iph_hdr_length;
8463 	tcph_t	*tcph;
8464 	boolean_t ipsec_mctl = B_FALSE;
8465 	boolean_t secure;
8466 	mblk_t *first_mp = mp;
8467 	uint32_t new_mss;
8468 	uint32_t ratio;
8469 	size_t mp_size = MBLKL(mp);
8470 	uint32_t seg_seq;
8471 	tcp_stack_t *tcps = tcp->tcp_tcps;
8472 
8473 	/* Assume IP provides aligned packets - otherwise toss */
8474 	if (!OK_32PTR(mp->b_rptr)) {
8475 		freemsg(mp);
8476 		return;
8477 	}
8478 
8479 	/*
8480 	 * Since ICMP errors are normal data marked with M_CTL when sent
8481 	 * to TCP or UDP, we have to look for an IPSEC_IN value to identify
8482 	 * packets starting with an ipsec_info_t, see ipsec_info.h.
8483 	 */
8484 	if ((mp_size == sizeof (ipsec_info_t)) &&
8485 	    (((ipsec_info_t *)mp->b_rptr)->ipsec_info_type == IPSEC_IN)) {
8486 		ASSERT(mp->b_cont != NULL);
8487 		mp = mp->b_cont;
8488 		/* IP should have done this */
8489 		ASSERT(OK_32PTR(mp->b_rptr));
8490 		mp_size = MBLKL(mp);
8491 		ipsec_mctl = B_TRUE;
8492 	}
8493 
8494 	/*
8495 	 * Verify that we have a complete outer IP header. If not, drop it.
8496 	 */
8497 	if (mp_size < sizeof (ipha_t)) {
8498 noticmpv4:
8499 		freemsg(first_mp);
8500 		return;
8501 	}
8502 
8503 	ipha = (ipha_t *)mp->b_rptr;
8504 	/*
8505 	 * Verify IP version. Anything other than IPv4 or IPv6 packet is sent
8506 	 * upstream. ICMPv6 is handled in tcp_icmp_error_ipv6.
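 *
 * For reference (not part of the original source), the mblk parsed
 * here is laid out as
 *
 *	[ outer ipha_t | icmph_t | inner ipha_t | >= 8 bytes of TCP ]
 *
 * where ICMP_MIN_TCP_HDR (8) is just enough of the embedded TCP
 * header to recover the ports and the sequence number.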
8507 */ 8508 switch (IPH_HDR_VERSION(ipha)) { 8509 case IPV6_VERSION: 8510 tcp_icmp_error_ipv6(tcp, first_mp, ipsec_mctl); 8511 return; 8512 case IPV4_VERSION: 8513 break; 8514 default: 8515 goto noticmpv4; 8516 } 8517 8518 /* Skip past the outer IP and ICMP headers */ 8519 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8520 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 8521 /* 8522 * If we don't have the correct outer IP header length or if the ULP 8523 * is not IPPROTO_ICMP or if we don't have a complete inner IP header 8524 * send it upstream. 8525 */ 8526 if (iph_hdr_length < sizeof (ipha_t) || 8527 ipha->ipha_protocol != IPPROTO_ICMP || 8528 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 8529 goto noticmpv4; 8530 } 8531 ipha = (ipha_t *)&icmph[1]; 8532 8533 /* Skip past the inner IP and find the ULP header */ 8534 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8535 tcph = (tcph_t *)((char *)ipha + iph_hdr_length); 8536 /* 8537 * If we don't have the correct inner IP header length or if the ULP 8538 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 8539 * bytes of TCP header, drop it. 8540 */ 8541 if (iph_hdr_length < sizeof (ipha_t) || 8542 ipha->ipha_protocol != IPPROTO_TCP || 8543 (uchar_t *)tcph + ICMP_MIN_TCP_HDR > mp->b_wptr) { 8544 goto noticmpv4; 8545 } 8546 8547 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 8548 if (ipsec_mctl) { 8549 secure = ipsec_in_is_secure(first_mp); 8550 } else { 8551 secure = B_FALSE; 8552 } 8553 if (secure) { 8554 /* 8555 * If we are willing to accept this in clear 8556 * we don't have to verify policy. 8557 */ 8558 if (!ipsec_inbound_accept_clear(mp, ipha, NULL)) { 8559 if (!tcp_check_policy(tcp, first_mp, 8560 ipha, NULL, secure, ipsec_mctl)) { 8561 /* 8562 * tcp_check_policy called 8563 * ip_drop_packet() on failure. 8564 */ 8565 return; 8566 } 8567 } 8568 } 8569 } else if (ipsec_mctl) { 8570 /* 8571 * This is a hard_bound connection. IP has already 8572 * verified policy. We don't have to do it again. 8573 */ 8574 freeb(first_mp); 8575 first_mp = mp; 8576 ipsec_mctl = B_FALSE; 8577 } 8578 8579 seg_seq = ABE32_TO_U32(tcph->th_seq); 8580 /* 8581 * TCP SHOULD check that the TCP sequence number contained in 8582 * payload of the ICMP error message is within the range 8583 * SND.UNA <= SEG.SEQ < SND.NXT. 8584 */ 8585 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) { 8586 /* 8587 * If the ICMP message is bogus, should we kill the 8588 * connection, or should we just drop the bogus ICMP 8589 * message? It would probably make more sense to just 8590 * drop the message so that if this one managed to get 8591 * in, the real connection should not suffer. 8592 */ 8593 goto noticmpv4; 8594 } 8595 8596 switch (icmph->icmph_type) { 8597 case ICMP_DEST_UNREACHABLE: 8598 switch (icmph->icmph_code) { 8599 case ICMP_FRAGMENTATION_NEEDED: 8600 /* 8601 * Reduce the MSS based on the new MTU. This will 8602 * eliminate any fragmentation locally. 8603 * N.B. There may well be some funny side-effects on 8604 * the local send policy and the remote receive policy. 8605 * Pending further research, we provide 8606 * tcp_ignore_path_mtu just in case this proves 8607 * disastrous somewhere. 8608 * 8609 * After updating the MSS, retransmit part of the 8610 * dropped segment using the new mss by calling 8611 * tcp_wput_data(). Need to adjust all those 8612 * params to make sure tcp_wput_data() work properly. 8613 */ 8614 if (tcps->tcps_ignore_path_mtu) 8615 break; 8616 8617 /* 8618 * Decrease the MSS by time stamp options 8619 * IP options and IPSEC options. 
8620 * includes the time stamp option and IP option
8621 * length.
8622 */
8623
8624 new_mss = ntohs(icmph->icmph_du_mtu) -
8625 tcp->tcp_hdr_len - tcp->tcp_ipsec_overhead;
8626
8627 /*
8628 * Only update the MSS if the new one is
8629 * smaller than the previous one. This is
8630 * to avoid problems when getting multiple
8631 * ICMP errors for the same MTU.
8632 */
8633 if (new_mss >= tcp->tcp_mss)
8634 break;
8635
8636 /*
8637 * Stop doing PMTU if new_mss is less than 68
8638 * or less than tcp_mss_min.
8639 * The value 68 comes from RFC 1191.
8640 */
8641 if (new_mss < MAX(68, tcps->tcps_mss_min))
8642 tcp->tcp_ipha->ipha_fragment_offset_and_flags =
8643 0;
8644
8645 ratio = tcp->tcp_cwnd / tcp->tcp_mss;
8646 ASSERT(ratio >= 1);
8647 tcp_mss_set(tcp, new_mss, B_TRUE);
8648
8649 /*
8650 * Make sure we have something to
8651 * send.
8652 */
8653 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) &&
8654 (tcp->tcp_xmit_head != NULL)) {
8655 /*
8656 * Shrink tcp_cwnd in
8657 * proportion to the old MSS/new MSS.
8658 */
8659 tcp->tcp_cwnd = ratio * tcp->tcp_mss;
8660 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
8661 (tcp->tcp_unsent == 0)) {
8662 tcp->tcp_rexmit_max = tcp->tcp_fss;
8663 } else {
8664 tcp->tcp_rexmit_max = tcp->tcp_snxt;
8665 }
8666 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
8667 tcp->tcp_rexmit = B_TRUE;
8668 tcp->tcp_dupack_cnt = 0;
8669 tcp->tcp_snd_burst = TCP_CWND_SS;
8670 tcp_ss_rexmit(tcp);
8671 }
8672 break;
8673 case ICMP_PORT_UNREACHABLE:
8674 case ICMP_PROTOCOL_UNREACHABLE:
8675 switch (tcp->tcp_state) {
8676 case TCPS_SYN_SENT:
8677 case TCPS_SYN_RCVD:
8678 /*
8679 * ICMP can snipe away incipient
8680 * TCP connections as long as the
8681 * seq number is the same as the initial
8682 * send seq number.
8683 */
8684 if (seg_seq == tcp->tcp_iss) {
8685 (void) tcp_clean_death(tcp,
8686 ECONNREFUSED, 6);
8687 }
8688 break;
8689 }
8690 break;
8691 case ICMP_HOST_UNREACHABLE:
8692 case ICMP_NET_UNREACHABLE:
8693 /* Record the error in case we finally time out. */
8694 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE)
8695 tcp->tcp_client_errno = EHOSTUNREACH;
8696 else
8697 tcp->tcp_client_errno = ENETUNREACH;
8698 if (tcp->tcp_state == TCPS_SYN_RCVD) {
8699 if (tcp->tcp_listener != NULL &&
8700 tcp->tcp_listener->tcp_syn_defense) {
8701 /*
8702 * Ditch the half-open connection if we
8703 * suspect a SYN attack is under way.
8704 */
8705 tcp_ip_ire_mark_advice(tcp);
8706 (void) tcp_clean_death(tcp,
8707 tcp->tcp_client_errno, 7);
8708 }
8709 }
8710 break;
8711 default:
8712 break;
8713 }
8714 break;
8715 case ICMP_SOURCE_QUENCH: {
8716 /*
8717 * Use a global boolean to control
8718 * whether TCP should respond to ICMP_SOURCE_QUENCH.
8719 * The default is false.
8720 */
8721 if (tcp_icmp_source_quench) {
8722 /*
8723 * Reduce the sending rate as if we got a
8724 * retransmit timeout.
8725 */
8726 uint32_t npkt;
8727
8728 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) /
8729 tcp->tcp_mss;
8730 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss;
8731 tcp->tcp_cwnd = tcp->tcp_mss;
8732 tcp->tcp_cwnd_cnt = 0;
8733 }
8734 break;
8735 }
8736 }
8737 freemsg(first_mp);
8738 }
8739
8740 /*
8741 * tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6
8742 * error messages passed up by IP.
8743 * Assumes that IP has pulled up all the extension headers as well
8744 * as the ICMPv6 header.
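 *
 * The MSS arithmetic is the same in the IPv4 path above and the IPv6
 * path below; a minimal standalone sketch (editorial example with
 * hypothetical helper names, not part of the original source):
 */
#if 0	/* illustrative only */
static uint32_t
mtu_to_mss(uint32_t path_mtu, uint32_t hdr_len, uint32_t ipsec_overhead)
{
	/* The usable segment is what is left after all headers. */
	return (path_mtu - hdr_len - ipsec_overhead);
}

static void
rescale_cwnd(uint32_t *cwnd, uint32_t old_mss, uint32_t new_mss)
{
	/* Keep the congestion window at the same number of segments. */
	*cwnd = (*cwnd / old_mss) * new_mss;
}
#endif
/*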
8745 */
8746 static void
8747 tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, boolean_t ipsec_mctl)
8748 {
8749 icmp6_t *icmp6;
8750 ip6_t *ip6h;
8751 uint16_t iph_hdr_length;
8752 tcpha_t *tcpha;
8753 uint8_t *nexthdrp;
8754 uint32_t new_mss;
8755 uint32_t ratio;
8756 boolean_t secure;
8757 mblk_t *first_mp = mp;
8758 size_t mp_size;
8759 uint32_t seg_seq;
8760 tcp_stack_t *tcps = tcp->tcp_tcps;
8761
8762 /*
8763 * The caller has determined if this is an IPSEC_IN packet and
8764 * set ipsec_mctl appropriately (see tcp_icmp_error).
8765 */
8766 if (ipsec_mctl)
8767 mp = mp->b_cont;
8768
8769 mp_size = MBLKL(mp);
8770
8771 /*
8772 * Verify that we have a complete IP header. If not, drop it.
8773 */
8774 if (mp_size < sizeof (ip6_t)) {
8775 noticmpv6:
8776 freemsg(first_mp);
8777 return;
8778 }
8779
8780 /*
8781 * Verify that this is an ICMPV6 packet, else drop it.
8782 */
8783 ip6h = (ip6_t *)mp->b_rptr;
8784 if (ip6h->ip6_nxt == IPPROTO_ICMPV6) {
8785 iph_hdr_length = IPV6_HDR_LEN;
8786 } else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
8787 &nexthdrp) ||
8788 *nexthdrp != IPPROTO_ICMPV6) {
8789 goto noticmpv6;
8790 }
8791 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
8792 ip6h = (ip6_t *)&icmp6[1];
8793 /*
8794 * Verify that we have a complete ICMP and inner IP header.
8795 */
8796 if ((uchar_t *)&ip6h[1] > mp->b_wptr)
8797 goto noticmpv6;
8798
8799 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
8800 goto noticmpv6;
8801 tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
8802 /*
8803 * Validate the inner header. If the ULP is not IPPROTO_TCP or if we
8804 * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop the
8805 * packet.
8806 */
8807 if ((*nexthdrp != IPPROTO_TCP) ||
8808 ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
8809 goto noticmpv6;
8810 }
8811
8812 /*
8813 * ICMP errors come on the right queue, or come on the
8814 * listener/global queue for detached connections and
8815 * get switched to the right queue. If they come on the
8816 * right queue, the policy check has already been done by IP
8817 * and we can thus free the first_mp without verifying the policy.
8818 * If they have come for a non-hard-bound connection, we need
8819 * to verify the policy as IP may not have done it.
8820 */
8821 if (!tcp->tcp_hard_bound) {
8822 if (ipsec_mctl) {
8823 secure = ipsec_in_is_secure(first_mp);
8824 } else {
8825 secure = B_FALSE;
8826 }
8827 if (secure) {
8828 /*
8829 * If we are willing to accept this in clear
8830 * we don't have to verify policy.
8831 */
8832 if (!ipsec_inbound_accept_clear(mp, NULL, ip6h)) {
8833 if (!tcp_check_policy(tcp, first_mp,
8834 NULL, ip6h, secure, ipsec_mctl)) {
8835 /*
8836 * tcp_check_policy called
8837 * ip_drop_packet() on failure.
8838 */
8839 return;
8840 }
8841 }
8842 }
8843 } else if (ipsec_mctl) {
8844 /*
8845 * This is a hard_bound connection. IP has already
8846 * verified policy. We don't have to do it again.
8847 */
8848 freeb(first_mp);
8849 first_mp = mp;
8850 ipsec_mctl = B_FALSE;
8851 }
8852
8853 seg_seq = ntohl(tcpha->tha_seq);
8854 /*
8855 * TCP SHOULD check that the TCP sequence number contained in the
8856 * payload of the ICMP error message is within the range
8857 * SND.UNA <= SEG.SEQ < SND.NXT.
8858 */
8859 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) {
8860 /*
8861 * If the ICMP message is bogus, should we kill the
8862 * connection, or should we just drop the bogus ICMP
8863 * message? It would probably make more sense to just
8864 * drop the message so that if this one managed to get
8865 * in, the real connection should not suffer.
8866 */
8867 goto noticmpv6;
8868 }
8869
8870 switch (icmp6->icmp6_type) {
8871 case ICMP6_PACKET_TOO_BIG:
8872 /*
8873 * Reduce the MSS based on the new MTU. This will
8874 * eliminate any fragmentation locally.
8875 * N.B. There may well be some funny side-effects on
8876 * the local send policy and the remote receive policy.
8877 * Pending further research, we provide
8878 * tcp_ignore_path_mtu just in case this proves
8879 * disastrous somewhere.
8880 *
8881 * After updating the MSS, retransmit part of the
8882 * dropped segment using the new mss by calling
8883 * tcp_wput_data(). Need to adjust all those
8884 * params to make sure tcp_wput_data() works properly.
8885 */
8886 if (tcps->tcps_ignore_path_mtu)
8887 break;
8888
8889 /*
8890 * Decrease the MSS by the time stamp option,
8891 * IP options and IPSEC options. tcp_hdr_len
8892 * includes the time stamp option and IP option
8893 * length.
8894 */
8895 new_mss = ntohs(icmp6->icmp6_mtu) - tcp->tcp_hdr_len -
8896 tcp->tcp_ipsec_overhead;
8897
8898 /*
8899 * Only update the MSS if the new one is
8900 * smaller than the previous one. This is
8901 * to avoid problems when getting multiple
8902 * ICMP errors for the same MTU.
8903 */
8904 if (new_mss >= tcp->tcp_mss)
8905 break;
8906
8907 ratio = tcp->tcp_cwnd / tcp->tcp_mss;
8908 ASSERT(ratio >= 1);
8909 tcp_mss_set(tcp, new_mss, B_TRUE);
8910
8911 /*
8912 * Make sure we have something to
8913 * send.
8914 */
8915 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) &&
8916 (tcp->tcp_xmit_head != NULL)) {
8917 /*
8918 * Shrink tcp_cwnd in
8919 * proportion to the old MSS/new MSS.
8920 */
8921 tcp->tcp_cwnd = ratio * tcp->tcp_mss;
8922 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
8923 (tcp->tcp_unsent == 0)) {
8924 tcp->tcp_rexmit_max = tcp->tcp_fss;
8925 } else {
8926 tcp->tcp_rexmit_max = tcp->tcp_snxt;
8927 }
8928 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
8929 tcp->tcp_rexmit = B_TRUE;
8930 tcp->tcp_dupack_cnt = 0;
8931 tcp->tcp_snd_burst = TCP_CWND_SS;
8932 tcp_ss_rexmit(tcp);
8933 }
8934 break;
8935
8936 case ICMP6_DST_UNREACH:
8937 switch (icmp6->icmp6_code) {
8938 case ICMP6_DST_UNREACH_NOPORT:
8939 if (((tcp->tcp_state == TCPS_SYN_SENT) ||
8940 (tcp->tcp_state == TCPS_SYN_RCVD)) &&
8941 (seg_seq == tcp->tcp_iss)) {
8942 (void) tcp_clean_death(tcp,
8943 ECONNREFUSED, 8);
8944 }
8945 break;
8946
8947 case ICMP6_DST_UNREACH_ADMIN:
8948 case ICMP6_DST_UNREACH_NOROUTE:
8949 case ICMP6_DST_UNREACH_BEYONDSCOPE:
8950 case ICMP6_DST_UNREACH_ADDR:
8951 /* Record the error in case we finally time out. */
8952 tcp->tcp_client_errno = EHOSTUNREACH;
8953 if (((tcp->tcp_state == TCPS_SYN_SENT) ||
8954 (tcp->tcp_state == TCPS_SYN_RCVD)) &&
8955 (seg_seq == tcp->tcp_iss)) {
8956 if (tcp->tcp_listener != NULL &&
8957 tcp->tcp_listener->tcp_syn_defense) {
8958 /*
8959 * Ditch the half-open connection if we
8960 * suspect a SYN attack is under way.
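 *
 * Editorial sketch of the guard applied in these unreachable cases
 * (hypothetical helper, not in the original source): an ICMP error may
 * tear down an incipient connection only while it is still being set
 * up, and only when it echoes our initial send sequence number.
 */
#if 0	/* illustrative only */
static boolean_t
icmp_may_snipe(int state, uint32_t seg_seq, uint32_t iss)
{
	return ((state == TCPS_SYN_SENT || state == TCPS_SYN_RCVD) &&
	    seg_seq == iss);
}
#endif
/*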
8961 */
8962 tcp_ip_ire_mark_advice(tcp);
8963 (void) tcp_clean_death(tcp,
8964 tcp->tcp_client_errno, 9);
8965 }
8966 }
8967
8968
8969 break;
8970 default:
8971 break;
8972 }
8973 break;
8974
8975 case ICMP6_PARAM_PROB:
8976 /* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
8977 if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
8978 (uchar_t *)ip6h + icmp6->icmp6_pptr ==
8979 (uchar_t *)nexthdrp) {
8980 if (tcp->tcp_state == TCPS_SYN_SENT ||
8981 tcp->tcp_state == TCPS_SYN_RCVD) {
8982 (void) tcp_clean_death(tcp,
8983 ECONNREFUSED, 10);
8984 }
8985 break;
8986 }
8987 break;
8988
8989 case ICMP6_TIME_EXCEEDED:
8990 default:
8991 break;
8992 }
8993 freemsg(first_mp);
8994 }
8995
8996 /*
8997 * IP recognizes seven kinds of bind requests:
8998 *
8999 * - A zero-length address binds only to the protocol number.
9000 *
9001 * - A 4-byte address is treated as a request to
9002 * validate that the address is a valid local IPv4
9003 * address, appropriate for an application to bind to.
9004 * IP does the verification, but does not make any note
9005 * of the address at this time.
9006 *
9007 * - A 16-byte address is treated as a request
9008 * to validate a local IPv6 address, as the 4-byte
9009 * address case above.
9010 *
9011 * - A 16-byte sockaddr_in to validate the local IPv4 address and also
9012 * use it for the inbound fanout of packets.
9013 *
9014 * - A 24-byte sockaddr_in6 to validate the local IPv6 address and also
9015 * use it for the inbound fanout of packets.
9016 *
9017 * - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
9018 * information consisting of local and remote addresses
9019 * and ports. In this case, the addresses are both
9020 * validated as appropriate for this operation, and, if
9021 * so, the information is retained for use in the
9022 * inbound fanout.
9023 *
9024 * - A 36-byte address (ipa6_conn_t) containing complete IPv6
9025 * fanout information, like the 12-byte case above.
9026 *
9027 * IP will also fill in the IRE request mblk with information
9028 * regarding our peer. In all cases, we notify IP of our protocol
9029 * type by appending a single protocol byte to the bind request.
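 *
 * As an editorial summary of the list above (the 16-byte case is
 * ambiguous between a bare IPv6 address and a sockaddr_in, so
 * tcp_ip_bind_mp() below discriminates on tcp_family):
 *
 *	request length		meaning
 *	--------------		-------
 *	0			bind to the protocol number only
 *	4 (IP_ADDR_LEN)		validate a local IPv4 address
 *	16			validate a local IPv6 address, or a
 *				sockaddr_in for IPv4 address/port fanout
 *	24			sockaddr_in6 for IPv6 address/port fanout
 *	12 (ipa_conn_t)		full IPv4 fanout information
 *	36 (ipa6_conn_t)	full IPv6 fanout information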
9030 */ 9031 static mblk_t * 9032 tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim, t_scalar_t addr_length) 9033 { 9034 char *cp; 9035 mblk_t *mp; 9036 struct T_bind_req *tbr; 9037 ipa_conn_t *ac; 9038 ipa6_conn_t *ac6; 9039 sin_t *sin; 9040 sin6_t *sin6; 9041 9042 ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ); 9043 ASSERT((tcp->tcp_family == AF_INET && 9044 tcp->tcp_ipversion == IPV4_VERSION) || 9045 (tcp->tcp_family == AF_INET6 && 9046 (tcp->tcp_ipversion == IPV4_VERSION || 9047 tcp->tcp_ipversion == IPV6_VERSION))); 9048 9049 mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI); 9050 if (!mp) 9051 return (mp); 9052 mp->b_datap->db_type = M_PROTO; 9053 tbr = (struct T_bind_req *)mp->b_rptr; 9054 tbr->PRIM_type = bind_prim; 9055 tbr->ADDR_offset = sizeof (*tbr); 9056 tbr->CONIND_number = 0; 9057 tbr->ADDR_length = addr_length; 9058 cp = (char *)&tbr[1]; 9059 switch (addr_length) { 9060 case sizeof (ipa_conn_t): 9061 ASSERT(tcp->tcp_family == AF_INET); 9062 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 9063 9064 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 9065 if (mp->b_cont == NULL) { 9066 freemsg(mp); 9067 return (NULL); 9068 } 9069 mp->b_cont->b_wptr += sizeof (ire_t); 9070 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 9071 9072 /* cp known to be 32 bit aligned */ 9073 ac = (ipa_conn_t *)cp; 9074 ac->ac_laddr = tcp->tcp_ipha->ipha_src; 9075 ac->ac_faddr = tcp->tcp_remote; 9076 ac->ac_fport = tcp->tcp_fport; 9077 ac->ac_lport = tcp->tcp_lport; 9078 tcp->tcp_hard_binding = 1; 9079 break; 9080 9081 case sizeof (ipa6_conn_t): 9082 ASSERT(tcp->tcp_family == AF_INET6); 9083 9084 mp->b_cont = allocb(sizeof (ire_t), BPRI_HI); 9085 if (mp->b_cont == NULL) { 9086 freemsg(mp); 9087 return (NULL); 9088 } 9089 mp->b_cont->b_wptr += sizeof (ire_t); 9090 mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE; 9091 9092 /* cp known to be 32 bit aligned */ 9093 ac6 = (ipa6_conn_t *)cp; 9094 if (tcp->tcp_ipversion == IPV4_VERSION) { 9095 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 9096 &ac6->ac6_laddr); 9097 } else { 9098 ac6->ac6_laddr = tcp->tcp_ip6h->ip6_src; 9099 } 9100 ac6->ac6_faddr = tcp->tcp_remote_v6; 9101 ac6->ac6_fport = tcp->tcp_fport; 9102 ac6->ac6_lport = tcp->tcp_lport; 9103 tcp->tcp_hard_binding = 1; 9104 break; 9105 9106 case sizeof (sin_t): 9107 /* 9108 * NOTE: IPV6_ADDR_LEN also has same size. 9109 * Use family to discriminate. 9110 */ 9111 if (tcp->tcp_family == AF_INET) { 9112 sin = (sin_t *)cp; 9113 9114 *sin = sin_null; 9115 sin->sin_family = AF_INET; 9116 sin->sin_addr.s_addr = tcp->tcp_bound_source; 9117 sin->sin_port = tcp->tcp_lport; 9118 break; 9119 } else { 9120 *(in6_addr_t *)cp = tcp->tcp_bound_source_v6; 9121 } 9122 break; 9123 9124 case sizeof (sin6_t): 9125 ASSERT(tcp->tcp_family == AF_INET6); 9126 sin6 = (sin6_t *)cp; 9127 9128 *sin6 = sin6_null; 9129 sin6->sin6_family = AF_INET6; 9130 sin6->sin6_addr = tcp->tcp_bound_source_v6; 9131 sin6->sin6_port = tcp->tcp_lport; 9132 break; 9133 9134 case IP_ADDR_LEN: 9135 ASSERT(tcp->tcp_ipversion == IPV4_VERSION); 9136 *(uint32_t *)cp = tcp->tcp_ipha->ipha_src; 9137 break; 9138 9139 } 9140 /* Add protocol number to end */ 9141 cp[addr_length] = (char)IPPROTO_TCP; 9142 mp->b_wptr = (uchar_t *)&cp[addr_length + 1]; 9143 return (mp); 9144 } 9145 9146 /* 9147 * Notify IP that we are having trouble with this connection. IP should 9148 * blow the IRE away and start over. 
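 *
 * The notification built below is an M_IOCTL (IP_IOCTL) whose payload
 * is an ipid_t immediately followed by the destination address; an
 * editorial sketch of the layout this function sets up:
 *
 *	+---------------------------+------------------------+
 *	| ipid_t                    | ipha_dst (4 bytes)     |
 *	+---------------------------+------------------------+
 *	ipid_addr_offset = sizeof (ipid_t)
 *	ipid_addr_length = sizeof (tcp->tcp_ipha->ipha_dst)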
9149 */ 9150 static void 9151 tcp_ip_notify(tcp_t *tcp) 9152 { 9153 struct iocblk *iocp; 9154 ipid_t *ipid; 9155 mblk_t *mp; 9156 9157 /* IPv6 has NUD thus notification to delete the IRE is not needed */ 9158 if (tcp->tcp_ipversion == IPV6_VERSION) 9159 return; 9160 9161 mp = mkiocb(IP_IOCTL); 9162 if (mp == NULL) 9163 return; 9164 9165 iocp = (struct iocblk *)mp->b_rptr; 9166 iocp->ioc_count = sizeof (ipid_t) + sizeof (tcp->tcp_ipha->ipha_dst); 9167 9168 mp->b_cont = allocb(iocp->ioc_count, BPRI_HI); 9169 if (!mp->b_cont) { 9170 freeb(mp); 9171 return; 9172 } 9173 9174 ipid = (ipid_t *)mp->b_cont->b_rptr; 9175 mp->b_cont->b_wptr += iocp->ioc_count; 9176 bzero(ipid, sizeof (*ipid)); 9177 ipid->ipid_cmd = IP_IOC_IRE_DELETE_NO_REPLY; 9178 ipid->ipid_ire_type = IRE_CACHE; 9179 ipid->ipid_addr_offset = sizeof (ipid_t); 9180 ipid->ipid_addr_length = sizeof (tcp->tcp_ipha->ipha_dst); 9181 /* 9182 * Note: in the case of source routing we want to blow away the 9183 * route to the first source route hop. 9184 */ 9185 bcopy(&tcp->tcp_ipha->ipha_dst, &ipid[1], 9186 sizeof (tcp->tcp_ipha->ipha_dst)); 9187 9188 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 9189 } 9190 9191 /* Unlink and return any mblk that looks like it contains an ire */ 9192 static mblk_t * 9193 tcp_ire_mp(mblk_t *mp) 9194 { 9195 mblk_t *prev_mp; 9196 9197 for (;;) { 9198 prev_mp = mp; 9199 mp = mp->b_cont; 9200 if (mp == NULL) 9201 break; 9202 switch (DB_TYPE(mp)) { 9203 case IRE_DB_TYPE: 9204 case IRE_DB_REQ_TYPE: 9205 if (prev_mp != NULL) 9206 prev_mp->b_cont = mp->b_cont; 9207 mp->b_cont = NULL; 9208 return (mp); 9209 default: 9210 break; 9211 } 9212 } 9213 return (mp); 9214 } 9215 9216 /* 9217 * Timer callback routine for keepalive probe. We do a fake resend of 9218 * last ACKed byte. Then set a timer using RTO. When the timer expires, 9219 * check to see if we have heard anything from the other end for the last 9220 * RTO period. If we have, set the timer to expire for another 9221 * tcp_keepalive_intrvl and check again. If we have not, set a timer using 9222 * RTO << 1 and check again when it expires. Keep exponentially increasing 9223 * the timeout if we have not heard from the other side. If for more than 9224 * (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything, 9225 * kill the connection unless the keepalive abort threshold is 0. In 9226 * that case, we will probe "forever." 9227 */ 9228 static void 9229 tcp_keepalive_killer(void *arg) 9230 { 9231 mblk_t *mp; 9232 conn_t *connp = (conn_t *)arg; 9233 tcp_t *tcp = connp->conn_tcp; 9234 int32_t firetime; 9235 int32_t idletime; 9236 int32_t ka_intrvl; 9237 tcp_stack_t *tcps = tcp->tcp_tcps; 9238 9239 tcp->tcp_ka_tid = 0; 9240 9241 if (tcp->tcp_fused) 9242 return; 9243 9244 BUMP_MIB(&tcps->tcps_mib, tcpTimKeepalive); 9245 ka_intrvl = tcp->tcp_ka_interval; 9246 9247 /* 9248 * Keepalive probe should only be sent if the application has not 9249 * done a close on the connection. 9250 */ 9251 if (tcp->tcp_state > TCPS_CLOSE_WAIT) { 9252 return; 9253 } 9254 /* Timer fired too early, restart it. */ 9255 if (tcp->tcp_state < TCPS_ESTABLISHED) { 9256 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer, 9257 MSEC_TO_TICK(ka_intrvl)); 9258 return; 9259 } 9260 9261 idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time); 9262 /* 9263 * If we have not heard from the other side for a long 9264 * time, kill the connection unless the keepalive abort 9265 * threshold is 0. In that case, we will probe "forever." 
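 *
 * Editorial sketch of the probe back-off computed below (hypothetical
 * helper; mirrors the tcp_ka_last_intrvl logic in this function):
 */
#if 0	/* illustrative only */
static int32_t
next_probe_interval(int32_t ka_intrvl, int32_t last_intrvl, int32_t rto,
    int32_t rexmit_max)
{
	int32_t firetime;

	if (last_intrvl == 0)
		return (rto);		/* first probe: one RTO out */
	firetime = MIN(ka_intrvl - 1, last_intrvl << 1);
	return (MIN(firetime, rexmit_max));
}
#endif
/*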
9266 */
9267 if (tcp->tcp_ka_abort_thres != 0 &&
9268 idletime > (ka_intrvl + tcp->tcp_ka_abort_thres)) {
9269 BUMP_MIB(&tcps->tcps_mib, tcpTimKeepaliveDrop);
9270 (void) tcp_clean_death(tcp, tcp->tcp_client_errno ?
9271 tcp->tcp_client_errno : ETIMEDOUT, 11);
9272 return;
9273 }
9274
9275 if (tcp->tcp_snxt == tcp->tcp_suna &&
9276 idletime >= ka_intrvl) {
9277 /* Fake resend of last ACKed byte. */
9278 mblk_t *mp1 = allocb(1, BPRI_LO);
9279
9280 if (mp1 != NULL) {
9281 *mp1->b_wptr++ = '\0';
9282 mp = tcp_xmit_mp(tcp, mp1, 1, NULL, NULL,
9283 tcp->tcp_suna - 1, B_FALSE, NULL, B_TRUE);
9284 freeb(mp1);
9285 /*
9286 * If the allocation failed, fall through to
9287 * restart the timer.
9288 */
9289 if (mp != NULL) {
9290 TCP_RECORD_TRACE(tcp, mp,
9291 TCP_TRACE_SEND_PKT);
9292 tcp_send_data(tcp, tcp->tcp_wq, mp);
9293 BUMP_MIB(&tcps->tcps_mib,
9294 tcpTimKeepaliveProbe);
9295 if (tcp->tcp_ka_last_intrvl != 0) {
9296 int max;
9297 /*
9298 * We should probe again at least
9299 * in ka_intrvl, but not more than
9300 * tcp_rexmit_interval_max.
9301 */
9302 max = tcps->tcps_rexmit_interval_max;
9303 firetime = MIN(ka_intrvl - 1,
9304 tcp->tcp_ka_last_intrvl << 1);
9305 if (firetime > max)
9306 firetime = max;
9307 } else {
9308 firetime = tcp->tcp_rto;
9309 }
9310 tcp->tcp_ka_tid = TCP_TIMER(tcp,
9311 tcp_keepalive_killer,
9312 MSEC_TO_TICK(firetime));
9313 tcp->tcp_ka_last_intrvl = firetime;
9314 return;
9315 }
9316 }
9317 } else {
9318 tcp->tcp_ka_last_intrvl = 0;
9319 }
9320
9321 /* firetime can be negative if (mp1 == NULL || mp == NULL) */
9322 if ((firetime = ka_intrvl - idletime) < 0) {
9323 firetime = ka_intrvl;
9324 }
9325 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
9326 MSEC_TO_TICK(firetime));
9327 }
9328
9329 int
9330 tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
9331 {
9332 queue_t *q = tcp->tcp_rq;
9333 int32_t mss = tcp->tcp_mss;
9334 int maxpsz;
9335
9336 if (TCP_IS_DETACHED(tcp))
9337 return (mss);
9338
9339 if (tcp->tcp_fused) {
9340 maxpsz = tcp_fuse_maxpsz_set(tcp);
9341 mss = INFPSZ;
9342 } else if (tcp->tcp_mdt || tcp->tcp_lso || tcp->tcp_maxpsz == 0) {
9343 /*
9344 * Set the sd_qn_maxpsz according to the socket send buffer
9345 * size, and sd_maxblk to INFPSZ (-1). This will essentially
9346 * instruct the stream head to copyin user data into contiguous
9347 * kernel-allocated buffers without breaking it up into smaller
9348 * chunks. We round up the buffer size to the nearest SMSS.
9349 */
9350 maxpsz = MSS_ROUNDUP(tcp->tcp_xmit_hiwater, mss);
9351 if (tcp->tcp_kssl_ctx == NULL)
9352 mss = INFPSZ;
9353 else
9354 mss = SSL3_MAX_RECORD_LEN;
9355 } else {
9356 /*
9357 * Set sd_qn_maxpsz to approx half the (receiver's) buffer
9358 * (and a multiple of the mss). This instructs the stream
9359 * head to break down larger-than-SMSS writes into SMSS-
9360 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
9361 */
9362 maxpsz = tcp->tcp_maxpsz * mss;
9363 if (maxpsz > tcp->tcp_xmit_hiwater/2) {
9364 maxpsz = tcp->tcp_xmit_hiwater/2;
9365 /* Round up to nearest mss */
9366 maxpsz = MSS_ROUNDUP(maxpsz, mss);
9367 }
9368 }
9369 (void) setmaxps(q, maxpsz);
9370 tcp->tcp_wq->q_maxpsz = maxpsz;
9371
9372 if (set_maxblk)
9373 (void) mi_set_sth_maxblk(q, mss);
9374
9375 return (mss);
9376 }
9377
9378 /*
9379 * Extract option values from a tcp header. We put any found values into the
9380 * tcpopt struct and return a bitmask saying which options were found.
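 *
 * Editorial sketch of the classic kind/length walk implemented below
 * (illustrative only; the real parser also extracts the option values
 * and checks each option against its defined length):
 */
#if 0	/* illustrative only */
static void
walk_tcp_options(const uchar_t *up, const uchar_t *endp)
{
	while (up < endp) {
		switch (up[0]) {
		case TCPOPT_EOL:	/* end of option list */
			return;
		case TCPOPT_NOP:	/* one byte of padding */
			up++;
			break;
		default:		/* <kind, len, data...> */
			if (endp - up < 2 || up[1] < 2 ||
			    endp - up < up[1])
				return;	/* malformed; stop parsing */
			up += up[1];
			break;
		}
	}
}
#endif
/*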
9381 */ 9382 static int 9383 tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt) 9384 { 9385 uchar_t *endp; 9386 int len; 9387 uint32_t mss; 9388 uchar_t *up = (uchar_t *)tcph; 9389 int found = 0; 9390 int32_t sack_len; 9391 tcp_seq sack_begin, sack_end; 9392 tcp_t *tcp; 9393 9394 endp = up + TCP_HDR_LENGTH(tcph); 9395 up += TCP_MIN_HEADER_LENGTH; 9396 while (up < endp) { 9397 len = endp - up; 9398 switch (*up) { 9399 case TCPOPT_EOL: 9400 break; 9401 9402 case TCPOPT_NOP: 9403 up++; 9404 continue; 9405 9406 case TCPOPT_MAXSEG: 9407 if (len < TCPOPT_MAXSEG_LEN || 9408 up[1] != TCPOPT_MAXSEG_LEN) 9409 break; 9410 9411 mss = BE16_TO_U16(up+2); 9412 /* Caller must handle tcp_mss_min and tcp_mss_max_* */ 9413 tcpopt->tcp_opt_mss = mss; 9414 found |= TCP_OPT_MSS_PRESENT; 9415 9416 up += TCPOPT_MAXSEG_LEN; 9417 continue; 9418 9419 case TCPOPT_WSCALE: 9420 if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN) 9421 break; 9422 9423 if (up[2] > TCP_MAX_WINSHIFT) 9424 tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT; 9425 else 9426 tcpopt->tcp_opt_wscale = up[2]; 9427 found |= TCP_OPT_WSCALE_PRESENT; 9428 9429 up += TCPOPT_WS_LEN; 9430 continue; 9431 9432 case TCPOPT_SACK_PERMITTED: 9433 if (len < TCPOPT_SACK_OK_LEN || 9434 up[1] != TCPOPT_SACK_OK_LEN) 9435 break; 9436 found |= TCP_OPT_SACK_OK_PRESENT; 9437 up += TCPOPT_SACK_OK_LEN; 9438 continue; 9439 9440 case TCPOPT_SACK: 9441 if (len <= 2 || up[1] <= 2 || len < up[1]) 9442 break; 9443 9444 /* If TCP is not interested in SACK blks... */ 9445 if ((tcp = tcpopt->tcp) == NULL) { 9446 up += up[1]; 9447 continue; 9448 } 9449 sack_len = up[1] - TCPOPT_HEADER_LEN; 9450 up += TCPOPT_HEADER_LEN; 9451 9452 /* 9453 * If the list is empty, allocate one and assume 9454 * nothing is sack'ed. 9455 */ 9456 ASSERT(tcp->tcp_sack_info != NULL); 9457 if (tcp->tcp_notsack_list == NULL) { 9458 tcp_notsack_update(&(tcp->tcp_notsack_list), 9459 tcp->tcp_suna, tcp->tcp_snxt, 9460 &(tcp->tcp_num_notsack_blk), 9461 &(tcp->tcp_cnt_notsack_list)); 9462 9463 /* 9464 * Make sure tcp_notsack_list is not NULL. 9465 * This happens when kmem_alloc(KM_NOSLEEP) 9466 * returns NULL. 9467 */ 9468 if (tcp->tcp_notsack_list == NULL) { 9469 up += sack_len; 9470 continue; 9471 } 9472 tcp->tcp_fack = tcp->tcp_suna; 9473 } 9474 9475 while (sack_len > 0) { 9476 if (up + 8 > endp) { 9477 up = endp; 9478 break; 9479 } 9480 sack_begin = BE32_TO_U32(up); 9481 up += 4; 9482 sack_end = BE32_TO_U32(up); 9483 up += 4; 9484 sack_len -= 8; 9485 /* 9486 * Bounds checking. Make sure the SACK 9487 * info is within tcp_suna and tcp_snxt. 9488 * If this SACK blk is out of bound, ignore 9489 * it but continue to parse the following 9490 * blks. 
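 *
 * Editorial sketch of that bounds test (hypothetical helper): with the
 * usual sequence-space comparisons, a block is acceptable only if
 *
 *	SND.UNA <= begin < end <= SND.NXT
 */
#if 0	/* illustrative only */
static boolean_t
sack_blk_valid(uint32_t begin, uint32_t end, uint32_t suna, uint32_t snxt)
{
	return (SEQ_LT(begin, end) && SEQ_GEQ(begin, suna) &&
	    SEQ_LEQ(end, snxt));
}
#endif
/*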
9491 */
9492 if (SEQ_LEQ(sack_end, sack_begin) ||
9493 SEQ_LT(sack_begin, tcp->tcp_suna) ||
9494 SEQ_GT(sack_end, tcp->tcp_snxt)) {
9495 continue;
9496 }
9497 tcp_notsack_insert(&(tcp->tcp_notsack_list),
9498 sack_begin, sack_end,
9499 &(tcp->tcp_num_notsack_blk),
9500 &(tcp->tcp_cnt_notsack_list));
9501 if (SEQ_GT(sack_end, tcp->tcp_fack)) {
9502 tcp->tcp_fack = sack_end;
9503 }
9504 }
9505 found |= TCP_OPT_SACK_PRESENT;
9506 continue;
9507
9508 case TCPOPT_TSTAMP:
9509 if (len < TCPOPT_TSTAMP_LEN ||
9510 up[1] != TCPOPT_TSTAMP_LEN)
9511 break;
9512
9513 tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
9514 tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);
9515
9516 found |= TCP_OPT_TSTAMP_PRESENT;
9517
9518 up += TCPOPT_TSTAMP_LEN;
9519 continue;
9520
9521 default:
9522 if (len <= 1 || len < (int)up[1] || up[1] == 0)
9523 break;
9524 up += up[1];
9525 continue;
9526 }
9527 break;
9528 }
9529 return (found);
9530 }
9531
9532 /*
9533 * Set the mss associated with a particular tcp based on its current value,
9534 * and a new one passed in. Observe minimums and maximums, and reset
9535 * other state variables that we want to view as multiples of mss.
9536 *
9537 * This function is called mainly because values like tcp_mss, tcp_cwnd,
9538 * highwater marks etc. need to be initialized or adjusted.
9539 * 1) From tcp_process_options() when the other side's SYN/SYN-ACK
9540 * packet arrives.
9541 * 2) We need to set a new MSS when ICMP_FRAGMENTATION_NEEDED or
9542 * ICMP6_PACKET_TOO_BIG arrives.
9543 * 3) From tcp_paws_check() if the other side stops sending the timestamp,
9544 * to increase the MSS to use the extra bytes available.
9545 *
9546 * Callers except tcp_paws_check() ensure that they only reduce mss.
9547 */
9548 static void
9549 tcp_mss_set(tcp_t *tcp, uint32_t mss, boolean_t do_ss)
9550 {
9551 uint32_t mss_max;
9552 tcp_stack_t *tcps = tcp->tcp_tcps;
9553
9554 if (tcp->tcp_ipversion == IPV4_VERSION)
9555 mss_max = tcps->tcps_mss_max_ipv4;
9556 else
9557 mss_max = tcps->tcps_mss_max_ipv6;
9558
9559 if (mss < tcps->tcps_mss_min)
9560 mss = tcps->tcps_mss_min;
9561 if (mss > mss_max)
9562 mss = mss_max;
9563 /*
9564 * Unless naglim has been set by our client to
9565 * a non-mss value, force naglim to track mss.
9566 * This can help to aggregate small writes.
9567 */
9568 if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
9569 tcp->tcp_naglim = mss;
9570 /*
9571 * TCP should be able to buffer at least 4 MSS data for obvious
9572 * performance reasons.
9573 */
9574 if ((mss << 2) > tcp->tcp_xmit_hiwater)
9575 tcp->tcp_xmit_hiwater = mss << 2;
9576
9577 if (do_ss) {
9578 /*
9579 * Either tcp_cwnd is as yet uninitialized, or mss is
9580 * changing due to a reduction in MTU, presumably as a
9581 * result of a new path component. In both cases, reset
9582 * cwnd to its "initial" value, as a multiple of the new mss.
9583 */
9584 SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_initial);
9585 } else {
9586 /*
9587 * When called by tcp_paws_check(), the mss has increased
9588 * marginally to allow use of the space previously taken
9589 * by the timestamp option. It would be inappropriate
9590 * to apply slow start or tcp_init_cwnd values to
9591 * tcp_cwnd; simply adjust to a multiple of the new mss.
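 *
 * Editorial sketch of the clamping done at the top of this function
 * (hypothetical helper; the real code also picks mss_max by IP
 * version):
 */
#if 0	/* illustrative only */
static uint32_t
clamp_mss(uint32_t mss, uint32_t mss_min, uint32_t mss_max)
{
	if (mss < mss_min)
		mss = mss_min;
	if (mss > mss_max)
		mss = mss_max;
	return (mss);
}
#endif
/*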
9592 */ 9593 tcp->tcp_cwnd = (tcp->tcp_cwnd / tcp->tcp_mss) * mss; 9594 tcp->tcp_cwnd_cnt = 0; 9595 } 9596 tcp->tcp_mss = mss; 9597 (void) tcp_maxpsz_set(tcp, B_TRUE); 9598 } 9599 9600 /* For /dev/tcp aka AF_INET open */ 9601 static int 9602 tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 9603 { 9604 return (tcp_open(q, devp, flag, sflag, credp, B_FALSE)); 9605 } 9606 9607 /* For /dev/tcp6 aka AF_INET6 open */ 9608 static int 9609 tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 9610 { 9611 return (tcp_open(q, devp, flag, sflag, credp, B_TRUE)); 9612 } 9613 9614 static int 9615 tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp, 9616 boolean_t isv6) 9617 { 9618 tcp_t *tcp = NULL; 9619 conn_t *connp; 9620 int err; 9621 vmem_t *minor_arena = NULL; 9622 dev_t conn_dev; 9623 zoneid_t zoneid; 9624 tcp_stack_t *tcps = NULL; 9625 9626 if (q->q_ptr != NULL) 9627 return (0); 9628 9629 if (sflag == MODOPEN) 9630 return (EINVAL); 9631 9632 if (!(flag & SO_ACCEPTOR)) { 9633 /* 9634 * Special case for install: miniroot needs to be able to 9635 * access files via NFS as though it were always in the 9636 * global zone. 9637 */ 9638 if (credp == kcred && nfs_global_client_only != 0) { 9639 zoneid = GLOBAL_ZONEID; 9640 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)-> 9641 netstack_tcp; 9642 ASSERT(tcps != NULL); 9643 } else { 9644 netstack_t *ns; 9645 9646 ns = netstack_find_by_cred(credp); 9647 ASSERT(ns != NULL); 9648 tcps = ns->netstack_tcp; 9649 ASSERT(tcps != NULL); 9650 9651 /* 9652 * For exclusive stacks we set the zoneid to zero 9653 * to make TCP operate as if in the global zone. 9654 */ 9655 if (tcps->tcps_netstack->netstack_stackid != 9656 GLOBAL_NETSTACKID) 9657 zoneid = GLOBAL_ZONEID; 9658 else 9659 zoneid = crgetzoneid(credp); 9660 } 9661 /* 9662 * For stackid zero this is done from strplumb.c, but 9663 * non-zero stackids are handled here. 9664 */ 9665 if (tcps->tcps_g_q == NULL && 9666 tcps->tcps_netstack->netstack_stackid != 9667 GLOBAL_NETSTACKID) { 9668 tcp_g_q_setup(tcps); 9669 } 9670 } 9671 9672 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) && 9673 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) { 9674 minor_arena = ip_minor_arena_la; 9675 } else { 9676 /* 9677 * Either minor numbers in the large arena were exhausted 9678 * or a non socket application is doing the open. 9679 * Try to allocate from the small arena. 9680 */ 9681 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) { 9682 if (tcps != NULL) 9683 netstack_rele(tcps->tcps_netstack); 9684 return (EBUSY); 9685 } 9686 minor_arena = ip_minor_arena_sa; 9687 } 9688 ASSERT(minor_arena != NULL); 9689 9690 *devp = makedevice(getemajor(*devp), (minor_t)conn_dev); 9691 9692 if (flag & SO_ACCEPTOR) { 9693 /* No netstack_find_by_cred, hence no netstack_rele needed */ 9694 ASSERT(tcps == NULL); 9695 q->q_qinfo = &tcp_acceptor_rinit; 9696 /* 9697 * the conn_dev and minor_arena will be subsequently used by 9698 * tcp_wput_accept() and tcpclose_accept() to figure out the 9699 * minor device number for this connection from the q_ptr. 9700 */ 9701 RD(q)->q_ptr = (void *)conn_dev; 9702 WR(q)->q_qinfo = &tcp_acceptor_winit; 9703 WR(q)->q_ptr = (void *)minor_arena; 9704 qprocson(q); 9705 return (0); 9706 } 9707 9708 connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt), tcps); 9709 /* 9710 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt, 9711 * so we drop it by one. 
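 *
 * Editorial note on the hand-off (a sketch of the calls involved, not
 * a literal excerpt): both lookups take a hold, and the extra netstack
 * hold from the lookup is released once the conn_t, which keeps its
 * own netstack reference, has been obtained:
 *
 *	ns = netstack_find_by_cred(credp);	-> netstack ref +1
 *	connp = tcp_get_conn(sqp, tcps);	-> conn ref taken
 *	netstack_rele(tcps->tcps_netstack);	-> netstack ref -1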
9712 */ 9713 netstack_rele(tcps->tcps_netstack); 9714 if (connp == NULL) { 9715 inet_minor_free(minor_arena, conn_dev); 9716 q->q_ptr = NULL; 9717 return (ENOSR); 9718 } 9719 connp->conn_sqp = IP_SQUEUE_GET(lbolt); 9720 tcp = connp->conn_tcp; 9721 9722 q->q_ptr = WR(q)->q_ptr = connp; 9723 if (isv6) { 9724 connp->conn_flags |= (IPCL_TCP6|IPCL_ISV6); 9725 connp->conn_send = ip_output_v6; 9726 connp->conn_af_isv6 = B_TRUE; 9727 connp->conn_pkt_isv6 = B_TRUE; 9728 connp->conn_src_preferences = IPV6_PREFER_SRC_DEFAULT; 9729 tcp->tcp_ipversion = IPV6_VERSION; 9730 tcp->tcp_family = AF_INET6; 9731 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 9732 } else { 9733 connp->conn_flags |= IPCL_TCP4; 9734 connp->conn_send = ip_output; 9735 connp->conn_af_isv6 = B_FALSE; 9736 connp->conn_pkt_isv6 = B_FALSE; 9737 tcp->tcp_ipversion = IPV4_VERSION; 9738 tcp->tcp_family = AF_INET; 9739 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 9740 } 9741 9742 /* 9743 * TCP keeps a copy of cred for cache locality reasons but 9744 * we put a reference only once. If connp->conn_cred 9745 * becomes invalid, tcp_cred should also be set to NULL. 9746 */ 9747 tcp->tcp_cred = connp->conn_cred = credp; 9748 crhold(connp->conn_cred); 9749 tcp->tcp_cpid = curproc->p_pid; 9750 tcp->tcp_open_time = lbolt64; 9751 connp->conn_zoneid = zoneid; 9752 connp->conn_mlp_type = mlptSingle; 9753 connp->conn_ulp_labeled = !is_system_labeled(); 9754 ASSERT(connp->conn_netstack == tcps->tcps_netstack); 9755 ASSERT(tcp->tcp_tcps == tcps); 9756 9757 /* 9758 * If the caller has the process-wide flag set, then default to MAC 9759 * exempt mode. This allows read-down to unlabeled hosts. 9760 */ 9761 if (getpflags(NET_MAC_AWARE, credp) != 0) 9762 connp->conn_mac_exempt = B_TRUE; 9763 9764 connp->conn_dev = conn_dev; 9765 connp->conn_minor_arena = minor_arena; 9766 9767 ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6); 9768 ASSERT(WR(q)->q_qinfo == &tcp_winit); 9769 9770 if (flag & SO_SOCKSTR) { 9771 /* 9772 * No need to insert a socket in tcp acceptor hash. 9773 * If it was a socket acceptor stream, we dealt with 9774 * it above. A socket listener can never accept a 9775 * connection and doesn't need acceptor_id. 9776 */ 9777 connp->conn_flags |= IPCL_SOCKET; 9778 tcp->tcp_issocket = 1; 9779 WR(q)->q_qinfo = &tcp_sock_winit; 9780 } else { 9781 #ifdef _ILP32 9782 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 9783 #else 9784 tcp->tcp_acceptor_id = conn_dev; 9785 #endif /* _ILP32 */ 9786 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 9787 } 9788 9789 if (tcps->tcps_trace) 9790 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_SLEEP); 9791 9792 err = tcp_init(tcp, q); 9793 if (err != 0) { 9794 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 9795 tcp_acceptor_hash_remove(tcp); 9796 CONN_DEC_REF(connp); 9797 q->q_ptr = WR(q)->q_ptr = NULL; 9798 return (err); 9799 } 9800 9801 RD(q)->q_hiwat = tcps->tcps_recv_hiwat; 9802 tcp->tcp_rwnd = tcps->tcps_recv_hiwat; 9803 9804 /* Non-zero default values */ 9805 connp->conn_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; 9806 /* 9807 * Put the ref for TCP. Ref for IP was already put 9808 * by ipcl_conn_create. 
Also make the conn_t globally
9809 * visible to walkers.
9810 */
9811 mutex_enter(&connp->conn_lock);
9812 CONN_INC_REF_LOCKED(connp);
9813 ASSERT(connp->conn_ref == 2);
9814 connp->conn_state_flags &= ~CONN_INCIPIENT;
9815 mutex_exit(&connp->conn_lock);
9816
9817 qprocson(q);
9818 return (0);
9819 }
9820
9821 /*
9822 * Some TCP options can be "set" by requesting them in the option
9823 * buffer. This is needed for the XTI feature tests, though we do not
9824 * allow it in general. We interpret this mechanism as more
9825 * applicable to OSI protocols, so it need not be allowed in general.
9826 * This routine filters out options for which it is not allowed (most)
9827 * and lets through those (few) for which it is. [ The XTI test
9828 * suite specifics imply that any XTI_GENERIC level XTI_* options, if
9829 * ever implemented, will have to be allowed here. ]
9830 */
9831 static boolean_t
9832 tcp_allow_connopt_set(int level, int name)
9833 {
9834
9835 switch (level) {
9836 case IPPROTO_TCP:
9837 switch (name) {
9838 case TCP_NODELAY:
9839 return (B_TRUE);
9840 default:
9841 return (B_FALSE);
9842 }
9843 /*NOTREACHED*/
9844 default:
9845 return (B_FALSE);
9846 }
9847 /*NOTREACHED*/
9848 }
9849
9850 /*
9851 * This routine gets the default values of certain options whose default
9852 * values are maintained by protocol-specific code.
9853 */
9854 /* ARGSUSED */
9855 int
9856 tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr)
9857 {
9858 int32_t *i1 = (int32_t *)ptr;
9859 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
9860
9861 switch (level) {
9862 case IPPROTO_TCP:
9863 switch (name) {
9864 case TCP_NOTIFY_THRESHOLD:
9865 *i1 = tcps->tcps_ip_notify_interval;
9866 break;
9867 case TCP_ABORT_THRESHOLD:
9868 *i1 = tcps->tcps_ip_abort_interval;
9869 break;
9870 case TCP_CONN_NOTIFY_THRESHOLD:
9871 *i1 = tcps->tcps_ip_notify_cinterval;
9872 break;
9873 case TCP_CONN_ABORT_THRESHOLD:
9874 *i1 = tcps->tcps_ip_abort_cinterval;
9875 break;
9876 default:
9877 return (-1);
9878 }
9879 break;
9880 case IPPROTO_IP:
9881 switch (name) {
9882 case IP_TTL:
9883 *i1 = tcps->tcps_ipv4_ttl;
9884 break;
9885 default:
9886 return (-1);
9887 }
9888 break;
9889 case IPPROTO_IPV6:
9890 switch (name) {
9891 case IPV6_UNICAST_HOPS:
9892 *i1 = tcps->tcps_ipv6_hoplimit;
9893 break;
9894 default:
9895 return (-1);
9896 }
9897 break;
9898 default:
9899 return (-1);
9900 }
9901 return (sizeof (int));
9902 }
9903
9904
9905 /*
9906 * TCP routine to get the values of options.
9907 */
9908 int
9909 tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
9910 {
9911 int *i1 = (int *)ptr;
9912 conn_t *connp = Q_TO_CONN(q);
9913 tcp_t *tcp = connp->conn_tcp;
9914 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp;
9915
9916 switch (level) {
9917 case SOL_SOCKET:
9918 switch (name) {
9919 case SO_LINGER: {
9920 struct linger *lgr = (struct linger *)ptr;
9921
9922 lgr->l_onoff = tcp->tcp_linger ? SO_LINGER : 0;
9923 lgr->l_linger = tcp->tcp_lingertime;
9924 }
9925 return (sizeof (struct linger));
9926 case SO_DEBUG:
9927 *i1 = tcp->tcp_debug ? SO_DEBUG : 0;
9928 break;
9929 case SO_KEEPALIVE:
9930 *i1 = tcp->tcp_ka_enabled ? SO_KEEPALIVE : 0;
9931 break;
9932 case SO_DONTROUTE:
9933 *i1 = tcp->tcp_dontroute ? SO_DONTROUTE : 0;
9934 break;
9935 case SO_USELOOPBACK:
9936 *i1 = tcp->tcp_useloopback ? SO_USELOOPBACK : 0;
9937 break;
9938 case SO_BROADCAST:
9939 *i1 = tcp->tcp_broadcast ? SO_BROADCAST : 0;
9940 break;
9941 case SO_REUSEADDR:
9942 *i1 = tcp->tcp_reuseaddr ? SO_REUSEADDR : 0;
9943 break;
9944 case SO_OOBINLINE:
9945 *i1 = tcp->tcp_oobinline ? SO_OOBINLINE : 0;
9946 break;
9947 case SO_DGRAM_ERRIND:
9948 *i1 = tcp->tcp_dgram_errind ? SO_DGRAM_ERRIND : 0;
9949 break;
9950 case SO_TYPE:
9951 *i1 = SOCK_STREAM;
9952 break;
9953 case SO_SNDBUF:
9954 *i1 = tcp->tcp_xmit_hiwater;
9955 break;
9956 case SO_RCVBUF:
9957 *i1 = RD(q)->q_hiwat;
9958 break;
9959 case SO_SND_COPYAVOID:
9960 *i1 = tcp->tcp_snd_zcopy_on ?
9961 SO_SND_COPYAVOID : 0;
9962 break;
9963 case SO_ALLZONES:
9964 *i1 = connp->conn_allzones ? 1 : 0;
9965 break;
9966 case SO_ANON_MLP:
9967 *i1 = connp->conn_anon_mlp;
9968 break;
9969 case SO_MAC_EXEMPT:
9970 *i1 = connp->conn_mac_exempt;
9971 break;
9972 case SO_EXCLBIND:
9973 *i1 = tcp->tcp_exclbind ? SO_EXCLBIND : 0;
9974 break;
9975 case SO_PROTOTYPE:
9976 *i1 = IPPROTO_TCP;
9977 break;
9978 case SO_DOMAIN:
9979 *i1 = tcp->tcp_family;
9980 break;
9981 default:
9982 return (-1);
9983 }
9984 break;
9985 case IPPROTO_TCP:
9986 switch (name) {
9987 case TCP_NODELAY:
9988 *i1 = (tcp->tcp_naglim == 1) ? TCP_NODELAY : 0;
9989 break;
9990 case TCP_MAXSEG:
9991 *i1 = tcp->tcp_mss;
9992 break;
9993 case TCP_NOTIFY_THRESHOLD:
9994 *i1 = (int)tcp->tcp_first_timer_threshold;
9995 break;
9996 case TCP_ABORT_THRESHOLD:
9997 *i1 = tcp->tcp_second_timer_threshold;
9998 break;
9999 case TCP_CONN_NOTIFY_THRESHOLD:
10000 *i1 = tcp->tcp_first_ctimer_threshold;
10001 break;
10002 case TCP_CONN_ABORT_THRESHOLD:
10003 *i1 = tcp->tcp_second_ctimer_threshold;
10004 break;
10005 case TCP_RECVDSTADDR:
10006 *i1 = tcp->tcp_recvdstaddr;
10007 break;
10008 case TCP_ANONPRIVBIND:
10009 *i1 = tcp->tcp_anon_priv_bind;
10010 break;
10011 case TCP_EXCLBIND:
10012 *i1 = tcp->tcp_exclbind ? TCP_EXCLBIND : 0;
10013 break;
10014 case TCP_INIT_CWND:
10015 *i1 = tcp->tcp_init_cwnd;
10016 break;
10017 case TCP_KEEPALIVE_THRESHOLD:
10018 *i1 = tcp->tcp_ka_interval;
10019 break;
10020 case TCP_KEEPALIVE_ABORT_THRESHOLD:
10021 *i1 = tcp->tcp_ka_abort_thres;
10022 break;
10023 case TCP_CORK:
10024 *i1 = tcp->tcp_cork;
10025 break;
10026 default:
10027 return (-1);
10028 }
10029 break;
10030 case IPPROTO_IP:
10031 if (tcp->tcp_family != AF_INET)
10032 return (-1);
10033 switch (name) {
10034 case IP_OPTIONS:
10035 case T_IP_OPTIONS: {
10036 /*
10037 * This is compatible with BSD in that it only returns
10038 * the reverse source route with the final destination
10039 * as the last entry. The first 4 bytes of the option
10040 * will contain the final destination.
10041 */
10042 int opt_len;
10043
10044 opt_len = (char *)tcp->tcp_tcph - (char *)tcp->tcp_ipha;
10045 opt_len -= tcp->tcp_label_len + IP_SIMPLE_HDR_LENGTH;
10046 ASSERT(opt_len >= 0);
10047 /* Caller ensures enough space */
10048 if (opt_len > 0) {
10049 /*
10050 * TODO: Do we have to handle getsockopt on an
10051 * initiator as well?
10052 */
10053 return (ip_opt_get_user(tcp->tcp_ipha, ptr));
10054 }
10055 return (0);
10056 }
10057 case IP_TOS:
10058 case T_IP_TOS:
10059 *i1 = (int)tcp->tcp_ipha->ipha_type_of_service;
10060 break;
10061 case IP_TTL:
10062 *i1 = (int)tcp->tcp_ipha->ipha_ttl;
10063 break;
10064 case IP_NEXTHOP:
10065 /* Handled at IP level */
10066 return (-EINVAL);
10067 default:
10068 return (-1);
10069 }
10070 break;
10071 case IPPROTO_IPV6:
10072 /*
10073 * IPPROTO_IPV6 options are only supported for sockets
10074 * that are using IPv6 on the wire.
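 *
 * Editorial user-level sketch (standard sockets API, not part of this
 * file) of querying such an option on an AF_INET6 TCP socket:
 */
#if 0	/* illustrative only */
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int
get_unicast_hops(void)
{
	int s, hops = -1;
	socklen_t len = sizeof (hops);

	if ((s = socket(AF_INET6, SOCK_STREAM, 0)) == -1)
		return (-1);
	/* Returns -1 when the option is unavailable on this socket. */
	if (getsockopt(s, IPPROTO_IPV6, IPV6_UNICAST_HOPS,
	    &hops, &len) == -1)
		hops = -1;
	(void) close(s);
	return (hops);
}
#endif
/*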
10075 */ 10076 if (tcp->tcp_ipversion != IPV6_VERSION) { 10077 return (-1); 10078 } 10079 switch (name) { 10080 case IPV6_UNICAST_HOPS: 10081 *i1 = (unsigned int) tcp->tcp_ip6h->ip6_hops; 10082 break; /* goto sizeof (int) option return */ 10083 case IPV6_BOUND_IF: 10084 /* Zero if not set */ 10085 *i1 = tcp->tcp_bound_if; 10086 break; /* goto sizeof (int) option return */ 10087 case IPV6_RECVPKTINFO: 10088 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) 10089 *i1 = 1; 10090 else 10091 *i1 = 0; 10092 break; /* goto sizeof (int) option return */ 10093 case IPV6_RECVTCLASS: 10094 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS) 10095 *i1 = 1; 10096 else 10097 *i1 = 0; 10098 break; /* goto sizeof (int) option return */ 10099 case IPV6_RECVHOPLIMIT: 10100 if (tcp->tcp_ipv6_recvancillary & 10101 TCP_IPV6_RECVHOPLIMIT) 10102 *i1 = 1; 10103 else 10104 *i1 = 0; 10105 break; /* goto sizeof (int) option return */ 10106 case IPV6_RECVHOPOPTS: 10107 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) 10108 *i1 = 1; 10109 else 10110 *i1 = 0; 10111 break; /* goto sizeof (int) option return */ 10112 case IPV6_RECVDSTOPTS: 10113 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVDSTOPTS) 10114 *i1 = 1; 10115 else 10116 *i1 = 0; 10117 break; /* goto sizeof (int) option return */ 10118 case _OLD_IPV6_RECVDSTOPTS: 10119 if (tcp->tcp_ipv6_recvancillary & 10120 TCP_OLD_IPV6_RECVDSTOPTS) 10121 *i1 = 1; 10122 else 10123 *i1 = 0; 10124 break; /* goto sizeof (int) option return */ 10125 case IPV6_RECVRTHDR: 10126 if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) 10127 *i1 = 1; 10128 else 10129 *i1 = 0; 10130 break; /* goto sizeof (int) option return */ 10131 case IPV6_RECVRTHDRDSTOPTS: 10132 if (tcp->tcp_ipv6_recvancillary & 10133 TCP_IPV6_RECVRTDSTOPTS) 10134 *i1 = 1; 10135 else 10136 *i1 = 0; 10137 break; /* goto sizeof (int) option return */ 10138 case IPV6_PKTINFO: { 10139 /* XXX assumes that caller has room for max size! 
*/ 10140 struct in6_pktinfo *pkti; 10141 10142 pkti = (struct in6_pktinfo *)ptr; 10143 if (ipp->ipp_fields & IPPF_IFINDEX) 10144 pkti->ipi6_ifindex = ipp->ipp_ifindex; 10145 else 10146 pkti->ipi6_ifindex = 0; 10147 if (ipp->ipp_fields & IPPF_ADDR) 10148 pkti->ipi6_addr = ipp->ipp_addr; 10149 else 10150 pkti->ipi6_addr = ipv6_all_zeros; 10151 return (sizeof (struct in6_pktinfo)); 10152 } 10153 case IPV6_TCLASS: 10154 if (ipp->ipp_fields & IPPF_TCLASS) 10155 *i1 = ipp->ipp_tclass; 10156 else 10157 *i1 = IPV6_FLOW_TCLASS( 10158 IPV6_DEFAULT_VERS_AND_FLOW); 10159 break; /* goto sizeof (int) option return */ 10160 case IPV6_NEXTHOP: { 10161 sin6_t *sin6 = (sin6_t *)ptr; 10162 10163 if (!(ipp->ipp_fields & IPPF_NEXTHOP)) 10164 return (0); 10165 *sin6 = sin6_null; 10166 sin6->sin6_family = AF_INET6; 10167 sin6->sin6_addr = ipp->ipp_nexthop; 10168 return (sizeof (sin6_t)); 10169 } 10170 case IPV6_HOPOPTS: 10171 if (!(ipp->ipp_fields & IPPF_HOPOPTS)) 10172 return (0); 10173 if (ipp->ipp_hopoptslen <= tcp->tcp_label_len) 10174 return (0); 10175 bcopy((char *)ipp->ipp_hopopts + tcp->tcp_label_len, 10176 ptr, ipp->ipp_hopoptslen - tcp->tcp_label_len); 10177 if (tcp->tcp_label_len > 0) { 10178 ptr[0] = ((char *)ipp->ipp_hopopts)[0]; 10179 ptr[1] = (ipp->ipp_hopoptslen - 10180 tcp->tcp_label_len + 7) / 8 - 1; 10181 } 10182 return (ipp->ipp_hopoptslen - tcp->tcp_label_len); 10183 case IPV6_RTHDRDSTOPTS: 10184 if (!(ipp->ipp_fields & IPPF_RTDSTOPTS)) 10185 return (0); 10186 bcopy(ipp->ipp_rtdstopts, ptr, ipp->ipp_rtdstoptslen); 10187 return (ipp->ipp_rtdstoptslen); 10188 case IPV6_RTHDR: 10189 if (!(ipp->ipp_fields & IPPF_RTHDR)) 10190 return (0); 10191 bcopy(ipp->ipp_rthdr, ptr, ipp->ipp_rthdrlen); 10192 return (ipp->ipp_rthdrlen); 10193 case IPV6_DSTOPTS: 10194 if (!(ipp->ipp_fields & IPPF_DSTOPTS)) 10195 return (0); 10196 bcopy(ipp->ipp_dstopts, ptr, ipp->ipp_dstoptslen); 10197 return (ipp->ipp_dstoptslen); 10198 case IPV6_SRC_PREFERENCES: 10199 return (ip6_get_src_preferences(connp, 10200 (uint32_t *)ptr)); 10201 case IPV6_PATHMTU: { 10202 struct ip6_mtuinfo *mtuinfo = (struct ip6_mtuinfo *)ptr; 10203 10204 if (tcp->tcp_state < TCPS_ESTABLISHED) 10205 return (-1); 10206 10207 return (ip_fill_mtuinfo(&connp->conn_remv6, 10208 connp->conn_fport, mtuinfo, 10209 connp->conn_netstack)); 10210 } 10211 default: 10212 return (-1); 10213 } 10214 break; 10215 default: 10216 return (-1); 10217 } 10218 return (sizeof (int)); 10219 } 10220 10221 /* 10222 * We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements. 10223 * Parameters are assumed to be verified by the caller. 10224 */ 10225 /* ARGSUSED */ 10226 int 10227 tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name, 10228 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 10229 void *thisdg_attrs, cred_t *cr, mblk_t *mblk) 10230 { 10231 conn_t *connp = Q_TO_CONN(q); 10232 tcp_t *tcp = connp->conn_tcp; 10233 int *i1 = (int *)invalp; 10234 boolean_t onoff = (*i1 == 0) ? 0 : 1; 10235 boolean_t checkonly; 10236 int reterr; 10237 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 10238 10239 switch (optset_context) { 10240 case SETFN_OPTCOM_CHECKONLY: 10241 checkonly = B_TRUE; 10242 /* 10243 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ 10244 * inlen != 0 implies value supplied and 10245 * we have to "pretend" to set it. 10246 * inlen == 0 implies that there is no 10247 * value part in T_CHECK request and just validation 10248 * done elsewhere should be enough, we just return here. 
10249 */
10250 if (inlen == 0) {
10251 *outlenp = 0;
10252 return (0);
10253 }
10254 break;
10255 case SETFN_OPTCOM_NEGOTIATE:
10256 checkonly = B_FALSE;
10257 break;
10258 case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */
10259 case SETFN_CONN_NEGOTIATE:
10260 checkonly = B_FALSE;
10261 /*
10262 * Negotiating local and "association-related" options
10263 * from other (T_CONN_REQ, T_CONN_RES, T_UNITDATA_REQ)
10264 * primitives is allowed by XTI, but we choose
10265 * to not implement this style negotiation for Internet
10266 * protocols (we interpret it as a must for the OSI world
10267 * but optional for Internet protocols) for all options.
10268 * [ We do this only for the few options that enable the
10269 * test suites to verify that our XTI implementation of
10270 * this feature works for transports that do allow it ]
10271 */
10272 if (!tcp_allow_connopt_set(level, name)) {
10273 *outlenp = 0;
10274 return (EINVAL);
10275 }
10276 break;
10277 default:
10278 /*
10279 * We should never get here
10280 */
10281 *outlenp = 0;
10282 return (EINVAL);
10283 }
10284
10285 ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) ||
10286 (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0));
10287
10288 /*
10289 * For TCP, we should have no ancillary data sent down
10290 * (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs
10291 * has to be zero.
10292 */
10293 ASSERT(thisdg_attrs == NULL);
10294
10295 /*
10296 * For fixed length options, no sanity check
10297 * of passed in length is done. It is assumed *_optcom_req()
10298 * routines do the right thing.
10299 */
10300
10301 switch (level) {
10302 case SOL_SOCKET:
10303 switch (name) {
10304 case SO_LINGER: {
10305 struct linger *lgr = (struct linger *)invalp;
10306
10307 if (!checkonly) {
10308 if (lgr->l_onoff) {
10309 tcp->tcp_linger = 1;
10310 tcp->tcp_lingertime = lgr->l_linger;
10311 } else {
10312 tcp->tcp_linger = 0;
10313 tcp->tcp_lingertime = 0;
10314 }
10315 /* struct copy */
10316 *(struct linger *)outvalp = *lgr;
10317 } else {
10318 if (!lgr->l_onoff) {
10319 ((struct linger *)
10320 outvalp)->l_onoff = 0;
10321 ((struct linger *)
10322 outvalp)->l_linger = 0;
10323 } else {
10324 /* struct copy */
10325 *(struct linger *)outvalp = *lgr;
10326 }
10327 }
10328 *outlenp = sizeof (struct linger);
10329 return (0);
10330 }
10331 case SO_DEBUG:
10332 if (!checkonly)
10333 tcp->tcp_debug = onoff;
10334 break;
10335 case SO_KEEPALIVE:
10336 if (checkonly) {
10337 /* T_CHECK case */
10338 break;
10339 }
10340
10341 if (!onoff) {
10342 if (tcp->tcp_ka_enabled) {
10343 if (tcp->tcp_ka_tid != 0) {
10344 (void) TCP_TIMER_CANCEL(tcp,
10345 tcp->tcp_ka_tid);
10346 tcp->tcp_ka_tid = 0;
10347 }
10348 tcp->tcp_ka_enabled = 0;
10349 }
10350 break;
10351 }
10352 if (!tcp->tcp_ka_enabled) {
10353 /* Crank up the keepalive timer */
10354 tcp->tcp_ka_last_intrvl = 0;
10355 tcp->tcp_ka_tid = TCP_TIMER(tcp,
10356 tcp_keepalive_killer,
10357 MSEC_TO_TICK(tcp->tcp_ka_interval));
10358 tcp->tcp_ka_enabled = 1;
10359 }
10360 break;
10361 case SO_DONTROUTE:
10362 /*
10363 * SO_DONTROUTE, SO_USELOOPBACK, and SO_BROADCAST are
10364 * only of interest to IP. We track them here only so
10365 * that we can report their current value.
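 *
 * Editorial user-level sketch: these switches are plain on/off
 * integers (given some connected socket descriptor s), e.g.
 */
#if 0	/* illustrative only */
	int on = 1;

	(void) setsockopt(s, SOL_SOCKET, SO_DONTROUTE, &on, sizeof (on));
#endif
/*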
10366 */ 10367 if (!checkonly) { 10368 tcp->tcp_dontroute = onoff; 10369 tcp->tcp_connp->conn_dontroute = onoff; 10370 } 10371 break; 10372 case SO_USELOOPBACK: 10373 if (!checkonly) { 10374 tcp->tcp_useloopback = onoff; 10375 tcp->tcp_connp->conn_loopback = onoff; 10376 } 10377 break; 10378 case SO_BROADCAST: 10379 if (!checkonly) { 10380 tcp->tcp_broadcast = onoff; 10381 tcp->tcp_connp->conn_broadcast = onoff; 10382 } 10383 break; 10384 case SO_REUSEADDR: 10385 if (!checkonly) { 10386 tcp->tcp_reuseaddr = onoff; 10387 tcp->tcp_connp->conn_reuseaddr = onoff; 10388 } 10389 break; 10390 case SO_OOBINLINE: 10391 if (!checkonly) 10392 tcp->tcp_oobinline = onoff; 10393 break; 10394 case SO_DGRAM_ERRIND: 10395 if (!checkonly) 10396 tcp->tcp_dgram_errind = onoff; 10397 break; 10398 case SO_SNDBUF: { 10399 if (*i1 > tcps->tcps_max_buf) { 10400 *outlenp = 0; 10401 return (ENOBUFS); 10402 } 10403 if (checkonly) 10404 break; 10405 10406 tcp->tcp_xmit_hiwater = *i1; 10407 if (tcps->tcps_snd_lowat_fraction != 0) 10408 tcp->tcp_xmit_lowater = 10409 tcp->tcp_xmit_hiwater / 10410 tcps->tcps_snd_lowat_fraction; 10411 (void) tcp_maxpsz_set(tcp, B_TRUE); 10412 /* 10413 * If we are flow-controlled, recheck the condition. 10414 * There are apps that increase SO_SNDBUF size when 10415 * flow-controlled (EWOULDBLOCK), and expect the flow 10416 * control condition to be lifted right away. 10417 */ 10418 mutex_enter(&tcp->tcp_non_sq_lock); 10419 if (tcp->tcp_flow_stopped && 10420 TCP_UNSENT_BYTES(tcp) < tcp->tcp_xmit_hiwater) { 10421 tcp_clrqfull(tcp); 10422 } 10423 mutex_exit(&tcp->tcp_non_sq_lock); 10424 break; 10425 } 10426 case SO_RCVBUF: 10427 if (*i1 > tcps->tcps_max_buf) { 10428 *outlenp = 0; 10429 return (ENOBUFS); 10430 } 10431 /* Silently ignore zero */ 10432 if (!checkonly && *i1 != 0) { 10433 *i1 = MSS_ROUNDUP(*i1, tcp->tcp_mss); 10434 (void) tcp_rwnd_set(tcp, *i1); 10435 } 10436 /* 10437 * XXX should we return the rwnd here 10438 * and tcp_opt_get ? 10439 */ 10440 break; 10441 case SO_SND_COPYAVOID: 10442 if (!checkonly) { 10443 /* we only allow enable at most once for now */ 10444 if (tcp->tcp_loopback || 10445 (tcp->tcp_kssl_ctx != NULL) || 10446 (!tcp->tcp_snd_zcopy_aware && 10447 (onoff != 1 || !tcp_zcopy_check(tcp)))) { 10448 *outlenp = 0; 10449 return (EOPNOTSUPP); 10450 } 10451 tcp->tcp_snd_zcopy_aware = 1; 10452 } 10453 break; 10454 case SO_ALLZONES: 10455 /* Handled at the IP level */ 10456 return (-EINVAL); 10457 case SO_ANON_MLP: 10458 if (!checkonly) { 10459 mutex_enter(&connp->conn_lock); 10460 connp->conn_anon_mlp = onoff; 10461 mutex_exit(&connp->conn_lock); 10462 } 10463 break; 10464 case SO_MAC_EXEMPT: 10465 if (secpolicy_net_mac_aware(cr) != 0 || 10466 IPCL_IS_BOUND(connp)) 10467 return (EACCES); 10468 if (!checkonly) { 10469 mutex_enter(&connp->conn_lock); 10470 connp->conn_mac_exempt = onoff; 10471 mutex_exit(&connp->conn_lock); 10472 } 10473 break; 10474 case SO_EXCLBIND: 10475 if (!checkonly) 10476 tcp->tcp_exclbind = onoff; 10477 break; 10478 default: 10479 *outlenp = 0; 10480 return (EINVAL); 10481 } 10482 break; 10483 case IPPROTO_TCP: 10484 switch (name) { 10485 case TCP_NODELAY: 10486 if (!checkonly) 10487 tcp->tcp_naglim = *i1 ? 
1 : tcp->tcp_mss;
10488 break;
10489 case TCP_NOTIFY_THRESHOLD:
10490 if (!checkonly)
10491 tcp->tcp_first_timer_threshold = *i1;
10492 break;
10493 case TCP_ABORT_THRESHOLD:
10494 if (!checkonly)
10495 tcp->tcp_second_timer_threshold = *i1;
10496 break;
10497 case TCP_CONN_NOTIFY_THRESHOLD:
10498 if (!checkonly)
10499 tcp->tcp_first_ctimer_threshold = *i1;
10500 break;
10501 case TCP_CONN_ABORT_THRESHOLD:
10502 if (!checkonly)
10503 tcp->tcp_second_ctimer_threshold = *i1;
10504 break;
10505 case TCP_RECVDSTADDR:
10506 if (tcp->tcp_state > TCPS_LISTEN)
10507 return (EOPNOTSUPP);
10508 if (!checkonly)
10509 tcp->tcp_recvdstaddr = onoff;
10510 break;
10511 case TCP_ANONPRIVBIND:
10512 if ((reterr = secpolicy_net_privaddr(cr, 0,
10513 IPPROTO_TCP)) != 0) {
10514 *outlenp = 0;
10515 return (reterr);
10516 }
10517 if (!checkonly) {
10518 tcp->tcp_anon_priv_bind = onoff;
10519 }
10520 break;
10521 case TCP_EXCLBIND:
10522 if (!checkonly)
10523 tcp->tcp_exclbind = onoff;
10524 break; /* goto sizeof (int) option return */
10525 case TCP_INIT_CWND: {
10526 uint32_t init_cwnd = *((uint32_t *)invalp);
10527
10528 if (checkonly)
10529 break;
10530
10531 /*
10532 * Only allow a socket with the network configuration
10533 * privilege to set the initial cwnd to be larger
10534 * than allowed by RFC 3390.
10535 */
10536 if (init_cwnd <= MIN(4, MAX(2, 4380 / tcp->tcp_mss))) {
10537 tcp->tcp_init_cwnd = init_cwnd;
10538 break;
10539 }
10540 if ((reterr = secpolicy_ip_config(cr, B_TRUE)) != 0) {
10541 *outlenp = 0;
10542 return (reterr);
10543 }
10544 if (init_cwnd > TCP_MAX_INIT_CWND) {
10545 *outlenp = 0;
10546 return (EINVAL);
10547 }
10548 tcp->tcp_init_cwnd = init_cwnd;
10549 break;
10550 }
10551 case TCP_KEEPALIVE_THRESHOLD:
10552 if (checkonly)
10553 break;
10554
10555 if (*i1 < tcps->tcps_keepalive_interval_low ||
10556 *i1 > tcps->tcps_keepalive_interval_high) {
10557 *outlenp = 0;
10558 return (EINVAL);
10559 }
10560 if (*i1 != tcp->tcp_ka_interval) {
10561 tcp->tcp_ka_interval = *i1;
10562 /*
10563 * Check if we need to restart the
10564 * keepalive timer.
10565 */
10566 if (tcp->tcp_ka_tid != 0) {
10567 ASSERT(tcp->tcp_ka_enabled);
10568 (void) TCP_TIMER_CANCEL(tcp,
10569 tcp->tcp_ka_tid);
10570 tcp->tcp_ka_last_intrvl = 0;
10571 tcp->tcp_ka_tid = TCP_TIMER(tcp,
10572 tcp_keepalive_killer,
10573 MSEC_TO_TICK(tcp->tcp_ka_interval));
10574 }
10575 }
10576 break;
10577 case TCP_KEEPALIVE_ABORT_THRESHOLD:
10578 if (!checkonly) {
10579 if (*i1 <
10580 tcps->tcps_keepalive_abort_interval_low ||
10581 *i1 >
10582 tcps->tcps_keepalive_abort_interval_high) {
10583 *outlenp = 0;
10584 return (EINVAL);
10585 }
10586 tcp->tcp_ka_abort_thres = *i1;
10587 }
10588 break;
10589 case TCP_CORK:
10590 if (!checkonly) {
10591 /*
10592 * If tcp->tcp_cork was set and is now
10593 * being unset, we have to make sure that
10594 * the remaining data gets sent out.
Also 10595 * unset tcp->tcp_cork so that tcp_wput_data() 10596 * can send data even if it is less than mss 10597 */ 10598 if (tcp->tcp_cork && onoff == 0 && 10599 tcp->tcp_unsent > 0) { 10600 tcp->tcp_cork = B_FALSE; 10601 tcp_wput_data(tcp, NULL, B_FALSE); 10602 } 10603 tcp->tcp_cork = onoff; 10604 } 10605 break; 10606 default: 10607 *outlenp = 0; 10608 return (EINVAL); 10609 } 10610 break; 10611 case IPPROTO_IP: 10612 if (tcp->tcp_family != AF_INET) { 10613 *outlenp = 0; 10614 return (ENOPROTOOPT); 10615 } 10616 switch (name) { 10617 case IP_OPTIONS: 10618 case T_IP_OPTIONS: 10619 reterr = tcp_opt_set_header(tcp, checkonly, 10620 invalp, inlen); 10621 if (reterr) { 10622 *outlenp = 0; 10623 return (reterr); 10624 } 10625 /* OK return - copy input buffer into output buffer */ 10626 if (invalp != outvalp) { 10627 /* don't trust bcopy for identical src/dst */ 10628 bcopy(invalp, outvalp, inlen); 10629 } 10630 *outlenp = inlen; 10631 return (0); 10632 case IP_TOS: 10633 case T_IP_TOS: 10634 if (!checkonly) { 10635 tcp->tcp_ipha->ipha_type_of_service = 10636 (uchar_t)*i1; 10637 tcp->tcp_tos = (uchar_t)*i1; 10638 } 10639 break; 10640 case IP_TTL: 10641 if (!checkonly) { 10642 tcp->tcp_ipha->ipha_ttl = (uchar_t)*i1; 10643 tcp->tcp_ttl = (uchar_t)*i1; 10644 } 10645 break; 10646 case IP_BOUND_IF: 10647 case IP_NEXTHOP: 10648 /* Handled at the IP level */ 10649 return (-EINVAL); 10650 case IP_SEC_OPT: 10651 /* 10652 * We should not allow policy setting after 10653 * we start listening for connections. 10654 */ 10655 if (tcp->tcp_state == TCPS_LISTEN) { 10656 return (EINVAL); 10657 } else { 10658 /* Handled at the IP level */ 10659 return (-EINVAL); 10660 } 10661 default: 10662 *outlenp = 0; 10663 return (EINVAL); 10664 } 10665 break; 10666 case IPPROTO_IPV6: { 10667 ip6_pkt_t *ipp; 10668 10669 /* 10670 * IPPROTO_IPV6 options are only supported for sockets 10671 * that are using IPv6 on the wire. 10672 */ 10673 if (tcp->tcp_ipversion != IPV6_VERSION) { 10674 *outlenp = 0; 10675 return (ENOPROTOOPT); 10676 } 10677 /* 10678 * Only sticky options; no ancillary data 10679 */ 10680 ASSERT(thisdg_attrs == NULL); 10681 ipp = &tcp->tcp_sticky_ipp; 10682 10683 switch (name) { 10684 case IPV6_UNICAST_HOPS: 10685 /* -1 means use default */ 10686 if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) { 10687 *outlenp = 0; 10688 return (EINVAL); 10689 } 10690 if (!checkonly) { 10691 if (*i1 == -1) { 10692 tcp->tcp_ip6h->ip6_hops = 10693 ipp->ipp_unicast_hops = 10694 (uint8_t)tcps->tcps_ipv6_hoplimit; 10695 ipp->ipp_fields &= ~IPPF_UNICAST_HOPS; 10696 /* Pass modified value to IP. 
*/ 10697 *i1 = tcp->tcp_ip6h->ip6_hops; 10698 } else { 10699 tcp->tcp_ip6h->ip6_hops = 10700 ipp->ipp_unicast_hops = 10701 (uint8_t)*i1; 10702 ipp->ipp_fields |= IPPF_UNICAST_HOPS; 10703 } 10704 reterr = tcp_build_hdrs(q, tcp); 10705 if (reterr != 0) 10706 return (reterr); 10707 } 10708 break; 10709 case IPV6_BOUND_IF: 10710 if (!checkonly) { 10711 int error = 0; 10712 10713 tcp->tcp_bound_if = *i1; 10714 error = ip_opt_set_ill(tcp->tcp_connp, *i1, 10715 B_TRUE, checkonly, level, name, mblk); 10716 if (error != 0) { 10717 *outlenp = 0; 10718 return (error); 10719 } 10720 } 10721 break; 10722 /* 10723 * Set boolean switches for ancillary data delivery 10724 */ 10725 case IPV6_RECVPKTINFO: 10726 if (!checkonly) { 10727 if (onoff) 10728 tcp->tcp_ipv6_recvancillary |= 10729 TCP_IPV6_RECVPKTINFO; 10730 else 10731 tcp->tcp_ipv6_recvancillary &= 10732 ~TCP_IPV6_RECVPKTINFO; 10733 /* Force it to be sent up with the next msg */ 10734 tcp->tcp_recvifindex = 0; 10735 } 10736 break; 10737 case IPV6_RECVTCLASS: 10738 if (!checkonly) { 10739 if (onoff) 10740 tcp->tcp_ipv6_recvancillary |= 10741 TCP_IPV6_RECVTCLASS; 10742 else 10743 tcp->tcp_ipv6_recvancillary &= 10744 ~TCP_IPV6_RECVTCLASS; 10745 } 10746 break; 10747 case IPV6_RECVHOPLIMIT: 10748 if (!checkonly) { 10749 if (onoff) 10750 tcp->tcp_ipv6_recvancillary |= 10751 TCP_IPV6_RECVHOPLIMIT; 10752 else 10753 tcp->tcp_ipv6_recvancillary &= 10754 ~TCP_IPV6_RECVHOPLIMIT; 10755 /* Force it to be sent up with the next msg */ 10756 tcp->tcp_recvhops = 0xffffffffU; 10757 } 10758 break; 10759 case IPV6_RECVHOPOPTS: 10760 if (!checkonly) { 10761 if (onoff) 10762 tcp->tcp_ipv6_recvancillary |= 10763 TCP_IPV6_RECVHOPOPTS; 10764 else 10765 tcp->tcp_ipv6_recvancillary &= 10766 ~TCP_IPV6_RECVHOPOPTS; 10767 } 10768 break; 10769 case IPV6_RECVDSTOPTS: 10770 if (!checkonly) { 10771 if (onoff) 10772 tcp->tcp_ipv6_recvancillary |= 10773 TCP_IPV6_RECVDSTOPTS; 10774 else 10775 tcp->tcp_ipv6_recvancillary &= 10776 ~TCP_IPV6_RECVDSTOPTS; 10777 } 10778 break; 10779 case _OLD_IPV6_RECVDSTOPTS: 10780 if (!checkonly) { 10781 if (onoff) 10782 tcp->tcp_ipv6_recvancillary |= 10783 TCP_OLD_IPV6_RECVDSTOPTS; 10784 else 10785 tcp->tcp_ipv6_recvancillary &= 10786 ~TCP_OLD_IPV6_RECVDSTOPTS; 10787 } 10788 break; 10789 case IPV6_RECVRTHDR: 10790 if (!checkonly) { 10791 if (onoff) 10792 tcp->tcp_ipv6_recvancillary |= 10793 TCP_IPV6_RECVRTHDR; 10794 else 10795 tcp->tcp_ipv6_recvancillary &= 10796 ~TCP_IPV6_RECVRTHDR; 10797 } 10798 break; 10799 case IPV6_RECVRTHDRDSTOPTS: 10800 if (!checkonly) { 10801 if (onoff) 10802 tcp->tcp_ipv6_recvancillary |= 10803 TCP_IPV6_RECVRTDSTOPTS; 10804 else 10805 tcp->tcp_ipv6_recvancillary &= 10806 ~TCP_IPV6_RECVRTDSTOPTS; 10807 } 10808 break; 10809 case IPV6_PKTINFO: 10810 if (inlen != 0 && inlen != sizeof (struct in6_pktinfo)) 10811 return (EINVAL); 10812 if (checkonly) 10813 break; 10814 10815 if (inlen == 0) { 10816 ipp->ipp_fields &= ~(IPPF_IFINDEX|IPPF_ADDR); 10817 } else { 10818 struct in6_pktinfo *pkti; 10819 10820 pkti = (struct in6_pktinfo *)invalp; 10821 /* 10822 * RFC 3542 states that ipi6_addr must be 10823 * the unspecified address when setting the 10824 * IPV6_PKTINFO sticky socket option on a 10825 * TCP socket. 10826 */ 10827 if (!IN6_IS_ADDR_UNSPECIFIED(&pkti->ipi6_addr)) 10828 return (EINVAL); 10829 /* 10830 * ip6_set_pktinfo() validates the source 10831 * address and interface index. 
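* (Illustrative user-level sketch, with ifindex assumed to come from
* if_nametoindex(): a process selects the outgoing interface on an
* as-yet-unconnected socket via
*	struct in6_pktinfo pkti = { in6addr_any, ifindex };
*	(void) setsockopt(fd, IPPROTO_IPV6, IPV6_PKTINFO, &pkti,
*	    sizeof (pkti));
* leaving ipi6_addr unspecified, as the RFC 3542 check above requires.)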
10832 */ 10833 reterr = ip6_set_pktinfo(cr, tcp->tcp_connp, 10834 pkti, mblk); 10835 if (reterr != 0) 10836 return (reterr); 10837 ipp->ipp_ifindex = pkti->ipi6_ifindex; 10838 ipp->ipp_addr = pkti->ipi6_addr; 10839 if (ipp->ipp_ifindex != 0) 10840 ipp->ipp_fields |= IPPF_IFINDEX; 10841 else 10842 ipp->ipp_fields &= ~IPPF_IFINDEX; 10843 if (!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)) 10844 ipp->ipp_fields |= IPPF_ADDR; 10845 else 10846 ipp->ipp_fields &= ~IPPF_ADDR; 10847 } 10848 reterr = tcp_build_hdrs(q, tcp); 10849 if (reterr != 0) 10850 return (reterr); 10851 break; 10852 case IPV6_TCLASS: 10853 if (inlen != 0 && inlen != sizeof (int)) 10854 return (EINVAL); 10855 if (checkonly) 10856 break; 10857 10858 if (inlen == 0) { 10859 ipp->ipp_fields &= ~IPPF_TCLASS; 10860 } else { 10861 if (*i1 > 255 || *i1 < -1) 10862 return (EINVAL); 10863 if (*i1 == -1) { 10864 ipp->ipp_tclass = 0; 10865 *i1 = 0; 10866 } else { 10867 ipp->ipp_tclass = *i1; 10868 } 10869 ipp->ipp_fields |= IPPF_TCLASS; 10870 } 10871 reterr = tcp_build_hdrs(q, tcp); 10872 if (reterr != 0) 10873 return (reterr); 10874 break; 10875 case IPV6_NEXTHOP: 10876 /* 10877 * IP will verify that the nexthop is reachable 10878 * and fail for sticky options. 10879 */ 10880 if (inlen != 0 && inlen != sizeof (sin6_t)) 10881 return (EINVAL); 10882 if (checkonly) 10883 break; 10884 10885 if (inlen == 0) { 10886 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10887 } else { 10888 sin6_t *sin6 = (sin6_t *)invalp; 10889 10890 if (sin6->sin6_family != AF_INET6) 10891 return (EAFNOSUPPORT); 10892 if (IN6_IS_ADDR_V4MAPPED( 10893 &sin6->sin6_addr)) 10894 return (EADDRNOTAVAIL); 10895 ipp->ipp_nexthop = sin6->sin6_addr; 10896 if (!IN6_IS_ADDR_UNSPECIFIED( 10897 &ipp->ipp_nexthop)) 10898 ipp->ipp_fields |= IPPF_NEXTHOP; 10899 else 10900 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10901 } 10902 reterr = tcp_build_hdrs(q, tcp); 10903 if (reterr != 0) 10904 return (reterr); 10905 break; 10906 case IPV6_HOPOPTS: { 10907 ip6_hbh_t *hopts = (ip6_hbh_t *)invalp; 10908 10909 /* 10910 * Sanity checks - minimum size, size a multiple of 10911 * eight bytes, and matching size passed in. 10912 */ 10913 if (inlen != 0 && 10914 inlen != (8 * (hopts->ip6h_len + 1))) 10915 return (EINVAL); 10916 10917 if (checkonly) 10918 break; 10919 10920 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10921 (uchar_t **)&ipp->ipp_hopopts, 10922 &ipp->ipp_hopoptslen, tcp->tcp_label_len); 10923 if (reterr != 0) 10924 return (reterr); 10925 if (ipp->ipp_hopoptslen == 0) 10926 ipp->ipp_fields &= ~IPPF_HOPOPTS; 10927 else 10928 ipp->ipp_fields |= IPPF_HOPOPTS; 10929 reterr = tcp_build_hdrs(q, tcp); 10930 if (reterr != 0) 10931 return (reterr); 10932 break; 10933 } 10934 case IPV6_RTHDRDSTOPTS: { 10935 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10936 10937 /* 10938 * Sanity checks - minimum size, size a multiple of 10939 * eight bytes, and matching size passed in. 
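* (The ip6d_len field counts 8-byte units beyond the first 8 bytes,
* so for example a header with ip6d_len == 1 occupies 16 bytes and
* inlen must equal 8 * (1 + 1); the hop-by-hop, destination options
* and routing headers handled nearby use the same encoding.)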
10940 */ 10941 if (inlen != 0 && 10942 inlen != (8 * (dopts->ip6d_len + 1))) 10943 return (EINVAL); 10944 10945 if (checkonly) 10946 break; 10947 10948 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10949 (uchar_t **)&ipp->ipp_rtdstopts, 10950 &ipp->ipp_rtdstoptslen, 0); 10951 if (reterr != 0) 10952 return (reterr); 10953 if (ipp->ipp_rtdstoptslen == 0) 10954 ipp->ipp_fields &= ~IPPF_RTDSTOPTS; 10955 else 10956 ipp->ipp_fields |= IPPF_RTDSTOPTS; 10957 reterr = tcp_build_hdrs(q, tcp); 10958 if (reterr != 0) 10959 return (reterr); 10960 break; 10961 } 10962 case IPV6_DSTOPTS: { 10963 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10964 10965 /* 10966 * Sanity checks - minimum size, size a multiple of 10967 * eight bytes, and matching size passed in. 10968 */ 10969 if (inlen != 0 && 10970 inlen != (8 * (dopts->ip6d_len + 1))) 10971 return (EINVAL); 10972 10973 if (checkonly) 10974 break; 10975 10976 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10977 (uchar_t **)&ipp->ipp_dstopts, 10978 &ipp->ipp_dstoptslen, 0); 10979 if (reterr != 0) 10980 return (reterr); 10981 if (ipp->ipp_dstoptslen == 0) 10982 ipp->ipp_fields &= ~IPPF_DSTOPTS; 10983 else 10984 ipp->ipp_fields |= IPPF_DSTOPTS; 10985 reterr = tcp_build_hdrs(q, tcp); 10986 if (reterr != 0) 10987 return (reterr); 10988 break; 10989 } 10990 case IPV6_RTHDR: { 10991 ip6_rthdr_t *rt = (ip6_rthdr_t *)invalp; 10992 10993 /* 10994 * Sanity checks - minimum size, size a multiple of 10995 * eight bytes, and matching size passed in. 10996 */ 10997 if (inlen != 0 && 10998 inlen != (8 * (rt->ip6r_len + 1))) 10999 return (EINVAL); 11000 11001 if (checkonly) 11002 break; 11003 11004 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 11005 (uchar_t **)&ipp->ipp_rthdr, 11006 &ipp->ipp_rthdrlen, 0); 11007 if (reterr != 0) 11008 return (reterr); 11009 if (ipp->ipp_rthdrlen == 0) 11010 ipp->ipp_fields &= ~IPPF_RTHDR; 11011 else 11012 ipp->ipp_fields |= IPPF_RTHDR; 11013 reterr = tcp_build_hdrs(q, tcp); 11014 if (reterr != 0) 11015 return (reterr); 11016 break; 11017 } 11018 case IPV6_V6ONLY: 11019 if (!checkonly) 11020 tcp->tcp_connp->conn_ipv6_v6only = onoff; 11021 break; 11022 case IPV6_USE_MIN_MTU: 11023 if (inlen != sizeof (int)) 11024 return (EINVAL); 11025 11026 if (*i1 < -1 || *i1 > 1) 11027 return (EINVAL); 11028 11029 if (checkonly) 11030 break; 11031 11032 ipp->ipp_fields |= IPPF_USE_MIN_MTU; 11033 ipp->ipp_use_min_mtu = *i1; 11034 break; 11035 case IPV6_BOUND_PIF: 11036 /* Handled at the IP level */ 11037 return (-EINVAL); 11038 case IPV6_SEC_OPT: 11039 /* 11040 * We should not allow policy setting after 11041 * we start listening for connections. 
11042 */ 11043 if (tcp->tcp_state == TCPS_LISTEN) { 11044 return (EINVAL); 11045 } else { 11046 /* Handled at the IP level */ 11047 return (-EINVAL); 11048 } 11049 case IPV6_SRC_PREFERENCES: 11050 if (inlen != sizeof (uint32_t)) 11051 return (EINVAL); 11052 reterr = ip6_set_src_preferences(tcp->tcp_connp, 11053 *(uint32_t *)invalp); 11054 if (reterr != 0) { 11055 *outlenp = 0; 11056 return (reterr); 11057 } 11058 break; 11059 default: 11060 *outlenp = 0; 11061 return (EINVAL); 11062 } 11063 break; 11064 } /* end IPPROTO_IPV6 */ 11065 default: 11066 *outlenp = 0; 11067 return (EINVAL); 11068 } 11069 /* 11070 * Common case of OK return with outval same as inval 11071 */ 11072 if (invalp != outvalp) { 11073 /* don't trust bcopy for identical src/dst */ 11074 (void) bcopy(invalp, outvalp, inlen); 11075 } 11076 *outlenp = inlen; 11077 return (0); 11078 } 11079 11080 /* 11081 * Update tcp_sticky_hdrs based on tcp_sticky_ipp. 11082 * The headers include ip6i_t (if needed), ip6_t, any sticky extension 11083 * headers, and the maximum size tcp header (to avoid reallocation 11084 * on the fly for additional tcp options). 11085 * Returns failure if can't allocate memory. 11086 */ 11087 static int 11088 tcp_build_hdrs(queue_t *q, tcp_t *tcp) 11089 { 11090 char *hdrs; 11091 uint_t hdrs_len; 11092 ip6i_t *ip6i; 11093 char buf[TCP_MAX_HDR_LENGTH]; 11094 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 11095 in6_addr_t src, dst; 11096 tcp_stack_t *tcps = tcp->tcp_tcps; 11097 11098 /* 11099 * save the existing tcp header and source/dest IP addresses 11100 */ 11101 bcopy(tcp->tcp_tcph, buf, tcp->tcp_tcp_hdr_len); 11102 src = tcp->tcp_ip6h->ip6_src; 11103 dst = tcp->tcp_ip6h->ip6_dst; 11104 hdrs_len = ip_total_hdrs_len_v6(ipp) + TCP_MAX_HDR_LENGTH; 11105 ASSERT(hdrs_len != 0); 11106 if (hdrs_len > tcp->tcp_iphc_len) { 11107 /* Need to reallocate */ 11108 hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP); 11109 if (hdrs == NULL) 11110 return (ENOMEM); 11111 if (tcp->tcp_iphc != NULL) { 11112 if (tcp->tcp_hdr_grown) { 11113 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 11114 } else { 11115 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 11116 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 11117 } 11118 tcp->tcp_iphc_len = 0; 11119 } 11120 ASSERT(tcp->tcp_iphc_len == 0); 11121 tcp->tcp_iphc = hdrs; 11122 tcp->tcp_iphc_len = hdrs_len; 11123 tcp->tcp_hdr_grown = B_TRUE; 11124 } 11125 ip_build_hdrs_v6((uchar_t *)tcp->tcp_iphc, 11126 hdrs_len - TCP_MAX_HDR_LENGTH, ipp, IPPROTO_TCP); 11127 11128 /* Set header fields not in ipp */ 11129 if (ipp->ipp_fields & IPPF_HAS_IP6I) { 11130 ip6i = (ip6i_t *)tcp->tcp_iphc; 11131 tcp->tcp_ip6h = (ip6_t *)&ip6i[1]; 11132 } else { 11133 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 11134 } 11135 /* 11136 * tcp->tcp_ip_hdr_len will include ip6i_t if there is one. 11137 * 11138 * tcp->tcp_tcp_hdr_len doesn't change here. 11139 */ 11140 tcp->tcp_ip_hdr_len = hdrs_len - TCP_MAX_HDR_LENGTH; 11141 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + tcp->tcp_ip_hdr_len); 11142 tcp->tcp_hdr_len = tcp->tcp_ip_hdr_len + tcp->tcp_tcp_hdr_len; 11143 11144 bcopy(buf, tcp->tcp_tcph, tcp->tcp_tcp_hdr_len); 11145 11146 tcp->tcp_ip6h->ip6_src = src; 11147 tcp->tcp_ip6h->ip6_dst = dst; 11148 11149 /* 11150 * If the hop limit was not set by ip_build_hdrs_v6(), set it to 11151 * the default value for TCP. 
11152 */ 11153 if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS)) 11154 tcp->tcp_ip6h->ip6_hops = tcps->tcps_ipv6_hoplimit; 11155 11156 /* 11157 * If we're setting extension headers after a connection 11158 * has been established, and if we have a routing header 11159 * among the extension headers, call ip_massage_options_v6 to 11160 * manipulate the routing header/ip6_dst and set the checksum 11161 * difference in the tcp header template. 11162 * (This happens in tcp_connect_ipv6 if the routing header 11163 * is set prior to the connect.) 11164 * Set the tcp_sum to zero first in case we've cleared a 11165 * routing header or don't have one at all. 11166 */ 11167 tcp->tcp_sum = 0; 11168 if ((tcp->tcp_state >= TCPS_SYN_SENT) && 11169 (tcp->tcp_ipp_fields & IPPF_RTHDR)) { 11170 ip6_rthdr_t *rth = ip_find_rthdr_v6(tcp->tcp_ip6h, 11171 (uint8_t *)tcp->tcp_tcph); 11172 if (rth != NULL) { 11173 tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, 11174 rth, tcps->tcps_netstack); 11175 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + 11176 (tcp->tcp_sum >> 16)); 11177 } 11178 } 11179 11180 /* Try to get everything in a single mblk */ 11181 (void) mi_set_sth_wroff(RD(q), hdrs_len + tcps->tcps_wroff_xtra); 11182 return (0); 11183 } 11184 11185 /* 11186 * Transfer any source route option from ipha to buf/dst in reversed form. 11187 */ 11188 static int 11189 tcp_opt_rev_src_route(ipha_t *ipha, char *buf, uchar_t *dst) 11190 { 11191 ipoptp_t opts; 11192 uchar_t *opt; 11193 uint8_t optval; 11194 uint8_t optlen; 11195 uint32_t len = 0; 11196 11197 for (optval = ipoptp_first(&opts, ipha); 11198 optval != IPOPT_EOL; 11199 optval = ipoptp_next(&opts)) { 11200 opt = opts.ipoptp_cur; 11201 optlen = opts.ipoptp_len; 11202 switch (optval) { 11203 int off1, off2; 11204 case IPOPT_SSRR: 11205 case IPOPT_LSRR: 11206 11207 /* Reverse source route */ 11208 /* 11209 * First entry should be the next to last one in the 11210 * current source route (the last entry is our 11211 * address.) 11212 * The last entry should be the final destination. 11213 */ 11214 buf[IPOPT_OPTVAL] = (uint8_t)optval; 11215 buf[IPOPT_OLEN] = (uint8_t)optlen; 11216 off1 = IPOPT_MINOFF_SR - 1; 11217 off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1; 11218 if (off2 < 0) { 11219 /* No entries in source route */ 11220 break; 11221 } 11222 bcopy(opt + off2, dst, IP_ADDR_LEN); 11223 /* 11224 * Note: use src since ipha has not had its src 11225 * and dst reversed (it is in the state it was 11226 * received.) 11227 */ 11228 bcopy(&ipha->ipha_src, buf + off2, 11229 IP_ADDR_LEN); 11230 off2 -= IP_ADDR_LEN; 11231 11232 while (off2 > 0) { 11233 bcopy(opt + off2, buf + off1, 11234 IP_ADDR_LEN); 11235 off1 += IP_ADDR_LEN; 11236 off2 -= IP_ADDR_LEN; 11237 } 11238 buf[IPOPT_OFFSET] = IPOPT_MINOFF_SR; 11239 buf += optlen; 11240 len += optlen; 11241 break; 11242 } 11243 } 11244 done: 11245 /* Pad the resulting options */ 11246 while (len & 0x3) { 11247 *buf++ = IPOPT_EOL; 11248 len++; 11249 } 11250 return (len); 11251 } 11252 11253 11254 /* 11255 * Extract and reverse a source route from ipha (if any) 11256 * and then update the relevant fields in both tcp_t and the standard header.
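* A worked sketch with illustrative addresses: if a segment from
* source S arrives carrying a recorded source route [A, B], with B
* the most recently recorded hop, then the reply uses B as ipha_dst
* (handed back through the dst argument of tcp_opt_rev_src_route())
* and the rebuilt option holds [A, S], S being taken from ipha_src;
* the reply thus retraces us -> B -> A -> S.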
11257 */ 11258 static void 11259 tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha) 11260 { 11261 char buf[TCP_MAX_HDR_LENGTH]; 11262 uint_t tcph_len; 11263 int len; 11264 11265 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 11266 len = IPH_HDR_LENGTH(ipha); 11267 if (len == IP_SIMPLE_HDR_LENGTH) 11268 /* Nothing to do */ 11269 return; 11270 if (len > IP_SIMPLE_HDR_LENGTH + TCP_MAX_IP_OPTIONS_LENGTH || 11271 (len & 0x3)) 11272 return; 11273 11274 tcph_len = tcp->tcp_tcp_hdr_len; 11275 bcopy(tcp->tcp_tcph, buf, tcph_len); 11276 tcp->tcp_sum = (tcp->tcp_ipha->ipha_dst >> 16) + 11277 (tcp->tcp_ipha->ipha_dst & 0xffff); 11278 len = tcp_opt_rev_src_route(ipha, (char *)tcp->tcp_ipha + 11279 IP_SIMPLE_HDR_LENGTH, (uchar_t *)&tcp->tcp_ipha->ipha_dst); 11280 len += IP_SIMPLE_HDR_LENGTH; 11281 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 11282 (tcp->tcp_ipha->ipha_dst & 0xffff)); 11283 if ((int)tcp->tcp_sum < 0) 11284 tcp->tcp_sum--; 11285 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 11286 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16)); 11287 tcp->tcp_tcph = (tcph_t *)((char *)tcp->tcp_ipha + len); 11288 bcopy(buf, tcp->tcp_tcph, tcph_len); 11289 tcp->tcp_ip_hdr_len = len; 11290 tcp->tcp_ipha->ipha_version_and_hdr_length = 11291 (IP_VERSION << 4) | (len >> 2); 11292 len += tcph_len; 11293 tcp->tcp_hdr_len = len; 11294 } 11295 11296 /* 11297 * Copy the standard header into its new location, 11298 * lay in the new options and then update the relevant 11299 * fields in both tcp_t and the standard header. 11300 */ 11301 static int 11302 tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly, uchar_t *ptr, uint_t len) 11303 { 11304 uint_t tcph_len; 11305 uint8_t *ip_optp; 11306 tcph_t *new_tcph; 11307 tcp_stack_t *tcps = tcp->tcp_tcps; 11308 11309 if ((len > TCP_MAX_IP_OPTIONS_LENGTH) || (len & 0x3)) 11310 return (EINVAL); 11311 11312 if (len > IP_MAX_OPT_LENGTH - tcp->tcp_label_len) 11313 return (EINVAL); 11314 11315 if (checkonly) { 11316 /* 11317 * do not really set, just pretend to - T_CHECK 11318 */ 11319 return (0); 11320 } 11321 11322 ip_optp = (uint8_t *)tcp->tcp_ipha + IP_SIMPLE_HDR_LENGTH; 11323 if (tcp->tcp_label_len > 0) { 11324 int padlen; 11325 uint8_t opt; 11326 11327 /* convert list termination to no-ops */ 11328 padlen = tcp->tcp_label_len - ip_optp[IPOPT_OLEN]; 11329 ip_optp += ip_optp[IPOPT_OLEN]; 11330 opt = len > 0 ? IPOPT_NOP : IPOPT_EOL; 11331 while (--padlen >= 0) 11332 *ip_optp++ = opt; 11333 } 11334 tcph_len = tcp->tcp_tcp_hdr_len; 11335 new_tcph = (tcph_t *)(ip_optp + len); 11336 ovbcopy(tcp->tcp_tcph, new_tcph, tcph_len); 11337 tcp->tcp_tcph = new_tcph; 11338 bcopy(ptr, ip_optp, len); 11339 11340 len += IP_SIMPLE_HDR_LENGTH + tcp->tcp_label_len; 11341 11342 tcp->tcp_ip_hdr_len = len; 11343 tcp->tcp_ipha->ipha_version_and_hdr_length = 11344 (IP_VERSION << 4) | (len >> 2); 11345 tcp->tcp_hdr_len = len + tcph_len; 11346 if (!TCP_IS_DETACHED(tcp)) { 11347 /* Always allocate room for all options. 
*/ 11348 (void) mi_set_sth_wroff(tcp->tcp_rq, 11349 TCP_MAX_COMBINED_HEADER_LENGTH + tcps->tcps_wroff_xtra); 11350 } 11351 return (0); 11352 } 11353 11354 /* Get callback routine passed to nd_load by tcp_param_register */ 11355 /* ARGSUSED */ 11356 static int 11357 tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 11358 { 11359 tcpparam_t *tcppa = (tcpparam_t *)cp; 11360 11361 (void) mi_mpprintf(mp, "%u", tcppa->tcp_param_val); 11362 return (0); 11363 } 11364 11365 /* 11366 * Walk through the param array specified registering each element with the 11367 * named dispatch handler. 11368 */ 11369 static boolean_t 11370 tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt, tcp_stack_t *tcps) 11371 { 11372 for (; cnt-- > 0; tcppa++) { 11373 if (tcppa->tcp_param_name && tcppa->tcp_param_name[0]) { 11374 if (!nd_load(ndp, tcppa->tcp_param_name, 11375 tcp_param_get, tcp_param_set, 11376 (caddr_t)tcppa)) { 11377 nd_free(ndp); 11378 return (B_FALSE); 11379 } 11380 } 11381 } 11382 tcps->tcps_wroff_xtra_param = kmem_zalloc(sizeof (tcpparam_t), 11383 KM_SLEEP); 11384 bcopy(&lcl_tcp_wroff_xtra_param, tcps->tcps_wroff_xtra_param, 11385 sizeof (tcpparam_t)); 11386 if (!nd_load(ndp, tcps->tcps_wroff_xtra_param->tcp_param_name, 11387 tcp_param_get, tcp_param_set_aligned, 11388 (caddr_t)tcps->tcps_wroff_xtra_param)) { 11389 nd_free(ndp); 11390 return (B_FALSE); 11391 } 11392 tcps->tcps_mdt_head_param = kmem_zalloc(sizeof (tcpparam_t), 11393 KM_SLEEP); 11394 bcopy(&lcl_tcp_mdt_head_param, tcps->tcps_mdt_head_param, 11395 sizeof (tcpparam_t)); 11396 if (!nd_load(ndp, tcps->tcps_mdt_head_param->tcp_param_name, 11397 tcp_param_get, tcp_param_set_aligned, 11398 (caddr_t)tcps->tcps_mdt_head_param)) { 11399 nd_free(ndp); 11400 return (B_FALSE); 11401 } 11402 tcps->tcps_mdt_tail_param = kmem_zalloc(sizeof (tcpparam_t), 11403 KM_SLEEP); 11404 bcopy(&lcl_tcp_mdt_tail_param, tcps->tcps_mdt_tail_param, 11405 sizeof (tcpparam_t)); 11406 if (!nd_load(ndp, tcps->tcps_mdt_tail_param->tcp_param_name, 11407 tcp_param_get, tcp_param_set_aligned, 11408 (caddr_t)tcps->tcps_mdt_tail_param)) { 11409 nd_free(ndp); 11410 return (B_FALSE); 11411 } 11412 tcps->tcps_mdt_max_pbufs_param = kmem_zalloc(sizeof (tcpparam_t), 11413 KM_SLEEP); 11414 bcopy(&lcl_tcp_mdt_max_pbufs_param, tcps->tcps_mdt_max_pbufs_param, 11415 sizeof (tcpparam_t)); 11416 if (!nd_load(ndp, tcps->tcps_mdt_max_pbufs_param->tcp_param_name, 11417 tcp_param_get, tcp_param_set_aligned, 11418 (caddr_t)tcps->tcps_mdt_max_pbufs_param)) { 11419 nd_free(ndp); 11420 return (B_FALSE); 11421 } 11422 if (!nd_load(ndp, "tcp_extra_priv_ports", 11423 tcp_extra_priv_ports_get, NULL, NULL)) { 11424 nd_free(ndp); 11425 return (B_FALSE); 11426 } 11427 if (!nd_load(ndp, "tcp_extra_priv_ports_add", 11428 NULL, tcp_extra_priv_ports_add, NULL)) { 11429 nd_free(ndp); 11430 return (B_FALSE); 11431 } 11432 if (!nd_load(ndp, "tcp_extra_priv_ports_del", 11433 NULL, tcp_extra_priv_ports_del, NULL)) { 11434 nd_free(ndp); 11435 return (B_FALSE); 11436 } 11437 if (!nd_load(ndp, "tcp_status", tcp_status_report, NULL, 11438 NULL)) { 11439 nd_free(ndp); 11440 return (B_FALSE); 11441 } 11442 if (!nd_load(ndp, "tcp_bind_hash", tcp_bind_hash_report, 11443 NULL, NULL)) { 11444 nd_free(ndp); 11445 return (B_FALSE); 11446 } 11447 if (!nd_load(ndp, "tcp_listen_hash", 11448 tcp_listen_hash_report, NULL, NULL)) { 11449 nd_free(ndp); 11450 return (B_FALSE); 11451 } 11452 if (!nd_load(ndp, "tcp_conn_hash", tcp_conn_hash_report, 11453 NULL, NULL)) { 11454 nd_free(ndp); 11455 return (B_FALSE); 11456 
} 11457 if (!nd_load(ndp, "tcp_acceptor_hash", 11458 tcp_acceptor_hash_report, NULL, NULL)) { 11459 nd_free(ndp); 11460 return (B_FALSE); 11461 } 11462 if (!nd_load(ndp, "tcp_host_param", tcp_host_param_report, 11463 tcp_host_param_set, NULL)) { 11464 nd_free(ndp); 11465 return (B_FALSE); 11466 } 11467 if (!nd_load(ndp, "tcp_host_param_ipv6", 11468 tcp_host_param_report, tcp_host_param_set_ipv6, NULL)) { 11469 nd_free(ndp); 11470 return (B_FALSE); 11471 } 11472 if (!nd_load(ndp, "tcp_1948_phrase", NULL, 11473 tcp_1948_phrase_set, NULL)) { 11474 nd_free(ndp); 11475 return (B_FALSE); 11476 } 11477 if (!nd_load(ndp, "tcp_reserved_port_list", 11478 tcp_reserved_port_list, NULL, NULL)) { 11479 nd_free(ndp); 11480 return (B_FALSE); 11481 } 11482 /* 11483 * Dummy ndd variables - only to convey obsolescence information 11484 * through printing of their name (no get or set routines) 11485 * XXX Remove in future releases ? 11486 */ 11487 if (!nd_load(ndp, 11488 "tcp_close_wait_interval(obsoleted - " 11489 "use tcp_time_wait_interval)", NULL, NULL, NULL)) { 11490 nd_free(ndp); 11491 return (B_FALSE); 11492 } 11493 return (B_TRUE); 11494 } 11495 11496 /* ndd set routine for tcp_wroff_xtra, tcp_mdt_hdr_{head,tail}_min. */ 11497 /* ARGSUSED */ 11498 static int 11499 tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 11500 cred_t *cr) 11501 { 11502 long new_value; 11503 tcpparam_t *tcppa = (tcpparam_t *)cp; 11504 11505 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11506 new_value < tcppa->tcp_param_min || 11507 new_value > tcppa->tcp_param_max) { 11508 return (EINVAL); 11509 } 11510 /* 11511 * Need to make sure new_value is a multiple of 4. If it is not, 11512 * round it up. For future 64 bit requirement, we actually make it 11513 * a multiple of 8. 11514 */ 11515 if (new_value & 0x7) { 11516 new_value = (new_value & ~0x7) + 0x8; 11517 } 11518 tcppa->tcp_param_val = new_value; 11519 return (0); 11520 } 11521 11522 /* Set callback routine passed to nd_load by tcp_param_register */ 11523 /* ARGSUSED */ 11524 static int 11525 tcp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 11526 { 11527 long new_value; 11528 tcpparam_t *tcppa = (tcpparam_t *)cp; 11529 11530 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11531 new_value < tcppa->tcp_param_min || 11532 new_value > tcppa->tcp_param_max) { 11533 return (EINVAL); 11534 } 11535 tcppa->tcp_param_val = new_value; 11536 return (0); 11537 } 11538 11539 /* 11540 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 11541 * is filled, return as much as we can. The message passed in may be 11542 * multi-part, chained using b_cont. "start" is the starting sequence 11543 * number for this piece. 11544 */ 11545 static mblk_t * 11546 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 11547 { 11548 uint32_t end; 11549 mblk_t *mp1; 11550 mblk_t *mp2; 11551 mblk_t *next_mp; 11552 uint32_t u1; 11553 tcp_stack_t *tcps = tcp->tcp_tcps; 11554 11555 /* Walk through all the new pieces. */ 11556 do { 11557 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 11558 (uintptr_t)INT_MAX); 11559 end = start + (int)(mp->b_wptr - mp->b_rptr); 11560 next_mp = mp->b_cont; 11561 if (start == end) { 11562 /* Empty. Blast it. 
*/ 11563 freeb(mp); 11564 continue; 11565 } 11566 mp->b_cont = NULL; 11567 TCP_REASS_SET_SEQ(mp, start); 11568 TCP_REASS_SET_END(mp, end); 11569 mp1 = tcp->tcp_reass_tail; 11570 if (!mp1) { 11571 tcp->tcp_reass_tail = mp; 11572 tcp->tcp_reass_head = mp; 11573 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11574 UPDATE_MIB(&tcps->tcps_mib, 11575 tcpInDataUnorderBytes, end - start); 11576 continue; 11577 } 11578 /* New stuff completely beyond tail? */ 11579 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 11580 /* Link it on end. */ 11581 mp1->b_cont = mp; 11582 tcp->tcp_reass_tail = mp; 11583 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11584 UPDATE_MIB(&tcps->tcps_mib, 11585 tcpInDataUnorderBytes, end - start); 11586 continue; 11587 } 11588 mp1 = tcp->tcp_reass_head; 11589 u1 = TCP_REASS_SEQ(mp1); 11590 /* New stuff at the front? */ 11591 if (SEQ_LT(start, u1)) { 11592 /* Yes... Check for overlap. */ 11593 mp->b_cont = mp1; 11594 tcp->tcp_reass_head = mp; 11595 tcp_reass_elim_overlap(tcp, mp); 11596 continue; 11597 } 11598 /* 11599 * The new piece fits somewhere between the head and tail. 11600 * We find our slot, where mp1 precedes us and mp2 trails. 11601 */ 11602 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 11603 u1 = TCP_REASS_SEQ(mp2); 11604 if (SEQ_LEQ(start, u1)) 11605 break; 11606 } 11607 /* Link ourselves in */ 11608 mp->b_cont = mp2; 11609 mp1->b_cont = mp; 11610 11611 /* Trim overlap with following mblk(s) first */ 11612 tcp_reass_elim_overlap(tcp, mp); 11613 11614 /* Trim overlap with preceding mblk */ 11615 tcp_reass_elim_overlap(tcp, mp1); 11616 11617 } while (start = end, mp = next_mp); 11618 mp1 = tcp->tcp_reass_head; 11619 /* Anything ready to go? */ 11620 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 11621 return (NULL); 11622 /* Eat what we can off the queue */ 11623 for (;;) { 11624 mp = mp1->b_cont; 11625 end = TCP_REASS_END(mp1); 11626 TCP_REASS_SET_SEQ(mp1, 0); 11627 TCP_REASS_SET_END(mp1, 0); 11628 if (!mp) { 11629 tcp->tcp_reass_tail = NULL; 11630 break; 11631 } 11632 if (end != TCP_REASS_SEQ(mp)) { 11633 mp1->b_cont = NULL; 11634 break; 11635 } 11636 mp1 = mp; 11637 } 11638 mp1 = tcp->tcp_reass_head; 11639 tcp->tcp_reass_head = mp; 11640 return (mp1); 11641 } 11642 11643 /* Eliminate any overlap that mp may have over later mblks */ 11644 static void 11645 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 11646 { 11647 uint32_t end; 11648 mblk_t *mp1; 11649 uint32_t u1; 11650 tcp_stack_t *tcps = tcp->tcp_tcps; 11651 11652 end = TCP_REASS_END(mp); 11653 while ((mp1 = mp->b_cont) != NULL) { 11654 u1 = TCP_REASS_SEQ(mp1); 11655 if (!SEQ_GT(end, u1)) 11656 break; 11657 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 11658 mp->b_wptr -= end - u1; 11659 TCP_REASS_SET_END(mp, u1); 11660 BUMP_MIB(&tcps->tcps_mib, tcpInDataPartDupSegs); 11661 UPDATE_MIB(&tcps->tcps_mib, 11662 tcpInDataPartDupBytes, end - u1); 11663 break; 11664 } 11665 mp->b_cont = mp1->b_cont; 11666 TCP_REASS_SET_SEQ(mp1, 0); 11667 TCP_REASS_SET_END(mp1, 0); 11668 freeb(mp1); 11669 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs); 11670 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, end - u1); 11671 } 11672 if (!mp1) 11673 tcp->tcp_reass_tail = mp; 11674 } 11675 11676 /* 11677 * Send up all messages queued on tcp_rcv_list. 
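* Returns 0, or TH_ACK_NEEDED when draining opened our receive
* window by at least one MSS (receiver SWS avoidance) and the
* window last advertised to the peer is small enough that an
* immediate window update should be sent.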
11678 */ 11679 static uint_t 11680 tcp_rcv_drain(queue_t *q, tcp_t *tcp) 11681 { 11682 mblk_t *mp; 11683 uint_t ret = 0; 11684 uint_t thwin; 11685 #ifdef DEBUG 11686 uint_t cnt = 0; 11687 #endif 11688 tcp_stack_t *tcps = tcp->tcp_tcps; 11689 11690 /* Can't drain on an eager connection */ 11691 if (tcp->tcp_listener != NULL) 11692 return (ret); 11693 11694 /* 11695 * Handle two cases here: we are currently fused or we were 11696 * previously fused and have some urgent data to be delivered 11697 * upstream. The latter happens because we either ran out of 11698 * memory or were detached and therefore sending the SIGURG was 11699 * deferred until this point. In either case we pass control 11700 * over to tcp_fuse_rcv_drain() since it may need to complete 11701 * some work. 11702 */ 11703 if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) { 11704 ASSERT(tcp->tcp_fused_sigurg_mp != NULL); 11705 if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL : 11706 &tcp->tcp_fused_sigurg_mp)) 11707 return (ret); 11708 } 11709 11710 while ((mp = tcp->tcp_rcv_list) != NULL) { 11711 tcp->tcp_rcv_list = mp->b_next; 11712 mp->b_next = NULL; 11713 #ifdef DEBUG 11714 cnt += msgdsize(mp); 11715 #endif 11716 /* Does this need SSL processing first? */ 11717 if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) { 11718 DTRACE_PROBE1(kssl_mblk__ksslinput_rcvdrain, 11719 mblk_t *, mp); 11720 tcp_kssl_input(tcp, mp); 11721 continue; 11722 } 11723 putnext(q, mp); 11724 } 11725 ASSERT(cnt == tcp->tcp_rcv_cnt); 11726 tcp->tcp_rcv_last_head = NULL; 11727 tcp->tcp_rcv_last_tail = NULL; 11728 tcp->tcp_rcv_cnt = 0; 11729 11730 /* Learn the latest rwnd information that we sent to the other side. */ 11731 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 11732 << tcp->tcp_rcv_ws; 11733 /* This is peer's calculated send window (our receive window). */ 11734 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 11735 /* 11736 * Increase the receive window to max. But we need to do receiver 11737 * SWS avoidance. This means that we need to check that the increase 11738 * of the receive window is at least 1 MSS. 11739 */ 11740 if (canputnext(q) && (q->q_hiwat - thwin >= tcp->tcp_mss)) { 11741 /* 11742 * If the window that the other side knows is less than the max 11743 * deferred ACKs' worth of segments, send an update immediately. 11744 */ 11745 if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) { 11746 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate); 11747 ret = TH_ACK_NEEDED; 11748 } 11749 tcp->tcp_rwnd = q->q_hiwat; 11750 } 11751 /* No need for the push timer now. */ 11752 if (tcp->tcp_push_tid != 0) { 11753 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 11754 tcp->tcp_push_tid = 0; 11755 } 11756 return (ret); 11757 } 11758 11759 /* 11760 * Queue data on tcp_rcv_list which is a b_next chain. 11761 * tcp_rcv_last_head/tail is the last element of this chain. 11762 * Each element of the chain is a b_cont chain. 11763 * 11764 * M_DATA messages are added to the current element. 11765 * Other messages are added as new (b_next) elements.
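* A rough sketch of the resulting structure:
*
*	tcp_rcv_list --b_next--> elem --b_next--> tcp_rcv_last_head
*	     |                    |                    |
*	   b_cont               b_cont               b_cont
*	     v                    v                    v
*	   mblk                 mblk           tcp_rcv_last_tail
*
* where consecutive messages of the same db_type are appended to the
* current element's b_cont chain and a type change starts a new
* b_next element.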
11766 */ 11767 void 11768 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len) 11769 { 11770 ASSERT(seg_len == msgdsize(mp)); 11771 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 11772 11773 if (tcp->tcp_rcv_list == NULL) { 11774 ASSERT(tcp->tcp_rcv_last_head == NULL); 11775 tcp->tcp_rcv_list = mp; 11776 tcp->tcp_rcv_last_head = mp; 11777 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 11778 tcp->tcp_rcv_last_tail->b_cont = mp; 11779 } else { 11780 tcp->tcp_rcv_last_head->b_next = mp; 11781 tcp->tcp_rcv_last_head = mp; 11782 } 11783 11784 while (mp->b_cont) 11785 mp = mp->b_cont; 11786 11787 tcp->tcp_rcv_last_tail = mp; 11788 tcp->tcp_rcv_cnt += seg_len; 11789 tcp->tcp_rwnd -= seg_len; 11790 } 11791 11792 /* 11793 * DEFAULT TCP ENTRY POINT via squeue on READ side. 11794 * 11795 * This is the default entry function into TCP on the read side. TCP is 11796 * always entered via squeue, i.e. using squeues for mutual exclusion. 11797 * When the classifier does a lookup to find the tcp, it also puts a reference 11798 * on the associated conn structure, so the tcp is guaranteed to exist 11799 * when we come here. We still need to check the state because it might 11800 * well have been closed. The squeue processing function, i.e. squeue_enter, 11801 * squeue_enter_nodrain, or squeue_drain, is responsible for doing the 11802 * CONN_DEC_REF. 11803 * 11804 * Apart from the default entry point, IP also sends packets directly to 11805 * tcp_rput_data for the AF_INET fast path and to tcp_conn_request for incoming 11806 * connections. 11807 */ 11808 void 11809 tcp_input(void *arg, mblk_t *mp, void *arg2) 11810 { 11811 conn_t *connp = (conn_t *)arg; 11812 tcp_t *tcp = (tcp_t *)connp->conn_tcp; 11813 11814 /* arg2 is the sqp */ 11815 ASSERT(arg2 != NULL); 11816 ASSERT(mp != NULL); 11817 11818 /* 11819 * Don't accept any input on a closed tcp, as this TCP logically does 11820 * not exist on the system. Don't proceed further with this TCP. 11821 * For example, this packet could trigger another close of this tcp, 11822 * which would be disastrous for tcp_refcnt. tcp_close_detached / 11823 * tcp_clean_death / tcp_closei_local must be called at most once 11824 * on a TCP. In this case we need to refeed the packet into the 11825 * classifier and figure out where the packet should go. Need to 11826 * preserve the recv_ill somehow. Until we figure that out, for 11827 * now just drop the packet if we can't classify the packet. 11828 */ 11829 if (tcp->tcp_state == TCPS_CLOSED || 11830 tcp->tcp_state == TCPS_BOUND) { 11831 conn_t *new_connp; 11832 ip_stack_t *ipst = tcp->tcp_tcps->tcps_netstack->netstack_ip; 11833 11834 new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst); 11835 if (new_connp != NULL) { 11836 tcp_reinput(new_connp, mp, arg2); 11837 return; 11838 } 11839 /* We failed to classify. For now just drop the packet */ 11840 freemsg(mp); 11841 return; 11842 } 11843 11844 if (DB_TYPE(mp) == M_DATA) 11845 tcp_rput_data(connp, mp, arg2); 11846 else 11847 tcp_rput_common(tcp, mp); 11848 } 11849 11850 /* 11851 * The read side put procedure. 11852 * The packets passed up by IP are assumed to be aligned according to 11853 * OK_32PTR, with the IP+TCP headers fitting in the first mblk. 11854 */ 11855 static void 11856 tcp_rput_common(tcp_t *tcp, mblk_t *mp) 11857 { 11858 /* 11859 * tcp_rput_data() does not expect M_CTL except for the case 11860 * where tcp_ipv6_recvancillary is set and we get an IN_PKTINFO 11861 * type.
We need to make sure that any other M_CTLs don't make 11862 * it to tcp_rput_data(), since it is not expecting any and doesn't 11863 * check for them. 11864 */ 11865 if (DB_TYPE(mp) == M_CTL) { 11866 switch (*(uint32_t *)(mp->b_rptr)) { 11867 case TCP_IOC_ABORT_CONN: 11868 /* 11869 * Handle connection abort request. 11870 */ 11871 tcp_ioctl_abort_handler(tcp, mp); 11872 return; 11873 case IPSEC_IN: 11874 /* 11875 * Only secure ICMP messages arrive in TCP, and they 11876 * don't go through the data path. 11877 */ 11878 tcp_icmp_error(tcp, mp); 11879 return; 11880 case IN_PKTINFO: 11881 /* 11882 * Handle the IPV6_RECVPKTINFO socket option on AF_INET6 11883 * sockets that are receiving IPv4 traffic. 11884 */ 11885 ASSERT(tcp->tcp_family == AF_INET6); 11886 ASSERT(tcp->tcp_ipv6_recvancillary & 11887 TCP_IPV6_RECVPKTINFO); 11888 tcp_rput_data(tcp->tcp_connp, mp, 11889 tcp->tcp_connp->conn_sqp); 11890 return; 11891 case MDT_IOC_INFO_UPDATE: 11892 /* 11893 * Handle Multidata information update; the 11894 * following routine will free the message. 11895 */ 11896 if (tcp->tcp_connp->conn_mdt_ok) { 11897 tcp_mdt_update(tcp, 11898 &((ip_mdt_info_t *)mp->b_rptr)->mdt_capab, 11899 B_FALSE); 11900 } 11901 freemsg(mp); 11902 return; 11903 case LSO_IOC_INFO_UPDATE: 11904 /* 11905 * Handle LSO information update; the following 11906 * routine will free the message. 11907 */ 11908 if (tcp->tcp_connp->conn_lso_ok) { 11909 tcp_lso_update(tcp, 11910 &((ip_lso_info_t *)mp->b_rptr)->lso_capab); 11911 } 11912 freemsg(mp); 11913 return; 11914 default: 11915 /* 11916 * tcp_icmp_error() will process the M_CTL packets. 11917 * Non-ICMP packets, if any, will be discarded in 11918 * tcp_icmp_error(). We will process the ICMP packet 11919 * even if we are TCP_IS_DETACHED_NONEAGER, as the 11920 * incoming ICMP packet may result in changing 11921 * the tcp_mss, which we would need if we have 11922 * packets to retransmit. 11923 */ 11924 tcp_icmp_error(tcp, mp); 11925 return; 11926 } 11927 } 11928 11929 /* No point processing the message if tcp is already closed */ 11930 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 11931 freemsg(mp); 11932 return; 11933 } 11934 11935 tcp_rput_other(tcp, mp); 11936 } 11937 11938 11939 /* The minimum value of the smoothed mean deviation used in RTO calculation. */ 11940 #define TCP_SD_MIN 400 11941 11942 /* 11943 * Set RTO for this connection. The formula is from Jacobson and Karels' 11944 * "Congestion Avoidance and Control" in SIGCOMM '88. The variable names 11945 * are the same as those in Appendix A.2 of that paper. 11946 * 11947 * m = new measurement 11948 * sa = smoothed RTT average (8 * average estimates). 11949 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates). 11950 */ 11951 static void 11952 tcp_set_rto(tcp_t *tcp, clock_t rtt) 11953 { 11954 long m = TICK_TO_MSEC(rtt); 11955 clock_t sa = tcp->tcp_rtt_sa; 11956 clock_t sv = tcp->tcp_rtt_sd; 11957 clock_t rto; 11958 tcp_stack_t *tcps = tcp->tcp_tcps; 11959 11960 BUMP_MIB(&tcps->tcps_mib, tcpRttUpdate); 11961 tcp->tcp_rtt_update++; 11962 11963 /* A nonzero tcp_rtt_sa means there is an existing estimate to update. */ 11964 if (sa != 0) { 11965 /* 11966 * Update average estimator: 11967 * new rtt = old rtt + 1/8 Error (= 7/8 old rtt + 1/8 m) 11968 */ 11969 11970 /* m is now Error in estimate. */ 11971 m -= sa >> 3; 11972 if ((sa += m) <= 0) { 11973 /* 11974 * Don't allow the smoothed average to be negative. 11975 * We use 0 to denote reinitialization of the 11976 * variables.
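* Since sa is kept scaled by 8, the clamp below to 1 (1/8 ms) is
* the smallest positive value that is still distinct from the
* 'reinitialize' marker of 0.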
11977 */ 11978 sa = 1; 11979 } 11980 11981 /* 11982 * Update deviation estimator: 11983 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 11984 */ 11985 if (m < 0) 11986 m = -m; 11987 m -= sv >> 2; 11988 sv += m; 11989 } else { 11990 /* 11991 * This follows BSD's implementation. So the reinitialized 11992 * RTO is 3 * m. We cannot go less than 2 * m because if the 11993 * link is bandwidth dominated, doubling the window size 11994 * during slow start means doubling the RTT. We want to be 11995 * more conservative when we reinitialize our estimates. 3 11996 * is just a convenient number. 11997 */ 11998 sa = m << 3; 11999 sv = m << 1; 12000 } 12001 if (sv < TCP_SD_MIN) { 12002 /* 12003 * We do not know whether sa captures the delayed-ACK 12004 * effect; in a long train of segments, a receiver 12005 * does not delay its ACKs. So set the minimum of sv 12006 * to TCP_SD_MIN, which defaults to 400 ms, twice 12007 * the BSD DATO. That means the minimum of the mean 12008 * deviation is 100 ms. 12009 * 12010 */ 12011 sv = TCP_SD_MIN; 12012 } 12013 tcp->tcp_rtt_sa = sa; 12014 tcp->tcp_rtt_sd = sv; 12015 /* 12016 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 12017 * 12018 * Add tcp_rexmit_interval_extra in case of an extreme environment 12019 * where the algorithm fails to work. The default value of 12020 * tcp_rexmit_interval_extra should be 0. 12021 * 12022 * As we use a finer grained clock than BSD and update the 12023 * RTO for every ACK, add in another .25 of RTT to the 12024 * deviation of RTO to accommodate burstiness of 1/4 of 12025 * window size. 12026 */ 12027 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 12028 12029 if (rto > tcps->tcps_rexmit_interval_max) { 12030 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 12031 } else if (rto < tcps->tcps_rexmit_interval_min) { 12032 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 12033 } else { 12034 tcp->tcp_rto = rto; 12035 } 12036 12037 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 12038 tcp->tcp_timer_backoff = 0; 12039 } 12040 12041 /* 12042 * tcp_get_seg_mp() is called to get the pointer to a segment in the 12043 * send queue which starts at the given seq. no. 12044 * 12045 * Parameters: 12046 * tcp_t *tcp: the tcp instance pointer. 12047 * uint32_t seq: the starting seq. no of the requested segment. 12048 * int32_t *off: after the execution, *off will be the offset to 12049 * the returned mblk which points to the requested seq no. 12050 * It is the caller's responsibility to send in a non-null off. 12051 * 12052 * Return: 12053 * An mblk_t pointer pointing to the requested segment in the send queue. 12054 */ 12055 static mblk_t * 12056 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off) 12057 { 12058 int32_t cnt; 12059 mblk_t *mp; 12060 12061 /* Defensive coding. Make sure we don't send incorrect data. */ 12062 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt)) 12063 return (NULL); 12064 12065 cnt = seq - tcp->tcp_suna; 12066 mp = tcp->tcp_xmit_head; 12067 while (cnt > 0 && mp != NULL) { 12068 cnt -= mp->b_wptr - mp->b_rptr; 12069 if (cnt < 0) { 12070 cnt += mp->b_wptr - mp->b_rptr; 12071 break; 12072 } 12073 mp = mp->b_cont; 12074 } 12075 ASSERT(mp != NULL); 12076 *off = cnt; 12077 return (mp); 12078 } 12079 12080 /* 12081 * This function handles all retransmissions if SACK is enabled for this 12082 * connection. First it calculates how many segments can be retransmitted 12083 * based on tcp_pipe. Then it goes through the notsack list to find eligible 12084 * segments.
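* (The tcp_pipe bookkeeping here is in the spirit of the
* conservative SACK-based loss recovery later codified in RFC 3517,
* where 'pipe' estimates the data still outstanding in the network.)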
A segment is eligible if sack_cnt for that segment is greater 12085 * than or equal tcp_dupack_fast_retransmit. After it has retransmitted 12086 * all eligible segments, it checks to see if TCP can send some new segments 12087 * (fast recovery). If it can, set the appropriate flag for tcp_rput_data(). 12088 * 12089 * Parameters: 12090 * tcp_t *tcp: the tcp structure of the connection. 12091 * uint_t *flags: in return, appropriate value will be set for 12092 * tcp_rput_data(). 12093 */ 12094 static void 12095 tcp_sack_rxmit(tcp_t *tcp, uint_t *flags) 12096 { 12097 notsack_blk_t *notsack_blk; 12098 int32_t usable_swnd; 12099 int32_t mss; 12100 uint32_t seg_len; 12101 mblk_t *xmit_mp; 12102 tcp_stack_t *tcps = tcp->tcp_tcps; 12103 12104 ASSERT(tcp->tcp_sack_info != NULL); 12105 ASSERT(tcp->tcp_notsack_list != NULL); 12106 ASSERT(tcp->tcp_rexmit == B_FALSE); 12107 12108 /* Defensive coding in case there is a bug... */ 12109 if (tcp->tcp_notsack_list == NULL) { 12110 return; 12111 } 12112 notsack_blk = tcp->tcp_notsack_list; 12113 mss = tcp->tcp_mss; 12114 12115 /* 12116 * Limit the num of outstanding data in the network to be 12117 * tcp_cwnd_ssthresh, which is half of the original congestion wnd. 12118 */ 12119 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 12120 12121 /* At least retransmit 1 MSS of data. */ 12122 if (usable_swnd <= 0) { 12123 usable_swnd = mss; 12124 } 12125 12126 /* Make sure no new RTT samples will be taken. */ 12127 tcp->tcp_csuna = tcp->tcp_snxt; 12128 12129 notsack_blk = tcp->tcp_notsack_list; 12130 while (usable_swnd > 0) { 12131 mblk_t *snxt_mp, *tmp_mp; 12132 tcp_seq begin = tcp->tcp_sack_snxt; 12133 tcp_seq end; 12134 int32_t off; 12135 12136 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) { 12137 if (SEQ_GT(notsack_blk->end, begin) && 12138 (notsack_blk->sack_cnt >= 12139 tcps->tcps_dupack_fast_retransmit)) { 12140 end = notsack_blk->end; 12141 if (SEQ_LT(begin, notsack_blk->begin)) { 12142 begin = notsack_blk->begin; 12143 } 12144 break; 12145 } 12146 } 12147 /* 12148 * All holes are filled. Manipulate tcp_cwnd to send more 12149 * if we can. Note that after the SACK recovery, tcp_cwnd is 12150 * set to tcp_cwnd_ssthresh. 12151 */ 12152 if (notsack_blk == NULL) { 12153 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 12154 if (usable_swnd <= 0 || tcp->tcp_unsent == 0) { 12155 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna; 12156 ASSERT(tcp->tcp_cwnd > 0); 12157 return; 12158 } else { 12159 usable_swnd = usable_swnd / mss; 12160 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna + 12161 MAX(usable_swnd * mss, mss); 12162 *flags |= TH_XMIT_NEEDED; 12163 return; 12164 } 12165 } 12166 12167 /* 12168 * Note that we may send more than usable_swnd allows here 12169 * because of round off, but no more than 1 MSS of data. 12170 */ 12171 seg_len = end - begin; 12172 if (seg_len > mss) 12173 seg_len = mss; 12174 snxt_mp = tcp_get_seg_mp(tcp, begin, &off); 12175 ASSERT(snxt_mp != NULL); 12176 /* This should not happen. Defensive coding again... */ 12177 if (snxt_mp == NULL) { 12178 return; 12179 } 12180 12181 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off, 12182 &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE); 12183 if (xmit_mp == NULL) 12184 return; 12185 12186 usable_swnd -= seg_len; 12187 tcp->tcp_pipe += seg_len; 12188 tcp->tcp_sack_snxt = begin + seg_len; 12189 TCP_RECORD_TRACE(tcp, xmit_mp, TCP_TRACE_SEND_PKT); 12190 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 12191 12192 /* 12193 * Update the send timestamp to avoid false retransmission. 
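* The b_prev field of an mblk on the transmit list is overloaded
* here to carry the lbolt time of the last (re)transmission instead
* of a message pointer, hence the cast.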
12194 */ 12195 snxt_mp->b_prev = (mblk_t *)lbolt; 12196 12197 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 12198 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, seg_len); 12199 BUMP_MIB(&tcps->tcps_mib, tcpOutSackRetransSegs); 12200 /* 12201 * Update tcp_rexmit_max to extend this SACK recovery phase. 12202 * This happens when new data sent during fast recovery is 12203 * also lost. If TCP retransmits that new data, it needs 12204 * to extend the SACK recovery phase to avoid starting another 12205 * fast retransmit/recovery unnecessarily. 12206 */ 12207 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) { 12208 tcp->tcp_rexmit_max = tcp->tcp_sack_snxt; 12209 } 12210 } 12211 } 12212 12213 /* 12214 * This function handles policy checking at the TCP level for non-hard_bound/ 12215 * detached connections. 12216 */ 12217 static boolean_t 12218 tcp_check_policy(tcp_t *tcp, mblk_t *first_mp, ipha_t *ipha, ip6_t *ip6h, 12219 boolean_t secure, boolean_t mctl_present) 12220 { 12221 ipsec_latch_t *ipl = NULL; 12222 ipsec_action_t *act = NULL; 12223 mblk_t *data_mp; 12224 ipsec_in_t *ii; 12225 const char *reason; 12226 kstat_named_t *counter; 12227 tcp_stack_t *tcps = tcp->tcp_tcps; 12228 ipsec_stack_t *ipss; 12229 ip_stack_t *ipst; 12230 12231 ASSERT(mctl_present || !secure); 12232 12233 ASSERT((ipha == NULL && ip6h != NULL) || 12234 (ip6h == NULL && ipha != NULL)); 12235 12236 /* 12237 * We don't necessarily have an ipsec_in_act action to verify 12238 * policy because of asymmetric policy, where we have only 12239 * outbound policy and no inbound policy (possible with global 12240 * policy). 12241 */ 12242 if (!secure) { 12243 if (act == NULL || act->ipa_act.ipa_type == IPSEC_ACT_BYPASS || 12244 act->ipa_act.ipa_type == IPSEC_ACT_CLEAR) 12245 return (B_TRUE); 12246 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH, 12247 "tcp_check_policy", ipha, ip6h, secure, 12248 tcps->tcps_netstack); 12249 ipss = tcps->tcps_netstack->netstack_ipsec; 12250 12251 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 12252 DROPPER(ipss, ipds_tcp_clear), 12253 &tcps->tcps_dropper); 12254 return (B_FALSE); 12255 } 12256 12257 /* 12258 * We have a secure packet. 12259 */ 12260 if (act == NULL) { 12261 ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED, 12262 "tcp_check_policy", ipha, ip6h, secure, 12263 tcps->tcps_netstack); 12264 ipss = tcps->tcps_netstack->netstack_ipsec; 12265 12266 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 12267 DROPPER(ipss, ipds_tcp_secure), 12268 &tcps->tcps_dropper); 12269 return (B_FALSE); 12270 } 12271 12272 /* 12273 * XXX This whole routine is currently incorrect. ipl should 12274 * be set to the latch pointer, but is currently not set, so 12275 * we initialize it to NULL to avoid picking up random garbage.
12276 */ 12277 if (ipl == NULL) 12278 return (B_TRUE); 12279 12280 data_mp = first_mp->b_cont; 12281 12282 ii = (ipsec_in_t *)first_mp->b_rptr; 12283 12284 ipst = tcps->tcps_netstack->netstack_ip; 12285 12286 if (ipsec_check_ipsecin_latch(ii, data_mp, ipl, ipha, ip6h, &reason, 12287 &counter, tcp->tcp_connp)) { 12288 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded); 12289 return (B_TRUE); 12290 } 12291 (void) strlog(TCP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE, 12292 "tcp inbound policy mismatch: %s, packet dropped\n", 12293 reason); 12294 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed); 12295 12296 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter, 12297 &tcps->tcps_dropper); 12298 return (B_FALSE); 12299 } 12300 12301 /* 12302 * tcp_ss_rexmit() is called in tcp_rput_data() to do slow start 12303 * retransmission after a timeout. 12304 * 12305 * To limit the number of duplicate segments, we limit the number of segments 12306 * to be sent at one time to tcp_snd_burst, the burst variable. 12307 */ 12308 static void 12309 tcp_ss_rexmit(tcp_t *tcp) 12310 { 12311 uint32_t snxt; 12312 uint32_t smax; 12313 int32_t win; 12314 int32_t mss; 12315 int32_t off; 12316 int32_t burst = tcp->tcp_snd_burst; 12317 mblk_t *snxt_mp; 12318 tcp_stack_t *tcps = tcp->tcp_tcps; 12319 12320 /* 12321 * Note that tcp_rexmit can be set even though TCP has retransmitted 12322 * all unack'ed segments. 12323 */ 12324 if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) { 12325 smax = tcp->tcp_rexmit_max; 12326 snxt = tcp->tcp_rexmit_nxt; 12327 if (SEQ_LT(snxt, tcp->tcp_suna)) { 12328 snxt = tcp->tcp_suna; 12329 } 12330 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd); 12331 win -= snxt - tcp->tcp_suna; 12332 mss = tcp->tcp_mss; 12333 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off); 12334 12335 while (SEQ_LT(snxt, smax) && (win > 0) && 12336 (burst > 0) && (snxt_mp != NULL)) { 12337 mblk_t *xmit_mp; 12338 mblk_t *old_snxt_mp = snxt_mp; 12339 uint32_t cnt = mss; 12340 12341 if (win < cnt) { 12342 cnt = win; 12343 } 12344 if (SEQ_GT(snxt + cnt, smax)) { 12345 cnt = smax - snxt; 12346 } 12347 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off, 12348 &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE); 12349 if (xmit_mp == NULL) 12350 return; 12351 12352 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 12353 12354 snxt += cnt; 12355 win -= cnt; 12356 /* 12357 * Update the send timestamp to avoid false 12358 * retransmission. 12359 */ 12360 old_snxt_mp->b_prev = (mblk_t *)lbolt; 12361 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 12362 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, cnt); 12363 12364 tcp->tcp_rexmit_nxt = snxt; 12365 burst--; 12366 } 12367 /* 12368 * If we have transmitted all we have at the time 12369 * we started the retransmission, we can leave 12370 * the rest of the job to tcp_wput_data(). But we 12371 * need to check the send window first. If the 12372 * win is not 0, go on with tcp_wput_data(). 12373 */ 12374 if (SEQ_LT(snxt, smax) || win == 0) { 12375 return; 12376 } 12377 } 12378 /* Only call tcp_wput_data() if there is data to be sent. */ 12379 if (tcp->tcp_unsent) { 12380 tcp_wput_data(tcp, NULL, B_FALSE); 12381 } 12382 } 12383 12384 /* 12385 * Process all TCP options in the SYN segment. Note that this function should 12386 * be called after tcp_adapt_ire() is called so that the necessary info 12387 * from IRE is already set in the tcp structure. 12388 * 12389 * This function sets up the correct tcp_mss value according to the 12390 * MSS option value and our header size.
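* (A worked example under stated assumptions - IPv4, no IPsec
* overhead, timestamps negotiated: the peer's MSS option of 1460
* against our 52-byte IP+TCP+options header gives
* 1460 - (52 + 0 - 40) = 1448 bytes of usable payload per segment;
* see the Real MSS computation below.)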
It also sets up the window scale 12391 * and timestamp values, and initialize SACK info blocks. But it does not 12392 * change receive window size after setting the tcp_mss value. The caller 12393 * should do the appropriate change. 12394 */ 12395 void 12396 tcp_process_options(tcp_t *tcp, tcph_t *tcph) 12397 { 12398 int options; 12399 tcp_opt_t tcpopt; 12400 uint32_t mss_max; 12401 char *tmp_tcph; 12402 tcp_stack_t *tcps = tcp->tcp_tcps; 12403 12404 tcpopt.tcp = NULL; 12405 options = tcp_parse_options(tcph, &tcpopt); 12406 12407 /* 12408 * Process MSS option. Note that MSS option value does not account 12409 * for IP or TCP options. This means that it is equal to MTU - minimum 12410 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for 12411 * IPv6. 12412 */ 12413 if (!(options & TCP_OPT_MSS_PRESENT)) { 12414 if (tcp->tcp_ipversion == IPV4_VERSION) 12415 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4; 12416 else 12417 tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6; 12418 } else { 12419 if (tcp->tcp_ipversion == IPV4_VERSION) 12420 mss_max = tcps->tcps_mss_max_ipv4; 12421 else 12422 mss_max = tcps->tcps_mss_max_ipv6; 12423 if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min) 12424 tcpopt.tcp_opt_mss = tcps->tcps_mss_min; 12425 else if (tcpopt.tcp_opt_mss > mss_max) 12426 tcpopt.tcp_opt_mss = mss_max; 12427 } 12428 12429 /* Process Window Scale option. */ 12430 if (options & TCP_OPT_WSCALE_PRESENT) { 12431 tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale; 12432 tcp->tcp_snd_ws_ok = B_TRUE; 12433 } else { 12434 tcp->tcp_snd_ws = B_FALSE; 12435 tcp->tcp_snd_ws_ok = B_FALSE; 12436 tcp->tcp_rcv_ws = B_FALSE; 12437 } 12438 12439 /* Process Timestamp option. */ 12440 if ((options & TCP_OPT_TSTAMP_PRESENT) && 12441 (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) { 12442 tmp_tcph = (char *)tcp->tcp_tcph; 12443 12444 tcp->tcp_snd_ts_ok = B_TRUE; 12445 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 12446 tcp->tcp_last_rcv_lbolt = lbolt64; 12447 ASSERT(OK_32PTR(tmp_tcph)); 12448 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 12449 12450 /* Fill in our template header with basic timestamp option. */ 12451 tmp_tcph += tcp->tcp_tcp_hdr_len; 12452 tmp_tcph[0] = TCPOPT_NOP; 12453 tmp_tcph[1] = TCPOPT_NOP; 12454 tmp_tcph[2] = TCPOPT_TSTAMP; 12455 tmp_tcph[3] = TCPOPT_TSTAMP_LEN; 12456 tcp->tcp_hdr_len += TCPOPT_REAL_TS_LEN; 12457 tcp->tcp_tcp_hdr_len += TCPOPT_REAL_TS_LEN; 12458 tcp->tcp_tcph->th_offset_and_rsrvd[0] += (3 << 4); 12459 } else { 12460 tcp->tcp_snd_ts_ok = B_FALSE; 12461 } 12462 12463 /* 12464 * Process SACK options. If SACK is enabled for this connection, 12465 * then allocate the SACK info structure. Note the following ways 12466 * when tcp_snd_sack_ok is set to true. 12467 * 12468 * For active connection: in tcp_adapt_ire() called in 12469 * tcp_rput_other(), or in tcp_rput_other() when tcp_sack_permitted 12470 * is checked. 12471 * 12472 * For passive connection: in tcp_adapt_ire() called in 12473 * tcp_accept_comm(). 12474 * 12475 * That's the reason why the extra TCP_IS_DETACHED() check is there. 12476 * That check makes sure that if we did not send a SACK OK option, 12477 * we will not enable SACK for this connection even though the other 12478 * side sends us SACK OK option. For active connection, the SACK 12479 * info structure has already been allocated. So we need to free 12480 * it if SACK is disabled. 
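* The 3-versus-4 choice of tcp_max_sack_blk below falls out of the
* 40 bytes of TCP option space: a SACK option costs 2 bytes plus 8
* per block, so alongside the 12-byte timestamp option (with NOPs)
* only 3 blocks fit (2 + 24 <= 28), while without timestamps 4 fit
* (2 + 32 <= 40).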
12481 */ 12482 if ((options & TCP_OPT_SACK_OK_PRESENT) && 12483 (tcp->tcp_snd_sack_ok || 12484 (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) { 12485 /* This should be true only in the passive case. */ 12486 if (tcp->tcp_sack_info == NULL) { 12487 ASSERT(TCP_IS_DETACHED(tcp)); 12488 tcp->tcp_sack_info = 12489 kmem_cache_alloc(tcp_sack_info_cache, KM_NOSLEEP); 12490 } 12491 if (tcp->tcp_sack_info == NULL) { 12492 tcp->tcp_snd_sack_ok = B_FALSE; 12493 } else { 12494 tcp->tcp_snd_sack_ok = B_TRUE; 12495 if (tcp->tcp_snd_ts_ok) { 12496 tcp->tcp_max_sack_blk = 3; 12497 } else { 12498 tcp->tcp_max_sack_blk = 4; 12499 } 12500 } 12501 } else { 12502 /* 12503 * Resetting tcp_snd_sack_ok to B_FALSE so that 12504 * no SACK info will be used for this 12505 * connection. This assumes that SACK usage 12506 * permission is negotiated. This may need 12507 * to be changed once this is clarified. 12508 */ 12509 if (tcp->tcp_sack_info != NULL) { 12510 ASSERT(tcp->tcp_notsack_list == NULL); 12511 kmem_cache_free(tcp_sack_info_cache, 12512 tcp->tcp_sack_info); 12513 tcp->tcp_sack_info = NULL; 12514 } 12515 tcp->tcp_snd_sack_ok = B_FALSE; 12516 } 12517 12518 /* 12519 * Now we know the exact TCP/IP header length, subtract 12520 * that from tcp_mss to get our side's MSS. 12521 */ 12522 tcp->tcp_mss -= tcp->tcp_hdr_len; 12523 /* 12524 * Here we assume that the other side's header size will be equal to 12525 * our header size. We calculate the real MSS accordingly. We need to 12526 * take into account the additional stuff IPsec puts in. 12527 * 12528 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 12529 */ 12530 tcpopt.tcp_opt_mss -= tcp->tcp_hdr_len + tcp->tcp_ipsec_overhead - 12531 ((tcp->tcp_ipversion == IPV4_VERSION ? 12532 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH); 12533 12534 /* 12535 * Set MSS to the smaller of the two ends of the connection. 12536 * We should not have called tcp_mss_set() before, but our 12537 * side of the MSS should have been set to a proper value 12538 * by tcp_adapt_ire(). tcp_mss_set() will also set up the 12539 * STREAM head parameters properly. 12540 * 12541 * If we have a larger-than-16-bit window but the other side 12542 * didn't want to do window scale, tcp_rwnd_set() will take 12543 * care of that. 12544 */ 12545 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss), B_TRUE); 12546 } 12547 12548 /* 12549 * Sends the T_CONN_IND to the listener. The caller calls this 12550 * function via squeue to get inside the listener's perimeter; 12551 * once the 3-way handshake is done, a T_CONN_IND needs to be 12552 * sent. As an optimization, the caller can call this directly 12553 * if the listener's perimeter is the same as the eager's. 12554 */ 12555 /* ARGSUSED */ 12556 void 12557 tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2) 12558 { 12559 conn_t *lconnp = (conn_t *)arg; 12560 tcp_t *listener = lconnp->conn_tcp; 12561 tcp_t *tcp; 12562 struct T_conn_ind *conn_ind; 12563 ipaddr_t *addr_cache; 12564 boolean_t need_send_conn_ind = B_FALSE; 12565 tcp_stack_t *tcps = listener->tcp_tcps; 12566 12567 /* retrieve the eager */ 12568 conn_ind = (struct T_conn_ind *)mp->b_rptr; 12569 ASSERT(conn_ind->OPT_offset != 0 && 12570 conn_ind->OPT_length == sizeof (intptr_t)); 12571 bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp, 12572 conn_ind->OPT_length); 12573 12574 /* 12575 * TLI/XTI applications will get confused by 12576 * sending the eager as an option, since it violates 12577 * the option semantics.
So remove the eager as
12578 * an option, since a TLI/XTI app doesn't need it anyway.
12579 */
12580 if (!TCP_IS_SOCKET(listener)) {
12581 conn_ind->OPT_length = 0;
12582 conn_ind->OPT_offset = 0;
12583 }
12584 if (listener->tcp_state == TCPS_CLOSED ||
12585 TCP_IS_DETACHED(listener)) {
12586 /*
12587 * If the listener has closed, it would have caused a
12588 * cleanup/blowoff to happen for the eager. We
12589 * just need to return.
12590 */
12591 freemsg(mp);
12592 return;
12593 }
12594
12595
12596 /*
12597 * If the conn_req_q is full, defer passing up the
12598 * T_CONN_IND until space is available after t_accept()
12599 * processing.
12600 */
12601 mutex_enter(&listener->tcp_eager_lock);
12602
12603 /*
12604 * Since the 3-way handshake is over, take the eager out of the
12605 * list of droppable eagers, if it is in it.
12606 */
12607 MAKE_UNDROPPABLE(tcp);
12608
12609 if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) {
12610 tcp_t *tail;
12611
12612 /*
12613 * The eager already has an extra ref put in tcp_rput_data
12614 * so that it stays till accept comes back even though it
12615 * might get into TCPS_CLOSED as a result of a TH_RST etc.
12616 */
12617 ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
12618 listener->tcp_conn_req_cnt_q0--;
12619 listener->tcp_conn_req_cnt_q++;
12620
12621 /* Move from SYN_RCVD to ESTABLISHED list */
12622 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
12623 tcp->tcp_eager_prev_q0;
12624 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
12625 tcp->tcp_eager_next_q0;
12626 tcp->tcp_eager_prev_q0 = NULL;
12627 tcp->tcp_eager_next_q0 = NULL;
12628
12629 /*
12630 * Insert at end of the queue because sockfs
12631 * sends down T_CONN_RES in chronological
12632 * order. Leaving the older conn indications
12633 * at the front of the queue helps reduce
12634 * search time.
12635 */
12636 tail = listener->tcp_eager_last_q;
12637 if (tail != NULL)
12638 tail->tcp_eager_next_q = tcp;
12639 else
12640 listener->tcp_eager_next_q = tcp;
12641 listener->tcp_eager_last_q = tcp;
12642 tcp->tcp_eager_next_q = NULL;
12643 /*
12644 * Delay sending up the T_conn_ind until we are
12645 * done with the eager. Once we have sent up
12646 * the T_conn_ind, the accept can potentially complete
12647 * any time and release the refhold we have on the eager.
12648 */
12649 need_send_conn_ind = B_TRUE;
12650 } else {
12651 /*
12652 * Defer connection on q0 and set the deferred
12653 * connection bit to true.
12654 */
12655 tcp->tcp_conn_def_q0 = B_TRUE;
12656
12657 /* take tcp out of q0 ... */
12658 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
12659 tcp->tcp_eager_next_q0;
12660 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
12661 tcp->tcp_eager_prev_q0;
12662
12663 /* ... and place it at the end of q0 */
12664 tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
12665 tcp->tcp_eager_next_q0 = listener;
12666 listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
12667 listener->tcp_eager_prev_q0 = tcp;
12668 tcp->tcp_conn.tcp_eager_conn_ind = mp;
12669 }
12670
12671 /* we have timed out before */
12672 if (tcp->tcp_syn_rcvd_timeout != 0) {
12673 tcp->tcp_syn_rcvd_timeout = 0;
12674 listener->tcp_syn_rcvd_timeout--;
12675 if (listener->tcp_syn_defense &&
12676 listener->tcp_syn_rcvd_timeout <=
12677 (tcps->tcps_conn_req_max_q0 >> 5) &&
12678 10*MINUTES < TICK_TO_MSEC(lbolt64 -
12679 listener->tcp_last_rcv_lbolt)) {
12680 /*
12681 * Turn off the defense mode if we
12682 * believe the SYN attack is over.
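 *
 * "Over" is judged heuristically; for illustration, the condition
 * coded above requires that the number of embryonic connections
 * that timed out has fallen to at most 1/32 of tcps_conn_req_max_q0
 * and that more than 10 minutes have elapsed since
 * tcp_last_rcv_lbolt was last updated. Only then do we drop the
 * defense and free the IP address cache below.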
12683 */
12684 listener->tcp_syn_defense = B_FALSE;
12685 if (listener->tcp_ip_addr_cache) {
12686 kmem_free((void *)listener->tcp_ip_addr_cache,
12687 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
12688 listener->tcp_ip_addr_cache = NULL;
12689 }
12690 }
12691 }
12692 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache);
12693 if (addr_cache != NULL) {
12694 /*
12695 * We have finished a 3-way handshake with this
12696 * remote host. This proves the IP addr is good.
12697 * Cache it!
12698 */
12699 addr_cache[IP_ADDR_CACHE_HASH(
12700 tcp->tcp_remote)] = tcp->tcp_remote;
12701 }
12702 mutex_exit(&listener->tcp_eager_lock);
12703 if (need_send_conn_ind)
12704 putnext(listener->tcp_rq, mp);
12705 }
12706
12707 mblk_t *
12708 tcp_find_pktinfo(tcp_t *tcp, mblk_t *mp, uint_t *ipversp, uint_t *ip_hdr_lenp,
12709 uint_t *ifindexp, ip6_pkt_t *ippp)
12710 {
12711 ip_pktinfo_t *pinfo;
12712 ip6_t *ip6h;
12713 uchar_t *rptr;
12714 mblk_t *first_mp = mp;
12715 boolean_t mctl_present = B_FALSE;
12716 uint_t ifindex = 0;
12717 ip6_pkt_t ipp;
12718 uint_t ipvers;
12719 uint_t ip_hdr_len;
12720 tcp_stack_t *tcps = tcp->tcp_tcps;
12721
12722 rptr = mp->b_rptr;
12723 ASSERT(OK_32PTR(rptr));
12724 ASSERT(tcp != NULL);
12725 ipp.ipp_fields = 0;
12726
12727 switch (DB_TYPE(mp)) {
12728 case M_CTL:
12729 mp = mp->b_cont;
12730 if (mp == NULL) {
12731 freemsg(first_mp);
12732 return (NULL);
12733 }
12734 if (DB_TYPE(mp) != M_DATA) {
12735 freemsg(first_mp);
12736 return (NULL);
12737 }
12738 mctl_present = B_TRUE;
12739 break;
12740 case M_DATA:
12741 break;
12742 default:
12743 cmn_err(CE_NOTE, "tcp_find_pktinfo: unknown db_type");
12744 freemsg(mp);
12745 return (NULL);
12746 }
12747 ipvers = IPH_HDR_VERSION(rptr);
12748 if (ipvers == IPV4_VERSION) {
12749 if (tcp == NULL) {
12750 ip_hdr_len = IPH_HDR_LENGTH(rptr);
12751 goto done;
12752 }
12753
12754 ipp.ipp_fields |= IPPF_HOPLIMIT;
12755 ipp.ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl;
12756
12757 /*
12758 * If we have IN_PKTINFO in an M_CTL and tcp_ipv6_recvancillary
12759 * has TCP_IPV6_RECVPKTINFO set, pass I/F index along in ipp.
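 *
 * The message layout handled here is, as a sketch:
 *
 *	first_mp (M_CTL):  ip_pktinfo_t, ip_pkt_ulp_type == IN_PKTINFO
 *	  b_cont (M_DATA): IPv4 header + TCP header [+ payload]
 *
 * Once the interface index has been copied into ipp, the M_CTL
 * block is freed and only the M_DATA chain travels on.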
12760 */
12761 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) &&
12762 mctl_present) {
12763 pinfo = (ip_pktinfo_t *)first_mp->b_rptr;
12764 if ((MBLKL(first_mp) == sizeof (ip_pktinfo_t)) &&
12765 (pinfo->ip_pkt_ulp_type == IN_PKTINFO) &&
12766 (pinfo->ip_pkt_flags & IPF_RECVIF)) {
12767 ipp.ipp_fields |= IPPF_IFINDEX;
12768 ipp.ipp_ifindex = pinfo->ip_pkt_ifindex;
12769 ifindex = pinfo->ip_pkt_ifindex;
12770 }
12771 freeb(first_mp);
12772 mctl_present = B_FALSE;
12773 }
12774 ip_hdr_len = IPH_HDR_LENGTH(rptr);
12775 } else {
12776 ip6h = (ip6_t *)rptr;
12777
12778 ASSERT(ipvers == IPV6_VERSION);
12779 ipp.ipp_fields = IPPF_HOPLIMIT | IPPF_TCLASS;
12780 ipp.ipp_tclass = (ip6h->ip6_flow & 0x0FF00000) >> 20;
12781 ipp.ipp_hoplimit = ip6h->ip6_hops;
12782
12783 if (ip6h->ip6_nxt != IPPROTO_TCP) {
12784 uint8_t nexthdrp;
12785 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
12786
12787 /* Look for ifindex information */
12788 if (ip6h->ip6_nxt == IPPROTO_RAW) {
12789 ip6i_t *ip6i = (ip6i_t *)ip6h;
12790 if ((uchar_t *)&ip6i[1] > mp->b_wptr) {
12791 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12792 freemsg(first_mp);
12793 return (NULL);
12794 }
12795
12796 if (ip6i->ip6i_flags & IP6I_IFINDEX) {
12797 ASSERT(ip6i->ip6i_ifindex != 0);
12798 ipp.ipp_fields |= IPPF_IFINDEX;
12799 ipp.ipp_ifindex = ip6i->ip6i_ifindex;
12800 ifindex = ip6i->ip6i_ifindex;
12801 }
12802 rptr = (uchar_t *)&ip6i[1];
12803 mp->b_rptr = rptr;
12804 if (rptr == mp->b_wptr) {
12805 mblk_t *mp1;
12806 mp1 = mp->b_cont;
12807 freeb(mp);
12808 mp = mp1;
12809 rptr = mp->b_rptr;
12810 }
12811 if (MBLKL(mp) < IPV6_HDR_LEN +
12812 sizeof (tcph_t)) {
12813 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12814 freemsg(first_mp);
12815 return (NULL);
12816 }
12817 ip6h = (ip6_t *)rptr;
12818 }
12819
12820 /*
12821 * Find any potentially interesting extension headers
12822 * as well as the length of the IPv6 + extension
12823 * headers.
12824 */
12825 ip_hdr_len = ip_find_hdr_v6(mp, ip6h, &ipp, &nexthdrp);
12826 /* Verify that this is a TCP packet. */
12827 if (nexthdrp != IPPROTO_TCP) {
12828 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs);
12829 freemsg(first_mp);
12830 return (NULL);
12831 }
12832 } else {
12833 ip_hdr_len = IPV6_HDR_LEN;
12834 }
12835 }
12836
12837 done:
12838 if (ipversp != NULL)
12839 *ipversp = ipvers;
12840 if (ip_hdr_lenp != NULL)
12841 *ip_hdr_lenp = ip_hdr_len;
12842 if (ippp != NULL)
12843 *ippp = ipp;
12844 if (ifindexp != NULL)
12845 *ifindexp = ifindex;
12846 if (mctl_present) {
12847 freeb(first_mp);
12848 }
12849 return (mp);
12850 }
12851
12852 /*
12853 * Handle M_DATA messages from IP. It's called directly from IP via
12854 * squeue for the AF_INET socket fast path. No M_CTL is expected
12855 * in this path.
12856 *
12857 * For everything else (including AF_INET6 sockets with 'tcp_ipversion'
12858 * v4 and v6), we are called through tcp_input() and an M_CTL can
12859 * be present for options, but tcp_find_pktinfo() deals with it. We
12860 * only expect M_DATA packets after tcp_find_pktinfo() is done.
12861 *
12862 * The first argument is always the connp/tcp to which the mp belongs.
12863 * There are no exceptions to this rule. The caller has already put
12864 * a reference on this connp/tcp and once tcp_rput_data() returns,
12865 * the squeue will do the refrele.
12866 *
12867 * A TH_SYN for the listener goes directly to tcp_conn_request() via
12868 * squeue.
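 *
 * (Illustrative call path, using the squeue primitives named at the
 * top of this file: on the read side, IP classifies the packet to a
 * conn_t, takes a reference, and does
 *
 *	squeue_enter(connp->conn_sqp, mp, tcp_rput_data, connp, tag);
 *
 * (the SQTAG_* tag argument is elided here); the squeue framework
 * then runs tcp_rput_data() inside the connection's perimeter and
 * drops the reference when it returns.)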
12869 * 12870 * sqp: NULL = recursive, sqp != NULL means called from squeue 12871 */ 12872 void 12873 tcp_rput_data(void *arg, mblk_t *mp, void *arg2) 12874 { 12875 int32_t bytes_acked; 12876 int32_t gap; 12877 mblk_t *mp1; 12878 uint_t flags; 12879 uint32_t new_swnd = 0; 12880 uchar_t *iphdr; 12881 uchar_t *rptr; 12882 int32_t rgap; 12883 uint32_t seg_ack; 12884 int seg_len; 12885 uint_t ip_hdr_len; 12886 uint32_t seg_seq; 12887 tcph_t *tcph; 12888 int urp; 12889 tcp_opt_t tcpopt; 12890 uint_t ipvers; 12891 ip6_pkt_t ipp; 12892 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 12893 uint32_t cwnd; 12894 uint32_t add; 12895 int npkt; 12896 int mss; 12897 conn_t *connp = (conn_t *)arg; 12898 squeue_t *sqp = (squeue_t *)arg2; 12899 tcp_t *tcp = connp->conn_tcp; 12900 tcp_stack_t *tcps = tcp->tcp_tcps; 12901 12902 /* 12903 * RST from fused tcp loopback peer should trigger an unfuse. 12904 */ 12905 if (tcp->tcp_fused) { 12906 TCP_STAT(tcps, tcp_fusion_aborted); 12907 tcp_unfuse(tcp); 12908 } 12909 12910 iphdr = mp->b_rptr; 12911 rptr = mp->b_rptr; 12912 ASSERT(OK_32PTR(rptr)); 12913 12914 /* 12915 * An AF_INET socket is not capable of receiving any pktinfo. Do inline 12916 * processing here. For rest call tcp_find_pktinfo to fill up the 12917 * necessary information. 12918 */ 12919 if (IPCL_IS_TCP4(connp)) { 12920 ipvers = IPV4_VERSION; 12921 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12922 } else { 12923 mp = tcp_find_pktinfo(tcp, mp, &ipvers, &ip_hdr_len, 12924 NULL, &ipp); 12925 if (mp == NULL) { 12926 TCP_STAT(tcps, tcp_rput_v6_error); 12927 return; 12928 } 12929 iphdr = mp->b_rptr; 12930 rptr = mp->b_rptr; 12931 } 12932 ASSERT(DB_TYPE(mp) == M_DATA); 12933 12934 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12935 seg_seq = ABE32_TO_U32(tcph->th_seq); 12936 seg_ack = ABE32_TO_U32(tcph->th_ack); 12937 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 12938 seg_len = (int)(mp->b_wptr - rptr) - 12939 (ip_hdr_len + TCP_HDR_LENGTH(tcph)); 12940 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 12941 do { 12942 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 12943 (uintptr_t)INT_MAX); 12944 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 12945 } while ((mp1 = mp1->b_cont) != NULL && 12946 mp1->b_datap->db_type == M_DATA); 12947 } 12948 12949 if (tcp->tcp_state == TCPS_TIME_WAIT) { 12950 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 12951 seg_len, tcph); 12952 return; 12953 } 12954 12955 if (sqp != NULL) { 12956 /* 12957 * This is the correct place to update tcp_last_recv_time. Note 12958 * that it is also updated for tcp structure that belongs to 12959 * global and listener queues which do not really need updating. 12960 * But that should not cause any harm. And it is updated for 12961 * all kinds of incoming segments, not only for data segments. 12962 */ 12963 tcp->tcp_last_recv_time = lbolt; 12964 } 12965 12966 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 12967 12968 BUMP_LOCAL(tcp->tcp_ibsegs); 12969 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 12970 12971 if ((flags & TH_URG) && sqp != NULL) { 12972 /* 12973 * TCP can't handle urgent pointers that arrive before 12974 * the connection has been accept()ed since it can't 12975 * buffer OOB data. Discard segment if this happens. 12976 * 12977 * We can't just rely on a non-null tcp_listener to indicate 12978 * that the accept() has completed since unlinking of the 12979 * eager and completion of the accept are not atomic. 
12980 * tcp_detached, when it is not set (B_FALSE) indicates 12981 * that the accept() has completed. 12982 * 12983 * Nor can it reassemble urgent pointers, so discard 12984 * if it's not the next segment expected. 12985 * 12986 * Otherwise, collapse chain into one mblk (discard if 12987 * that fails). This makes sure the headers, retransmitted 12988 * data, and new data all are in the same mblk. 12989 */ 12990 ASSERT(mp != NULL); 12991 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 12992 freemsg(mp); 12993 return; 12994 } 12995 /* Update pointers into message */ 12996 iphdr = rptr = mp->b_rptr; 12997 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12998 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 12999 /* 13000 * Since we can't handle any data with this urgent 13001 * pointer that is out of sequence, we expunge 13002 * the data. This allows us to still register 13003 * the urgent mark and generate the M_PCSIG, 13004 * which we can do. 13005 */ 13006 mp->b_wptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 13007 seg_len = 0; 13008 } 13009 } 13010 13011 switch (tcp->tcp_state) { 13012 case TCPS_SYN_SENT: 13013 if (flags & TH_ACK) { 13014 /* 13015 * Note that our stack cannot send data before a 13016 * connection is established, therefore the 13017 * following check is valid. Otherwise, it has 13018 * to be changed. 13019 */ 13020 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 13021 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 13022 freemsg(mp); 13023 if (flags & TH_RST) 13024 return; 13025 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 13026 tcp, seg_ack, 0, TH_RST); 13027 return; 13028 } 13029 ASSERT(tcp->tcp_suna + 1 == seg_ack); 13030 } 13031 if (flags & TH_RST) { 13032 freemsg(mp); 13033 if (flags & TH_ACK) 13034 (void) tcp_clean_death(tcp, 13035 ECONNREFUSED, 13); 13036 return; 13037 } 13038 if (!(flags & TH_SYN)) { 13039 freemsg(mp); 13040 return; 13041 } 13042 13043 /* Process all TCP options. */ 13044 tcp_process_options(tcp, tcph); 13045 /* 13046 * The following changes our rwnd to be a multiple of the 13047 * MIN(peer MSS, our MSS) for performance reason. 13048 */ 13049 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rq->q_hiwat, 13050 tcp->tcp_mss)); 13051 13052 /* Is the other end ECN capable? */ 13053 if (tcp->tcp_ecn_ok) { 13054 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 13055 tcp->tcp_ecn_ok = B_FALSE; 13056 } 13057 } 13058 /* 13059 * Clear ECN flags because it may interfere with later 13060 * processing. 13061 */ 13062 flags &= ~(TH_ECE|TH_CWR); 13063 13064 tcp->tcp_irs = seg_seq; 13065 tcp->tcp_rack = seg_seq; 13066 tcp->tcp_rnxt = seg_seq + 1; 13067 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 13068 if (!TCP_IS_DETACHED(tcp)) { 13069 /* Allocate room for SACK options if needed. */ 13070 if (tcp->tcp_snd_sack_ok) { 13071 (void) mi_set_sth_wroff(tcp->tcp_rq, 13072 tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 13073 (tcp->tcp_loopback ? 0 : 13074 tcps->tcps_wroff_xtra)); 13075 } else { 13076 (void) mi_set_sth_wroff(tcp->tcp_rq, 13077 tcp->tcp_hdr_len + 13078 (tcp->tcp_loopback ? 0 : 13079 tcps->tcps_wroff_xtra)); 13080 } 13081 } 13082 if (flags & TH_ACK) { 13083 /* 13084 * If we can't get the confirmation upstream, pretend 13085 * we didn't even see this one. 13086 * 13087 * XXX: how can we pretend we didn't see it if we 13088 * have updated rnxt et. al. 13089 * 13090 * For loopback we defer sending up the T_CONN_CON 13091 * until after some checks below. 13092 */ 13093 mp1 = NULL; 13094 if (!tcp_conn_con(tcp, iphdr, tcph, mp, 13095 tcp->tcp_loopback ? 
&mp1 : NULL)) { 13096 freemsg(mp); 13097 return; 13098 } 13099 /* SYN was acked - making progress */ 13100 if (tcp->tcp_ipversion == IPV6_VERSION) 13101 tcp->tcp_ip_forward_progress = B_TRUE; 13102 13103 /* One for the SYN */ 13104 tcp->tcp_suna = tcp->tcp_iss + 1; 13105 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 13106 tcp->tcp_state = TCPS_ESTABLISHED; 13107 13108 /* 13109 * If SYN was retransmitted, need to reset all 13110 * retransmission info. This is because this 13111 * segment will be treated as a dup ACK. 13112 */ 13113 if (tcp->tcp_rexmit) { 13114 tcp->tcp_rexmit = B_FALSE; 13115 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 13116 tcp->tcp_rexmit_max = tcp->tcp_snxt; 13117 tcp->tcp_snd_burst = tcp->tcp_localnet ? 13118 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 13119 tcp->tcp_ms_we_have_waited = 0; 13120 13121 /* 13122 * Set tcp_cwnd back to 1 MSS, per 13123 * recommendation from 13124 * draft-floyd-incr-init-win-01.txt, 13125 * Increasing TCP's Initial Window. 13126 */ 13127 tcp->tcp_cwnd = tcp->tcp_mss; 13128 } 13129 13130 tcp->tcp_swl1 = seg_seq; 13131 tcp->tcp_swl2 = seg_ack; 13132 13133 new_swnd = BE16_TO_U16(tcph->th_win); 13134 tcp->tcp_swnd = new_swnd; 13135 if (new_swnd > tcp->tcp_max_swnd) 13136 tcp->tcp_max_swnd = new_swnd; 13137 13138 /* 13139 * Always send the three-way handshake ack immediately 13140 * in order to make the connection complete as soon as 13141 * possible on the accepting host. 13142 */ 13143 flags |= TH_ACK_NEEDED; 13144 13145 /* 13146 * Special case for loopback. At this point we have 13147 * received SYN-ACK from the remote endpoint. In 13148 * order to ensure that both endpoints reach the 13149 * fused state prior to any data exchange, the final 13150 * ACK needs to be sent before we indicate T_CONN_CON 13151 * to the module upstream. 13152 */ 13153 if (tcp->tcp_loopback) { 13154 mblk_t *ack_mp; 13155 13156 ASSERT(!tcp->tcp_unfusable); 13157 ASSERT(mp1 != NULL); 13158 /* 13159 * For loopback, we always get a pure SYN-ACK 13160 * and only need to send back the final ACK 13161 * with no data (this is because the other 13162 * tcp is ours and we don't do T/TCP). This 13163 * final ACK triggers the passive side to 13164 * perform fusion in ESTABLISHED state. 13165 */ 13166 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 13167 if (tcp->tcp_ack_tid != 0) { 13168 (void) TCP_TIMER_CANCEL(tcp, 13169 tcp->tcp_ack_tid); 13170 tcp->tcp_ack_tid = 0; 13171 } 13172 TCP_RECORD_TRACE(tcp, ack_mp, 13173 TCP_TRACE_SEND_PKT); 13174 tcp_send_data(tcp, tcp->tcp_wq, ack_mp); 13175 BUMP_LOCAL(tcp->tcp_obsegs); 13176 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 13177 13178 /* Send up T_CONN_CON */ 13179 putnext(tcp->tcp_rq, mp1); 13180 13181 freemsg(mp); 13182 return; 13183 } 13184 /* 13185 * Forget fusion; we need to handle more 13186 * complex cases below. Send the deferred 13187 * T_CONN_CON message upstream and proceed 13188 * as usual. Mark this tcp as not capable 13189 * of fusion. 13190 */ 13191 TCP_STAT(tcps, tcp_fusion_unfusable); 13192 tcp->tcp_unfusable = B_TRUE; 13193 putnext(tcp->tcp_rq, mp1); 13194 } 13195 13196 /* 13197 * Check to see if there is data to be sent. If 13198 * yes, set the transmit flag. Then check to see 13199 * if received data processing needs to be done. 13200 * If not, go straight to xmit_check. This short 13201 * cut is OK as we don't support T/TCP. 
13202 */
13203 if (tcp->tcp_unsent)
13204 flags |= TH_XMIT_NEEDED;
13205
13206 if (seg_len == 0 && !(flags & TH_URG)) {
13207 freemsg(mp);
13208 goto xmit_check;
13209 }
13210
13211 flags &= ~TH_SYN;
13212 seg_seq++;
13213 break;
13214 }
13215 tcp->tcp_state = TCPS_SYN_RCVD;
13216 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss,
13217 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
13218 if (mp1) {
13219 DB_CPID(mp1) = tcp->tcp_cpid;
13220 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT);
13221 tcp_send_data(tcp, tcp->tcp_wq, mp1);
13222 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
13223 }
13224 freemsg(mp);
13225 return;
13226 case TCPS_SYN_RCVD:
13227 if (flags & TH_ACK) {
13228 /*
13229 * In this state, a SYN|ACK packet is either bogus
13230 * (the other side must be ACKing our SYN, which
13231 * indicates it has seen the ACK for their SYN and
13232 * shouldn't retransmit it) or we're crossing SYNs
13233 * on an active open.
13234 */
13235 if ((flags & TH_SYN) && !tcp->tcp_active_open) {
13236 freemsg(mp);
13237 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn",
13238 tcp, seg_ack, 0, TH_RST);
13239 return;
13240 }
13241 /*
13242 * NOTE: RFC 793 pg. 72 says this should be
13243 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt
13244 * but that would mean we have an ack that ignored
13245 * our SYN.
13246 */
13247 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) ||
13248 SEQ_GT(seg_ack, tcp->tcp_snxt)) {
13249 freemsg(mp);
13250 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack",
13251 tcp, seg_ack, 0, TH_RST);
13252 return;
13253 }
13254 }
13255 break;
13256 case TCPS_LISTEN:
13257 /*
13258 * Only a TLI listener can come through this path when an
13259 * acceptor is going back to being a listener and a packet
13260 * for the acceptor hits the classifier. For a socket
13261 * listener, this can never happen because a listener
13262 * can never accept a connection on itself, and hence a
13263 * socket acceptor cannot go back to being a listener.
13264 */
13265 ASSERT(!TCP_IS_SOCKET(tcp));
13266 /*FALLTHRU*/
13267 case TCPS_CLOSED:
13268 case TCPS_BOUND: {
13269 conn_t *new_connp;
13270 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
13271
13272 new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst);
13273 if (new_connp != NULL) {
13274 tcp_reinput(new_connp, mp, connp->conn_sqp);
13275 return;
13276 }
13277 /* We failed to classify. For now just drop the packet */
13278 freemsg(mp);
13279 return;
13280 }
13281 case TCPS_IDLE:
13282 /*
13283 * Handle the case where tcp_clean_death() has happened
13284 * on a connection (application hasn't closed yet) but a packet
13285 * was already queued on the squeue before tcp_clean_death()
13286 * was processed. Calling tcp_clean_death() twice on the same
13287 * connection can result in weird behaviour.
13288 */
13289 freemsg(mp);
13290 return;
13291 default:
13292 break;
13293 }
13294
13295 /*
13296 * Already on the correct queue/perimeter.
13297 * If this is a detached connection and not an eager
13298 * connection hanging off a listener, then new data
13299 * (past the FIN) will cause a reset.
13300 * We do this special check here, out of the
13301 * main line, rather than checking whether we
13302 * are detached every time we see new data
13303 * down below.
13304 */
13305 if (TCP_IS_DETACHED_NONEAGER(tcp) &&
13306 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) {
13307 BUMP_MIB(&tcps->tcps_mib, tcpInClosed);
13308 TCP_RECORD_TRACE(tcp,
13309 mp, TCP_TRACE_RECV_PKT);
13310
13311 freemsg(mp);
13312 /*
13313 * This could be an SSL closure alert.
We're detached so just
13314 * acknowledge it this last time.
13315 */
13316 if (tcp->tcp_kssl_ctx != NULL) {
13317 kssl_release_ctx(tcp->tcp_kssl_ctx);
13318 tcp->tcp_kssl_ctx = NULL;
13319
13320 tcp->tcp_rnxt += seg_len;
13321 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack);
13322 flags |= TH_ACK_NEEDED;
13323 goto ack_check;
13324 }
13325
13326 tcp_xmit_ctl("new data when detached", tcp,
13327 tcp->tcp_snxt, 0, TH_RST);
13328 (void) tcp_clean_death(tcp, EPROTO, 12);
13329 return;
13330 }
13331
13332 mp->b_rptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph);
13333 urp = BE16_TO_U16(tcph->th_urp) - TCP_OLD_URP_INTERPRETATION;
13334 new_swnd = BE16_TO_U16(tcph->th_win) <<
13335 ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws);
13336
13337 if (tcp->tcp_snd_ts_ok) {
13338 if (!tcp_paws_check(tcp, tcph, &tcpopt)) {
13339 /*
13340 * This segment is not acceptable.
13341 * Drop it and send back an ACK.
13342 */
13343 freemsg(mp);
13344 flags |= TH_ACK_NEEDED;
13345 goto ack_check;
13346 }
13347 } else if (tcp->tcp_snd_sack_ok) {
13348 ASSERT(tcp->tcp_sack_info != NULL);
13349 tcpopt.tcp = tcp;
13350 /*
13351 * SACK info is already updated in tcp_parse_options. Ignore
13352 * all other TCP options...
13353 */
13354 (void) tcp_parse_options(tcph, &tcpopt);
13355 }
13356 try_again:;
13357 mss = tcp->tcp_mss;
13358 gap = seg_seq - tcp->tcp_rnxt;
13359 rgap = tcp->tcp_rwnd - (gap + seg_len);
13360 /*
13361 * gap is the amount of sequence space between what we expect to see
13362 * and what we got for seg_seq. A positive value for gap means
13363 * something got lost. A negative value means we got some old stuff.
13364 */
13365 if (gap < 0) {
13366 /* Old stuff present. Is the SYN in there? */
13367 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) &&
13368 (seg_len != 0)) {
13369 flags &= ~TH_SYN;
13370 seg_seq++;
13371 urp--;
13372 /* Recompute the gaps after noting the SYN. */
13373 goto try_again;
13374 }
13375 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
13376 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
13377 (seg_len > -gap ? -gap : seg_len));
13378 /* Remove the old stuff from seg_len. */
13379 seg_len += gap;
13380 /*
13381 * Anything left?
13382 * Make sure to check for an unack'd FIN when the rest of the
13383 * data has been previously ack'd.
13384 */
13385 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
13386 /*
13387 * Resets are only valid if they lie within our offered
13388 * window. If the RST bit is set, we just ignore this
13389 * segment.
13390 */
13391 if (flags & TH_RST) {
13392 freemsg(mp);
13393 return;
13394 }
13395
13396 /*
13397 * The arrival of duplicate data packets indicates that
13398 * we may have postponed an ACK for too long, or the
13399 * other side's RTT estimate is out of shape. Start
13400 * acking more often.
13401 */
13402 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) &&
13403 tcp->tcp_rack_cnt >= 1 &&
13404 tcp->tcp_rack_abs_max > 2) {
13405 tcp->tcp_rack_abs_max--;
13406 }
13407 tcp->tcp_rack_cur_max = 1;
13408
13409 /*
13410 * This segment is "unacceptable". None of its
13411 * sequence space lies within our advertised window.
13412 *
13413 * Adjust seg_len to the original value for tracing.
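 *
 * Worked example (hypothetical numbers): with tcp_rnxt = 1000, a
 * segment with seg_seq = 900 and seg_len = 50 gives gap = -100, so
 * seg_len became -50 above: every byte of the segment is old data,
 * and we end up here. The "seg_len -= gap" below restores seg_len
 * to 50 purely for the trace output.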
13414 */ 13415 seg_len -= gap; 13416 if (tcp->tcp_debug) { 13417 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13418 "tcp_rput: unacceptable, gap %d, rgap %d, " 13419 "flags 0x%x, seg_seq %u, seg_ack %u, " 13420 "seg_len %d, rnxt %u, snxt %u, %s", 13421 gap, rgap, flags, seg_seq, seg_ack, 13422 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 13423 tcp_display(tcp, NULL, 13424 DISP_ADDR_AND_PORT)); 13425 } 13426 13427 /* 13428 * Arrange to send an ACK in response to the 13429 * unacceptable segment per RFC 793 page 69. There 13430 * is only one small difference between ours and the 13431 * acceptability test in the RFC - we accept ACK-only 13432 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 13433 * will be generated. 13434 * 13435 * Note that we have to ACK an ACK-only packet at least 13436 * for stacks that send 0-length keep-alives with 13437 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 13438 * section 4.2.3.6. As long as we don't ever generate 13439 * an unacceptable packet in response to an incoming 13440 * packet that is unacceptable, it should not cause 13441 * "ACK wars". 13442 */ 13443 flags |= TH_ACK_NEEDED; 13444 13445 /* 13446 * Continue processing this segment in order to use the 13447 * ACK information it contains, but skip all other 13448 * sequence-number processing. Processing the ACK 13449 * information is necessary in order to 13450 * re-synchronize connections that may have lost 13451 * synchronization. 13452 * 13453 * We clear seg_len and flag fields related to 13454 * sequence number processing as they are not 13455 * to be trusted for an unacceptable segment. 13456 */ 13457 seg_len = 0; 13458 flags &= ~(TH_SYN | TH_FIN | TH_URG); 13459 goto process_ack; 13460 } 13461 13462 /* Fix seg_seq, and chew the gap off the front. */ 13463 seg_seq = tcp->tcp_rnxt; 13464 urp += gap; 13465 do { 13466 mblk_t *mp2; 13467 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13468 (uintptr_t)UINT_MAX); 13469 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 13470 if (gap > 0) { 13471 mp->b_rptr = mp->b_wptr - gap; 13472 break; 13473 } 13474 mp2 = mp; 13475 mp = mp->b_cont; 13476 freeb(mp2); 13477 } while (gap < 0); 13478 /* 13479 * If the urgent data has already been acknowledged, we 13480 * should ignore TH_URG below 13481 */ 13482 if (urp < 0) 13483 flags &= ~TH_URG; 13484 } 13485 /* 13486 * rgap is the amount of stuff received out of window. A negative 13487 * value is the amount out of window. 13488 */ 13489 if (rgap < 0) { 13490 mblk_t *mp2; 13491 13492 if (tcp->tcp_rwnd == 0) { 13493 BUMP_MIB(&tcps->tcps_mib, tcpInWinProbe); 13494 } else { 13495 BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs); 13496 UPDATE_MIB(&tcps->tcps_mib, 13497 tcpInDataPastWinBytes, -rgap); 13498 } 13499 13500 /* 13501 * seg_len does not include the FIN, so if more than 13502 * just the FIN is out of window, we act like we don't 13503 * see it. (If just the FIN is out of window, rgap 13504 * will be zero and we will go ahead and acknowledge 13505 * the FIN.) 13506 */ 13507 flags &= ~TH_FIN; 13508 13509 /* Fix seg_len and make sure there is something left. */ 13510 seg_len += rgap; 13511 if (seg_len <= 0) { 13512 /* 13513 * Resets are only valid if they lie within our offered 13514 * window. If the RST bit is set, we just ignore this 13515 * segment. 13516 */ 13517 if (flags & TH_RST) { 13518 freemsg(mp); 13519 return; 13520 } 13521 13522 /* Per RFC 793, we need to send back an ACK. */ 13523 flags |= TH_ACK_NEEDED; 13524 13525 /* 13526 * Send SIGURG as soon as possible i.e. 
even 13527 * if the TH_URG was delivered in a window probe 13528 * packet (which will be unacceptable). 13529 * 13530 * We generate a signal if none has been generated 13531 * for this connection or if this is a new urgent 13532 * byte. Also send a zero-length "unmarked" message 13533 * to inform SIOCATMARK that this is not the mark. 13534 * 13535 * tcp_urp_last_valid is cleared when the T_exdata_ind 13536 * is sent up. This plus the check for old data 13537 * (gap >= 0) handles the wraparound of the sequence 13538 * number space without having to always track the 13539 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 13540 * this max in its rcv_up variable). 13541 * 13542 * This prevents duplicate SIGURGS due to a "late" 13543 * zero-window probe when the T_EXDATA_IND has already 13544 * been sent up. 13545 */ 13546 if ((flags & TH_URG) && 13547 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 13548 tcp->tcp_urp_last))) { 13549 mp1 = allocb(0, BPRI_MED); 13550 if (mp1 == NULL) { 13551 freemsg(mp); 13552 return; 13553 } 13554 if (!TCP_IS_DETACHED(tcp) && 13555 !putnextctl1(tcp->tcp_rq, M_PCSIG, 13556 SIGURG)) { 13557 /* Try again on the rexmit. */ 13558 freemsg(mp1); 13559 freemsg(mp); 13560 return; 13561 } 13562 /* 13563 * If the next byte would be the mark 13564 * then mark with MARKNEXT else mark 13565 * with NOTMARKNEXT. 13566 */ 13567 if (gap == 0 && urp == 0) 13568 mp1->b_flag |= MSGMARKNEXT; 13569 else 13570 mp1->b_flag |= MSGNOTMARKNEXT; 13571 freemsg(tcp->tcp_urp_mark_mp); 13572 tcp->tcp_urp_mark_mp = mp1; 13573 flags |= TH_SEND_URP_MARK; 13574 tcp->tcp_urp_last_valid = B_TRUE; 13575 tcp->tcp_urp_last = urp + seg_seq; 13576 } 13577 /* 13578 * If this is a zero window probe, continue to 13579 * process the ACK part. But we need to set seg_len 13580 * to 0 to avoid data processing. Otherwise just 13581 * drop the segment and send back an ACK. 13582 */ 13583 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 13584 flags &= ~(TH_SYN | TH_URG); 13585 seg_len = 0; 13586 goto process_ack; 13587 } else { 13588 freemsg(mp); 13589 goto ack_check; 13590 } 13591 } 13592 /* Pitch out of window stuff off the end. */ 13593 rgap = seg_len; 13594 mp2 = mp; 13595 do { 13596 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 13597 (uintptr_t)INT_MAX); 13598 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 13599 if (rgap < 0) { 13600 mp2->b_wptr += rgap; 13601 if ((mp1 = mp2->b_cont) != NULL) { 13602 mp2->b_cont = NULL; 13603 freemsg(mp1); 13604 } 13605 break; 13606 } 13607 } while ((mp2 = mp2->b_cont) != NULL); 13608 } 13609 ok:; 13610 /* 13611 * TCP should check ECN info for segments inside the window only. 13612 * Therefore the check should be done here. 13613 */ 13614 if (tcp->tcp_ecn_ok) { 13615 if (flags & TH_CWR) { 13616 tcp->tcp_ecn_echo_on = B_FALSE; 13617 } 13618 /* 13619 * Note that both ECN_CE and CWR can be set in the 13620 * same segment. In this case, we once again turn 13621 * on ECN_ECHO. 13622 */ 13623 if (tcp->tcp_ipversion == IPV4_VERSION) { 13624 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 13625 13626 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 13627 tcp->tcp_ecn_echo_on = B_TRUE; 13628 } 13629 } else { 13630 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 13631 13632 if ((vcf & htonl(IPH_ECN_CE << 20)) == 13633 htonl(IPH_ECN_CE << 20)) { 13634 tcp->tcp_ecn_echo_on = B_TRUE; 13635 } 13636 } 13637 } 13638 13639 /* 13640 * Check whether we can update tcp_ts_recent. This test is 13641 * NOT the one in RFC 1323 3.4. 
It is from Braden, 1993, "TCP
13642 * Extensions for High Performance: An Update", Internet Draft.
13643 */
13644 if (tcp->tcp_snd_ts_ok &&
13645 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
13646 SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
13647 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
13648 tcp->tcp_last_rcv_lbolt = lbolt64;
13649 }
13650
13651 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) {
13652 /*
13653 * FIN in an out of order segment. We record this in
13654 * tcp_valid_bits and the seq num of the FIN in tcp_ofo_fin_seq.
13655 * Clear the FIN so that any check on the FIN flag will fail.
13656 * Remember that the FIN also counts in the sequence number
13657 * space. So we need to ACK out-of-order FIN-only segments.
13658 */
13659 if (flags & TH_FIN) {
13660 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID;
13661 tcp->tcp_ofo_fin_seq = seg_seq + seg_len;
13662 flags &= ~TH_FIN;
13663 flags |= TH_ACK_NEEDED;
13664 }
13665 if (seg_len > 0) {
13666 /* Fill in the SACK blk list. */
13667 if (tcp->tcp_snd_sack_ok) {
13668 ASSERT(tcp->tcp_sack_info != NULL);
13669 tcp_sack_insert(tcp->tcp_sack_list,
13670 seg_seq, seg_seq + seg_len,
13671 &(tcp->tcp_num_sack_blk));
13672 }
13673
13674 /*
13675 * Attempt reassembly and see if we have something
13676 * ready to go.
13677 */
13678 mp = tcp_reass(tcp, mp, seg_seq);
13679 /* Always ack out of order packets */
13680 flags |= TH_ACK_NEEDED | TH_PUSH;
13681 if (mp) {
13682 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
13683 (uintptr_t)INT_MAX);
13684 seg_len = mp->b_cont ? msgdsize(mp) :
13685 (int)(mp->b_wptr - mp->b_rptr);
13686 seg_seq = tcp->tcp_rnxt;
13687 /*
13688 * If a gap is filled and the seq num and len
13689 * of the gap match those of a previously
13690 * received FIN, put the FIN flag back in.
13691 */
13692 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) &&
13693 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) {
13694 flags |= TH_FIN;
13695 tcp->tcp_valid_bits &=
13696 ~TCP_OFO_FIN_VALID;
13697 }
13698 } else {
13699 /*
13700 * Keep going even with NULL mp.
13701 * There may be a useful ACK or something else
13702 * we don't want to miss.
13703 *
13704 * But TCP should not perform fast retransmit
13705 * because of the ack number. TCP uses
13706 * seg_len == 0 to determine if it is a pure
13707 * ACK. And this is not a pure ACK.
13708 */
13709 seg_len = 0;
13710 ofo_seg = B_TRUE;
13711 }
13712 }
13713 } else if (seg_len > 0) {
13714 BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
13715 UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len);
13716 /*
13717 * If an out of order FIN was received before, and the seq
13718 * num and len of the new segment match those of the FIN,
13719 * put the FIN flag back in.
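 *
 * Example (hypothetical sequence numbers): an out-of-order FIN
 * segment with seg_seq = 2000 and seg_len = 100 set
 * tcp_ofo_fin_seq to 2100 and had its FIN bit stripped above.
 * When an in-order segment later ends exactly at 2100, the test
 * below restores TH_FIN so FIN processing happens in sequence.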
13720 */ 13721 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13722 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13723 flags |= TH_FIN; 13724 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 13725 } 13726 } 13727 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 13728 if (flags & TH_RST) { 13729 freemsg(mp); 13730 switch (tcp->tcp_state) { 13731 case TCPS_SYN_RCVD: 13732 (void) tcp_clean_death(tcp, ECONNREFUSED, 14); 13733 break; 13734 case TCPS_ESTABLISHED: 13735 case TCPS_FIN_WAIT_1: 13736 case TCPS_FIN_WAIT_2: 13737 case TCPS_CLOSE_WAIT: 13738 (void) tcp_clean_death(tcp, ECONNRESET, 15); 13739 break; 13740 case TCPS_CLOSING: 13741 case TCPS_LAST_ACK: 13742 (void) tcp_clean_death(tcp, 0, 16); 13743 break; 13744 default: 13745 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13746 (void) tcp_clean_death(tcp, ENXIO, 17); 13747 break; 13748 } 13749 return; 13750 } 13751 if (flags & TH_SYN) { 13752 /* 13753 * See RFC 793, Page 71 13754 * 13755 * The seq number must be in the window as it should 13756 * be "fixed" above. If it is outside window, it should 13757 * be already rejected. Note that we allow seg_seq to be 13758 * rnxt + rwnd because we want to accept 0 window probe. 13759 */ 13760 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 13761 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 13762 freemsg(mp); 13763 /* 13764 * If the ACK flag is not set, just use our snxt as the 13765 * seq number of the RST segment. 13766 */ 13767 if (!(flags & TH_ACK)) { 13768 seg_ack = tcp->tcp_snxt; 13769 } 13770 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 13771 TH_RST|TH_ACK); 13772 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13773 (void) tcp_clean_death(tcp, ECONNRESET, 18); 13774 return; 13775 } 13776 /* 13777 * urp could be -1 when the urp field in the packet is 0 13778 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 13779 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 13780 */ 13781 if (flags & TH_URG && urp >= 0) { 13782 if (!tcp->tcp_urp_last_valid || 13783 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 13784 /* 13785 * If we haven't generated the signal yet for this 13786 * urgent pointer value, do it now. Also, send up a 13787 * zero-length M_DATA indicating whether or not this is 13788 * the mark. The latter is not needed when a 13789 * T_EXDATA_IND is sent up. However, if there are 13790 * allocation failures this code relies on the sender 13791 * retransmitting and the socket code for determining 13792 * the mark should not block waiting for the peer to 13793 * transmit. Thus, for simplicity we always send up the 13794 * mark indication. 13795 */ 13796 mp1 = allocb(0, BPRI_MED); 13797 if (mp1 == NULL) { 13798 freemsg(mp); 13799 return; 13800 } 13801 if (!TCP_IS_DETACHED(tcp) && 13802 !putnextctl1(tcp->tcp_rq, M_PCSIG, SIGURG)) { 13803 /* Try again on the rexmit. */ 13804 freemsg(mp1); 13805 freemsg(mp); 13806 return; 13807 } 13808 /* 13809 * Mark with NOTMARKNEXT for now. 13810 * The code below will change this to MARKNEXT 13811 * if we are at the mark. 13812 * 13813 * If there are allocation failures (e.g. in dupmsg 13814 * below) the next time tcp_rput_data sees the urgent 13815 * segment it will send up the MSG*MARKNEXT message. 
13816 */ 13817 mp1->b_flag |= MSGNOTMARKNEXT; 13818 freemsg(tcp->tcp_urp_mark_mp); 13819 tcp->tcp_urp_mark_mp = mp1; 13820 flags |= TH_SEND_URP_MARK; 13821 #ifdef DEBUG 13822 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13823 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 13824 "last %x, %s", 13825 seg_seq, urp, tcp->tcp_urp_last, 13826 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13827 #endif /* DEBUG */ 13828 tcp->tcp_urp_last_valid = B_TRUE; 13829 tcp->tcp_urp_last = urp + seg_seq; 13830 } else if (tcp->tcp_urp_mark_mp != NULL) { 13831 /* 13832 * An allocation failure prevented the previous 13833 * tcp_rput_data from sending up the allocated 13834 * MSG*MARKNEXT message - send it up this time 13835 * around. 13836 */ 13837 flags |= TH_SEND_URP_MARK; 13838 } 13839 13840 /* 13841 * If the urgent byte is in this segment, make sure that it is 13842 * all by itself. This makes it much easier to deal with the 13843 * possibility of an allocation failure on the T_exdata_ind. 13844 * Note that seg_len is the number of bytes in the segment, and 13845 * urp is the offset into the segment of the urgent byte. 13846 * urp < seg_len means that the urgent byte is in this segment. 13847 */ 13848 if (urp < seg_len) { 13849 if (seg_len != 1) { 13850 uint32_t tmp_rnxt; 13851 /* 13852 * Break it up and feed it back in. 13853 * Re-attach the IP header. 13854 */ 13855 mp->b_rptr = iphdr; 13856 if (urp > 0) { 13857 /* 13858 * There is stuff before the urgent 13859 * byte. 13860 */ 13861 mp1 = dupmsg(mp); 13862 if (!mp1) { 13863 /* 13864 * Trim from urgent byte on. 13865 * The rest will come back. 13866 */ 13867 (void) adjmsg(mp, 13868 urp - seg_len); 13869 tcp_rput_data(connp, 13870 mp, NULL); 13871 return; 13872 } 13873 (void) adjmsg(mp1, urp - seg_len); 13874 /* Feed this piece back in. */ 13875 tmp_rnxt = tcp->tcp_rnxt; 13876 tcp_rput_data(connp, mp1, NULL); 13877 /* 13878 * If the data passed back in was not 13879 * processed (ie: bad ACK) sending 13880 * the remainder back in will cause a 13881 * loop. In this case, drop the 13882 * packet and let the sender try 13883 * sending a good packet. 13884 */ 13885 if (tmp_rnxt == tcp->tcp_rnxt) { 13886 freemsg(mp); 13887 return; 13888 } 13889 } 13890 if (urp != seg_len - 1) { 13891 uint32_t tmp_rnxt; 13892 /* 13893 * There is stuff after the urgent 13894 * byte. 13895 */ 13896 mp1 = dupmsg(mp); 13897 if (!mp1) { 13898 /* 13899 * Trim everything beyond the 13900 * urgent byte. The rest will 13901 * come back. 13902 */ 13903 (void) adjmsg(mp, 13904 urp + 1 - seg_len); 13905 tcp_rput_data(connp, 13906 mp, NULL); 13907 return; 13908 } 13909 (void) adjmsg(mp1, urp + 1 - seg_len); 13910 tmp_rnxt = tcp->tcp_rnxt; 13911 tcp_rput_data(connp, mp1, NULL); 13912 /* 13913 * If the data passed back in was not 13914 * processed (ie: bad ACK) sending 13915 * the remainder back in will cause a 13916 * loop. In this case, drop the 13917 * packet and let the sender try 13918 * sending a good packet. 13919 */ 13920 if (tmp_rnxt == tcp->tcp_rnxt) { 13921 freemsg(mp); 13922 return; 13923 } 13924 } 13925 tcp_rput_data(connp, mp, NULL); 13926 return; 13927 } 13928 /* 13929 * This segment contains only the urgent byte. We 13930 * have to allocate the T_exdata_ind, if we can. 13931 */ 13932 if (!tcp->tcp_urp_mp) { 13933 struct T_exdata_ind *tei; 13934 mp1 = allocb(sizeof (struct T_exdata_ind), 13935 BPRI_MED); 13936 if (!mp1) { 13937 /* 13938 * Sigh... It'll be back. 13939 * Generate any MSG*MARK message now. 
13940 */ 13941 freemsg(mp); 13942 seg_len = 0; 13943 if (flags & TH_SEND_URP_MARK) { 13944 13945 13946 ASSERT(tcp->tcp_urp_mark_mp); 13947 tcp->tcp_urp_mark_mp->b_flag &= 13948 ~MSGNOTMARKNEXT; 13949 tcp->tcp_urp_mark_mp->b_flag |= 13950 MSGMARKNEXT; 13951 } 13952 goto ack_check; 13953 } 13954 mp1->b_datap->db_type = M_PROTO; 13955 tei = (struct T_exdata_ind *)mp1->b_rptr; 13956 tei->PRIM_type = T_EXDATA_IND; 13957 tei->MORE_flag = 0; 13958 mp1->b_wptr = (uchar_t *)&tei[1]; 13959 tcp->tcp_urp_mp = mp1; 13960 #ifdef DEBUG 13961 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13962 "tcp_rput: allocated exdata_ind %s", 13963 tcp_display(tcp, NULL, 13964 DISP_PORT_ONLY)); 13965 #endif /* DEBUG */ 13966 /* 13967 * There is no need to send a separate MSG*MARK 13968 * message since the T_EXDATA_IND will be sent 13969 * now. 13970 */ 13971 flags &= ~TH_SEND_URP_MARK; 13972 freemsg(tcp->tcp_urp_mark_mp); 13973 tcp->tcp_urp_mark_mp = NULL; 13974 } 13975 /* 13976 * Now we are all set. On the next putnext upstream, 13977 * tcp_urp_mp will be non-NULL and will get prepended 13978 * to what has to be this piece containing the urgent 13979 * byte. If for any reason we abort this segment below, 13980 * if it comes back, we will have this ready, or it 13981 * will get blown off in close. 13982 */ 13983 } else if (urp == seg_len) { 13984 /* 13985 * The urgent byte is the next byte after this sequence 13986 * number. If there is data it is marked with 13987 * MSGMARKNEXT and any tcp_urp_mark_mp is discarded 13988 * since it is not needed. Otherwise, if the code 13989 * above just allocated a zero-length tcp_urp_mark_mp 13990 * message, that message is tagged with MSGMARKNEXT. 13991 * Sending up these MSGMARKNEXT messages makes 13992 * SIOCATMARK work correctly even though 13993 * the T_EXDATA_IND will not be sent up until the 13994 * urgent byte arrives. 
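 *
 * (For illustration: a sockets application checks the mark with
 *
 *	int atmark;
 *	ioctl(fd, SIOCATMARK, &atmark);
 *
 * and the zero-length MSGMARKNEXT message is what lets that query
 * be answered correctly before the urgent byte itself arrives.)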
13995 */
13996 if (seg_len != 0) {
13997 flags |= TH_MARKNEXT_NEEDED;
13998 freemsg(tcp->tcp_urp_mark_mp);
13999 tcp->tcp_urp_mark_mp = NULL;
14000 flags &= ~TH_SEND_URP_MARK;
14001 } else if (tcp->tcp_urp_mark_mp != NULL) {
14002 flags |= TH_SEND_URP_MARK;
14003 tcp->tcp_urp_mark_mp->b_flag &=
14004 ~MSGNOTMARKNEXT;
14005 tcp->tcp_urp_mark_mp->b_flag |= MSGMARKNEXT;
14006 }
14007 #ifdef DEBUG
14008 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14009 "tcp_rput: AT MARK, len %d, flags 0x%x, %s",
14010 seg_len, flags,
14011 tcp_display(tcp, NULL, DISP_PORT_ONLY));
14012 #endif /* DEBUG */
14013 } else {
14014 /* Data left until we hit mark */
14015 #ifdef DEBUG
14016 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14017 "tcp_rput: URP %d bytes left, %s",
14018 urp - seg_len, tcp_display(tcp, NULL,
14019 DISP_PORT_ONLY));
14020 #endif /* DEBUG */
14021 }
14022 }
14023
14024 process_ack:
14025 if (!(flags & TH_ACK)) {
14026 freemsg(mp);
14027 goto xmit_check;
14028 }
14029 }
14030 bytes_acked = (int)(seg_ack - tcp->tcp_suna);
14031
14032 if (tcp->tcp_ipversion == IPV6_VERSION && bytes_acked > 0)
14033 tcp->tcp_ip_forward_progress = B_TRUE;
14034 if (tcp->tcp_state == TCPS_SYN_RCVD) {
14035 if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) &&
14036 ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) {
14037 /* 3-way handshake complete - pass up the T_CONN_IND */
14038 tcp_t *listener = tcp->tcp_listener;
14039 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind;
14040
14041 tcp->tcp_tconnind_started = B_TRUE;
14042 tcp->tcp_conn.tcp_eager_conn_ind = NULL;
14043 /*
14044 * Being here means the eager is fine, but it can
14045 * get a TH_RST at any point between now and when
14046 * the accept completes, and then disappear. We
14047 * need to ensure that the reference to the eager
14048 * remains valid after we get out of the eager's
14049 * perimeter. So we do an extra refhold.
14050 */
14051 CONN_INC_REF(connp);
14052
14053 /*
14054 * The listener also exists because of the refhold
14055 * done in tcp_conn_request. It's possible that it
14056 * might have closed. We will check that once we
14057 * get inside the listener's context.
14058 */
14059 CONN_INC_REF(listener->tcp_connp);
14060 if (listener->tcp_connp->conn_sqp ==
14061 connp->conn_sqp) {
14062 tcp_send_conn_ind(listener->tcp_connp, mp,
14063 listener->tcp_connp->conn_sqp);
14064 CONN_DEC_REF(listener->tcp_connp);
14065 } else if (!tcp->tcp_loopback) {
14066 squeue_fill(listener->tcp_connp->conn_sqp, mp,
14067 tcp_send_conn_ind,
14068 listener->tcp_connp, SQTAG_TCP_CONN_IND);
14069 } else {
14070 squeue_enter(listener->tcp_connp->conn_sqp, mp,
14071 tcp_send_conn_ind, listener->tcp_connp,
14072 SQTAG_TCP_CONN_IND);
14073 }
14074 }
14075
14076 if (tcp->tcp_active_open) {
14077 /*
14078 * We are seeing the final ACK in the three-way
14079 * handshake of an actively opened connection,
14080 * so we must send up a T_CONN_CON.
14081 */
14082 if (!tcp_conn_con(tcp, iphdr, tcph, mp, NULL)) {
14083 freemsg(mp);
14084 return;
14085 }
14086 /*
14087 * Don't fuse the loopback endpoints for
14088 * simultaneous active opens.
14089 */
14090 if (tcp->tcp_loopback) {
14091 TCP_STAT(tcps, tcp_fusion_unfusable);
14092 tcp->tcp_unfusable = B_TRUE;
14093 }
14094 }
14095
14096 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */
14097 bytes_acked--;
14098 /* SYN was acked - making progress */
14099 if (tcp->tcp_ipversion == IPV6_VERSION)
14100 tcp->tcp_ip_forward_progress = B_TRUE;
14101
14102 /*
14103 * If SYN was retransmitted, need to reset all
14104 * retransmission info as this segment will be
14105 * treated as a dup ACK.
14106 */
14107 if (tcp->tcp_rexmit) {
14108 tcp->tcp_rexmit = B_FALSE;
14109 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
14110 tcp->tcp_rexmit_max = tcp->tcp_snxt;
14111 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14112 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14113 tcp->tcp_ms_we_have_waited = 0;
14114 tcp->tcp_cwnd = mss;
14115 }
14116
14117 /*
14118 * We set the send window to zero here.
14119 * This is needed if there is data to be
14120 * processed already on the queue.
14121 * Later (at the swnd_update label), when the
14122 * "new_swnd > tcp_swnd" condition is satisfied,
14123 * the XMIT_NEEDED flag is set in the current
14124 * (SYN_RCVD) state. This ensures tcp_wput_data() is
14125 * called if there is already data on the queue in
14126 * this state.
14127 */
14128 tcp->tcp_swnd = 0;
14129
14130 if (new_swnd > tcp->tcp_max_swnd)
14131 tcp->tcp_max_swnd = new_swnd;
14132 tcp->tcp_swl1 = seg_seq;
14133 tcp->tcp_swl2 = seg_ack;
14134 tcp->tcp_state = TCPS_ESTABLISHED;
14135 tcp->tcp_valid_bits &= ~TCP_ISS_VALID;
14136
14137 /* Fuse when both sides are in ESTABLISHED state */
14138 if (tcp->tcp_loopback && do_tcp_fusion)
14139 tcp_fuse(tcp, iphdr, tcph);
14140
14141 }
14142 /* This code follows 4.4BSD-Lite2 mostly. */
14143 if (bytes_acked < 0)
14144 goto est;
14145
14146 /*
14147 * If TCP is ECN capable and the congestion experienced bit is
14148 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be
14149 * done once per window (or more loosely, per RTT).
14150 */
14151 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max))
14152 tcp->tcp_cwr = B_FALSE;
14153 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) {
14154 if (!tcp->tcp_cwr) {
14155 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss;
14156 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss;
14157 tcp->tcp_cwnd = npkt * mss;
14158 /*
14159 * If the cwnd is 0, use the timer to clock out
14160 * new segments. This is required by the ECN spec.
14161 */
14162 if (npkt == 0) {
14163 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14164 /*
14165 * This makes sure that when the ACK comes
14166 * back, we will increase tcp_cwnd by 1 MSS.
14167 */
14168 tcp->tcp_cwnd_cnt = 0;
14169 }
14170 tcp->tcp_cwr = B_TRUE;
14171 /*
14172 * This marks the end of the current window of in
14173 * flight data. That is why we don't use
14174 * tcp_suna + tcp_swnd. Only data in flight can
14175 * provide ECN info.
14176 */
14177 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
14178 tcp->tcp_ecn_cwr_sent = B_FALSE;
14179 }
14180 }
14181
14182 mp1 = tcp->tcp_xmit_head;
14183 if (bytes_acked == 0) {
14184 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) {
14185 int dupack_cnt;
14186
14187 BUMP_MIB(&tcps->tcps_mib, tcpInDupAck);
14188 /*
14189 * Fast retransmit. When we have seen exactly three
14190 * identical ACKs while we have unacked data
14191 * outstanding, we take it as a hint that our peer
14192 * dropped something.
14193 *
14194 * If TCP is retransmitting, don't do fast retransmit.
14195 */
14196 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt &&
14197 !tcp->tcp_rexmit) {
14198 /* Do Limited Transmit */
14199 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) <
14200 tcps->tcps_dupack_fast_retransmit) {
14201 /*
14202 * RFC 3042
14203 *
14204 * What we need to do is temporarily
14205 * increase tcp_cwnd so that new
14206 * data can be sent if it is allowed
14207 * by the receive window (tcp_rwnd).
14208 * tcp_wput_data() will take care of
14209 * the rest.
14210 *
14211 * If the connection is SACK capable,
14212 * only do limited xmit when there
14213 * is SACK info.
14214 *
14215 * Note how tcp_cwnd is incremented.
14216 * The first dup ACK will increase
14217 * it by 1 MSS. The second dup ACK
14218 * will increase it by 2 MSS. This
14219 * means that only 1 new segment will
14220 * be sent for each dup ACK.
14221 */
14222 if (tcp->tcp_unsent > 0 &&
14223 (!tcp->tcp_snd_sack_ok ||
14224 (tcp->tcp_snd_sack_ok &&
14225 tcp->tcp_notsack_list != NULL))) {
14226 tcp->tcp_cwnd += mss <<
14227 (tcp->tcp_dupack_cnt - 1);
14228 flags |= TH_LIMIT_XMIT;
14229 }
14230 } else if (dupack_cnt ==
14231 tcps->tcps_dupack_fast_retransmit) {
14232
14233 /*
14234 * If we have reduced tcp_ssthresh
14235 * because of ECN, do not reduce it again
14236 * unless it is already one window of data
14237 * away. After one window of data, tcp_cwr
14238 * should then be cleared. Note that
14239 * for a non-ECN-capable connection, tcp_cwr
14240 * should always be false.
14241 *
14242 * Adjust cwnd since the duplicate
14243 * ACK indicates that a packet was
14244 * dropped (due to congestion).
14245 */
14246 if (!tcp->tcp_cwr) {
14247 npkt = ((tcp->tcp_snxt -
14248 tcp->tcp_suna) >> 1) / mss;
14249 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
14250 mss;
14251 tcp->tcp_cwnd = (npkt +
14252 tcp->tcp_dupack_cnt) * mss;
14253 }
14254 if (tcp->tcp_ecn_ok) {
14255 tcp->tcp_cwr = B_TRUE;
14256 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
14257 tcp->tcp_ecn_cwr_sent = B_FALSE;
14258 }
14259
14260 /*
14261 * We do Hoe's algorithm. Refer to her
14262 * paper "Improving the Start-up Behavior
14263 * of a Congestion Control Scheme for TCP,"
14264 * which appeared in SIGCOMM '96.
14265 *
14266 * Save the highest seq no we have sent so far.
14267 * Be careful about the invisible FIN byte.
14268 */
14269 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
14270 (tcp->tcp_unsent == 0)) {
14271 tcp->tcp_rexmit_max = tcp->tcp_fss;
14272 } else {
14273 tcp->tcp_rexmit_max = tcp->tcp_snxt;
14274 }
14275
14276 /*
14277 * Do not allow bursty traffic during
14278 * fast recovery. Refer to Fall and Floyd's
14279 * paper "Simulation-based Comparisons of
14280 * Tahoe, Reno and SACK TCP" (in CCR).
14281 * This is a best current practice.
14282 */
14283 tcp->tcp_snd_burst = TCP_CWND_SS;
14284
14285 /*
14286 * For SACK:
14287 * Calculate tcp_pipe, which is the
14288 * estimated number of bytes in the
14289 * network.
14290 *
14291 * tcp_fack is the highest sack'ed seq num
14292 * TCP has received.
14293 *
14294 * tcp_pipe is explained in the above quoted
14295 * Fall and Floyd's paper. tcp_fack is
14296 * explained in Mathis and Mahdavi's
14297 * "Forward Acknowledgment: Refining TCP
14298 * Congestion Control" in SIGCOMM '96.
14299 */
14300 if (tcp->tcp_snd_sack_ok) {
14301 ASSERT(tcp->tcp_sack_info != NULL);
14302 if (tcp->tcp_notsack_list != NULL) {
14303 tcp->tcp_pipe = tcp->tcp_snxt -
14304 tcp->tcp_fack;
14305 tcp->tcp_sack_snxt = seg_ack;
14306 flags |= TH_NEED_SACK_REXMIT;
14307 } else {
14308 /*
14309 * Always initialize tcp_pipe
14310 * even though we don't have
14311 * any SACK info.
If later
14312 * we get SACK info and
14313 * tcp_pipe is not initialized,
14314 * funny things will happen.
14315 */
14316 tcp->tcp_pipe =
14317 tcp->tcp_cwnd_ssthresh;
14318 }
14319 } else {
14320 flags |= TH_REXMIT_NEEDED;
14321 } /* tcp_snd_sack_ok */
14322
14323 } else {
14324 /*
14325 * Here we perform congestion
14326 * avoidance, but NOT slow start.
14327 * This is known as the Fast
14328 * Recovery Algorithm.
14329 */
14330 if (tcp->tcp_snd_sack_ok &&
14331 tcp->tcp_notsack_list != NULL) {
14332 flags |= TH_NEED_SACK_REXMIT;
14333 tcp->tcp_pipe -= mss;
14334 if (tcp->tcp_pipe < 0)
14335 tcp->tcp_pipe = 0;
14336 } else {
14337 /*
14338 * We know that one more packet has
14339 * left the pipe, thus we can update
14340 * cwnd.
14341 */
14342 cwnd = tcp->tcp_cwnd + mss;
14343 if (cwnd > tcp->tcp_cwnd_max)
14344 cwnd = tcp->tcp_cwnd_max;
14345 tcp->tcp_cwnd = cwnd;
14346 if (tcp->tcp_unsent > 0)
14347 flags |= TH_XMIT_NEEDED;
14348 }
14349 }
14350 }
14351 } else if (tcp->tcp_zero_win_probe) {
14352 /*
14353 * If the window has opened, we need to arrange
14354 * to send additional data.
14355 */
14356 if (new_swnd != 0) {
14357 /* tcp_suna != tcp_snxt */
14358 /* Packet contains a window update */
14359 BUMP_MIB(&tcps->tcps_mib, tcpInWinUpdate);
14360 tcp->tcp_zero_win_probe = 0;
14361 tcp->tcp_timer_backoff = 0;
14362 tcp->tcp_ms_we_have_waited = 0;
14363
14364 /*
14365 * Transmit starting with tcp_suna since
14366 * the one byte probe is not ack'ed.
14367 * If TCP has sent more than one identical
14368 * probe, tcp_rexmit will be set. That means
14369 * tcp_ss_rexmit() will send out the one
14370 * byte along with new data. Otherwise,
14371 * fake the retransmission.
14372 */
14373 flags |= TH_XMIT_NEEDED;
14374 if (!tcp->tcp_rexmit) {
14375 tcp->tcp_rexmit = B_TRUE;
14376 tcp->tcp_dupack_cnt = 0;
14377 tcp->tcp_rexmit_nxt = tcp->tcp_suna;
14378 tcp->tcp_rexmit_max = tcp->tcp_suna + 1;
14379 }
14380 }
14381 }
14382 goto swnd_update;
14383 }
14384
14385 /*
14386 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73.
14387 * If the ACK value acks something that we have not yet sent, it might
14388 * be an old duplicate segment. Send an ACK to re-synchronize the
14389 * other side.
14390 * Note: a reset in response to an unacceptable ACK in SYN_RCVD
14391 * state is handled above, so we can always just drop the segment and
14392 * send an ACK here.
14393 *
14394 * Should we send ACKs in response to ACK-only segments?
14395 */
14396 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) {
14397 BUMP_MIB(&tcps->tcps_mib, tcpInAckUnsent);
14398 /* drop the received segment */
14399 freemsg(mp);
14400
14401 /*
14402 * Send back an ACK. If tcp_drop_ack_unsent_cnt is
14403 * greater than 0, check if the number of such
14404 * bogus ACKs is greater than that count. If yes,
14405 * don't send back any ACK. This prevents TCP from
14406 * getting into an ACK storm if somehow an attacker
14407 * successfully spoofs an acceptable segment to our
14408 * peer.
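 *
 * Illustration (the count is whatever the tunable holds; 10 is
 * chosen here only as an example): with tcp_drop_ack_unsent_cnt
 * set to 10, the first 10 such bogus ACKs each draw one ACK in
 * reply; from the 11th on we stay silent, so the mutual
 * ACK-for-ACK feedback loop dies out on our side.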
14409 */
14410 if (tcp_drop_ack_unsent_cnt > 0 &&
14411 ++tcp->tcp_in_ack_unsent > tcp_drop_ack_unsent_cnt) {
14412 TCP_STAT(tcps, tcp_in_ack_unsent_drop);
14413 return;
14414 }
14415 mp = tcp_ack_mp(tcp);
14416 if (mp != NULL) {
14417 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
14418 BUMP_LOCAL(tcp->tcp_obsegs);
14419 BUMP_MIB(&tcps->tcps_mib, tcpOutAck);
14420 tcp_send_data(tcp, tcp->tcp_wq, mp);
14421 }
14422 return;
14423 }
14424
14425 /*
14426 * When TCP gets a new ACK, update the notsack'ed list to delete those
14427 * blocks that are covered by this ACK.
14428 */
14429 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
14430 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack,
14431 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list));
14432 }
14433
14434 /*
14435 * If we got an ACK after fast retransmit, check to see
14436 * if it is a partial ACK. If it is not and the congestion
14437 * window was inflated to account for the other side's
14438 * cached packets, retract it. If it is, do Hoe's algorithm.
14439 */
14440 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) {
14441 ASSERT(tcp->tcp_rexmit == B_FALSE);
14442 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) {
14443 tcp->tcp_dupack_cnt = 0;
14444 /*
14445 * Restore the orig tcp_cwnd_ssthresh after
14446 * fast retransmit phase.
14447 */
14448 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) {
14449 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh;
14450 }
14451 tcp->tcp_rexmit_max = seg_ack;
14452 tcp->tcp_cwnd_cnt = 0;
14453 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14454 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14455
14456 /*
14457 * Remove all notsack info to avoid confusion with
14458 * the next fast retransmit/recovery phase.
14459 */
14460 if (tcp->tcp_snd_sack_ok &&
14461 tcp->tcp_notsack_list != NULL) {
14462 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list);
14463 }
14464 } else {
14465 if (tcp->tcp_snd_sack_ok &&
14466 tcp->tcp_notsack_list != NULL) {
14467 flags |= TH_NEED_SACK_REXMIT;
14468 tcp->tcp_pipe -= mss;
14469 if (tcp->tcp_pipe < 0)
14470 tcp->tcp_pipe = 0;
14471 } else {
14472 /*
14473 * Hoe's algorithm:
14474 *
14475 * Retransmit the unack'ed segment and
14476 * restart fast recovery. Note that we
14477 * need to scale back tcp_cwnd to the
14478 * original value when we started fast
14479 * recovery. This is to prevent overly
14480 * aggressive behaviour in sending new
14481 * segments.
14482 */
14483 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh +
14484 tcps->tcps_dupack_fast_retransmit * mss;
14485 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd;
14486 flags |= TH_REXMIT_NEEDED;
14487 }
14488 }
14489 } else {
14490 tcp->tcp_dupack_cnt = 0;
14491 if (tcp->tcp_rexmit) {
14492 /*
14493 * TCP is retransmitting. If the ACK ack's all
14494 * outstanding data, update tcp_rexmit_max and
14495 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt
14496 * to the correct value.
14497 *
14498 * Note that SEQ_LEQ() is used. This is to avoid
14499 * unnecessary fast retransmit caused by dup ACKs
14500 * received when TCP does slow start retransmission
14501 * after a time out. During this phase, TCP may
14502 * send out segments which are already received.
14503 * This causes dup ACKs to be sent back.
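 *
 * A worked example with made-up numbers: a timeout fires with
 * suna = 1000 and snxt = 9000, so the retransmission phase
 * starts with tcp_rexmit_nxt = 1000 and tcp_rexmit_max = 9000.
 * If the peer actually holds much of that data already (only
 * the ACKs were lost), the resent segments draw ACKs such as
 * 5000 that satisfy SEQ_LEQ(seg_ack, tcp_rexmit_max); they
 * merely advance tcp_rexmit_nxt and keep tcp_dupack_cnt at 0
 * rather than feeding the fast retransmit logic. Only an ACK
 * beyond 9000, covering data first sent after the timeout,
 * clears tcp_rexmit and ends the phase.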
14504 */
14505 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
14506 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
14507 tcp->tcp_rexmit_nxt = seg_ack;
14508 }
14509 if (seg_ack != tcp->tcp_rexmit_max) {
14510 flags |= TH_XMIT_NEEDED;
14511 }
14512 } else {
14513 tcp->tcp_rexmit = B_FALSE;
14514 tcp->tcp_xmit_zc_clean = B_FALSE;
14515 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
14516 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14517 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14518 }
14519 tcp->tcp_ms_we_have_waited = 0;
14520 }
14521 }
14522
14523 BUMP_MIB(&tcps->tcps_mib, tcpInAckSegs);
14524 UPDATE_MIB(&tcps->tcps_mib, tcpInAckBytes, bytes_acked);
14525 tcp->tcp_suna = seg_ack;
14526 if (tcp->tcp_zero_win_probe != 0) {
14527 tcp->tcp_zero_win_probe = 0;
14528 tcp->tcp_timer_backoff = 0;
14529 }
14530
14531 /*
14532 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
14533 * Note that it cannot be the SYN being ack'ed. The code flow
14534 * will not reach here.
14535 */
14536 if (mp1 == NULL) {
14537 goto fin_acked;
14538 }
14539
14540 /*
14541 * Update the congestion window.
14542 *
14543 * If TCP is not ECN capable or TCP is ECN capable but the
14544 * congestion experience bit is not set, increase the tcp_cwnd as
14545 * usual.
14546 */
14547 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
14548 cwnd = tcp->tcp_cwnd;
14549 add = mss;
14550
14551 if (cwnd >= tcp->tcp_cwnd_ssthresh) {
14552 /*
14553 * This is to prevent an increase of less than 1 MSS of
14554 * tcp_cwnd. With partial increase, tcp_wput_data()
14555 * may send out tinygrams in order to preserve mblk
14556 * boundaries.
14557 *
14558 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
14559 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
14560 * increased by 1 MSS for every RTT.
14561 */
14562 if (tcp->tcp_cwnd_cnt <= 0) {
14563 tcp->tcp_cwnd_cnt = cwnd + add;
14564 } else {
14565 tcp->tcp_cwnd_cnt -= add;
14566 add = 0;
14567 }
14568 }
14569 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
14570 }
14571
14572 /* See if the latest urgent data has been acknowledged */
14573 if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
14574 SEQ_GT(seg_ack, tcp->tcp_urg))
14575 tcp->tcp_valid_bits &= ~TCP_URG_VALID;
14576
14577 /* Can we update the RTT estimates? */
14578 if (tcp->tcp_snd_ts_ok) {
14579 /* Ignore zero timestamp echo-reply. */
14580 if (tcpopt.tcp_opt_ts_ecr != 0) {
14581 tcp_set_rto(tcp, (int32_t)lbolt -
14582 (int32_t)tcpopt.tcp_opt_ts_ecr);
14583 }
14584
14585 /* If needed, restart the timer. */
14586 if (tcp->tcp_set_timer == 1) {
14587 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14588 tcp->tcp_set_timer = 0;
14589 }
14590 /*
14591 * Update tcp_csuna in case the other side stops sending
14592 * us timestamps.
14593 */
14594 tcp->tcp_csuna = tcp->tcp_snxt;
14595 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
14596 /*
14597 * An ACK sequence we haven't seen before, so get the RTT
14598 * and update the RTO. But first check if the timestamp is
14599 * valid to use.
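 *
 * As the code below maintains, each mblk on the transmit list
 * doubles as the RTT probe for untimestamped connections:
 * b_prev is overloaded to hold the lbolt tick at which its
 * data was last sent, and b_next the starting sequence number
 * being timed. The sample is taken only when b_next is still
 * set and this ACK covers that starting sequence, so data
 * that was retransmitted (and whose timing is therefore
 * ambiguous) never updates the RTO, in the spirit of Karn's
 * algorithm.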
14600 */
14601 if ((mp1->b_next != NULL) &&
14602 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
14603 tcp_set_rto(tcp, (int32_t)lbolt -
14604 (int32_t)(intptr_t)mp1->b_prev);
14605 else
14606 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14607
14608 /* Remember the last sequence to be ACKed */
14609 tcp->tcp_csuna = seg_ack;
14610 if (tcp->tcp_set_timer == 1) {
14611 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14612 tcp->tcp_set_timer = 0;
14613 }
14614 } else {
14615 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14616 }
14617
14618 /* Eat acknowledged bytes off the xmit queue. */
14619 for (;;) {
14620 mblk_t *mp2;
14621 uchar_t *wptr;
14622
14623 wptr = mp1->b_wptr;
14624 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
14625 bytes_acked -= (int)(wptr - mp1->b_rptr);
14626 if (bytes_acked < 0) {
14627 mp1->b_rptr = wptr + bytes_acked;
14628 /*
14629 * Set a new timestamp if all the bytes timed by the
14630 * old timestamp have been ack'ed.
14631 */
14632 if (SEQ_GT(seg_ack,
14633 (uint32_t)(uintptr_t)(mp1->b_next))) {
14634 mp1->b_prev = (mblk_t *)(uintptr_t)lbolt;
14635 mp1->b_next = NULL;
14636 }
14637 break;
14638 }
14639 mp1->b_next = NULL;
14640 mp1->b_prev = NULL;
14641 mp2 = mp1;
14642 mp1 = mp1->b_cont;
14643
14644 /*
14645 * This notification is required for some zero-copy
14646 * clients to maintain a copy semantic. After the data
14647 * is ack'ed, client is safe to modify or reuse the buffer.
14648 */
14649 if (tcp->tcp_snd_zcopy_aware &&
14650 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
14651 tcp_zcopy_notify(tcp);
14652 freeb(mp2);
14653 if (bytes_acked == 0) {
14654 if (mp1 == NULL) {
14655 /* Everything is ack'ed, clear the tail. */
14656 tcp->tcp_xmit_tail = NULL;
14657 /*
14658 * Cancel the timer unless we are still
14659 * waiting for an ACK for the FIN packet.
14660 */
14661 if (tcp->tcp_timer_tid != 0 &&
14662 tcp->tcp_snxt == tcp->tcp_suna) {
14663 (void) TCP_TIMER_CANCEL(tcp,
14664 tcp->tcp_timer_tid);
14665 tcp->tcp_timer_tid = 0;
14666 }
14667 goto pre_swnd_update;
14668 }
14669 if (mp2 != tcp->tcp_xmit_tail)
14670 break;
14671 tcp->tcp_xmit_tail = mp1;
14672 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
14673 (uintptr_t)INT_MAX);
14674 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
14675 mp1->b_rptr);
14676 break;
14677 }
14678 if (mp1 == NULL) {
14679 /*
14680 * More was acked but there is nothing more
14681 * outstanding. This means that the FIN was
14682 * just acked or that we're talking to a clown.
14683 */
14684 fin_acked:
14685 ASSERT(tcp->tcp_fin_sent);
14686 tcp->tcp_xmit_tail = NULL;
14687 if (tcp->tcp_fin_sent) {
14688 /* FIN was acked - making progress */
14689 if (tcp->tcp_ipversion == IPV6_VERSION &&
14690 !tcp->tcp_fin_acked)
14691 tcp->tcp_ip_forward_progress = B_TRUE;
14692 tcp->tcp_fin_acked = B_TRUE;
14693 if (tcp->tcp_linger_tid != 0 &&
14694 TCP_TIMER_CANCEL(tcp,
14695 tcp->tcp_linger_tid) >= 0) {
14696 tcp_stop_lingering(tcp);
14697 freemsg(mp);
14698 mp = NULL;
14699 }
14700 } else {
14701 /*
14702 * We should never get here because
14703 * we have already checked that the
14704 * number of bytes ack'ed should be
14705 * smaller than or equal to what we
14706 * have sent so far (it is the
14707 * acceptability check of the ACK).
14708 * We can only get here if the send
14709 * queue is corrupted.
14710 *
14711 * Terminate the connection and
14712 * panic the system. It is better
14713 * for us to panic instead of
14714 * continuing and risking further disaster.
14715 */
14716 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
14717 tcp->tcp_rnxt, TH_RST|TH_ACK);
14718 panic("Memory corruption "
14719 "detected for connection %s.",
14720 tcp_display(tcp, NULL,
14721 DISP_ADDR_AND_PORT));
14722 /*NOTREACHED*/
14723 }
14724 goto pre_swnd_update;
14725 }
14726 ASSERT(mp2 != tcp->tcp_xmit_tail);
14727 }
14728 if (tcp->tcp_unsent) {
14729 flags |= TH_XMIT_NEEDED;
14730 }
14731 pre_swnd_update:
14732 tcp->tcp_xmit_head = mp1;
14733 swnd_update:
14734 /*
14735 * The following check is different from most other implementations.
14736 * For bi-directional transfer, when segments are dropped, the
14737 * "normal" check will not accept a window update in those
14738 * retransmitted segments. Failing to do that, TCP may send out
14739 * segments which are outside receiver's window. As TCP accepts
14740 * the ack in those retransmitted segments, if the window update in
14741 * the same segment is not accepted, TCP will incorrectly calculate
14742 * that it can send more segments. This can create a deadlock
14743 * with the receiver if its window becomes zero.
14744 */
14745 if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
14746 SEQ_LT(tcp->tcp_swl1, seg_seq) ||
14747 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
14748 /*
14749 * The criteria for an update are:
14750 *
14751 * 1. the segment acknowledges some data. Or
14752 * 2. the segment is new, i.e. it has a higher seq num. Or
14753 * 3. the segment is not old and the advertised window is
14754 * larger than the previous advertised window.
14755 */
14756 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
14757 flags |= TH_XMIT_NEEDED;
14758 tcp->tcp_swnd = new_swnd;
14759 if (new_swnd > tcp->tcp_max_swnd)
14760 tcp->tcp_max_swnd = new_swnd;
14761 tcp->tcp_swl1 = seg_seq;
14762 tcp->tcp_swl2 = seg_ack;
14763 }
14764 est:
14765 if (tcp->tcp_state > TCPS_ESTABLISHED) {
14766
14767 switch (tcp->tcp_state) {
14768 case TCPS_FIN_WAIT_1:
14769 if (tcp->tcp_fin_acked) {
14770 tcp->tcp_state = TCPS_FIN_WAIT_2;
14771 /*
14772 * We implement the non-standard BSD/SunOS
14773 * FIN_WAIT_2 flushing algorithm.
14774 * If there is no user attached to this
14775 * TCP endpoint, then this TCP struct
14776 * could hang around forever in FIN_WAIT_2
14777 * state if the peer forgets to send us
14778 * a FIN. To prevent this, we wait only
14779 * 2*MSL (a convenient time value) for
14780 * the FIN to arrive. If it doesn't show up,
14781 * we flush the TCP endpoint. This algorithm,
14782 * though a violation of RFC-793, has worked
14783 * for over 10 years in BSD systems.
14784 * Note: SunOS 4.x waits 675 seconds before
14785 * flushing the FIN_WAIT_2 connection.
14786 */
14787 TCP_TIMER_RESTART(tcp,
14788 tcps->tcps_fin_wait_2_flush_interval);
14789 }
14790 break;
14791 case TCPS_FIN_WAIT_2:
14792 break; /* Shutdown hook? */
14793 case TCPS_LAST_ACK:
14794 freemsg(mp);
14795 if (tcp->tcp_fin_acked) {
14796 (void) tcp_clean_death(tcp, 0, 19);
14797 return;
14798 }
14799 goto xmit_check;
14800 case TCPS_CLOSING:
14801 if (tcp->tcp_fin_acked) {
14802 tcp->tcp_state = TCPS_TIME_WAIT;
14803 /*
14804 * Unconditionally clear the exclusive binding
14805 * bit so this TIME-WAIT connection won't
14806 * interfere with new ones.
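 *
 * An illustration (ours, not the original authors'): if a
 * server that bound its port with the exclusive-bind socket
 * option is restarted while this connection sits in
 * TIME-WAIT, a still-set tcp_exclbind would let the old
 * endpoint block the new bind for up to 2*MSL. Clearing it
 * confines exclusive-bind semantics to the connection's
 * active lifetime.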
14807 */ 14808 tcp->tcp_exclbind = 0; 14809 if (!TCP_IS_DETACHED(tcp)) { 14810 TCP_TIMER_RESTART(tcp, 14811 tcps->tcps_time_wait_interval); 14812 } else { 14813 tcp_time_wait_append(tcp); 14814 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14815 } 14816 } 14817 /*FALLTHRU*/ 14818 case TCPS_CLOSE_WAIT: 14819 freemsg(mp); 14820 goto xmit_check; 14821 default: 14822 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 14823 break; 14824 } 14825 } 14826 if (flags & TH_FIN) { 14827 /* Make sure we ack the fin */ 14828 flags |= TH_ACK_NEEDED; 14829 if (!tcp->tcp_fin_rcvd) { 14830 tcp->tcp_fin_rcvd = B_TRUE; 14831 tcp->tcp_rnxt++; 14832 tcph = tcp->tcp_tcph; 14833 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack); 14834 14835 /* 14836 * Generate the ordrel_ind at the end unless we 14837 * are an eager guy. 14838 * In the eager case tcp_rsrv will do this when run 14839 * after tcp_accept is done. 14840 */ 14841 if (tcp->tcp_listener == NULL && 14842 !TCP_IS_DETACHED(tcp) && (!tcp->tcp_hard_binding)) 14843 flags |= TH_ORDREL_NEEDED; 14844 switch (tcp->tcp_state) { 14845 case TCPS_SYN_RCVD: 14846 case TCPS_ESTABLISHED: 14847 tcp->tcp_state = TCPS_CLOSE_WAIT; 14848 /* Keepalive? */ 14849 break; 14850 case TCPS_FIN_WAIT_1: 14851 if (!tcp->tcp_fin_acked) { 14852 tcp->tcp_state = TCPS_CLOSING; 14853 break; 14854 } 14855 /* FALLTHRU */ 14856 case TCPS_FIN_WAIT_2: 14857 tcp->tcp_state = TCPS_TIME_WAIT; 14858 /* 14859 * Unconditionally clear the exclusive binding 14860 * bit so this TIME-WAIT connection won't 14861 * interfere with new ones. 14862 */ 14863 tcp->tcp_exclbind = 0; 14864 if (!TCP_IS_DETACHED(tcp)) { 14865 TCP_TIMER_RESTART(tcp, 14866 tcps->tcps_time_wait_interval); 14867 } else { 14868 tcp_time_wait_append(tcp); 14869 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14870 } 14871 if (seg_len) { 14872 /* 14873 * implies data piggybacked on FIN. 14874 * break to handle data. 14875 */ 14876 break; 14877 } 14878 freemsg(mp); 14879 goto ack_check; 14880 } 14881 } 14882 } 14883 if (mp == NULL) 14884 goto xmit_check; 14885 if (seg_len == 0) { 14886 freemsg(mp); 14887 goto xmit_check; 14888 } 14889 if (mp->b_rptr == mp->b_wptr) { 14890 /* 14891 * The header has been consumed, so we remove the 14892 * zero-length mblk here. 14893 */ 14894 mp1 = mp; 14895 mp = mp->b_cont; 14896 freeb(mp1); 14897 } 14898 tcph = tcp->tcp_tcph; 14899 tcp->tcp_rack_cnt++; 14900 { 14901 uint32_t cur_max; 14902 14903 cur_max = tcp->tcp_rack_cur_max; 14904 if (tcp->tcp_rack_cnt >= cur_max) { 14905 /* 14906 * We have more unacked data than we should - send 14907 * an ACK now. 14908 */ 14909 flags |= TH_ACK_NEEDED; 14910 cur_max++; 14911 if (cur_max > tcp->tcp_rack_abs_max) 14912 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 14913 else 14914 tcp->tcp_rack_cur_max = cur_max; 14915 } else if (TCP_IS_DETACHED(tcp)) { 14916 /* We don't have an ACK timer for detached TCP. */ 14917 flags |= TH_ACK_NEEDED; 14918 } else if (seg_len < mss) { 14919 /* 14920 * If we get a segment that is less than an mss, and we 14921 * already have unacknowledged data, and the amount 14922 * unacknowledged is not a multiple of mss, then we 14923 * better generate an ACK now. Otherwise, this may be 14924 * the tail piece of a transaction, and we would rather 14925 * wait for the response. 
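 *
 * A worked example, assuming mss = 1460: if 2000 bytes are
 * already unacknowledged when this short segment arrives,
 * udif % mss = 540, so we ACK at once; the odd remainder
 * hints that the sender flushed its buffer mid-stream and is
 * now waiting. If the unacknowledged amount is 0 or an exact
 * multiple of 1460, we arm the delayed-ACK timer instead,
 * giving a request/response application the chance to
 * piggyback our ACK on its reply.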
14926 */
14927 uint32_t udif;
14928 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
14929 (uintptr_t)INT_MAX);
14930 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
14931 if (udif && (udif % mss))
14932 flags |= TH_ACK_NEEDED;
14933 else
14934 flags |= TH_ACK_TIMER_NEEDED;
14935 } else {
14936 /* Start delayed ack timer */
14937 flags |= TH_ACK_TIMER_NEEDED;
14938 }
14939 }
14940 tcp->tcp_rnxt += seg_len;
14941 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack);
14942
14943 /* Update SACK list */
14944 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
14945 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
14946 &(tcp->tcp_num_sack_blk));
14947 }
14948
14949 if (tcp->tcp_urp_mp) {
14950 tcp->tcp_urp_mp->b_cont = mp;
14951 mp = tcp->tcp_urp_mp;
14952 tcp->tcp_urp_mp = NULL;
14953 /* Ready for a new signal. */
14954 tcp->tcp_urp_last_valid = B_FALSE;
14955 #ifdef DEBUG
14956 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14957 "tcp_rput: sending exdata_ind %s",
14958 tcp_display(tcp, NULL, DISP_PORT_ONLY));
14959 #endif /* DEBUG */
14960 }
14961
14962 /*
14963 * Check for ancillary data changes compared to last segment.
14964 */
14965 if (tcp->tcp_ipv6_recvancillary != 0) {
14966 mp = tcp_rput_add_ancillary(tcp, mp, &ipp);
14967 if (mp == NULL)
14968 return;
14969 }
14970
14971 if (tcp->tcp_listener || tcp->tcp_hard_binding) {
14972 /*
14973 * Side queue inbound data until the accept happens.
14974 * tcp_accept/tcp_rput drains this when the accept happens.
14975 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
14976 * T_EXDATA_IND) it is queued on b_next.
14977 * XXX Make urgent data use this. Requires:
14978 * Removing tcp_listener check for TH_URG
14979 * Making M_PCPROTO and MARK messages skip the eager case
14980 */
14981
14982 if (tcp->tcp_kssl_pending) {
14983 DTRACE_PROBE1(kssl_mblk__ksslinput_pending,
14984 mblk_t *, mp);
14985 tcp_kssl_input(tcp, mp);
14986 } else {
14987 tcp_rcv_enqueue(tcp, mp, seg_len);
14988 }
14989 } else {
14990 if (mp->b_datap->db_type != M_DATA ||
14991 (flags & TH_MARKNEXT_NEEDED)) {
14992 if (tcp->tcp_rcv_list != NULL) {
14993 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp);
14994 }
14995 ASSERT(tcp->tcp_rcv_list == NULL ||
14996 tcp->tcp_fused_sigurg);
14997 if (flags & TH_MARKNEXT_NEEDED) {
14998 #ifdef DEBUG
14999 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
15000 "tcp_rput: sending MSGMARKNEXT %s",
15001 tcp_display(tcp, NULL,
15002 DISP_PORT_ONLY));
15003 #endif /* DEBUG */
15004 mp->b_flag |= MSGMARKNEXT;
15005 flags &= ~TH_MARKNEXT_NEEDED;
15006 }
15007
15008 /* Does this need SSL processing first? */
15009 if ((tcp->tcp_kssl_ctx != NULL) &&
15010 (DB_TYPE(mp) == M_DATA)) {
15011 DTRACE_PROBE1(kssl_mblk__ksslinput_data1,
15012 mblk_t *, mp);
15013 tcp_kssl_input(tcp, mp);
15014 } else {
15015 putnext(tcp->tcp_rq, mp);
15016 if (!canputnext(tcp->tcp_rq))
15017 tcp->tcp_rwnd -= seg_len;
15018 }
15019 } else if ((flags & (TH_PUSH|TH_FIN)) ||
15020 tcp->tcp_rcv_cnt + seg_len >= tcp->tcp_rq->q_hiwat >> 3) {
15021 if (tcp->tcp_rcv_list != NULL) {
15022 /*
15023 * Enqueue the new segment first and then
15024 * call tcp_rcv_drain() to send all data
15025 * up. The other way to do this is to
15026 * send all queued data up and then call
15027 * putnext() to send the new segment up.
15028 * Doing it that way would let us remove
15029 * the else part later on.
15030 *
15031 * We don't do this to avoid one more call to
15032 * canputnext(), as tcp_rcv_drain() needs to
15033 * call canputnext().
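 *
 * In sketch form (illustrative only, not a drop-in
 * alternative):
 *
 *   current:     tcp_rcv_enqueue(mp); tcp_rcv_drain(q);
 *                - flow control checked once, inside the drain
 *   alternative: tcp_rcv_drain(q); putnext(q, mp);
 *                - would need its own canputnext(q) first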
15034 */ 15035 tcp_rcv_enqueue(tcp, mp, seg_len); 15036 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15037 } else { 15038 /* Does this need SSL processing first? */ 15039 if ((tcp->tcp_kssl_ctx != NULL) && 15040 (DB_TYPE(mp) == M_DATA)) { 15041 DTRACE_PROBE1( 15042 kssl_mblk__ksslinput_data2, 15043 mblk_t *, mp); 15044 tcp_kssl_input(tcp, mp); 15045 } else { 15046 putnext(tcp->tcp_rq, mp); 15047 if (!canputnext(tcp->tcp_rq)) 15048 tcp->tcp_rwnd -= seg_len; 15049 } 15050 } 15051 } else { 15052 /* 15053 * Enqueue all packets when processing an mblk 15054 * from the co queue and also enqueue normal packets. 15055 * For packets which belong to SSL stream do SSL 15056 * processing first. 15057 */ 15058 if ((tcp->tcp_kssl_ctx != NULL) && 15059 (DB_TYPE(mp) == M_DATA)) { 15060 DTRACE_PROBE1(kssl_mblk__tcpksslin3, 15061 mblk_t *, mp); 15062 tcp_kssl_input(tcp, mp); 15063 } else { 15064 tcp_rcv_enqueue(tcp, mp, seg_len); 15065 } 15066 } 15067 /* 15068 * Make sure the timer is running if we have data waiting 15069 * for a push bit. This provides resiliency against 15070 * implementations that do not correctly generate push bits. 15071 */ 15072 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 15073 /* 15074 * The connection may be closed at this point, so don't 15075 * do anything for a detached tcp. 15076 */ 15077 if (!TCP_IS_DETACHED(tcp)) 15078 tcp->tcp_push_tid = TCP_TIMER(tcp, 15079 tcp_push_timer, 15080 MSEC_TO_TICK( 15081 tcps->tcps_push_timer_interval)); 15082 } 15083 } 15084 xmit_check: 15085 /* Is there anything left to do? */ 15086 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15087 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 15088 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 15089 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15090 goto done; 15091 15092 /* Any transmit work to do and a non-zero window? */ 15093 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 15094 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 15095 if (flags & TH_REXMIT_NEEDED) { 15096 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 15097 15098 BUMP_MIB(&tcps->tcps_mib, tcpOutFastRetrans); 15099 if (snd_size > mss) 15100 snd_size = mss; 15101 if (snd_size > tcp->tcp_swnd) 15102 snd_size = tcp->tcp_swnd; 15103 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 15104 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 15105 B_TRUE); 15106 15107 if (mp1 != NULL) { 15108 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15109 tcp->tcp_csuna = tcp->tcp_snxt; 15110 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 15111 UPDATE_MIB(&tcps->tcps_mib, 15112 tcpRetransBytes, snd_size); 15113 TCP_RECORD_TRACE(tcp, mp1, 15114 TCP_TRACE_SEND_PKT); 15115 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15116 } 15117 } 15118 if (flags & TH_NEED_SACK_REXMIT) { 15119 tcp_sack_rxmit(tcp, &flags); 15120 } 15121 /* 15122 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 15123 * out new segment. Note that tcp_rexmit should not be 15124 * set, otherwise TH_LIMIT_XMIT should not be set. 15125 */ 15126 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 15127 if (!tcp->tcp_rexmit) { 15128 tcp_wput_data(tcp, NULL, B_FALSE); 15129 } else { 15130 tcp_ss_rexmit(tcp); 15131 } 15132 } 15133 /* 15134 * Adjust tcp_cwnd back to normal value after sending 15135 * new data segments. 15136 */ 15137 if (flags & TH_LIMIT_XMIT) { 15138 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 15139 /* 15140 * This will restart the timer. 
Restarting the 15141 * timer is used to avoid a timeout before the 15142 * limited transmitted segment's ACK gets back. 15143 */ 15144 if (tcp->tcp_xmit_head != NULL) 15145 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15146 } 15147 15148 /* Anything more to do? */ 15149 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 15150 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15151 goto done; 15152 } 15153 ack_check: 15154 if (flags & TH_SEND_URP_MARK) { 15155 ASSERT(tcp->tcp_urp_mark_mp); 15156 /* 15157 * Send up any queued data and then send the mark message 15158 */ 15159 if (tcp->tcp_rcv_list != NULL) { 15160 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15161 } 15162 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15163 15164 mp1 = tcp->tcp_urp_mark_mp; 15165 tcp->tcp_urp_mark_mp = NULL; 15166 #ifdef DEBUG 15167 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 15168 "tcp_rput: sending zero-length %s %s", 15169 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 15170 "MSGNOTMARKNEXT"), 15171 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 15172 #endif /* DEBUG */ 15173 putnext(tcp->tcp_rq, mp1); 15174 flags &= ~TH_SEND_URP_MARK; 15175 } 15176 if (flags & TH_ACK_NEEDED) { 15177 /* 15178 * Time to send an ack for some reason. 15179 */ 15180 mp1 = tcp_ack_mp(tcp); 15181 15182 if (mp1 != NULL) { 15183 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 15184 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15185 BUMP_LOCAL(tcp->tcp_obsegs); 15186 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 15187 } 15188 if (tcp->tcp_ack_tid != 0) { 15189 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 15190 tcp->tcp_ack_tid = 0; 15191 } 15192 } 15193 if (flags & TH_ACK_TIMER_NEEDED) { 15194 /* 15195 * Arrange for deferred ACK or push wait timeout. 15196 * Start timer if it is not already running. 15197 */ 15198 if (tcp->tcp_ack_tid == 0) { 15199 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 15200 MSEC_TO_TICK(tcp->tcp_localnet ? 15201 (clock_t)tcps->tcps_local_dack_interval : 15202 (clock_t)tcps->tcps_deferred_ack_interval)); 15203 } 15204 } 15205 if (flags & TH_ORDREL_NEEDED) { 15206 /* 15207 * Send up the ordrel_ind unless we are an eager guy. 15208 * In the eager case tcp_rsrv will do this when run 15209 * after tcp_accept is done. 15210 */ 15211 ASSERT(tcp->tcp_listener == NULL); 15212 if (tcp->tcp_rcv_list != NULL) { 15213 /* 15214 * Push any mblk(s) enqueued from co processing. 15215 */ 15216 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15217 } 15218 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15219 if ((mp1 = mi_tpi_ordrel_ind()) != NULL) { 15220 tcp->tcp_ordrel_done = B_TRUE; 15221 putnext(tcp->tcp_rq, mp1); 15222 if (tcp->tcp_deferred_clean_death) { 15223 /* 15224 * tcp_clean_death was deferred 15225 * for T_ORDREL_IND - do it now 15226 */ 15227 (void) tcp_clean_death(tcp, 15228 tcp->tcp_client_errno, 20); 15229 tcp->tcp_deferred_clean_death = B_FALSE; 15230 } 15231 } else { 15232 /* 15233 * Run the orderly release in the 15234 * service routine. 15235 */ 15236 qenable(tcp->tcp_rq); 15237 /* 15238 * Caveat(XXX): The machine may be so 15239 * overloaded that tcp_rsrv() is not scheduled 15240 * until after the endpoint has transitioned 15241 * to TCPS_TIME_WAIT 15242 * and tcp_time_wait_interval expires. Then 15243 * tcp_timer() will blow away state in tcp_t 15244 * and T_ORDREL_IND will never be delivered 15245 * upstream. Unlikely but potentially 15246 * a problem. 15247 */ 15248 } 15249 } 15250 done: 15251 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15252 } 15253 15254 /* 15255 * This function does PAWS protection check. 
Returns B_TRUE if the
15256 * segment passes the PAWS test, else returns B_FALSE.
15257 */
15258 boolean_t
15259 tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp)
15260 {
15261 uint8_t flags;
15262 int options;
15263 uint8_t *up;
15264
15265 flags = (unsigned int)tcph->th_flags[0] & 0xFF;
15266 /*
15267 * If timestamp option is aligned nicely, get values inline,
15268 * otherwise call general routine to parse. Only do that
15269 * if timestamp is the only option.
15270 */
15271 if (TCP_HDR_LENGTH(tcph) == (uint32_t)TCP_MIN_HEADER_LENGTH +
15272 TCPOPT_REAL_TS_LEN &&
15273 OK_32PTR((up = ((uint8_t *)tcph) +
15274 TCP_MIN_HEADER_LENGTH)) &&
15275 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
15276 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
15277 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));
15278
15279 options = TCP_OPT_TSTAMP_PRESENT;
15280 } else {
15281 if (tcp->tcp_snd_sack_ok) {
15282 tcpoptp->tcp = tcp;
15283 } else {
15284 tcpoptp->tcp = NULL;
15285 }
15286 options = tcp_parse_options(tcph, tcpoptp);
15287 }
15288
15289 if (options & TCP_OPT_TSTAMP_PRESENT) {
15290 /*
15291 * Do PAWS per RFC 1323 section 4.2. Accept RST
15292 * regardless of the timestamp, page 18 RFC 1323.bis.
15293 */
15294 if ((flags & TH_RST) == 0 &&
15295 TSTMP_LT(tcpoptp->tcp_opt_ts_val,
15296 tcp->tcp_ts_recent)) {
15297 if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt +
15298 PAWS_TIMEOUT)) {
15299 /* This segment is not acceptable. */
15300 return (B_FALSE);
15301 } else {
15302 /*
15303 * Connection has been idle for
15304 * too long. Reset the timestamp
15305 * and assume the segment is valid.
15306 */
15307 tcp->tcp_ts_recent =
15308 tcpoptp->tcp_opt_ts_val;
15309 }
15310 }
15311 } else {
15312 /*
15313 * If we don't get a timestamp on every packet, we
15314 * figure we can't really trust 'em, so we stop sending
15315 * and parsing them.
15316 */
15317 tcp->tcp_snd_ts_ok = B_FALSE;
15318
15319 tcp->tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15320 tcp->tcp_tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15321 tcp->tcp_tcph->th_offset_and_rsrvd[0] -= (3 << 4);
15322 /*
15323 * Adjust the tcp_mss accordingly. We also need to
15324 * adjust tcp_cwnd here in accordance with the new mss.
15325 * But we avoid doing a slow start here so as not to
15326 * lose the transfer rate built up so far.
15327 */
15328 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN, B_FALSE);
15329 if (tcp->tcp_snd_sack_ok) {
15330 ASSERT(tcp->tcp_sack_info != NULL);
15331 tcp->tcp_max_sack_blk = 4;
15332 }
15333 }
15334 return (B_TRUE);
15335 }
15336
15337 /*
15338 * Attach ancillary data to received TCP segments for the
15339 * ancillary pieces requested by the application that are
15340 * different than they were in the previous data segment.
15341 *
15342 * Save the "current" values once memory allocation is ok so that
15343 * when memory allocation fails we can just wait for the next data segment.
15344 */
15345 static mblk_t *
15346 tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp)
15347 {
15348 struct T_optdata_ind *todi;
15349 int optlen;
15350 uchar_t *optptr;
15351 struct T_opthdr *toh;
15352 uint_t addflag; /* Which pieces to add */
15353 mblk_t *mp1;
15354
15355 optlen = 0;
15356 addflag = 0;
15357 /* If app asked for pktinfo and the index has changed ...
*/ 15358 if ((ipp->ipp_fields & IPPF_IFINDEX) && 15359 ipp->ipp_ifindex != tcp->tcp_recvifindex && 15360 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO)) { 15361 optlen += sizeof (struct T_opthdr) + 15362 sizeof (struct in6_pktinfo); 15363 addflag |= TCP_IPV6_RECVPKTINFO; 15364 } 15365 /* If app asked for hoplimit and it has changed ... */ 15366 if ((ipp->ipp_fields & IPPF_HOPLIMIT) && 15367 ipp->ipp_hoplimit != tcp->tcp_recvhops && 15368 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPLIMIT)) { 15369 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15370 addflag |= TCP_IPV6_RECVHOPLIMIT; 15371 } 15372 /* If app asked for tclass and it has changed ... */ 15373 if ((ipp->ipp_fields & IPPF_TCLASS) && 15374 ipp->ipp_tclass != tcp->tcp_recvtclass && 15375 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS)) { 15376 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15377 addflag |= TCP_IPV6_RECVTCLASS; 15378 } 15379 /* 15380 * If app asked for hopbyhop headers and it has changed ... 15381 * For security labels, note that (1) security labels can't change on 15382 * a connected socket at all, (2) we're connected to at most one peer, 15383 * (3) if anything changes, then it must be some other extra option. 15384 */ 15385 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) && 15386 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 15387 (ipp->ipp_fields & IPPF_HOPOPTS), 15388 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 15389 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen - 15390 tcp->tcp_label_len; 15391 addflag |= TCP_IPV6_RECVHOPOPTS; 15392 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 15393 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 15394 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 15395 return (mp); 15396 } 15397 /* If app asked for dst headers before routing headers ... */ 15398 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTDSTOPTS) && 15399 ip_cmpbuf(tcp->tcp_rtdstopts, tcp->tcp_rtdstoptslen, 15400 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15401 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) { 15402 optlen += sizeof (struct T_opthdr) + 15403 ipp->ipp_rtdstoptslen; 15404 addflag |= TCP_IPV6_RECVRTDSTOPTS; 15405 if (!ip_allocbuf((void **)&tcp->tcp_rtdstopts, 15406 &tcp->tcp_rtdstoptslen, (ipp->ipp_fields & IPPF_RTDSTOPTS), 15407 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) 15408 return (mp); 15409 } 15410 /* If app asked for routing headers and it has changed ... */ 15411 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) && 15412 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 15413 (ipp->ipp_fields & IPPF_RTHDR), 15414 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 15415 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 15416 addflag |= TCP_IPV6_RECVRTHDR; 15417 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 15418 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 15419 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 15420 return (mp); 15421 } 15422 /* If app asked for dest headers and it has changed ... 
*/ 15423 if ((tcp->tcp_ipv6_recvancillary & 15424 (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) && 15425 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 15426 (ipp->ipp_fields & IPPF_DSTOPTS), 15427 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 15428 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 15429 addflag |= TCP_IPV6_RECVDSTOPTS; 15430 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 15431 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 15432 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 15433 return (mp); 15434 } 15435 15436 if (optlen == 0) { 15437 /* Nothing to add */ 15438 return (mp); 15439 } 15440 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 15441 if (mp1 == NULL) { 15442 /* 15443 * Defer sending ancillary data until the next TCP segment 15444 * arrives. 15445 */ 15446 return (mp); 15447 } 15448 mp1->b_cont = mp; 15449 mp = mp1; 15450 mp->b_wptr += sizeof (*todi) + optlen; 15451 mp->b_datap->db_type = M_PROTO; 15452 todi = (struct T_optdata_ind *)mp->b_rptr; 15453 todi->PRIM_type = T_OPTDATA_IND; 15454 todi->DATA_flag = 1; /* MORE data */ 15455 todi->OPT_length = optlen; 15456 todi->OPT_offset = sizeof (*todi); 15457 optptr = (uchar_t *)&todi[1]; 15458 /* 15459 * If app asked for pktinfo and the index has changed ... 15460 * Note that the local address never changes for the connection. 15461 */ 15462 if (addflag & TCP_IPV6_RECVPKTINFO) { 15463 struct in6_pktinfo *pkti; 15464 15465 toh = (struct T_opthdr *)optptr; 15466 toh->level = IPPROTO_IPV6; 15467 toh->name = IPV6_PKTINFO; 15468 toh->len = sizeof (*toh) + sizeof (*pkti); 15469 toh->status = 0; 15470 optptr += sizeof (*toh); 15471 pkti = (struct in6_pktinfo *)optptr; 15472 if (tcp->tcp_ipversion == IPV6_VERSION) 15473 pkti->ipi6_addr = tcp->tcp_ip6h->ip6_src; 15474 else 15475 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 15476 &pkti->ipi6_addr); 15477 pkti->ipi6_ifindex = ipp->ipp_ifindex; 15478 optptr += sizeof (*pkti); 15479 ASSERT(OK_32PTR(optptr)); 15480 /* Save as "last" value */ 15481 tcp->tcp_recvifindex = ipp->ipp_ifindex; 15482 } 15483 /* If app asked for hoplimit and it has changed ... */ 15484 if (addflag & TCP_IPV6_RECVHOPLIMIT) { 15485 toh = (struct T_opthdr *)optptr; 15486 toh->level = IPPROTO_IPV6; 15487 toh->name = IPV6_HOPLIMIT; 15488 toh->len = sizeof (*toh) + sizeof (uint_t); 15489 toh->status = 0; 15490 optptr += sizeof (*toh); 15491 *(uint_t *)optptr = ipp->ipp_hoplimit; 15492 optptr += sizeof (uint_t); 15493 ASSERT(OK_32PTR(optptr)); 15494 /* Save as "last" value */ 15495 tcp->tcp_recvhops = ipp->ipp_hoplimit; 15496 } 15497 /* If app asked for tclass and it has changed ... 
*/ 15498 if (addflag & TCP_IPV6_RECVTCLASS) { 15499 toh = (struct T_opthdr *)optptr; 15500 toh->level = IPPROTO_IPV6; 15501 toh->name = IPV6_TCLASS; 15502 toh->len = sizeof (*toh) + sizeof (uint_t); 15503 toh->status = 0; 15504 optptr += sizeof (*toh); 15505 *(uint_t *)optptr = ipp->ipp_tclass; 15506 optptr += sizeof (uint_t); 15507 ASSERT(OK_32PTR(optptr)); 15508 /* Save as "last" value */ 15509 tcp->tcp_recvtclass = ipp->ipp_tclass; 15510 } 15511 if (addflag & TCP_IPV6_RECVHOPOPTS) { 15512 toh = (struct T_opthdr *)optptr; 15513 toh->level = IPPROTO_IPV6; 15514 toh->name = IPV6_HOPOPTS; 15515 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen - 15516 tcp->tcp_label_len; 15517 toh->status = 0; 15518 optptr += sizeof (*toh); 15519 bcopy((uchar_t *)ipp->ipp_hopopts + tcp->tcp_label_len, optptr, 15520 ipp->ipp_hopoptslen - tcp->tcp_label_len); 15521 optptr += ipp->ipp_hopoptslen - tcp->tcp_label_len; 15522 ASSERT(OK_32PTR(optptr)); 15523 /* Save as last value */ 15524 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 15525 (ipp->ipp_fields & IPPF_HOPOPTS), 15526 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 15527 } 15528 if (addflag & TCP_IPV6_RECVRTDSTOPTS) { 15529 toh = (struct T_opthdr *)optptr; 15530 toh->level = IPPROTO_IPV6; 15531 toh->name = IPV6_RTHDRDSTOPTS; 15532 toh->len = sizeof (*toh) + ipp->ipp_rtdstoptslen; 15533 toh->status = 0; 15534 optptr += sizeof (*toh); 15535 bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen); 15536 optptr += ipp->ipp_rtdstoptslen; 15537 ASSERT(OK_32PTR(optptr)); 15538 /* Save as last value */ 15539 ip_savebuf((void **)&tcp->tcp_rtdstopts, 15540 &tcp->tcp_rtdstoptslen, 15541 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15542 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen); 15543 } 15544 if (addflag & TCP_IPV6_RECVRTHDR) { 15545 toh = (struct T_opthdr *)optptr; 15546 toh->level = IPPROTO_IPV6; 15547 toh->name = IPV6_RTHDR; 15548 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 15549 toh->status = 0; 15550 optptr += sizeof (*toh); 15551 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 15552 optptr += ipp->ipp_rthdrlen; 15553 ASSERT(OK_32PTR(optptr)); 15554 /* Save as last value */ 15555 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 15556 (ipp->ipp_fields & IPPF_RTHDR), 15557 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 15558 } 15559 if (addflag & (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) { 15560 toh = (struct T_opthdr *)optptr; 15561 toh->level = IPPROTO_IPV6; 15562 toh->name = IPV6_DSTOPTS; 15563 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 15564 toh->status = 0; 15565 optptr += sizeof (*toh); 15566 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 15567 optptr += ipp->ipp_dstoptslen; 15568 ASSERT(OK_32PTR(optptr)); 15569 /* Save as last value */ 15570 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 15571 (ipp->ipp_fields & IPPF_DSTOPTS), 15572 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 15573 } 15574 ASSERT(optptr == mp->b_wptr); 15575 return (mp); 15576 } 15577 15578 15579 /* 15580 * Handle a *T_BIND_REQ that has failed either due to a T_ERROR_ACK 15581 * or a "bad" IRE detected by tcp_adapt_ire. 15582 * We can't tell if the failure was due to the laddr or the faddr 15583 * thus we clear out all addresses and ports. 
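 *
 * For example (as the T_BIND_ACK handling in tcp_rput_other()
 * shows), a connect() whose IRE lookup fails in
 * tcp_adapt_ire() arrives here with ENETUNREACH once the
 * state has reached SYN_SENT, and with EADDRNOTAVAIL before
 * that; either way the mblk is reused and sent upstream as a
 * T_ERROR_ACK carrying TLI_error TSYSERR and the errno in
 * UNIX_error.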
15584 */ 15585 static void 15586 tcp_bind_failed(tcp_t *tcp, mblk_t *mp, int error) 15587 { 15588 queue_t *q = tcp->tcp_rq; 15589 tcph_t *tcph; 15590 struct T_error_ack *tea; 15591 conn_t *connp = tcp->tcp_connp; 15592 15593 15594 ASSERT(mp->b_datap->db_type == M_PCPROTO); 15595 15596 if (mp->b_cont) { 15597 freemsg(mp->b_cont); 15598 mp->b_cont = NULL; 15599 } 15600 tea = (struct T_error_ack *)mp->b_rptr; 15601 switch (tea->PRIM_type) { 15602 case T_BIND_ACK: 15603 /* 15604 * Need to unbind with classifier since we were just told that 15605 * our bind succeeded. 15606 */ 15607 tcp->tcp_hard_bound = B_FALSE; 15608 tcp->tcp_hard_binding = B_FALSE; 15609 15610 ipcl_hash_remove(connp); 15611 /* Reuse the mblk if possible */ 15612 ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >= 15613 sizeof (*tea)); 15614 mp->b_rptr = mp->b_datap->db_base; 15615 mp->b_wptr = mp->b_rptr + sizeof (*tea); 15616 tea = (struct T_error_ack *)mp->b_rptr; 15617 tea->PRIM_type = T_ERROR_ACK; 15618 tea->TLI_error = TSYSERR; 15619 tea->UNIX_error = error; 15620 if (tcp->tcp_state >= TCPS_SYN_SENT) { 15621 tea->ERROR_prim = T_CONN_REQ; 15622 } else { 15623 tea->ERROR_prim = O_T_BIND_REQ; 15624 } 15625 break; 15626 15627 case T_ERROR_ACK: 15628 if (tcp->tcp_state >= TCPS_SYN_SENT) 15629 tea->ERROR_prim = T_CONN_REQ; 15630 break; 15631 default: 15632 panic("tcp_bind_failed: unexpected TPI type"); 15633 /*NOTREACHED*/ 15634 } 15635 15636 tcp->tcp_state = TCPS_IDLE; 15637 if (tcp->tcp_ipversion == IPV4_VERSION) 15638 tcp->tcp_ipha->ipha_src = 0; 15639 else 15640 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 15641 /* 15642 * Copy of the src addr. in tcp_t is needed since 15643 * the lookup funcs. can only look at tcp_t 15644 */ 15645 V6_SET_ZERO(tcp->tcp_ip_src_v6); 15646 15647 tcph = tcp->tcp_tcph; 15648 tcph->th_lport[0] = 0; 15649 tcph->th_lport[1] = 0; 15650 tcp_bind_hash_remove(tcp); 15651 bzero(&connp->u_port, sizeof (connp->u_port)); 15652 /* blow away saved option results if any */ 15653 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 15654 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 15655 15656 conn_delete_ire(tcp->tcp_connp, NULL); 15657 putnext(q, mp); 15658 } 15659 15660 /* 15661 * tcp_rput_other is called by tcp_rput to handle everything other than M_DATA 15662 * messages. 15663 */ 15664 void 15665 tcp_rput_other(tcp_t *tcp, mblk_t *mp) 15666 { 15667 mblk_t *mp1; 15668 uchar_t *rptr = mp->b_rptr; 15669 queue_t *q = tcp->tcp_rq; 15670 struct T_error_ack *tea; 15671 uint32_t mss; 15672 mblk_t *syn_mp; 15673 mblk_t *mdti; 15674 mblk_t *lsoi; 15675 int retval; 15676 mblk_t *ire_mp; 15677 tcp_stack_t *tcps = tcp->tcp_tcps; 15678 15679 switch (mp->b_datap->db_type) { 15680 case M_PROTO: 15681 case M_PCPROTO: 15682 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 15683 if ((mp->b_wptr - rptr) < sizeof (t_scalar_t)) 15684 break; 15685 tea = (struct T_error_ack *)rptr; 15686 switch (tea->PRIM_type) { 15687 case T_BIND_ACK: 15688 /* 15689 * Adapt Multidata information, if any. The 15690 * following tcp_mdt_update routine will free 15691 * the message. 15692 */ 15693 if ((mdti = tcp_mdt_info_mp(mp)) != NULL) { 15694 tcp_mdt_update(tcp, &((ip_mdt_info_t *)mdti-> 15695 b_rptr)->mdt_capab, B_TRUE); 15696 freemsg(mdti); 15697 } 15698 15699 /* 15700 * Check to update LSO information with tcp, and 15701 * tcp_lso_update routine will free the message. 
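 *
 * (General LSO background, not specific to this code path:
 * with large send offload the stack can pass one oversized
 * buffer down and let the NIC segment it into MSS-sized
 * frames, so a 64 KB send becomes roughly 44 wire segments
 * at an MSS of 1460 while traversing the stack only once.)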
15702 */ 15703 if ((lsoi = tcp_lso_info_mp(mp)) != NULL) { 15704 tcp_lso_update(tcp, &((ip_lso_info_t *)lsoi-> 15705 b_rptr)->lso_capab); 15706 freemsg(lsoi); 15707 } 15708 15709 /* Get the IRE, if we had requested for it */ 15710 ire_mp = tcp_ire_mp(mp); 15711 15712 if (tcp->tcp_hard_binding) { 15713 tcp->tcp_hard_binding = B_FALSE; 15714 tcp->tcp_hard_bound = B_TRUE; 15715 CL_INET_CONNECT(tcp); 15716 } else { 15717 if (ire_mp != NULL) 15718 freeb(ire_mp); 15719 goto after_syn_sent; 15720 } 15721 15722 retval = tcp_adapt_ire(tcp, ire_mp); 15723 if (ire_mp != NULL) 15724 freeb(ire_mp); 15725 if (retval == 0) { 15726 tcp_bind_failed(tcp, mp, 15727 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15728 ENETUNREACH : EADDRNOTAVAIL)); 15729 return; 15730 } 15731 /* 15732 * Don't let an endpoint connect to itself. 15733 * Also checked in tcp_connect() but that 15734 * check can't handle the case when the 15735 * local IP address is INADDR_ANY. 15736 */ 15737 if (tcp->tcp_ipversion == IPV4_VERSION) { 15738 if ((tcp->tcp_ipha->ipha_dst == 15739 tcp->tcp_ipha->ipha_src) && 15740 (BE16_EQL(tcp->tcp_tcph->th_lport, 15741 tcp->tcp_tcph->th_fport))) { 15742 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15743 return; 15744 } 15745 } else { 15746 if (IN6_ARE_ADDR_EQUAL( 15747 &tcp->tcp_ip6h->ip6_dst, 15748 &tcp->tcp_ip6h->ip6_src) && 15749 (BE16_EQL(tcp->tcp_tcph->th_lport, 15750 tcp->tcp_tcph->th_fport))) { 15751 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15752 return; 15753 } 15754 } 15755 ASSERT(tcp->tcp_state == TCPS_SYN_SENT); 15756 /* 15757 * This should not be possible! Just for 15758 * defensive coding... 15759 */ 15760 if (tcp->tcp_state != TCPS_SYN_SENT) 15761 goto after_syn_sent; 15762 15763 if (is_system_labeled() && 15764 !tcp_update_label(tcp, CONN_CRED(tcp->tcp_connp))) { 15765 tcp_bind_failed(tcp, mp, EHOSTUNREACH); 15766 return; 15767 } 15768 15769 ASSERT(q == tcp->tcp_rq); 15770 /* 15771 * tcp_adapt_ire() does not adjust 15772 * for TCP/IP header length. 15773 */ 15774 mss = tcp->tcp_mss - tcp->tcp_hdr_len; 15775 15776 /* 15777 * Just make sure our rwnd is at 15778 * least tcp_recv_hiwat_mss * MSS 15779 * large, and round up to the nearest 15780 * MSS. 15781 * 15782 * We do the round up here because 15783 * we need to get the interface 15784 * MTU first before we can do the 15785 * round up. 15786 */ 15787 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss), 15788 tcps->tcps_recv_hiwat_minmss * mss); 15789 q->q_hiwat = tcp->tcp_rwnd; 15790 tcp_set_ws_value(tcp); 15791 U32_TO_ABE16((tcp->tcp_rwnd >> tcp->tcp_rcv_ws), 15792 tcp->tcp_tcph->th_win); 15793 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always) 15794 tcp->tcp_snd_ws_ok = B_TRUE; 15795 15796 /* 15797 * Set tcp_snd_ts_ok to true 15798 * so that tcp_xmit_mp will 15799 * include the timestamp 15800 * option in the SYN segment. 15801 */ 15802 if (tcps->tcps_tstamp_always || 15803 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) { 15804 tcp->tcp_snd_ts_ok = B_TRUE; 15805 } 15806 15807 /* 15808 * tcp_snd_sack_ok can be set in 15809 * tcp_adapt_ire() if the sack metric 15810 * is set. So check it here also. 15811 */ 15812 if (tcps->tcps_sack_permitted == 2 || 15813 tcp->tcp_snd_sack_ok) { 15814 if (tcp->tcp_sack_info == NULL) { 15815 tcp->tcp_sack_info = 15816 kmem_cache_alloc( 15817 tcp_sack_info_cache, 15818 KM_SLEEP); 15819 } 15820 tcp->tcp_snd_sack_ok = B_TRUE; 15821 } 15822 15823 /* 15824 * Should we use ECN? Note that the current 15825 * default value (SunOS 5.9) of tcp_ecn_permitted 15826 * is 1. 
The reason for doing this is that there
15827 * is equipment out there that will drop
15828 * ECN-enabled IP packets. Setting it to 1 avoids
15829 * compatibility problems.
15830 */
15831 if (tcps->tcps_ecn_permitted == 2)
15832 tcp->tcp_ecn_ok = B_TRUE;
15833
15834 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15835 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
15836 tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
15837 if (syn_mp) {
15838 cred_t *cr;
15839 pid_t pid;
15840
15841 /*
15842 * Obtain the credential from the
15843 * thread calling connect(); the credential
15844 * lives on in the second mblk which
15845 * originated from T_CONN_REQ and is echoed
15846 * with the T_BIND_ACK from ip. If none
15847 * can be found, default to the creator
15848 * of the socket.
15849 */
15850 if (mp->b_cont == NULL ||
15851 (cr = DB_CRED(mp->b_cont)) == NULL) {
15852 cr = tcp->tcp_cred;
15853 pid = tcp->tcp_cpid;
15854 } else {
15855 pid = DB_CPID(mp->b_cont);
15856 }
15857
15858 TCP_RECORD_TRACE(tcp, syn_mp,
15859 TCP_TRACE_SEND_PKT);
15860 mblk_setcred(syn_mp, cr);
15861 DB_CPID(syn_mp) = pid;
15862 tcp_send_data(tcp, tcp->tcp_wq, syn_mp);
15863 }
15864 after_syn_sent:
15865 /*
15866 * A trailer mblk indicates a waiting client upstream.
15867 * Here we complete the processing begun in
15868 * either tcp_bind() or tcp_connect() by passing
15869 * upstream the reply message they supplied.
15870 */
15871 mp1 = mp;
15872 mp = mp->b_cont;
15873 freeb(mp1);
15874 if (mp)
15875 break;
15876 return;
15877 case T_ERROR_ACK:
15878 if (tcp->tcp_debug) {
15879 (void) strlog(TCP_MOD_ID, 0, 1,
15880 SL_TRACE|SL_ERROR,
15881 "tcp_rput_other: case T_ERROR_ACK, "
15882 "ERROR_prim == %d",
15883 tea->ERROR_prim);
15884 }
15885 switch (tea->ERROR_prim) {
15886 case O_T_BIND_REQ:
15887 case T_BIND_REQ:
15888 tcp_bind_failed(tcp, mp,
15889 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ?
15890 ENETUNREACH : EADDRNOTAVAIL));
15891 return;
15892 case T_UNBIND_REQ:
15893 tcp->tcp_hard_binding = B_FALSE;
15894 tcp->tcp_hard_bound = B_FALSE;
15895 if (mp->b_cont) {
15896 freemsg(mp->b_cont);
15897 mp->b_cont = NULL;
15898 }
15899 if (tcp->tcp_unbind_pending)
15900 tcp->tcp_unbind_pending = 0;
15901 else {
15902 /* From tcp_ip_unbind() - free */
15903 freemsg(mp);
15904 return;
15905 }
15906 break;
15907 case T_SVR4_OPTMGMT_REQ:
15908 if (tcp->tcp_drop_opt_ack_cnt > 0) {
15909 /* T_OPTMGMT_REQ generated by TCP */
15910 printf("T_SVR4_OPTMGMT_REQ failed "
15911 "%d/%d - dropped (cnt %d)\n",
15912 tea->TLI_error, tea->UNIX_error,
15913 tcp->tcp_drop_opt_ack_cnt);
15914 freemsg(mp);
15915 tcp->tcp_drop_opt_ack_cnt--;
15916 return;
15917 }
15918 break;
15919 }
15920 if (tea->ERROR_prim == T_SVR4_OPTMGMT_REQ &&
15921 tcp->tcp_drop_opt_ack_cnt > 0) {
15922 printf("T_SVR4_OPTMGMT_REQ failed %d/%d "
15923 "- dropped (cnt %d)\n",
15924 tea->TLI_error, tea->UNIX_error,
15925 tcp->tcp_drop_opt_ack_cnt);
15926 freemsg(mp);
15927 tcp->tcp_drop_opt_ack_cnt--;
15928 return;
15929 }
15930 break;
15931 case T_OPTMGMT_ACK:
15932 if (tcp->tcp_drop_opt_ack_cnt > 0) {
15933 /* T_OPTMGMT_REQ generated by TCP */
15934 freemsg(mp);
15935 tcp->tcp_drop_opt_ack_cnt--;
15936 return;
15937 }
15938 break;
15939 default:
15940 break;
15941 }
15942 break;
15943 case M_FLUSH:
15944 if (*rptr & FLUSHR)
15945 flushq(q, FLUSHDATA);
15946 break;
15947 default:
15948 /* M_CTL will be directly sent to tcp_icmp_error() */
15949 ASSERT(DB_TYPE(mp) != M_CTL);
15950 break;
15951 }
15952 /*
15953 * Make sure we set this bit before sending the ACK for
15954 * bind.
Otherwise accept could possibly run and free 15955 * this tcp struct. 15956 */ 15957 putnext(q, mp); 15958 } 15959 15960 /* 15961 * Called as the result of a qbufcall or a qtimeout to remedy a failure 15962 * to allocate a T_ordrel_ind in tcp_rsrv(). qenable(q) will make 15963 * tcp_rsrv() try again. 15964 */ 15965 static void 15966 tcp_ordrel_kick(void *arg) 15967 { 15968 conn_t *connp = (conn_t *)arg; 15969 tcp_t *tcp = connp->conn_tcp; 15970 15971 tcp->tcp_ordrelid = 0; 15972 tcp->tcp_timeout = B_FALSE; 15973 if (!TCP_IS_DETACHED(tcp) && tcp->tcp_rq != NULL && 15974 tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 15975 qenable(tcp->tcp_rq); 15976 } 15977 } 15978 15979 /* ARGSUSED */ 15980 static void 15981 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2) 15982 { 15983 conn_t *connp = (conn_t *)arg; 15984 tcp_t *tcp = connp->conn_tcp; 15985 queue_t *q = tcp->tcp_rq; 15986 uint_t thwin; 15987 tcp_stack_t *tcps = tcp->tcp_tcps; 15988 15989 freeb(mp); 15990 15991 TCP_STAT(tcps, tcp_rsrv_calls); 15992 15993 if (TCP_IS_DETACHED(tcp) || q == NULL) { 15994 return; 15995 } 15996 15997 if (tcp->tcp_fused) { 15998 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 15999 16000 ASSERT(tcp->tcp_fused); 16001 ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused); 16002 ASSERT(peer_tcp->tcp_loopback_peer == tcp); 16003 ASSERT(!TCP_IS_DETACHED(tcp)); 16004 ASSERT(tcp->tcp_connp->conn_sqp == 16005 peer_tcp->tcp_connp->conn_sqp); 16006 16007 /* 16008 * Normally we would not get backenabled in synchronous 16009 * streams mode, but in case this happens, we need to plug 16010 * synchronous streams during our drain to prevent a race 16011 * with tcp_fuse_rrw() or tcp_fuse_rinfop(). 16012 */ 16013 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 16014 if (tcp->tcp_rcv_list != NULL) 16015 (void) tcp_rcv_drain(tcp->tcp_rq, tcp); 16016 16017 if (peer_tcp > tcp) { 16018 mutex_enter(&peer_tcp->tcp_non_sq_lock); 16019 mutex_enter(&tcp->tcp_non_sq_lock); 16020 } else { 16021 mutex_enter(&tcp->tcp_non_sq_lock); 16022 mutex_enter(&peer_tcp->tcp_non_sq_lock); 16023 } 16024 16025 if (peer_tcp->tcp_flow_stopped && 16026 (TCP_UNSENT_BYTES(peer_tcp) <= 16027 peer_tcp->tcp_xmit_lowater)) { 16028 tcp_clrqfull(peer_tcp); 16029 } 16030 mutex_exit(&peer_tcp->tcp_non_sq_lock); 16031 mutex_exit(&tcp->tcp_non_sq_lock); 16032 16033 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 16034 TCP_STAT(tcps, tcp_fusion_backenabled); 16035 return; 16036 } 16037 16038 if (canputnext(q)) { 16039 tcp->tcp_rwnd = q->q_hiwat; 16040 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 16041 << tcp->tcp_rcv_ws; 16042 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 16043 /* 16044 * Send back a window update immediately if TCP is above 16045 * ESTABLISHED state and the increase of the rcv window 16046 * that the other side knows is at least 1 MSS after flow 16047 * control is lifted. 16048 */ 16049 if (tcp->tcp_state >= TCPS_ESTABLISHED && 16050 (q->q_hiwat - thwin >= tcp->tcp_mss)) { 16051 tcp_xmit_ctl(NULL, tcp, 16052 (tcp->tcp_swnd == 0) ? 
tcp->tcp_suna :
16053 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
16054 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
16055 }
16056 }
16057 /* Handle a failure to allocate a T_ORDREL_IND here */
16058 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
16059 ASSERT(tcp->tcp_listener == NULL);
16060 if (tcp->tcp_rcv_list != NULL) {
16061 (void) tcp_rcv_drain(q, tcp);
16062 }
16063 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
16064 mp = mi_tpi_ordrel_ind();
16065 if (mp) {
16066 tcp->tcp_ordrel_done = B_TRUE;
16067 putnext(q, mp);
16068 if (tcp->tcp_deferred_clean_death) {
16069 /*
16070 * tcp_clean_death was deferred for
16071 * T_ORDREL_IND - do it now
16072 */
16073 tcp->tcp_deferred_clean_death = B_FALSE;
16074 (void) tcp_clean_death(tcp,
16075 tcp->tcp_client_errno, 22);
16076 }
16077 } else if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16078 /*
16079 * If there isn't already a timer running
16080 * start one. Use a 4 second
16081 * timer as a fallback since it can't fail.
16082 */
16083 tcp->tcp_timeout = B_TRUE;
16084 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16085 MSEC_TO_TICK(4000));
16086 }
16087 }
16088 }
16089
16090 /*
16091 * The read side service routine is called mostly when we get back-enabled as a
16092 * result of flow control relief. Since we don't actually queue anything in
16093 * TCP, we have no data to send out of here. What we do is clear the receive
16094 * window, and send out a window update.
16095 * This routine is also called to drive an orderly release message upstream
16096 * if the attempt in tcp_rput failed.
16097 */
16098 static void
16099 tcp_rsrv(queue_t *q)
16100 {
16101 conn_t *connp = Q_TO_CONN(q);
16102 tcp_t *tcp = connp->conn_tcp;
16103 mblk_t *mp;
16104 tcp_stack_t *tcps = tcp->tcp_tcps;
16105
16106 /* No code does a putq on the read side */
16107 ASSERT(q->q_first == NULL);
16108
16109 /* Nothing to do for the default queue */
16110 if (q == tcps->tcps_g_q) {
16111 return;
16112 }
16113
16114 mp = allocb(0, BPRI_HI);
16115 if (mp == NULL) {
16116 /*
16117 * We are under memory pressure. Return for now and
16118 * we will be called again later.
16119 */
16120 if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16121 /*
16122 * If there isn't already a timer running
16123 * start one. Use a 4 second
16124 * timer as a fallback since it can't fail.
16125 */
16126 tcp->tcp_timeout = B_TRUE;
16127 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16128 MSEC_TO_TICK(4000));
16129 }
16130 return;
16131 }
16132 CONN_INC_REF(connp);
16133 squeue_enter(connp->conn_sqp, mp, tcp_rsrv_input, connp,
16134 SQTAG_TCP_RSRV);
16135 }
16136
16137 /*
16138 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
16139 * We do not allow the receive window to shrink. After setting rwnd,
16140 * set the flow control hiwat of the stream.
16141 *
16142 * This function is called in 2 cases:
16143 *
16144 * 1) Before data transfer begins, in tcp_accept_comm() for accepting a
16145 * connection (passive open) and in tcp_rput_data() for active connect.
16146 * This is called after tcp_mss_set() when the desired MSS value is known.
16147 * This makes sure that our window size is a multiple of the other side's
16148 * MSS.
16149 * 2) Handling SO_RCVBUF option.
16150 *
16151 * It is ASSUMED that the requested size is a multiple of the current MSS.
16152 *
16153 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
16154 * user requests so.
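 *
 * A worked example, assuming mss = 1460 and the default
 * minimum of 4 segments: a request for a 2920-byte window is
 * raised to MAX(2920, 4 * 1460) = 5840 bytes. A later
 * SO_RCVBUF setting that would shrink an established
 * connection's window is instead rounded back up from the
 * old maximum, and a request beyond TCP_MAXWIN << tcp_rcv_ws
 * is clipped down to the largest advertisable multiple of
 * the MSS.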
16155 */ 16156 static int 16157 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd) 16158 { 16159 uint32_t mss = tcp->tcp_mss; 16160 uint32_t old_max_rwnd; 16161 uint32_t max_transmittable_rwnd; 16162 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 16163 tcp_stack_t *tcps = tcp->tcp_tcps; 16164 16165 if (tcp->tcp_fused) { 16166 size_t sth_hiwat; 16167 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 16168 16169 ASSERT(peer_tcp != NULL); 16170 /* 16171 * Record the stream head's high water mark for 16172 * this endpoint; this is used for flow-control 16173 * purposes in tcp_fuse_output(). 16174 */ 16175 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd); 16176 if (!tcp_detached) 16177 (void) mi_set_sth_hiwat(tcp->tcp_rq, sth_hiwat); 16178 16179 /* 16180 * In the fusion case, the maxpsz stream head value of 16181 * our peer is set according to its send buffer size 16182 * and our receive buffer size; since the latter may 16183 * have changed we need to update the peer's maxpsz. 16184 */ 16185 (void) tcp_maxpsz_set(peer_tcp, B_TRUE); 16186 return (rwnd); 16187 } 16188 16189 if (tcp_detached) 16190 old_max_rwnd = tcp->tcp_rwnd; 16191 else 16192 old_max_rwnd = tcp->tcp_rq->q_hiwat; 16193 16194 /* 16195 * Insist on a receive window that is at least 16196 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid 16197 * funny TCP interactions of Nagle algorithm, SWS avoidance 16198 * and delayed acknowledgement. 16199 */ 16200 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss); 16201 16202 /* 16203 * If window size info has already been exchanged, TCP should not 16204 * shrink the window. Shrinking window is doable if done carefully. 16205 * We may add that support later. But so far there is not a real 16206 * need to do that. 16207 */ 16208 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) { 16209 /* MSS may have changed, do a round up again. */ 16210 rwnd = MSS_ROUNDUP(old_max_rwnd, mss); 16211 } 16212 16213 /* 16214 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check 16215 * can be applied even before the window scale option is decided. 16216 */ 16217 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws; 16218 if (rwnd > max_transmittable_rwnd) { 16219 rwnd = max_transmittable_rwnd - 16220 (max_transmittable_rwnd % mss); 16221 if (rwnd < mss) 16222 rwnd = max_transmittable_rwnd; 16223 /* 16224 * If we're over the limit we may have to back down tcp_rwnd. 16225 * The increment below won't work for us. So we set all three 16226 * here and the increment below will have no effect. 16227 */ 16228 tcp->tcp_rwnd = old_max_rwnd = rwnd; 16229 } 16230 if (tcp->tcp_localnet) { 16231 tcp->tcp_rack_abs_max = 16232 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2); 16233 } else { 16234 /* 16235 * For a remote host on a different subnet (through a router), 16236 * we ack every other packet to be conforming to RFC1122. 16237 * tcp_deferred_acks_max is default to 2. 16238 */ 16239 tcp->tcp_rack_abs_max = 16240 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2); 16241 } 16242 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max) 16243 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 16244 else 16245 tcp->tcp_rack_cur_max = 0; 16246 /* 16247 * Increment the current rwnd by the amount the maximum grew (we 16248 * can not overwrite it since we might be in the middle of a 16249 * connection.) 
16250 */ 16251 tcp->tcp_rwnd += rwnd - old_max_rwnd; 16252 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 16253 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max) 16254 tcp->tcp_cwnd_max = rwnd; 16255 16256 if (tcp_detached) 16257 return (rwnd); 16258 /* 16259 * We set the maximum receive window into rq->q_hiwat. 16260 * This is not actually used for flow control. 16261 */ 16262 tcp->tcp_rq->q_hiwat = rwnd; 16263 /* 16264 * Set the Stream head high water mark. This doesn't have to be 16265 * here, since we are simply using default values, but we would 16266 * prefer to choose these values algorithmically, with a likely 16267 * relationship to rwnd. 16268 */ 16269 (void) mi_set_sth_hiwat(tcp->tcp_rq, 16270 MAX(rwnd, tcps->tcps_sth_rcv_hiwat)); 16271 return (rwnd); 16272 } 16273 16274 /* 16275 * Return SNMP stuff in buffer in mpdata. 16276 */ 16277 mblk_t * 16278 tcp_snmp_get(queue_t *q, mblk_t *mpctl) 16279 { 16280 mblk_t *mpdata; 16281 mblk_t *mp_conn_ctl = NULL; 16282 mblk_t *mp_conn_tail; 16283 mblk_t *mp_attr_ctl = NULL; 16284 mblk_t *mp_attr_tail; 16285 mblk_t *mp6_conn_ctl = NULL; 16286 mblk_t *mp6_conn_tail; 16287 mblk_t *mp6_attr_ctl = NULL; 16288 mblk_t *mp6_attr_tail; 16289 struct opthdr *optp; 16290 mib2_tcpConnEntry_t tce; 16291 mib2_tcp6ConnEntry_t tce6; 16292 mib2_transportMLPEntry_t mlp; 16293 connf_t *connfp; 16294 int i; 16295 boolean_t ispriv; 16296 zoneid_t zoneid; 16297 int v4_conn_idx; 16298 int v6_conn_idx; 16299 conn_t *connp = Q_TO_CONN(q); 16300 tcp_stack_t *tcps; 16301 ip_stack_t *ipst; 16302 mblk_t *mp2ctl; 16303 16304 /* 16305 * make a copy of the original message 16306 */ 16307 mp2ctl = copymsg(mpctl); 16308 16309 if (mpctl == NULL || 16310 (mpdata = mpctl->b_cont) == NULL || 16311 (mp_conn_ctl = copymsg(mpctl)) == NULL || 16312 (mp_attr_ctl = copymsg(mpctl)) == NULL || 16313 (mp6_conn_ctl = copymsg(mpctl)) == NULL || 16314 (mp6_attr_ctl = copymsg(mpctl)) == NULL) { 16315 freemsg(mp_conn_ctl); 16316 freemsg(mp_attr_ctl); 16317 freemsg(mp6_conn_ctl); 16318 freemsg(mp6_attr_ctl); 16319 freemsg(mpctl); 16320 freemsg(mp2ctl); 16321 return (NULL); 16322 } 16323 16324 ipst = connp->conn_netstack->netstack_ip; 16325 tcps = connp->conn_netstack->netstack_tcp; 16326 16327 /* build table of connections -- need count in fixed part */ 16328 SET_MIB(tcps->tcps_mib.tcpRtoAlgorithm, 4); /* vanj */ 16329 SET_MIB(tcps->tcps_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min); 16330 SET_MIB(tcps->tcps_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max); 16331 SET_MIB(tcps->tcps_mib.tcpMaxConn, -1); 16332 SET_MIB(tcps->tcps_mib.tcpCurrEstab, 0); 16333 16334 ispriv = 16335 secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0; 16336 zoneid = Q_TO_CONN(q)->conn_zoneid; 16337 16338 v4_conn_idx = v6_conn_idx = 0; 16339 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL; 16340 16341 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16342 ipst = tcps->tcps_netstack->netstack_ip; 16343 16344 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16345 16346 connp = NULL; 16347 16348 while ((connp = 16349 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16350 tcp_t *tcp; 16351 boolean_t needattr; 16352 16353 if (connp->conn_zoneid != zoneid) 16354 continue; /* not in this zone */ 16355 16356 tcp = connp->conn_tcp; 16357 UPDATE_MIB(&tcps->tcps_mib, 16358 tcpHCInSegs, tcp->tcp_ibsegs); 16359 tcp->tcp_ibsegs = 0; 16360 UPDATE_MIB(&tcps->tcps_mib, 16361 tcpHCOutSegs, tcp->tcp_obsegs); 16362 tcp->tcp_obsegs = 0; 16363 16364 tce6.tcp6ConnState = tce.tcpConnState = 
16365 tcp_snmp_state(tcp); 16366 if (tce.tcpConnState == MIB2_TCP_established || 16367 tce.tcpConnState == MIB2_TCP_closeWait) 16368 BUMP_MIB(&tcps->tcps_mib, tcpCurrEstab); 16369 16370 needattr = B_FALSE; 16371 bzero(&mlp, sizeof (mlp)); 16372 if (connp->conn_mlp_type != mlptSingle) { 16373 if (connp->conn_mlp_type == mlptShared || 16374 connp->conn_mlp_type == mlptBoth) 16375 mlp.tme_flags |= MIB2_TMEF_SHARED; 16376 if (connp->conn_mlp_type == mlptPrivate || 16377 connp->conn_mlp_type == mlptBoth) 16378 mlp.tme_flags |= MIB2_TMEF_PRIVATE; 16379 needattr = B_TRUE; 16380 } 16381 if (connp->conn_peercred != NULL) { 16382 ts_label_t *tsl; 16383 16384 tsl = crgetlabel(connp->conn_peercred); 16385 mlp.tme_doi = label2doi(tsl); 16386 mlp.tme_label = *label2bslabel(tsl); 16387 needattr = B_TRUE; 16388 } 16389 16390 /* Create a message to report on IPv6 entries */ 16391 if (tcp->tcp_ipversion == IPV6_VERSION) { 16392 tce6.tcp6ConnLocalAddress = tcp->tcp_ip_src_v6; 16393 tce6.tcp6ConnRemAddress = tcp->tcp_remote_v6; 16394 tce6.tcp6ConnLocalPort = ntohs(tcp->tcp_lport); 16395 tce6.tcp6ConnRemPort = ntohs(tcp->tcp_fport); 16396 tce6.tcp6ConnIfIndex = tcp->tcp_bound_if; 16397 /* Don't want just anybody seeing these... */ 16398 if (ispriv) { 16399 tce6.tcp6ConnEntryInfo.ce_snxt = 16400 tcp->tcp_snxt; 16401 tce6.tcp6ConnEntryInfo.ce_suna = 16402 tcp->tcp_suna; 16403 tce6.tcp6ConnEntryInfo.ce_rnxt = 16404 tcp->tcp_rnxt; 16405 tce6.tcp6ConnEntryInfo.ce_rack = 16406 tcp->tcp_rack; 16407 } else { 16408 /* 16409 * Netstat, unfortunately, uses this to 16410 * get send/receive queue sizes. How to fix? 16411 * Why not compute the difference only? 16412 */ 16413 tce6.tcp6ConnEntryInfo.ce_snxt = 16414 tcp->tcp_snxt - tcp->tcp_suna; 16415 tce6.tcp6ConnEntryInfo.ce_suna = 0; 16416 tce6.tcp6ConnEntryInfo.ce_rnxt = 16417 tcp->tcp_rnxt - tcp->tcp_rack; 16418 tce6.tcp6ConnEntryInfo.ce_rack = 0; 16419 } 16420 16421 tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16422 tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16423 tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto; 16424 tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss; 16425 tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state; 16426 16427 tce6.tcp6ConnCreationProcess = 16428 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16429 tcp->tcp_cpid; 16430 tce6.tcp6ConnCreationTime = tcp->tcp_open_time; 16431 16432 (void) snmp_append_data2(mp6_conn_ctl->b_cont, 16433 &mp6_conn_tail, (char *)&tce6, sizeof (tce6)); 16434 16435 mlp.tme_connidx = v6_conn_idx++; 16436 if (needattr) 16437 (void) snmp_append_data2(mp6_attr_ctl->b_cont, 16438 &mp6_attr_tail, (char *)&mlp, sizeof (mlp)); 16439 } 16440 /* 16441 * Create an IPv4 table entry for IPv4 entries and also 16442 * for IPv6 entries which are bound to in6addr_any 16443 * but don't have IPV6_V6ONLY set. 16444 * (i.e. anything an IPv4 peer could connect to) 16445 */ 16446 if (tcp->tcp_ipversion == IPV4_VERSION || 16447 (tcp->tcp_state <= TCPS_LISTEN && 16448 !tcp->tcp_connp->conn_ipv6_v6only && 16449 IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip_src_v6))) { 16450 if (tcp->tcp_ipversion == IPV6_VERSION) { 16451 tce.tcpConnRemAddress = INADDR_ANY; 16452 tce.tcpConnLocalAddress = INADDR_ANY; 16453 } else { 16454 tce.tcpConnRemAddress = 16455 tcp->tcp_remote; 16456 tce.tcpConnLocalAddress = 16457 tcp->tcp_ip_src; 16458 } 16459 tce.tcpConnLocalPort = ntohs(tcp->tcp_lport); 16460 tce.tcpConnRemPort = ntohs(tcp->tcp_fport); 16461 /* Don't want just anybody seeing these... 
*/ 16462 if (ispriv) { 16463 tce.tcpConnEntryInfo.ce_snxt = 16464 tcp->tcp_snxt; 16465 tce.tcpConnEntryInfo.ce_suna = 16466 tcp->tcp_suna; 16467 tce.tcpConnEntryInfo.ce_rnxt = 16468 tcp->tcp_rnxt; 16469 tce.tcpConnEntryInfo.ce_rack = 16470 tcp->tcp_rack; 16471 } else { 16472 /* 16473 * Netstat, unfortunately, uses this to 16474 * get send/receive queue sizes. How 16475 * to fix? 16476 * Why not compute the difference only? 16477 */ 16478 tce.tcpConnEntryInfo.ce_snxt = 16479 tcp->tcp_snxt - tcp->tcp_suna; 16480 tce.tcpConnEntryInfo.ce_suna = 0; 16481 tce.tcpConnEntryInfo.ce_rnxt = 16482 tcp->tcp_rnxt - tcp->tcp_rack; 16483 tce.tcpConnEntryInfo.ce_rack = 0; 16484 } 16485 16486 tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16487 tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16488 tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto; 16489 tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss; 16490 tce.tcpConnEntryInfo.ce_state = 16491 tcp->tcp_state; 16492 16493 tce.tcpConnCreationProcess = 16494 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16495 tcp->tcp_cpid; 16496 tce.tcpConnCreationTime = tcp->tcp_open_time; 16497 16498 (void) snmp_append_data2(mp_conn_ctl->b_cont, 16499 &mp_conn_tail, (char *)&tce, sizeof (tce)); 16500 16501 mlp.tme_connidx = v4_conn_idx++; 16502 if (needattr) 16503 (void) snmp_append_data2( 16504 mp_attr_ctl->b_cont, 16505 &mp_attr_tail, (char *)&mlp, 16506 sizeof (mlp)); 16507 } 16508 } 16509 } 16510 16511 /* fixed length structure for IPv4 and IPv6 counters */ 16512 SET_MIB(tcps->tcps_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t)); 16513 SET_MIB(tcps->tcps_mib.tcp6ConnTableSize, 16514 sizeof (mib2_tcp6ConnEntry_t)); 16515 /* synchronize 32- and 64-bit counters */ 16516 SYNC32_MIB(&tcps->tcps_mib, tcpInSegs, tcpHCInSegs); 16517 SYNC32_MIB(&tcps->tcps_mib, tcpOutSegs, tcpHCOutSegs); 16518 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 16519 optp->level = MIB2_TCP; 16520 optp->name = 0; 16521 (void) snmp_append_data(mpdata, (char *)&tcps->tcps_mib, 16522 sizeof (tcps->tcps_mib)); 16523 optp->len = msgdsize(mpdata); 16524 qreply(q, mpctl); 16525 16526 /* table of connections... */ 16527 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[ 16528 sizeof (struct T_optmgmt_ack)]; 16529 optp->level = MIB2_TCP; 16530 optp->name = MIB2_TCP_CONN; 16531 optp->len = msgdsize(mp_conn_ctl->b_cont); 16532 qreply(q, mp_conn_ctl); 16533 16534 /* table of MLP attributes... */ 16535 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[ 16536 sizeof (struct T_optmgmt_ack)]; 16537 optp->level = MIB2_TCP; 16538 optp->name = EXPER_XPORT_MLP; 16539 optp->len = msgdsize(mp_attr_ctl->b_cont); 16540 if (optp->len == 0) 16541 freemsg(mp_attr_ctl); 16542 else 16543 qreply(q, mp_attr_ctl); 16544 16545 /* table of IPv6 connections... */ 16546 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[ 16547 sizeof (struct T_optmgmt_ack)]; 16548 optp->level = MIB2_TCP6; 16549 optp->name = MIB2_TCP6_CONN; 16550 optp->len = msgdsize(mp6_conn_ctl->b_cont); 16551 qreply(q, mp6_conn_ctl); 16552 16553 /* table of IPv6 MLP attributes... 
*/
16554 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
16555 sizeof (struct T_optmgmt_ack)];
16556 optp->level = MIB2_TCP6;
16557 optp->name = EXPER_XPORT_MLP;
16558 optp->len = msgdsize(mp6_attr_ctl->b_cont);
16559 if (optp->len == 0)
16560 freemsg(mp6_attr_ctl);
16561 else
16562 qreply(q, mp6_attr_ctl);
16563 return (mp2ctl);
16564 }
16565
16566 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
16567 /* ARGSUSED */
16568 int
16569 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
16570 {
16571 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;
16572
16573 switch (level) {
16574 case MIB2_TCP:
16575 switch (name) {
16576 case 13:
16577 if (tce->tcpConnState != MIB2_TCP_deleteTCB)
16578 return (0);
16579 /* TODO: delete entry defined by tce */
16580 return (1);
16581 default:
16582 return (0);
16583 }
16584 default:
16585 return (1);
16586 }
16587 }
16588
16589 /* Translate TCP state to MIB2 TCP state. */
16590 static int
16591 tcp_snmp_state(tcp_t *tcp)
16592 {
16593 if (tcp == NULL)
16594 return (0);
16595
16596 switch (tcp->tcp_state) {
16597 case TCPS_CLOSED:
16598 case TCPS_IDLE: /* RFC1213 doesn't have an analogue for IDLE & BOUND */
16599 case TCPS_BOUND:
16600 return (MIB2_TCP_closed);
16601 case TCPS_LISTEN:
16602 return (MIB2_TCP_listen);
16603 case TCPS_SYN_SENT:
16604 return (MIB2_TCP_synSent);
16605 case TCPS_SYN_RCVD:
16606 return (MIB2_TCP_synReceived);
16607 case TCPS_ESTABLISHED:
16608 return (MIB2_TCP_established);
16609 case TCPS_CLOSE_WAIT:
16610 return (MIB2_TCP_closeWait);
16611 case TCPS_FIN_WAIT_1:
16612 return (MIB2_TCP_finWait1);
16613 case TCPS_CLOSING:
16614 return (MIB2_TCP_closing);
16615 case TCPS_LAST_ACK:
16616 return (MIB2_TCP_lastAck);
16617 case TCPS_FIN_WAIT_2:
16618 return (MIB2_TCP_finWait2);
16619 case TCPS_TIME_WAIT:
16620 return (MIB2_TCP_timeWait);
16621 default:
16622 return (0);
16623 }
16624 }
16625
16626 static char tcp_report_header[] =
16627 "TCP " MI_COL_HDRPAD_STR
16628 "zone dest snxt suna "
16629 "swnd rnxt rack rwnd rto mss w sw rw t "
16630 "recent [lport,fport] state";
16631
16632 /*
16633 * TCP status report triggered via the Named Dispatch mechanism.
16634 */
16635 /* ARGSUSED */
16636 static void
16637 tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval, tcp_t *thisstream,
16638 cred_t *cr)
16639 {
16640 char hash[10], addrbuf[INET6_ADDRSTRLEN];
16641 boolean_t ispriv = secpolicy_ip_config(cr, B_TRUE) == 0;
16642 char cflag;
16643 in6_addr_t v6dst;
16644 char buf[80];
16645 uint_t print_len, buf_len;
16646
16647 buf_len = mp->b_datap->db_lim - mp->b_wptr;
16648 if (buf_len <= 0)
16649 return;
16650
16651 if (hashval >= 0)
16652 (void) sprintf(hash, "%03d ", hashval);
16653 else
16654 hash[0] = '\0';
16655
16656 /*
16657 * Note that we use the remote address in the tcp_b structure.
16658 * This means that it will print out the real destination address,
16659 * not the next hop's address if source routing is used. This
16660 * avoids confusion in the output because the user may not
16661 * know that source routing is used for a connection.
16662 */
16663 if (tcp->tcp_ipversion == IPV4_VERSION) {
16664 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &v6dst);
16665 } else {
16666 v6dst = tcp->tcp_remote_v6;
16667 }
16668 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf));
16669 /*
16670 * the ispriv checks are so that normal users cannot determine
16671 * sequence number information using NDD.
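 *
 * Concretely, an unprivileged reader sees 0 in the snxt/suna/rnxt/rack
 * columns printed below, matching tcp_snmp_get() above, which gives
 * such readers only the differences (snxt - suna, rnxt - rack); those
 * are enough for queue-depth accounting but useless for sequence
 * number prediction.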
16672 */ 16673 16674 if (TCP_IS_DETACHED(tcp)) 16675 cflag = '*'; 16676 else 16677 cflag = ' '; 16678 print_len = snprintf((char *)mp->b_wptr, buf_len, 16679 "%s " MI_COL_PTRFMT_STR "%d %s %08x %08x %010d %08x %08x " 16680 "%010d %05ld %05d %1d %02d %02d %1d %08x %s%c\n", 16681 hash, 16682 (void *)tcp, 16683 tcp->tcp_connp->conn_zoneid, 16684 addrbuf, 16685 (ispriv) ? tcp->tcp_snxt : 0, 16686 (ispriv) ? tcp->tcp_suna : 0, 16687 tcp->tcp_swnd, 16688 (ispriv) ? tcp->tcp_rnxt : 0, 16689 (ispriv) ? tcp->tcp_rack : 0, 16690 tcp->tcp_rwnd, 16691 tcp->tcp_rto, 16692 tcp->tcp_mss, 16693 tcp->tcp_snd_ws_ok, 16694 tcp->tcp_snd_ws, 16695 tcp->tcp_rcv_ws, 16696 tcp->tcp_snd_ts_ok, 16697 tcp->tcp_ts_recent, 16698 tcp_display(tcp, buf, DISP_PORT_ONLY), cflag); 16699 if (print_len < buf_len) { 16700 ((mblk_t *)mp)->b_wptr += print_len; 16701 } else { 16702 ((mblk_t *)mp)->b_wptr += buf_len; 16703 } 16704 } 16705 16706 /* 16707 * TCP status report (for listeners only) triggered via the Named Dispatch 16708 * mechanism. 16709 */ 16710 /* ARGSUSED */ 16711 static void 16712 tcp_report_listener(mblk_t *mp, tcp_t *tcp, int hashval) 16713 { 16714 char addrbuf[INET6_ADDRSTRLEN]; 16715 in6_addr_t v6dst; 16716 uint_t print_len, buf_len; 16717 16718 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16719 if (buf_len <= 0) 16720 return; 16721 16722 if (tcp->tcp_ipversion == IPV4_VERSION) { 16723 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, &v6dst); 16724 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16725 } else { 16726 (void) inet_ntop(AF_INET6, &tcp->tcp_ip6h->ip6_src, 16727 addrbuf, sizeof (addrbuf)); 16728 } 16729 print_len = snprintf((char *)mp->b_wptr, buf_len, 16730 "%03d " 16731 MI_COL_PTRFMT_STR 16732 "%d %s %05u %08u %d/%d/%d%c\n", 16733 hashval, (void *)tcp, 16734 tcp->tcp_connp->conn_zoneid, 16735 addrbuf, 16736 (uint_t)BE16_TO_U16(tcp->tcp_tcph->th_lport), 16737 tcp->tcp_conn_req_seqnum, 16738 tcp->tcp_conn_req_cnt_q0, tcp->tcp_conn_req_cnt_q, 16739 tcp->tcp_conn_req_max, 16740 tcp->tcp_syn_defense ? '*' : ' '); 16741 if (print_len < buf_len) { 16742 ((mblk_t *)mp)->b_wptr += print_len; 16743 } else { 16744 ((mblk_t *)mp)->b_wptr += buf_len; 16745 } 16746 } 16747 16748 /* TCP status report triggered via the Named Dispatch mechanism. */ 16749 /* ARGSUSED */ 16750 static int 16751 tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16752 { 16753 tcp_t *tcp; 16754 int i; 16755 conn_t *connp; 16756 connf_t *connfp; 16757 zoneid_t zoneid; 16758 tcp_stack_t *tcps; 16759 ip_stack_t *ipst; 16760 16761 zoneid = Q_TO_CONN(q)->conn_zoneid; 16762 tcps = Q_TO_TCP(q)->tcp_tcps; 16763 16764 /* 16765 * Because of the ndd constraint, at most we can have 64K buffer 16766 * to put in all TCP info. So to be more efficient, just 16767 * allocate a 64K buffer here, assuming we need that large buffer. 16768 * This may be a problem as any user can read tcp_status. Therefore 16769 * we limit the rate of doing this using tcp_ndd_get_info_interval. 16770 * This should be OK as normal users should not do this too often. 16771 */ 16772 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16773 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16774 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16775 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16776 return (0); 16777 } 16778 } 16779 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16780 /* The following may work even if we cannot get a large buf. 
*/ 16781 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16782 return (0); 16783 } 16784 16785 (void) mi_mpprintf(mp, "%s", tcp_report_header); 16786 16787 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16788 16789 ipst = tcps->tcps_netstack->netstack_ip; 16790 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16791 16792 connp = NULL; 16793 16794 while ((connp = 16795 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16796 tcp = connp->conn_tcp; 16797 if (zoneid != GLOBAL_ZONEID && 16798 zoneid != connp->conn_zoneid) 16799 continue; 16800 tcp_report_item(mp->b_cont, tcp, -1, tcp, 16801 cr); 16802 } 16803 16804 } 16805 16806 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16807 return (0); 16808 } 16809 16810 /* TCP status report triggered via the Named Dispatch mechanism. */ 16811 /* ARGSUSED */ 16812 static int 16813 tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16814 { 16815 tf_t *tbf; 16816 tcp_t *tcp; 16817 int i; 16818 zoneid_t zoneid; 16819 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 16820 16821 zoneid = Q_TO_CONN(q)->conn_zoneid; 16822 16823 /* Refer to comments in tcp_status_report(). */ 16824 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16825 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16826 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16827 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16828 return (0); 16829 } 16830 } 16831 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16832 /* The following may work even if we cannot get a large buf. */ 16833 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16834 return (0); 16835 } 16836 16837 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16838 16839 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 16840 tbf = &tcps->tcps_bind_fanout[i]; 16841 mutex_enter(&tbf->tf_lock); 16842 for (tcp = tbf->tf_tcp; tcp != NULL; 16843 tcp = tcp->tcp_bind_hash) { 16844 if (zoneid != GLOBAL_ZONEID && 16845 zoneid != tcp->tcp_connp->conn_zoneid) 16846 continue; 16847 CONN_INC_REF(tcp->tcp_connp); 16848 tcp_report_item(mp->b_cont, tcp, i, 16849 Q_TO_TCP(q), cr); 16850 CONN_DEC_REF(tcp->tcp_connp); 16851 } 16852 mutex_exit(&tbf->tf_lock); 16853 } 16854 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16855 return (0); 16856 } 16857 16858 /* TCP status report triggered via the Named Dispatch mechanism. */ 16859 /* ARGSUSED */ 16860 static int 16861 tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16862 { 16863 connf_t *connfp; 16864 conn_t *connp; 16865 tcp_t *tcp; 16866 int i; 16867 zoneid_t zoneid; 16868 tcp_stack_t *tcps; 16869 ip_stack_t *ipst; 16870 16871 zoneid = Q_TO_CONN(q)->conn_zoneid; 16872 tcps = Q_TO_TCP(q)->tcp_tcps; 16873 16874 /* Refer to comments in tcp_status_report(). */ 16875 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16876 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16877 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16878 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16879 return (0); 16880 } 16881 } 16882 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16883 /* The following may work even if we cannot get a large buf. 
*/ 16884 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16885 return (0); 16886 } 16887 16888 (void) mi_mpprintf(mp, 16889 " TCP " MI_COL_HDRPAD_STR 16890 "zone IP addr port seqnum backlog (q0/q/max)"); 16891 16892 ipst = tcps->tcps_netstack->netstack_ip; 16893 16894 for (i = 0; i < ipst->ips_ipcl_bind_fanout_size; i++) { 16895 connfp = &ipst->ips_ipcl_bind_fanout[i]; 16896 connp = NULL; 16897 while ((connp = 16898 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16899 tcp = connp->conn_tcp; 16900 if (zoneid != GLOBAL_ZONEID && 16901 zoneid != connp->conn_zoneid) 16902 continue; 16903 tcp_report_listener(mp->b_cont, tcp, i); 16904 } 16905 } 16906 16907 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16908 return (0); 16909 } 16910 16911 /* TCP status report triggered via the Named Dispatch mechanism. */ 16912 /* ARGSUSED */ 16913 static int 16914 tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16915 { 16916 connf_t *connfp; 16917 conn_t *connp; 16918 tcp_t *tcp; 16919 int i; 16920 zoneid_t zoneid; 16921 tcp_stack_t *tcps; 16922 ip_stack_t *ipst; 16923 16924 zoneid = Q_TO_CONN(q)->conn_zoneid; 16925 tcps = Q_TO_TCP(q)->tcp_tcps; 16926 ipst = tcps->tcps_netstack->netstack_ip; 16927 16928 /* Refer to comments in tcp_status_report(). */ 16929 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16930 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16931 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16932 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16933 return (0); 16934 } 16935 } 16936 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16937 /* The following may work even if we cannot get a large buf. */ 16938 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16939 return (0); 16940 } 16941 16942 (void) mi_mpprintf(mp, "tcp_conn_hash_size = %d", 16943 ipst->ips_ipcl_conn_fanout_size); 16944 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16945 16946 for (i = 0; i < ipst->ips_ipcl_conn_fanout_size; i++) { 16947 connfp = &ipst->ips_ipcl_conn_fanout[i]; 16948 connp = NULL; 16949 while ((connp = 16950 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16951 tcp = connp->conn_tcp; 16952 if (zoneid != GLOBAL_ZONEID && 16953 zoneid != connp->conn_zoneid) 16954 continue; 16955 tcp_report_item(mp->b_cont, tcp, i, 16956 Q_TO_TCP(q), cr); 16957 } 16958 } 16959 16960 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16961 return (0); 16962 } 16963 16964 /* TCP status report triggered via the Named Dispatch mechanism. */ 16965 /* ARGSUSED */ 16966 static int 16967 tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16968 { 16969 tf_t *tf; 16970 tcp_t *tcp; 16971 int i; 16972 zoneid_t zoneid; 16973 tcp_stack_t *tcps; 16974 16975 zoneid = Q_TO_CONN(q)->conn_zoneid; 16976 tcps = Q_TO_TCP(q)->tcp_tcps; 16977 16978 /* Refer to comments in tcp_status_report(). */ 16979 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16980 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16981 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16982 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16983 return (0); 16984 } 16985 } 16986 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16987 /* The following may work even if we cannot get a large buf. 
*/
16988 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG);
16989 return (0);
16990 }
16991
16992 (void) mi_mpprintf(mp, " %s", tcp_report_header);
16993
16994 for (i = 0; i < TCP_FANOUT_SIZE; i++) {
16995 tf = &tcps->tcps_acceptor_fanout[i];
16996 mutex_enter(&tf->tf_lock);
16997 for (tcp = tf->tf_tcp; tcp != NULL;
16998 tcp = tcp->tcp_acceptor_hash) {
16999 if (zoneid != GLOBAL_ZONEID &&
17000 zoneid != tcp->tcp_connp->conn_zoneid)
17001 continue;
17002 tcp_report_item(mp->b_cont, tcp, i,
17003 Q_TO_TCP(q), cr);
17004 }
17005 mutex_exit(&tf->tf_lock);
17006 }
17007 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt();
17008 return (0);
17009 }
17010
17011 /*
17012 * tcp_timer is the timer service routine. It handles the retransmission,
17013 * FIN_WAIT_2 flush, and zero window probe timeout events. It figures out
17014 * from the state of the tcp instance what kind of action needs to be done
17015 * at the time it is called.
17016 */
17017 static void
17018 tcp_timer(void *arg)
17019 {
17020 mblk_t *mp;
17021 clock_t first_threshold;
17022 clock_t second_threshold;
17023 clock_t ms;
17024 uint32_t mss;
17025 conn_t *connp = (conn_t *)arg;
17026 tcp_t *tcp = connp->conn_tcp;
17027 tcp_stack_t *tcps = tcp->tcp_tcps;
17028
17029 tcp->tcp_timer_tid = 0;
17030
17031 if (tcp->tcp_fused)
17032 return;
17033
17034 first_threshold = tcp->tcp_first_timer_threshold;
17035 second_threshold = tcp->tcp_second_timer_threshold;
17036 switch (tcp->tcp_state) {
17037 case TCPS_IDLE:
17038 case TCPS_BOUND:
17039 case TCPS_LISTEN:
17040 return;
17041 case TCPS_SYN_RCVD: {
17042 tcp_t *listener = tcp->tcp_listener;
17043
17044 if (tcp->tcp_syn_rcvd_timeout == 0 && (listener != NULL)) {
17045 ASSERT(tcp->tcp_rq == listener->tcp_rq);
17046 /* it's our first timeout */
17047 tcp->tcp_syn_rcvd_timeout = 1;
17048 mutex_enter(&listener->tcp_eager_lock);
17049 listener->tcp_syn_rcvd_timeout++;
17050 if (!tcp->tcp_dontdrop && !tcp->tcp_closemp_used) {
17051 /*
17052 * Make this eager available for drop if we
17053 * need to drop one to accommodate a new
17054 * incoming SYN request.
17055 */
17056 MAKE_DROPPABLE(listener, tcp);
17057 }
17058 if (!listener->tcp_syn_defense &&
17059 (listener->tcp_syn_rcvd_timeout >
17060 (tcps->tcps_conn_req_max_q0 >> 2)) &&
17061 (tcps->tcps_conn_req_max_q0 > 200)) {
17062 /* We may be under attack. Put on a defense. */
17063 listener->tcp_syn_defense = B_TRUE;
17064 cmn_err(CE_WARN, "High TCP connect timeout "
17065 "rate! System (port %d) may be under a "
17066 "SYN flood attack!",
17067 BE16_TO_U16(listener->tcp_tcph->th_lport));
17068
17069 listener->tcp_ip_addr_cache = kmem_zalloc(
17070 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t),
17071 KM_NOSLEEP);
17072 }
17073 mutex_exit(&listener->tcp_eager_lock);
17074 } else if (listener != NULL) {
17075 mutex_enter(&listener->tcp_eager_lock);
17076 tcp->tcp_syn_rcvd_timeout++;
17077 if (tcp->tcp_syn_rcvd_timeout > 1 &&
17078 !tcp->tcp_closemp_used) {
17079 /*
17080 * This is our second timeout. Put the tcp in
17081 * the list of droppable eagers to allow it to
17082 * be dropped, if needed. We don't check
17083 * whether tcp_dontdrop is set or not to
17084 * protect ourselves from a SYN attack where a
17085 * remote host can spoof itself as one of the
17086 * good IP sources and continue to hold
17087 * resources too long.
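 *
 * Worked example (illustrative numbers, default-style tunables): with
 * tcps_conn_req_max_q0 = 1024, the defense in the first-timeout branch
 * above engages once more than 1024 >> 2 = 256 eagers on this listener
 * have seen a SYN_RCVD timeout, since 1024 > 200 also holds.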
17088 */ 17089 MAKE_DROPPABLE(listener, tcp); 17090 } 17091 mutex_exit(&listener->tcp_eager_lock); 17092 } 17093 } 17094 /* FALLTHRU */ 17095 case TCPS_SYN_SENT: 17096 first_threshold = tcp->tcp_first_ctimer_threshold; 17097 second_threshold = tcp->tcp_second_ctimer_threshold; 17098 break; 17099 case TCPS_ESTABLISHED: 17100 case TCPS_FIN_WAIT_1: 17101 case TCPS_CLOSING: 17102 case TCPS_CLOSE_WAIT: 17103 case TCPS_LAST_ACK: 17104 /* If we have data to rexmit */ 17105 if (tcp->tcp_suna != tcp->tcp_snxt) { 17106 clock_t time_to_wait; 17107 17108 BUMP_MIB(&tcps->tcps_mib, tcpTimRetrans); 17109 if (!tcp->tcp_xmit_head) 17110 break; 17111 time_to_wait = lbolt - 17112 (clock_t)tcp->tcp_xmit_head->b_prev; 17113 time_to_wait = tcp->tcp_rto - 17114 TICK_TO_MSEC(time_to_wait); 17115 /* 17116 * If the timer fires too early, 1 clock tick earlier, 17117 * restart the timer. 17118 */ 17119 if (time_to_wait > msec_per_tick) { 17120 TCP_STAT(tcps, tcp_timer_fire_early); 17121 TCP_TIMER_RESTART(tcp, time_to_wait); 17122 return; 17123 } 17124 /* 17125 * When we probe zero windows, we force the swnd open. 17126 * If our peer acks with a closed window swnd will be 17127 * set to zero by tcp_rput(). As long as we are 17128 * receiving acks tcp_rput will 17129 * reset 'tcp_ms_we_have_waited' so as not to trip the 17130 * first and second interval actions. NOTE: the timer 17131 * interval is allowed to continue its exponential 17132 * backoff. 17133 */ 17134 if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) { 17135 if (tcp->tcp_debug) { 17136 (void) strlog(TCP_MOD_ID, 0, 1, 17137 SL_TRACE, "tcp_timer: zero win"); 17138 } 17139 } else { 17140 /* 17141 * After retransmission, we need to do 17142 * slow start. Set the ssthresh to one 17143 * half of current effective window and 17144 * cwnd to one MSS. Also reset 17145 * tcp_cwnd_cnt. 17146 * 17147 * Note that if tcp_ssthresh is reduced because 17148 * of ECN, do not reduce it again unless it is 17149 * already one window of data away (tcp_cwr 17150 * should then be cleared) or this is a 17151 * timeout for a retransmitted segment. 17152 */ 17153 uint32_t npkt; 17154 17155 if (!tcp->tcp_cwr || tcp->tcp_rexmit) { 17156 npkt = ((tcp->tcp_timer_backoff ? 17157 tcp->tcp_cwnd_ssthresh : 17158 tcp->tcp_snxt - 17159 tcp->tcp_suna) >> 1) / tcp->tcp_mss; 17160 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 17161 tcp->tcp_mss; 17162 } 17163 tcp->tcp_cwnd = tcp->tcp_mss; 17164 tcp->tcp_cwnd_cnt = 0; 17165 if (tcp->tcp_ecn_ok) { 17166 tcp->tcp_cwr = B_TRUE; 17167 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 17168 tcp->tcp_ecn_cwr_sent = B_FALSE; 17169 } 17170 } 17171 break; 17172 } 17173 /* 17174 * We have something to send yet we cannot send. The 17175 * reason can be: 17176 * 17177 * 1. Zero send window: we need to do zero window probe. 17178 * 2. Zero cwnd: because of ECN, we need to "clock out 17179 * segments. 17180 * 3. SWS avoidance: receiver may have shrunk window, 17181 * reset our knowledge. 17182 * 17183 * Note that condition 2 can happen with either 1 or 17184 * 3. But 1 and 3 are exclusive. 17185 */ 17186 if (tcp->tcp_unsent != 0) { 17187 if (tcp->tcp_cwnd == 0) { 17188 /* 17189 * Set tcp_cwnd to 1 MSS so that a 17190 * new segment can be sent out. We 17191 * are "clocking out" new data when 17192 * the network is really congested. 
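 *
 * (Illustrative: if tcp_cwnd was cut to 0 by an earlier ECN
 * congestion response, each timer firing now releases exactly one
 * mss-sized segment, so the connection trickles data out while the
 * network recovers.)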
17193 */ 17194 ASSERT(tcp->tcp_ecn_ok); 17195 tcp->tcp_cwnd = tcp->tcp_mss; 17196 } 17197 if (tcp->tcp_swnd == 0) { 17198 /* Extend window for zero window probe */ 17199 tcp->tcp_swnd++; 17200 tcp->tcp_zero_win_probe = B_TRUE; 17201 BUMP_MIB(&tcps->tcps_mib, tcpOutWinProbe); 17202 } else { 17203 /* 17204 * Handle timeout from sender SWS avoidance. 17205 * Reset our knowledge of the max send window 17206 * since the receiver might have reduced its 17207 * receive buffer. Avoid setting tcp_max_swnd 17208 * to one since that will essentially disable 17209 * the SWS checks. 17210 * 17211 * Note that since we don't have a SWS 17212 * state variable, if the timeout is set 17213 * for ECN but not for SWS, this 17214 * code will also be executed. This is 17215 * fine as tcp_max_swnd is updated 17216 * constantly and it will not affect 17217 * anything. 17218 */ 17219 tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2); 17220 } 17221 tcp_wput_data(tcp, NULL, B_FALSE); 17222 return; 17223 } 17224 /* Is there a FIN that needs to be to re retransmitted? */ 17225 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 17226 !tcp->tcp_fin_acked) 17227 break; 17228 /* Nothing to do, return without restarting timer. */ 17229 TCP_STAT(tcps, tcp_timer_fire_miss); 17230 return; 17231 case TCPS_FIN_WAIT_2: 17232 /* 17233 * User closed the TCP endpoint and peer ACK'ed our FIN. 17234 * We waited some time for for peer's FIN, but it hasn't 17235 * arrived. We flush the connection now to avoid 17236 * case where the peer has rebooted. 17237 */ 17238 if (TCP_IS_DETACHED(tcp)) { 17239 (void) tcp_clean_death(tcp, 0, 23); 17240 } else { 17241 TCP_TIMER_RESTART(tcp, 17242 tcps->tcps_fin_wait_2_flush_interval); 17243 } 17244 return; 17245 case TCPS_TIME_WAIT: 17246 (void) tcp_clean_death(tcp, 0, 24); 17247 return; 17248 default: 17249 if (tcp->tcp_debug) { 17250 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 17251 "tcp_timer: strange state (%d) %s", 17252 tcp->tcp_state, tcp_display(tcp, NULL, 17253 DISP_PORT_ONLY)); 17254 } 17255 return; 17256 } 17257 if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) { 17258 /* 17259 * For zero window probe, we need to send indefinitely, 17260 * unless we have not heard from the other side for some 17261 * time... 17262 */ 17263 if ((tcp->tcp_zero_win_probe == 0) || 17264 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) > 17265 second_threshold)) { 17266 BUMP_MIB(&tcps->tcps_mib, tcpTimRetransDrop); 17267 /* 17268 * If TCP is in SYN_RCVD state, send back a 17269 * RST|ACK as BSD does. Note that tcp_zero_win_probe 17270 * should be zero in TCPS_SYN_RCVD state. 17271 */ 17272 if (tcp->tcp_state == TCPS_SYN_RCVD) { 17273 tcp_xmit_ctl("tcp_timer: RST sent on timeout " 17274 "in SYN_RCVD", 17275 tcp, tcp->tcp_snxt, 17276 tcp->tcp_rnxt, TH_RST | TH_ACK); 17277 } 17278 (void) tcp_clean_death(tcp, 17279 tcp->tcp_client_errno ? 17280 tcp->tcp_client_errno : ETIMEDOUT, 25); 17281 return; 17282 } else { 17283 /* 17284 * Set tcp_ms_we_have_waited to second_threshold 17285 * so that in next timeout, we will do the above 17286 * check (lbolt - tcp_last_recv_time). This is 17287 * also to avoid overflow. 17288 * 17289 * We don't need to decrement tcp_timer_backoff 17290 * to avoid overflow because it will be decremented 17291 * later if new timeout value is greater than 17292 * tcp_rexmit_interval_max. In the case when 17293 * tcp_rexmit_interval_max is greater than 17294 * second_threshold, it means that we will wait 17295 * longer than second_threshold to send the next 17296 * window probe. 
17297 */ 17298 tcp->tcp_ms_we_have_waited = second_threshold; 17299 } 17300 } else if (ms > first_threshold) { 17301 if (tcp->tcp_snd_zcopy_aware && (!tcp->tcp_xmit_zc_clean) && 17302 tcp->tcp_xmit_head != NULL) { 17303 tcp->tcp_xmit_head = 17304 tcp_zcopy_backoff(tcp, tcp->tcp_xmit_head, 1); 17305 } 17306 /* 17307 * We have been retransmitting for too long... The RTT 17308 * we calculated is probably incorrect. Reinitialize it. 17309 * Need to compensate for 0 tcp_rtt_sa. Reset 17310 * tcp_rtt_update so that we won't accidentally cache a 17311 * bad value. But only do this if this is not a zero 17312 * window probe. 17313 */ 17314 if (tcp->tcp_rtt_sa != 0 && tcp->tcp_zero_win_probe == 0) { 17315 tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) + 17316 (tcp->tcp_rtt_sa >> 5); 17317 tcp->tcp_rtt_sa = 0; 17318 tcp_ip_notify(tcp); 17319 tcp->tcp_rtt_update = 0; 17320 } 17321 } 17322 tcp->tcp_timer_backoff++; 17323 if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 17324 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) < 17325 tcps->tcps_rexmit_interval_min) { 17326 /* 17327 * This means the original RTO is tcp_rexmit_interval_min. 17328 * So we will use tcp_rexmit_interval_min as the RTO value 17329 * and do the backoff. 17330 */ 17331 ms = tcps->tcps_rexmit_interval_min << tcp->tcp_timer_backoff; 17332 } else { 17333 ms <<= tcp->tcp_timer_backoff; 17334 } 17335 if (ms > tcps->tcps_rexmit_interval_max) { 17336 ms = tcps->tcps_rexmit_interval_max; 17337 /* 17338 * ms is at max, decrement tcp_timer_backoff to avoid 17339 * overflow. 17340 */ 17341 tcp->tcp_timer_backoff--; 17342 } 17343 tcp->tcp_ms_we_have_waited += ms; 17344 if (tcp->tcp_zero_win_probe == 0) { 17345 tcp->tcp_rto = ms; 17346 } 17347 TCP_TIMER_RESTART(tcp, ms); 17348 /* 17349 * This is after a timeout and tcp_rto is backed off. Set 17350 * tcp_set_timer to 1 so that next time RTO is updated, we will 17351 * restart the timer with a correct value. 17352 */ 17353 tcp->tcp_set_timer = 1; 17354 mss = tcp->tcp_snxt - tcp->tcp_suna; 17355 if (mss > tcp->tcp_mss) 17356 mss = tcp->tcp_mss; 17357 if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0) 17358 mss = tcp->tcp_swnd; 17359 17360 if ((mp = tcp->tcp_xmit_head) != NULL) 17361 mp->b_prev = (mblk_t *)lbolt; 17362 mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss, 17363 B_TRUE); 17364 17365 /* 17366 * When slow start after retransmission begins, start with 17367 * this seq no. tcp_rexmit_max marks the end of special slow 17368 * start phase. tcp_snd_burst controls how many segments 17369 * can be sent because of an ack. 17370 */ 17371 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 17372 tcp->tcp_snd_burst = TCP_CWND_SS; 17373 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 17374 (tcp->tcp_unsent == 0)) { 17375 tcp->tcp_rexmit_max = tcp->tcp_fss; 17376 } else { 17377 tcp->tcp_rexmit_max = tcp->tcp_snxt; 17378 } 17379 tcp->tcp_rexmit = B_TRUE; 17380 tcp->tcp_dupack_cnt = 0; 17381 17382 /* 17383 * Remove all rexmit SACK blk to start from fresh. 17384 */ 17385 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 17386 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 17387 tcp->tcp_num_notsack_blk = 0; 17388 tcp->tcp_cnt_notsack_list = 0; 17389 } 17390 if (mp == NULL) { 17391 return; 17392 } 17393 /* Attach credentials to retransmitted initial SYNs. 
*/ 17394 if (tcp->tcp_state == TCPS_SYN_SENT) { 17395 mblk_setcred(mp, tcp->tcp_cred); 17396 DB_CPID(mp) = tcp->tcp_cpid; 17397 } 17398 17399 tcp->tcp_csuna = tcp->tcp_snxt; 17400 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 17401 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, mss); 17402 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 17403 tcp_send_data(tcp, tcp->tcp_wq, mp); 17404 17405 } 17406 17407 /* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */ 17408 static void 17409 tcp_unbind(tcp_t *tcp, mblk_t *mp) 17410 { 17411 conn_t *connp; 17412 17413 switch (tcp->tcp_state) { 17414 case TCPS_BOUND: 17415 case TCPS_LISTEN: 17416 break; 17417 default: 17418 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 17419 return; 17420 } 17421 17422 /* 17423 * Need to clean up all the eagers since after the unbind, segments 17424 * will no longer be delivered to this listener stream. 17425 */ 17426 mutex_enter(&tcp->tcp_eager_lock); 17427 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 17428 tcp_eager_cleanup(tcp, 0); 17429 } 17430 mutex_exit(&tcp->tcp_eager_lock); 17431 17432 if (tcp->tcp_ipversion == IPV4_VERSION) { 17433 tcp->tcp_ipha->ipha_src = 0; 17434 } else { 17435 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 17436 } 17437 V6_SET_ZERO(tcp->tcp_ip_src_v6); 17438 bzero(tcp->tcp_tcph->th_lport, sizeof (tcp->tcp_tcph->th_lport)); 17439 tcp_bind_hash_remove(tcp); 17440 tcp->tcp_state = TCPS_IDLE; 17441 tcp->tcp_mdt = B_FALSE; 17442 /* Send M_FLUSH according to TPI */ 17443 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 17444 connp = tcp->tcp_connp; 17445 connp->conn_mdt_ok = B_FALSE; 17446 ipcl_hash_remove(connp); 17447 bzero(&connp->conn_ports, sizeof (connp->conn_ports)); 17448 mp = mi_tpi_ok_ack_alloc(mp); 17449 putnext(tcp->tcp_rq, mp); 17450 } 17451 17452 /* 17453 * Don't let port fall into the privileged range. 17454 * Since the extra privileged ports can be arbitrary we also 17455 * ensure that we exclude those from consideration. 17456 * tcp_g_epriv_ports is not sorted thus we loop over it until 17457 * there are no changes. 17458 * 17459 * Note: No locks are held when inspecting tcp_g_*epriv_ports 17460 * but instead the code relies on: 17461 * - the fact that the address of the array and its size never changes 17462 * - the atomic assignment of the elements of the array 17463 * 17464 * Returns 0 if there are no more ports available. 17465 * 17466 * TS note: skip multilevel ports. 17467 */ 17468 static in_port_t 17469 tcp_update_next_port(in_port_t port, const tcp_t *tcp, boolean_t random) 17470 { 17471 int i; 17472 boolean_t restart = B_FALSE; 17473 tcp_stack_t *tcps = tcp->tcp_tcps; 17474 17475 if (random && tcp_random_anon_port != 0) { 17476 (void) random_get_pseudo_bytes((uint8_t *)&port, 17477 sizeof (in_port_t)); 17478 /* 17479 * Unless changed by a sys admin, the smallest anon port 17480 * is 32768 and the largest anon port is 65535. It is 17481 * very likely (50%) for the random port to be smaller 17482 * than the smallest anon port. When that happens, 17483 * add port % (anon port range) to the smallest anon 17484 * port to get the random port. It should fall into the 17485 * valid anon port range. 
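 *
 * Worked example (default limits): with tcps_smallest_anon_port =
 * 32768 and tcps_largest_anon_port = 65535, a random 16-bit value of
 * 1234 maps to 32768 + 1234 % (65535 - 32768) = 34002, which lies
 * inside the anon range.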
17486 */ 17487 if (port < tcps->tcps_smallest_anon_port) { 17488 port = tcps->tcps_smallest_anon_port + 17489 port % (tcps->tcps_largest_anon_port - 17490 tcps->tcps_smallest_anon_port); 17491 } 17492 } 17493 17494 retry: 17495 if (port < tcps->tcps_smallest_anon_port) 17496 port = (in_port_t)tcps->tcps_smallest_anon_port; 17497 17498 if (port > tcps->tcps_largest_anon_port) { 17499 if (restart) 17500 return (0); 17501 restart = B_TRUE; 17502 port = (in_port_t)tcps->tcps_smallest_anon_port; 17503 } 17504 17505 if (port < tcps->tcps_smallest_nonpriv_port) 17506 port = (in_port_t)tcps->tcps_smallest_nonpriv_port; 17507 17508 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 17509 if (port == tcps->tcps_g_epriv_ports[i]) { 17510 port++; 17511 /* 17512 * Make sure whether the port is in the 17513 * valid range. 17514 */ 17515 goto retry; 17516 } 17517 } 17518 if (is_system_labeled() && 17519 (i = tsol_next_port(crgetzone(tcp->tcp_cred), port, 17520 IPPROTO_TCP, B_TRUE)) != 0) { 17521 port = i; 17522 goto retry; 17523 } 17524 return (port); 17525 } 17526 17527 /* 17528 * Return the next anonymous port in the privileged port range for 17529 * bind checking. It starts at IPPORT_RESERVED - 1 and goes 17530 * downwards. This is the same behavior as documented in the userland 17531 * library call rresvport(3N). 17532 * 17533 * TS note: skip multilevel ports. 17534 */ 17535 static in_port_t 17536 tcp_get_next_priv_port(const tcp_t *tcp) 17537 { 17538 static in_port_t next_priv_port = IPPORT_RESERVED - 1; 17539 in_port_t nextport; 17540 boolean_t restart = B_FALSE; 17541 tcp_stack_t *tcps = tcp->tcp_tcps; 17542 retry: 17543 if (next_priv_port < tcps->tcps_min_anonpriv_port || 17544 next_priv_port >= IPPORT_RESERVED) { 17545 next_priv_port = IPPORT_RESERVED - 1; 17546 if (restart) 17547 return (0); 17548 restart = B_TRUE; 17549 } 17550 if (is_system_labeled() && 17551 (nextport = tsol_next_port(crgetzone(tcp->tcp_cred), 17552 next_priv_port, IPPROTO_TCP, B_FALSE)) != 0) { 17553 next_priv_port = nextport; 17554 goto retry; 17555 } 17556 return (next_priv_port--); 17557 } 17558 17559 /* The write side r/w procedure. */ 17560 17561 #if CCS_STATS 17562 struct { 17563 struct { 17564 int64_t count, bytes; 17565 } tot, hit; 17566 } wrw_stats; 17567 #endif 17568 17569 /* 17570 * Call by tcp_wput() to handle all non data, except M_PROTO and M_PCPROTO, 17571 * messages. 17572 */ 17573 /* ARGSUSED */ 17574 static void 17575 tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2) 17576 { 17577 conn_t *connp = (conn_t *)arg; 17578 tcp_t *tcp = connp->conn_tcp; 17579 queue_t *q = tcp->tcp_wq; 17580 17581 ASSERT(DB_TYPE(mp) != M_IOCTL); 17582 /* 17583 * TCP is D_MP and qprocsoff() is done towards the end of the tcp_close. 17584 * Once the close starts, streamhead and sockfs will not let any data 17585 * packets come down (close ensures that there are no threads using the 17586 * queue and no new threads will come down) but since qprocsoff() 17587 * hasn't happened yet, a M_FLUSH or some non data message might 17588 * get reflected back (in response to our own FLUSHRW) and get 17589 * processed after tcp_close() is done. The conn would still be valid 17590 * because a ref would have added but we need to check the state 17591 * before actually processing the packet. 
17592 */ 17593 if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) { 17594 freemsg(mp); 17595 return; 17596 } 17597 17598 switch (DB_TYPE(mp)) { 17599 case M_IOCDATA: 17600 tcp_wput_iocdata(tcp, mp); 17601 break; 17602 case M_FLUSH: 17603 tcp_wput_flush(tcp, mp); 17604 break; 17605 default: 17606 CALL_IP_WPUT(connp, q, mp); 17607 break; 17608 } 17609 } 17610 17611 /* 17612 * The TCP fast path write put procedure. 17613 * NOTE: the logic of the fast path is duplicated from tcp_wput_data() 17614 */ 17615 /* ARGSUSED */ 17616 void 17617 tcp_output(void *arg, mblk_t *mp, void *arg2) 17618 { 17619 int len; 17620 int hdrlen; 17621 int plen; 17622 mblk_t *mp1; 17623 uchar_t *rptr; 17624 uint32_t snxt; 17625 tcph_t *tcph; 17626 struct datab *db; 17627 uint32_t suna; 17628 uint32_t mss; 17629 ipaddr_t *dst; 17630 ipaddr_t *src; 17631 uint32_t sum; 17632 int usable; 17633 conn_t *connp = (conn_t *)arg; 17634 tcp_t *tcp = connp->conn_tcp; 17635 uint32_t msize; 17636 tcp_stack_t *tcps = tcp->tcp_tcps; 17637 17638 /* 17639 * Try and ASSERT the minimum possible references on the 17640 * conn early enough. Since we are executing on write side, 17641 * the connection is obviously not detached and that means 17642 * there is a ref each for TCP and IP. Since we are behind 17643 * the squeue, the minimum references needed are 3. If the 17644 * conn is in classifier hash list, there should be an 17645 * extra ref for that (we check both the possibilities). 17646 */ 17647 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 17648 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 17649 17650 ASSERT(DB_TYPE(mp) == M_DATA); 17651 msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp); 17652 17653 mutex_enter(&tcp->tcp_non_sq_lock); 17654 tcp->tcp_squeue_bytes -= msize; 17655 mutex_exit(&tcp->tcp_non_sq_lock); 17656 17657 /* Bypass tcp protocol for fused tcp loopback */ 17658 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 17659 return; 17660 17661 mss = tcp->tcp_mss; 17662 if (tcp->tcp_xmit_zc_clean) 17663 mp = tcp_zcopy_backoff(tcp, mp, 0); 17664 17665 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 17666 len = (int)(mp->b_wptr - mp->b_rptr); 17667 17668 /* 17669 * Criteria for fast path: 17670 * 17671 * 1. no unsent data 17672 * 2. single mblk in request 17673 * 3. connection established 17674 * 4. data in mblk 17675 * 5. len <= mss 17676 * 6. no tcp_valid bits 17677 */ 17678 if ((tcp->tcp_unsent != 0) || 17679 (tcp->tcp_cork) || 17680 (mp->b_cont != NULL) || 17681 (tcp->tcp_state != TCPS_ESTABLISHED) || 17682 (len == 0) || 17683 (len > mss) || 17684 (tcp->tcp_valid_bits != 0)) { 17685 tcp_wput_data(tcp, mp, B_FALSE); 17686 return; 17687 } 17688 17689 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 17690 ASSERT(tcp->tcp_fin_sent == 0); 17691 17692 /* queue new packet onto retransmission queue */ 17693 if (tcp->tcp_xmit_head == NULL) { 17694 tcp->tcp_xmit_head = mp; 17695 } else { 17696 tcp->tcp_xmit_last->b_cont = mp; 17697 } 17698 tcp->tcp_xmit_last = mp; 17699 tcp->tcp_xmit_tail = mp; 17700 17701 /* find out how much we can send */ 17702 /* BEGIN CSTYLED */ 17703 /* 17704 * un-acked usable 17705 * |--------------|-----------------| 17706 * tcp_suna tcp_snxt tcp_suna+tcp_swnd 17707 */ 17708 /* END CSTYLED */ 17709 17710 /* start sending from tcp_snxt */ 17711 snxt = tcp->tcp_snxt; 17712 17713 /* 17714 * Check to see if this connection has been idled for some 17715 * time and no ACK is expected. 
If it is, we need to slow 17716 * start again to get back the connection's "self-clock" as 17717 * described in VJ's paper. 17718 * 17719 * Refer to the comment in tcp_mss_set() for the calculation 17720 * of tcp_cwnd after idle. 17721 */ 17722 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 17723 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 17724 SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle); 17725 } 17726 17727 usable = tcp->tcp_swnd; /* tcp window size */ 17728 if (usable > tcp->tcp_cwnd) 17729 usable = tcp->tcp_cwnd; /* congestion window smaller */ 17730 usable -= snxt; /* subtract stuff already sent */ 17731 suna = tcp->tcp_suna; 17732 usable += suna; 17733 /* usable can be < 0 if the congestion window is smaller */ 17734 if (len > usable) { 17735 /* Can't send complete M_DATA in one shot */ 17736 goto slow; 17737 } 17738 17739 mutex_enter(&tcp->tcp_non_sq_lock); 17740 if (tcp->tcp_flow_stopped && 17741 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 17742 tcp_clrqfull(tcp); 17743 } 17744 mutex_exit(&tcp->tcp_non_sq_lock); 17745 17746 /* 17747 * determine if anything to send (Nagle). 17748 * 17749 * 1. len < tcp_mss (i.e. small) 17750 * 2. unacknowledged data present 17751 * 3. len < nagle limit 17752 * 4. last packet sent < nagle limit (previous packet sent) 17753 */ 17754 if ((len < mss) && (snxt != suna) && 17755 (len < (int)tcp->tcp_naglim) && 17756 (tcp->tcp_last_sent_len < tcp->tcp_naglim)) { 17757 /* 17758 * This was the first unsent packet and normally 17759 * mss < xmit_hiwater so there is no need to worry 17760 * about flow control. The next packet will go 17761 * through the flow control check in tcp_wput_data(). 17762 */ 17763 /* leftover work from above */ 17764 tcp->tcp_unsent = len; 17765 tcp->tcp_xmit_tail_unsent = len; 17766 17767 return; 17768 } 17769 17770 /* len <= tcp->tcp_mss && len == unsent so no silly window */ 17771 17772 if (snxt == suna) { 17773 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 17774 } 17775 17776 /* we have always sent something */ 17777 tcp->tcp_rack_cnt = 0; 17778 17779 tcp->tcp_snxt = snxt + len; 17780 tcp->tcp_rack = tcp->tcp_rnxt; 17781 17782 if ((mp1 = dupb(mp)) == 0) 17783 goto no_memory; 17784 mp->b_prev = (mblk_t *)(uintptr_t)lbolt; 17785 mp->b_next = (mblk_t *)(uintptr_t)snxt; 17786 17787 /* adjust tcp header information */ 17788 tcph = tcp->tcp_tcph; 17789 tcph->th_flags[0] = (TH_ACK|TH_PUSH); 17790 17791 sum = len + tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 17792 sum = (sum >> 16) + (sum & 0xFFFF); 17793 U16_TO_ABE16(sum, tcph->th_sum); 17794 17795 U32_TO_ABE32(snxt, tcph->th_seq); 17796 17797 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 17798 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 17799 BUMP_LOCAL(tcp->tcp_obsegs); 17800 17801 /* Update the latest receive window size in TCP header. 
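 *
 * (Illustrative values: with tcp_rcv_ws = 4, a tcp_rwnd of 1048560
 * bytes is advertised as 1048560 >> 4 = 65535, the largest value the
 * 16-bit th_win field can carry.)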
*/ 17802 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 17803 tcph->th_win); 17804 17805 tcp->tcp_last_sent_len = (ushort_t)len; 17806 17807 plen = len + tcp->tcp_hdr_len; 17808 17809 if (tcp->tcp_ipversion == IPV4_VERSION) { 17810 tcp->tcp_ipha->ipha_length = htons(plen); 17811 } else { 17812 tcp->tcp_ip6h->ip6_plen = htons(plen - 17813 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 17814 } 17815 17816 /* see if we need to allocate a mblk for the headers */ 17817 hdrlen = tcp->tcp_hdr_len; 17818 rptr = mp1->b_rptr - hdrlen; 17819 db = mp1->b_datap; 17820 if ((db->db_ref != 2) || rptr < db->db_base || 17821 (!OK_32PTR(rptr))) { 17822 /* NOTE: we assume allocb returns an OK_32PTR */ 17823 mp = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 17824 tcps->tcps_wroff_xtra, BPRI_MED); 17825 if (!mp) { 17826 freemsg(mp1); 17827 goto no_memory; 17828 } 17829 mp->b_cont = mp1; 17830 mp1 = mp; 17831 /* Leave room for Link Level header */ 17832 /* hdrlen = tcp->tcp_hdr_len; */ 17833 rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra]; 17834 mp1->b_wptr = &rptr[hdrlen]; 17835 } 17836 mp1->b_rptr = rptr; 17837 17838 /* Fill in the timestamp option. */ 17839 if (tcp->tcp_snd_ts_ok) { 17840 U32_TO_BE32((uint32_t)lbolt, 17841 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 17842 U32_TO_BE32(tcp->tcp_ts_recent, 17843 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 17844 } else { 17845 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 17846 } 17847 17848 /* copy header into outgoing packet */ 17849 dst = (ipaddr_t *)rptr; 17850 src = (ipaddr_t *)tcp->tcp_iphc; 17851 dst[0] = src[0]; 17852 dst[1] = src[1]; 17853 dst[2] = src[2]; 17854 dst[3] = src[3]; 17855 dst[4] = src[4]; 17856 dst[5] = src[5]; 17857 dst[6] = src[6]; 17858 dst[7] = src[7]; 17859 dst[8] = src[8]; 17860 dst[9] = src[9]; 17861 if (hdrlen -= 40) { 17862 hdrlen >>= 2; 17863 dst += 10; 17864 src += 10; 17865 do { 17866 *dst++ = *src++; 17867 } while (--hdrlen); 17868 } 17869 17870 /* 17871 * Set the ECN info in the TCP header. Note that this 17872 * is not the template header. 17873 */ 17874 if (tcp->tcp_ecn_ok) { 17875 SET_ECT(tcp, rptr); 17876 17877 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 17878 if (tcp->tcp_ecn_echo_on) 17879 tcph->th_flags[0] |= TH_ECE; 17880 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 17881 tcph->th_flags[0] |= TH_CWR; 17882 tcp->tcp_ecn_cwr_sent = B_TRUE; 17883 } 17884 } 17885 17886 if (tcp->tcp_ip_forward_progress) { 17887 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 17888 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 17889 tcp->tcp_ip_forward_progress = B_FALSE; 17890 } 17891 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 17892 tcp_send_data(tcp, tcp->tcp_wq, mp1); 17893 return; 17894 17895 /* 17896 * If we ran out of memory, we pretend to have sent the packet 17897 * and that it was lost on the wire. 17898 */ 17899 no_memory: 17900 return; 17901 17902 slow: 17903 /* leftover work from above */ 17904 tcp->tcp_unsent = len; 17905 tcp->tcp_xmit_tail_unsent = len; 17906 tcp_wput_data(tcp, NULL, B_FALSE); 17907 } 17908 17909 /* 17910 * The function called through squeue to get behind eager's perimeter to 17911 * finish the accept processing. 
17912 */ 17913 /* ARGSUSED */ 17914 void 17915 tcp_accept_finish(void *arg, mblk_t *mp, void *arg2) 17916 { 17917 conn_t *connp = (conn_t *)arg; 17918 tcp_t *tcp = connp->conn_tcp; 17919 queue_t *q = tcp->tcp_rq; 17920 mblk_t *mp1; 17921 mblk_t *stropt_mp = mp; 17922 struct stroptions *stropt; 17923 uint_t thwin; 17924 tcp_stack_t *tcps = tcp->tcp_tcps; 17925 17926 /* 17927 * Drop the eager's ref on the listener, that was placed when 17928 * this eager began life in tcp_conn_request. 17929 */ 17930 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp); 17931 17932 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) { 17933 /* 17934 * Someone blewoff the eager before we could finish 17935 * the accept. 17936 * 17937 * The only reason eager exists it because we put in 17938 * a ref on it when conn ind went up. We need to send 17939 * a disconnect indication up while the last reference 17940 * on the eager will be dropped by the squeue when we 17941 * return. 17942 */ 17943 ASSERT(tcp->tcp_listener == NULL); 17944 if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) { 17945 struct T_discon_ind *tdi; 17946 17947 (void) putnextctl1(q, M_FLUSH, FLUSHRW); 17948 /* 17949 * Let us reuse the incoming mblk to avoid memory 17950 * allocation failure problems. We know that the 17951 * size of the incoming mblk i.e. stroptions is greater 17952 * than sizeof T_discon_ind. So the reallocb below 17953 * can't fail. 17954 */ 17955 freemsg(mp->b_cont); 17956 mp->b_cont = NULL; 17957 ASSERT(DB_REF(mp) == 1); 17958 mp = reallocb(mp, sizeof (struct T_discon_ind), 17959 B_FALSE); 17960 ASSERT(mp != NULL); 17961 DB_TYPE(mp) = M_PROTO; 17962 ((union T_primitives *)mp->b_rptr)->type = T_DISCON_IND; 17963 tdi = (struct T_discon_ind *)mp->b_rptr; 17964 if (tcp->tcp_issocket) { 17965 tdi->DISCON_reason = ECONNREFUSED; 17966 tdi->SEQ_number = 0; 17967 } else { 17968 tdi->DISCON_reason = ENOPROTOOPT; 17969 tdi->SEQ_number = 17970 tcp->tcp_conn_req_seqnum; 17971 } 17972 mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind); 17973 putnext(q, mp); 17974 } else { 17975 freemsg(mp); 17976 } 17977 if (tcp->tcp_hard_binding) { 17978 tcp->tcp_hard_binding = B_FALSE; 17979 tcp->tcp_hard_bound = B_TRUE; 17980 } 17981 tcp->tcp_detached = B_FALSE; 17982 return; 17983 } 17984 17985 mp1 = stropt_mp->b_cont; 17986 stropt_mp->b_cont = NULL; 17987 ASSERT(DB_TYPE(stropt_mp) == M_SETOPTS); 17988 stropt = (struct stroptions *)stropt_mp->b_rptr; 17989 17990 while (mp1 != NULL) { 17991 mp = mp1; 17992 mp1 = mp1->b_cont; 17993 mp->b_cont = NULL; 17994 tcp->tcp_drop_opt_ack_cnt++; 17995 CALL_IP_WPUT(connp, tcp->tcp_wq, mp); 17996 } 17997 mp = NULL; 17998 17999 /* 18000 * For a loopback connection with tcp_direct_sockfs on, note that 18001 * we don't have to protect tcp_rcv_list yet because synchronous 18002 * streams has not yet been enabled and tcp_fuse_rrw() cannot 18003 * possibly race with us. 18004 */ 18005 18006 /* 18007 * Set the max window size (tcp_rq->q_hiwat) of the acceptor 18008 * properly. This is the first time we know of the acceptor' 18009 * queue. So we do it here. 18010 */ 18011 if (tcp->tcp_rcv_list == NULL) { 18012 /* 18013 * Recv queue is empty, tcp_rwnd should not have changed. 18014 * That means it should be equal to the listener's tcp_rwnd. 
18015 */
18016 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd;
18017 } else {
18018 #ifdef DEBUG
18019 uint_t cnt = 0;
18020
18021 mp1 = tcp->tcp_rcv_list;
18022 while ((mp = mp1) != NULL) {
18023 mp1 = mp->b_next;
18024 cnt += msgdsize(mp);
18025 }
18026 ASSERT(cnt != 0 && tcp->tcp_rcv_cnt == cnt);
18027 #endif
18028 /* There is some data, add them back to get the max. */
18029 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd + tcp->tcp_rcv_cnt;
18030 }
18031
18032 stropt->so_flags = SO_HIWAT;
18033 stropt->so_hiwat = MAX(q->q_hiwat, tcps->tcps_sth_rcv_hiwat);
18034
18035 stropt->so_flags |= SO_MAXBLK;
18036 stropt->so_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
18037
18038 /*
18039 * This is the first time we run on the correct
18040 * queue after tcp_accept. So fix all the q parameters
18041 * here.
18042 */
18043 /* Allocate room for SACK options if needed. */
18044 stropt->so_flags |= SO_WROFF;
18045 if (tcp->tcp_fused) {
18046 ASSERT(tcp->tcp_loopback);
18047 ASSERT(tcp->tcp_loopback_peer != NULL);
18048 /*
18049 * For fused tcp loopback, set the stream head's write
18050 * offset value to zero since we won't be needing any room
18051 * for TCP/IP headers. This would also improve performance
18052 * since it would reduce the amount of work done by kmem.
18053 * Non-fused tcp loopback case is handled separately below.
18054 */
18055 stropt->so_wroff = 0;
18056 /*
18057 * Record the stream head's high water mark for this endpoint;
18058 * this is used for flow-control purposes in tcp_fuse_output().
18059 */
18060 stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(tcp, q->q_hiwat);
18061 /*
18062 * Update the peer's transmit parameters according to
18063 * our recently calculated high water mark value.
18064 */
18065 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
18066 } else if (tcp->tcp_snd_sack_ok) {
18067 stropt->so_wroff = tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN +
18068 (tcp->tcp_loopback ? 0 : tcps->tcps_wroff_xtra);
18069 } else {
18070 stropt->so_wroff = tcp->tcp_hdr_len + (tcp->tcp_loopback ? 0 :
18071 tcps->tcps_wroff_xtra);
18072 }
18073
18074 /*
18075 * If this endpoint is handling SSL, then reserve extra
18076 * offset and space at the end.
18077 * Also have the stream head allocate SSL3_MAX_RECORD_LEN packets,
18078 * overriding the previous setting. The extra cost of signing and
18079 * encrypting multiple MSS-size records (12 of them with Ethernet),
18080 * instead of a single contiguous one by the stream head
18081 * largely outweighs the statistical reduction of ACKs, when
18082 * applicable. The peer will also save on decryption and verification
18083 * costs.
18084 */
18085 if (tcp->tcp_kssl_ctx != NULL) {
18086 stropt->so_wroff += SSL3_WROFFSET;
18087
18088 stropt->so_flags |= SO_TAIL;
18089 stropt->so_tail = SSL3_MAX_TAIL_LEN;
18090
18091 stropt->so_flags |= SO_COPYOPT;
18092 stropt->so_copyopt = ZCVMUNSAFE;
18093
18094 stropt->so_maxblk = SSL3_MAX_RECORD_LEN;
18095 }
18096
18097 /* Send the options up */
18098 putnext(q, stropt_mp);
18099
18100 /*
18101 * Pass up any data and/or a fin that has been received.
18102 *
18103 * Adjust receive window in case it had decreased
18104 * (because there is data <=> tcp_rcv_list != NULL)
18105 * while the connection was detached. Note that
18106 * in case the eager was flow-controlled, w/o this
18107 * code, the rwnd may never open up again!
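 *
 * Sketch of the computation that follows (tcp_t field names): the
 * window the peer currently believes in is
 *
 *	thwin = (th_win << tcp_rcv_ws) - (tcp_rnxt - tcp_rack)
 *
 * and a pure ACK advertising the reopened window is only worth
 * sending once q_hiwat - thwin >= tcp_mss, i.e. the window has grown
 * by at least one full segment.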
18108      */
18109     if (tcp->tcp_rcv_list != NULL) {
18110         /* We drain directly in case of fused tcp loopback */
18111         if (!tcp->tcp_fused && canputnext(q)) {
18112             tcp->tcp_rwnd = q->q_hiwat;
18113             thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win))
18114                 << tcp->tcp_rcv_ws;
18115             thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
18116             if (tcp->tcp_state >= TCPS_ESTABLISHED &&
18117                 (q->q_hiwat - thwin >= tcp->tcp_mss)) {
18118                 tcp_xmit_ctl(NULL,
18119                     tcp, (tcp->tcp_swnd == 0) ?
18120                     tcp->tcp_suna : tcp->tcp_snxt,
18121                     tcp->tcp_rnxt, TH_ACK);
18122                 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
18123             }
18124 
18125         }
18126         (void) tcp_rcv_drain(q, tcp);
18127 
18128         /*
18129          * For fused tcp loopback, back-enable peer endpoint
18130          * if it's currently flow-controlled.
18131          */
18132         if (tcp->tcp_fused) {
18133             tcp_t *peer_tcp = tcp->tcp_loopback_peer;
18134 
18135             ASSERT(peer_tcp != NULL);
18136             ASSERT(peer_tcp->tcp_fused);
18137             /*
18138              * In order to change the peer's tcp_flow_stopped,
18139              * we need to take locks for both end points. The
18140              * highest address is taken first.
18141              */
18142             if (peer_tcp > tcp) {
18143                 mutex_enter(&peer_tcp->tcp_non_sq_lock);
18144                 mutex_enter(&tcp->tcp_non_sq_lock);
18145             } else {
18146                 mutex_enter(&tcp->tcp_non_sq_lock);
18147                 mutex_enter(&peer_tcp->tcp_non_sq_lock);
18148             }
18149             if (peer_tcp->tcp_flow_stopped) {
18150                 tcp_clrqfull(peer_tcp);
18151                 TCP_STAT(tcps, tcp_fusion_backenabled);
18152             }
18153             mutex_exit(&peer_tcp->tcp_non_sq_lock);
18154             mutex_exit(&tcp->tcp_non_sq_lock);
18155         }
18156     }
18157     ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
18158     if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
18159         mp = mi_tpi_ordrel_ind();
18160         if (mp) {
18161             tcp->tcp_ordrel_done = B_TRUE;
18162             putnext(q, mp);
18163             if (tcp->tcp_deferred_clean_death) {
18164                 /*
18165                  * tcp_clean_death was deferred
18166                  * for T_ORDREL_IND - do it now
18167                  */
18168                 (void) tcp_clean_death(tcp,
18169                     tcp->tcp_client_errno, 21);
18170                 tcp->tcp_deferred_clean_death = B_FALSE;
18171             }
18172         } else {
18173             /*
18174              * Run the orderly release in the
18175              * service routine.
18176              */
18177             qenable(q);
18178         }
18179     }
18180     if (tcp->tcp_hard_binding) {
18181         tcp->tcp_hard_binding = B_FALSE;
18182         tcp->tcp_hard_bound = B_TRUE;
18183     }
18184 
18185     tcp->tcp_detached = B_FALSE;
18186 
18187     /* We can enable synchronous streams now */
18188     if (tcp->tcp_fused) {
18189         tcp_fuse_syncstr_enable_pair(tcp);
18190     }
18191 
18192     if (tcp->tcp_ka_enabled) {
18193         tcp->tcp_ka_last_intrvl = 0;
18194         tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
18195             MSEC_TO_TICK(tcp->tcp_ka_interval));
18196     }
18197 
18198     /*
18199      * At this point, the eager is fully established and will
18200      * have the following references -
18201      *
18202      * 2 references for the connection to exist (1 for TCP and 1 for IP).
18203      * 1 reference for the squeue which will be dropped by the squeue as
18204      * soon as this function returns.
18205      * There will be 1 additional reference for being in the classifier
18206      * hash list, provided something bad hasn't happened.
18207      */
18208     ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
18209         (connp->conn_fanout == NULL && connp->conn_ref >= 3));
18210 }
18211 
18212 /*
18213  * The function called through the squeue to get behind the listener's
18214  * perimeter to send a deferred conn_ind.
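 *
 * For reference, the deferred T_conn_ind carries the eager's tcp_t
 * pointer in its option area, so it can be recovered with a plain
 * bcopy; a minimal sketch, mirroring the code below:
 *
 *	struct T_conn_ind *ci = (struct T_conn_ind *)mp->b_rptr;
 *	tcp_t *eager;
 *
 *	bcopy(mp->b_rptr + ci->OPT_offset, &eager, ci->OPT_length);
 *
 * i.e. OPT_length is sizeof (tcp_t *); the pointer itself is the
 * "option" being passed around.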
18215  */
18216 /* ARGSUSED */
18217 void
18218 tcp_send_pending(void *arg, mblk_t *mp, void *arg2)
18219 {
18220     conn_t *connp = (conn_t *)arg;
18221     tcp_t *listener = connp->conn_tcp;
18222 
18223     if (listener->tcp_state == TCPS_CLOSED ||
18224         TCP_IS_DETACHED(listener)) {
18225         /*
18226          * If the listener has closed, it would have caused a
18227          * cleanup/blowoff to happen for the eager.
18228          */
18229         tcp_t *tcp;
18230         struct T_conn_ind *conn_ind;
18231 
18232         conn_ind = (struct T_conn_ind *)mp->b_rptr;
18233         bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp,
18234             conn_ind->OPT_length);
18235         /*
18236          * We need to drop the ref on the eager that was put in
18237          * tcp_rput_data() before trying to send the conn_ind
18238          * to the listener. The conn_ind was deferred in
18239          * tcp_send_conn_ind and tcp_wput_accept() is sending this
18240          * deferred conn_ind, but the listener is closed, so we drop the ref.
18241          */
18242         CONN_DEC_REF(tcp->tcp_connp);
18243         freemsg(mp);
18244         return;
18245     }
18246     putnext(listener->tcp_rq, mp);
18247 }
18248 
18249 
18250 /*
18251  * This is the STREAMS entry point for T_CONN_RES coming down on the
18252  * Acceptor STREAM when the sockfs listener does accept processing.
18253  * Read the block comment on top of tcp_conn_request().
18254  */
18255 void
18256 tcp_wput_accept(queue_t *q, mblk_t *mp)
18257 {
18258     queue_t *rq = RD(q);
18259     struct T_conn_res *conn_res;
18260     tcp_t *eager;
18261     tcp_t *listener;
18262     struct T_ok_ack *ok;
18263     t_scalar_t PRIM_type;
18264     mblk_t *opt_mp;
18265     conn_t *econnp;
18266 
18267     ASSERT(DB_TYPE(mp) == M_PROTO);
18268 
18269     conn_res = (struct T_conn_res *)mp->b_rptr;
18270     ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
18271     if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) {
18272         mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0);
18273         if (mp != NULL)
18274             putnext(rq, mp);
18275         return;
18276     }
18277     switch (conn_res->PRIM_type) {
18278     case O_T_CONN_RES:
18279     case T_CONN_RES:
18280         /*
18281          * We pass up an err ack if allocb fails. This will
18282          * cause sockfs to issue a T_DISCON_REQ which will cause
18283          * tcp_eager_blowoff to be called. sockfs will then call
18284          * rq->q_qinfo->qi_qclose to clean up the acceptor stream.
18285          * We need to do the allocb up here because we have to
18286          * make sure rq->q_qinfo->qi_qclose still points to the
18287          * correct function (tcpclose_accept) in case allocb
18288          * fails.
18289 */ 18290 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 18291 if (opt_mp == NULL) { 18292 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 18293 if (mp != NULL) 18294 putnext(rq, mp); 18295 return; 18296 } 18297 18298 bcopy(mp->b_rptr + conn_res->OPT_offset, 18299 &eager, conn_res->OPT_length); 18300 PRIM_type = conn_res->PRIM_type; 18301 mp->b_datap->db_type = M_PCPROTO; 18302 mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack); 18303 ok = (struct T_ok_ack *)mp->b_rptr; 18304 ok->PRIM_type = T_OK_ACK; 18305 ok->CORRECT_prim = PRIM_type; 18306 econnp = eager->tcp_connp; 18307 econnp->conn_dev = (dev_t)RD(q)->q_ptr; 18308 econnp->conn_minor_arena = (vmem_t *)(WR(q)->q_ptr); 18309 eager->tcp_rq = rq; 18310 eager->tcp_wq = q; 18311 rq->q_ptr = econnp; 18312 rq->q_qinfo = &tcp_rinitv4; /* No open - same as rinitv6 */ 18313 q->q_ptr = econnp; 18314 q->q_qinfo = &tcp_winit; 18315 listener = eager->tcp_listener; 18316 eager->tcp_issocket = B_TRUE; 18317 18318 econnp->conn_zoneid = listener->tcp_connp->conn_zoneid; 18319 econnp->conn_allzones = listener->tcp_connp->conn_allzones; 18320 ASSERT(econnp->conn_netstack == 18321 listener->tcp_connp->conn_netstack); 18322 ASSERT(eager->tcp_tcps == listener->tcp_tcps); 18323 18324 /* Put the ref for IP */ 18325 CONN_INC_REF(econnp); 18326 18327 /* 18328 * We should have minimum of 3 references on the conn 18329 * at this point. One each for TCP and IP and one for 18330 * the T_conn_ind that was sent up when the 3-way handshake 18331 * completed. In the normal case we would also have another 18332 * reference (making a total of 4) for the conn being in the 18333 * classifier hash list. However the eager could have received 18334 * an RST subsequently and tcp_closei_local could have removed 18335 * the eager from the classifier hash list, hence we can't 18336 * assert that reference. 18337 */ 18338 ASSERT(econnp->conn_ref >= 3); 18339 18340 /* 18341 * Send the new local address also up to sockfs. There 18342 * should already be enough space in the mp that came 18343 * down from soaccept(). 18344 */ 18345 if (eager->tcp_family == AF_INET) { 18346 sin_t *sin; 18347 18348 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18349 (sizeof (struct T_ok_ack) + sizeof (sin_t))); 18350 sin = (sin_t *)mp->b_wptr; 18351 mp->b_wptr += sizeof (sin_t); 18352 sin->sin_family = AF_INET; 18353 sin->sin_port = eager->tcp_lport; 18354 sin->sin_addr.s_addr = eager->tcp_ipha->ipha_src; 18355 } else { 18356 sin6_t *sin6; 18357 18358 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18359 sizeof (struct T_ok_ack) + sizeof (sin6_t)); 18360 sin6 = (sin6_t *)mp->b_wptr; 18361 mp->b_wptr += sizeof (sin6_t); 18362 sin6->sin6_family = AF_INET6; 18363 sin6->sin6_port = eager->tcp_lport; 18364 if (eager->tcp_ipversion == IPV4_VERSION) { 18365 sin6->sin6_flowinfo = 0; 18366 IN6_IPADDR_TO_V4MAPPED( 18367 eager->tcp_ipha->ipha_src, 18368 &sin6->sin6_addr); 18369 } else { 18370 ASSERT(eager->tcp_ip6h != NULL); 18371 sin6->sin6_flowinfo = 18372 eager->tcp_ip6h->ip6_vcf & 18373 ~IPV6_VERS_AND_FLOW_MASK; 18374 sin6->sin6_addr = eager->tcp_ip6h->ip6_src; 18375 } 18376 sin6->sin6_scope_id = 0; 18377 sin6->__sin6_src_id = 0; 18378 } 18379 18380 putnext(rq, mp); 18381 18382 opt_mp->b_datap->db_type = M_SETOPTS; 18383 opt_mp->b_wptr += sizeof (struct stroptions); 18384 18385 /* 18386 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 18387 * from listener to acceptor. The message is chained on the 18388 * bind_mp which tcp_rput_other will send down to IP. 
18389      */
18390     if (listener->tcp_bound_if != 0) {
18391         /* allocate optmgmt req */
18392         mp = tcp_setsockopt_mp(IPPROTO_IPV6,
18393             IPV6_BOUND_IF, (char *)&listener->tcp_bound_if,
18394             sizeof (int));
18395         if (mp != NULL)
18396             linkb(opt_mp, mp);
18397     }
18398     if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) {
18399         uint_t on = 1;
18400 
18401         /* allocate optmgmt req */
18402         mp = tcp_setsockopt_mp(IPPROTO_IPV6,
18403             IPV6_RECVPKTINFO, (char *)&on, sizeof (on));
18404         if (mp != NULL)
18405             linkb(opt_mp, mp);
18406     }
18407 
18408 
18409     mutex_enter(&listener->tcp_eager_lock);
18410 
18411     if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) {
18412 
18413         tcp_t *tail;
18414         tcp_t *tcp;
18415         mblk_t *mp1;
18416 
18417         tcp = listener->tcp_eager_prev_q0;
18418         /*
18419          * listener->tcp_eager_prev_q0 points to the TAIL of the
18420          * deferred T_conn_ind queue. We need to get to the head
18421          * of the queue in order to send up the T_conn_inds in the
18422          * same order in which the 3WHS completions occurred.
18423          */
18424         while (tcp != listener) {
18425             if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0 &&
18426                 !tcp->tcp_kssl_pending)
18427                 break;
18428             else
18429                 tcp = tcp->tcp_eager_prev_q0;
18430         }
18431         /* None of the pending eagers can be sent up now */
18432         if (tcp == listener)
18433             goto no_more_eagers;
18434 
18435         mp1 = tcp->tcp_conn.tcp_eager_conn_ind;
18436         tcp->tcp_conn.tcp_eager_conn_ind = NULL;
18437         /* Move from q0 to q */
18438         ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
18439         listener->tcp_conn_req_cnt_q0--;
18440         listener->tcp_conn_req_cnt_q++;
18441         tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
18442             tcp->tcp_eager_prev_q0;
18443         tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
18444             tcp->tcp_eager_next_q0;
18445         tcp->tcp_eager_prev_q0 = NULL;
18446         tcp->tcp_eager_next_q0 = NULL;
18447         tcp->tcp_conn_def_q0 = B_FALSE;
18448 
18449         /* Make sure the tcp isn't in the list of droppables */
18450         ASSERT(tcp->tcp_eager_next_drop_q0 == NULL &&
18451             tcp->tcp_eager_prev_drop_q0 == NULL);
18452 
18453         /*
18454          * Insert at end of the queue because sockfs sends
18455          * down T_CONN_RES in chronological order. Leaving
18456          * the older conn indications at the front of the queue
18457          * helps reduce search time.
18458          */
18459         tail = listener->tcp_eager_last_q;
18460         if (tail != NULL) {
18461             tail->tcp_eager_next_q = tcp;
18462         } else {
18463             listener->tcp_eager_next_q = tcp;
18464         }
18465         listener->tcp_eager_last_q = tcp;
18466         tcp->tcp_eager_next_q = NULL;
18467 
18468         /* Need to get inside the listener perimeter */
18469         CONN_INC_REF(listener->tcp_connp);
18470         squeue_fill(listener->tcp_connp->conn_sqp, mp1,
18471             tcp_send_pending, listener->tcp_connp,
18472             SQTAG_TCP_SEND_PENDING);
18473     }
18474 no_more_eagers:
18475     tcp_eager_unlink(eager);
18476     mutex_exit(&listener->tcp_eager_lock);
18477 
18478     /*
18479      * At this point, the eager is detached from the listener
18480      * but we still have an extra ref on the eager (apart from the
18481      * usual tcp references). The ref was placed in tcp_rput_data
18482      * before sending the conn_ind in tcp_send_conn_ind.
18483      * The ref will be dropped in tcp_accept_finish().
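     *
     * In outline (illustrative only, not a complete accounting of
     * every reference):
     *
     *	tcp_rput_data():	ref placed on the eager, conn_ind sent up
     *	tcp_wput_accept():	eager unlinked here, T_OK_ACK sent,
     *				opt_mp queued to the eager's squeue
     *	tcp_accept_finish():	stream options pushed, that ref released
     *
     * so the eager stays pinned for the whole sockfs accept handshake.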
18484 */ 18485 squeue_enter_nodrain(econnp->conn_sqp, opt_mp, 18486 tcp_accept_finish, econnp, SQTAG_TCP_ACCEPT_FINISH_Q0); 18487 return; 18488 default: 18489 mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0); 18490 if (mp != NULL) 18491 putnext(rq, mp); 18492 return; 18493 } 18494 } 18495 18496 void 18497 tcp_wput(queue_t *q, mblk_t *mp) 18498 { 18499 conn_t *connp = Q_TO_CONN(q); 18500 tcp_t *tcp; 18501 void (*output_proc)(); 18502 t_scalar_t type; 18503 uchar_t *rptr; 18504 struct iocblk *iocp; 18505 uint32_t msize; 18506 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 18507 18508 ASSERT(connp->conn_ref >= 2); 18509 18510 switch (DB_TYPE(mp)) { 18511 case M_DATA: 18512 tcp = connp->conn_tcp; 18513 ASSERT(tcp != NULL); 18514 18515 msize = msgdsize(mp); 18516 18517 mutex_enter(&tcp->tcp_non_sq_lock); 18518 tcp->tcp_squeue_bytes += msize; 18519 if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater) { 18520 tcp_setqfull(tcp); 18521 } 18522 mutex_exit(&tcp->tcp_non_sq_lock); 18523 18524 CONN_INC_REF(connp); 18525 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18526 tcp_output, connp, SQTAG_TCP_OUTPUT); 18527 return; 18528 case M_PROTO: 18529 case M_PCPROTO: 18530 /* 18531 * if it is a snmp message, don't get behind the squeue 18532 */ 18533 tcp = connp->conn_tcp; 18534 rptr = mp->b_rptr; 18535 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 18536 type = ((union T_primitives *)rptr)->type; 18537 } else { 18538 if (tcp->tcp_debug) { 18539 (void) strlog(TCP_MOD_ID, 0, 1, 18540 SL_ERROR|SL_TRACE, 18541 "tcp_wput_proto, dropping one..."); 18542 } 18543 freemsg(mp); 18544 return; 18545 } 18546 if (type == T_SVR4_OPTMGMT_REQ) { 18547 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 18548 if (snmpcom_req(q, mp, tcp_snmp_set, ip_snmp_get, 18549 cr)) { 18550 /* 18551 * This was a SNMP request 18552 */ 18553 return; 18554 } else { 18555 output_proc = tcp_wput_proto; 18556 } 18557 } else { 18558 output_proc = tcp_wput_proto; 18559 } 18560 break; 18561 case M_IOCTL: 18562 /* 18563 * Most ioctls can be processed right away without going via 18564 * squeues - process them right here. Those that do require 18565 * squeue (currently TCP_IOC_DEFAULT_Q and _SIOCSOCKFALLBACK) 18566 * are processed by tcp_wput_ioctl(). 18567 */ 18568 iocp = (struct iocblk *)mp->b_rptr; 18569 tcp = connp->conn_tcp; 18570 18571 switch (iocp->ioc_cmd) { 18572 case TCP_IOC_ABORT_CONN: 18573 tcp_ioctl_abort_conn(q, mp); 18574 return; 18575 case TI_GETPEERNAME: 18576 if (tcp->tcp_state < TCPS_SYN_RCVD) { 18577 iocp->ioc_error = ENOTCONN; 18578 iocp->ioc_count = 0; 18579 mp->b_datap->db_type = M_IOCACK; 18580 qreply(q, mp); 18581 return; 18582 } 18583 /* FALLTHRU */ 18584 case TI_GETMYNAME: 18585 mi_copyin(q, mp, NULL, 18586 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 18587 return; 18588 case ND_SET: 18589 /* nd_getset does the necessary checks */ 18590 case ND_GET: 18591 if (!nd_getset(q, tcps->tcps_g_nd, mp)) { 18592 CALL_IP_WPUT(connp, q, mp); 18593 return; 18594 } 18595 qreply(q, mp); 18596 return; 18597 case TCP_IOC_DEFAULT_Q: 18598 /* 18599 * Wants to be the default wq. Check the credentials 18600 * first, the rest is executed via squeue. 
18601 */ 18602 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 18603 iocp->ioc_error = EPERM; 18604 iocp->ioc_count = 0; 18605 mp->b_datap->db_type = M_IOCACK; 18606 qreply(q, mp); 18607 return; 18608 } 18609 output_proc = tcp_wput_ioctl; 18610 break; 18611 default: 18612 output_proc = tcp_wput_ioctl; 18613 break; 18614 } 18615 break; 18616 default: 18617 output_proc = tcp_wput_nondata; 18618 break; 18619 } 18620 18621 CONN_INC_REF(connp); 18622 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18623 output_proc, connp, SQTAG_TCP_WPUT_OTHER); 18624 } 18625 18626 /* 18627 * Initial STREAMS write side put() procedure for sockets. It tries to 18628 * handle the T_CAPABILITY_REQ which sockfs sends down while setting 18629 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages 18630 * are handled by tcp_wput() as usual. 18631 * 18632 * All further messages will also be handled by tcp_wput() because we cannot 18633 * be sure that the above short cut is safe later. 18634 */ 18635 static void 18636 tcp_wput_sock(queue_t *wq, mblk_t *mp) 18637 { 18638 conn_t *connp = Q_TO_CONN(wq); 18639 tcp_t *tcp = connp->conn_tcp; 18640 struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr; 18641 18642 ASSERT(wq->q_qinfo == &tcp_sock_winit); 18643 wq->q_qinfo = &tcp_winit; 18644 18645 ASSERT(IPCL_IS_TCP(connp)); 18646 ASSERT(TCP_IS_SOCKET(tcp)); 18647 18648 if (DB_TYPE(mp) == M_PCPROTO && 18649 MBLKL(mp) == sizeof (struct T_capability_req) && 18650 car->PRIM_type == T_CAPABILITY_REQ) { 18651 tcp_capability_req(tcp, mp); 18652 return; 18653 } 18654 18655 tcp_wput(wq, mp); 18656 } 18657 18658 static boolean_t 18659 tcp_zcopy_check(tcp_t *tcp) 18660 { 18661 conn_t *connp = tcp->tcp_connp; 18662 ire_t *ire; 18663 boolean_t zc_enabled = B_FALSE; 18664 tcp_stack_t *tcps = tcp->tcp_tcps; 18665 18666 if (do_tcpzcopy == 2) 18667 zc_enabled = B_TRUE; 18668 else if (tcp->tcp_ipversion == IPV4_VERSION && 18669 IPCL_IS_CONNECTED(connp) && 18670 (connp->conn_flags & IPCL_CHECK_POLICY) == 0 && 18671 connp->conn_dontroute == 0 && 18672 !connp->conn_nexthop_set && 18673 connp->conn_outgoing_ill == NULL && 18674 connp->conn_nofailover_ill == NULL && 18675 do_tcpzcopy == 1) { 18676 /* 18677 * the checks above closely resemble the fast path checks 18678 * in tcp_send_data(). 
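         *
         * Condensed (sketch only; the tests above are authoritative):
         *
         *	zc_enabled = (do_tcpzcopy == 2) ||
         *	    (do_tcpzcopy == 1 && IPv4 && connected &&
         *	    no policy check && !dontroute && !nexthop_set &&
         *	    no ill binding && ill is zerocopy-capable);
         *
         * with the ill capability probed via the cached ire under
         * conn_lock, as done below.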
18679 */ 18680 mutex_enter(&connp->conn_lock); 18681 ire = connp->conn_ire_cache; 18682 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18683 if (ire != NULL && !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18684 IRE_REFHOLD(ire); 18685 if (ire->ire_stq != NULL) { 18686 ill_t *ill = (ill_t *)ire->ire_stq->q_ptr; 18687 18688 zc_enabled = ill && (ill->ill_capabilities & 18689 ILL_CAPAB_ZEROCOPY) && 18690 (ill->ill_zerocopy_capab-> 18691 ill_zerocopy_flags != 0); 18692 } 18693 IRE_REFRELE(ire); 18694 } 18695 mutex_exit(&connp->conn_lock); 18696 } 18697 tcp->tcp_snd_zcopy_on = zc_enabled; 18698 if (!TCP_IS_DETACHED(tcp)) { 18699 if (zc_enabled) { 18700 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMSAFE); 18701 TCP_STAT(tcps, tcp_zcopy_on); 18702 } else { 18703 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18704 TCP_STAT(tcps, tcp_zcopy_off); 18705 } 18706 } 18707 return (zc_enabled); 18708 } 18709 18710 static mblk_t * 18711 tcp_zcopy_disable(tcp_t *tcp, mblk_t *bp) 18712 { 18713 tcp_stack_t *tcps = tcp->tcp_tcps; 18714 18715 if (do_tcpzcopy == 2) 18716 return (bp); 18717 else if (tcp->tcp_snd_zcopy_on) { 18718 tcp->tcp_snd_zcopy_on = B_FALSE; 18719 if (!TCP_IS_DETACHED(tcp)) { 18720 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18721 TCP_STAT(tcps, tcp_zcopy_disable); 18722 } 18723 } 18724 return (tcp_zcopy_backoff(tcp, bp, 0)); 18725 } 18726 18727 /* 18728 * Backoff from a zero-copy mblk by copying data to a new mblk and freeing 18729 * the original desballoca'ed segmapped mblk. 18730 */ 18731 static mblk_t * 18732 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, int fix_xmitlist) 18733 { 18734 mblk_t *head, *tail, *nbp; 18735 tcp_stack_t *tcps = tcp->tcp_tcps; 18736 18737 if (IS_VMLOANED_MBLK(bp)) { 18738 TCP_STAT(tcps, tcp_zcopy_backoff); 18739 if ((head = copyb(bp)) == NULL) { 18740 /* fail to backoff; leave it for the next backoff */ 18741 tcp->tcp_xmit_zc_clean = B_FALSE; 18742 return (bp); 18743 } 18744 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18745 if (fix_xmitlist) 18746 tcp_zcopy_notify(tcp); 18747 else 18748 head->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 18749 } 18750 nbp = bp->b_cont; 18751 if (fix_xmitlist) { 18752 head->b_prev = bp->b_prev; 18753 head->b_next = bp->b_next; 18754 if (tcp->tcp_xmit_tail == bp) 18755 tcp->tcp_xmit_tail = head; 18756 } 18757 bp->b_next = NULL; 18758 bp->b_prev = NULL; 18759 freeb(bp); 18760 } else { 18761 head = bp; 18762 nbp = bp->b_cont; 18763 } 18764 tail = head; 18765 while (nbp) { 18766 if (IS_VMLOANED_MBLK(nbp)) { 18767 TCP_STAT(tcps, tcp_zcopy_backoff); 18768 if ((tail->b_cont = copyb(nbp)) == NULL) { 18769 tcp->tcp_xmit_zc_clean = B_FALSE; 18770 tail->b_cont = nbp; 18771 return (head); 18772 } 18773 tail = tail->b_cont; 18774 if (nbp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18775 if (fix_xmitlist) 18776 tcp_zcopy_notify(tcp); 18777 else 18778 tail->b_datap->db_struioflag |= 18779 STRUIO_ZCNOTIFY; 18780 } 18781 bp = nbp; 18782 nbp = nbp->b_cont; 18783 if (fix_xmitlist) { 18784 tail->b_prev = bp->b_prev; 18785 tail->b_next = bp->b_next; 18786 if (tcp->tcp_xmit_tail == bp) 18787 tcp->tcp_xmit_tail = tail; 18788 } 18789 bp->b_next = NULL; 18790 bp->b_prev = NULL; 18791 freeb(bp); 18792 } else { 18793 tail->b_cont = nbp; 18794 tail = nbp; 18795 nbp = nbp->b_cont; 18796 } 18797 } 18798 if (fix_xmitlist) { 18799 tcp->tcp_xmit_last = tail; 18800 tcp->tcp_xmit_zc_clean = B_TRUE; 18801 } 18802 return (head); 18803 } 18804 18805 static void 18806 tcp_zcopy_notify(tcp_t *tcp) 18807 { 18808 struct stdata *stp; 18809 18810 if 
(tcp->tcp_detached)
18811         return;
18812     stp = STREAM(tcp->tcp_rq);
18813     mutex_enter(&stp->sd_lock);
18814     stp->sd_flag |= STZCNOTIFY;
18815     cv_broadcast(&stp->sd_zcopy_wait);
18816     mutex_exit(&stp->sd_lock);
18817 }
18818 
18819 static boolean_t
18820 tcp_send_find_ire(tcp_t *tcp, ipaddr_t *dst, ire_t **irep)
18821 {
18822     ire_t *ire;
18823     conn_t *connp = tcp->tcp_connp;
18824     tcp_stack_t *tcps = tcp->tcp_tcps;
18825     ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
18826 
18827     mutex_enter(&connp->conn_lock);
18828     ire = connp->conn_ire_cache;
18829     ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT));
18830 
18831     if ((ire != NULL) &&
18832         (((dst != NULL) && (ire->ire_addr == *dst)) || ((dst == NULL) &&
18833         IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, &tcp->tcp_ip6h->ip6_dst))) &&
18834         !(ire->ire_marks & IRE_MARK_CONDEMNED)) {
18835         IRE_REFHOLD(ire);
18836         mutex_exit(&connp->conn_lock);
18837     } else {
18838         boolean_t cached = B_FALSE;
18839         ts_label_t *tsl;
18840 
18841         /* force a recheck later on */
18842         tcp->tcp_ire_ill_check_done = B_FALSE;
18843 
18844         TCP_DBGSTAT(tcps, tcp_ire_null1);
18845         connp->conn_ire_cache = NULL;
18846         mutex_exit(&connp->conn_lock);
18847 
18848         if (ire != NULL)
18849             IRE_REFRELE_NOTR(ire);
18850 
18851         tsl = crgetlabel(CONN_CRED(connp));
18852         ire = (dst ?
18853             ire_cache_lookup(*dst, connp->conn_zoneid, tsl, ipst) :
18854             ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst,
18855             connp->conn_zoneid, tsl, ipst));
18856 
18857         if (ire == NULL) {
18858             TCP_STAT(tcps, tcp_ire_null);
18859             return (B_FALSE);
18860         }
18861 
18862         IRE_REFHOLD_NOTR(ire);
18863         /*
18864          * Since we are inside the squeue, there cannot be another
18865          * thread in TCP trying to set the conn_ire_cache now. The
18866          * check for IRE_MARK_CONDEMNED ensures that an interface
18867          * unplumb thread has not yet started cleaning up the conns.
18868          * Hence we don't need to grab the conn lock.
18869          */
18870         if (CONN_CACHE_IRE(connp)) {
18871             rw_enter(&ire->ire_bucket->irb_lock, RW_READER);
18872             if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) {
18873                 TCP_CHECK_IREINFO(tcp, ire);
18874                 connp->conn_ire_cache = ire;
18875                 cached = B_TRUE;
18876             }
18877             rw_exit(&ire->ire_bucket->irb_lock);
18878         }
18879 
18880         /*
18881          * We can continue to use the ire but since it was
18882          * not cached, we should drop the extra reference.
18883          */
18884         if (!cached)
18885             IRE_REFRELE_NOTR(ire);
18886 
18887         /*
18888          * Rampart note: no need to select a new label here, since
18889          * labels are not allowed to change during the life of a TCP
18890          * connection.
18891          */
18892     }
18893 
18894     *irep = ire;
18895 
18896     return (B_TRUE);
18897 }
18898 
18899 /*
18900  * Called from tcp_send() or tcp_send_data() to find a workable IRE.
18901  *
18902  * Returns B_TRUE on success, with *irep and *illp filled in;
18903  * B_FALSE if a usable ire/ill pair could not be found.
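 *
 * Typical use (sketch, mirroring the caller in tcp_send_data() below):
 *
 *	ire_t *ire;
 *	ill_t *ill;
 *
 *	if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) {
 *		CALL_IP_WPUT(connp, q, mp);	// fall back to slow path
 *		return;
 *	}
 *	// ... fast path using ire/ill ...
 *	IRE_REFRELE(ire);
 *
 * The caller owns the returned IRE reference and must release it.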
18904 */ 18905 static boolean_t 18906 tcp_send_find_ire_ill(tcp_t *tcp, mblk_t *mp, ire_t **irep, ill_t **illp) 18907 { 18908 ipha_t *ipha; 18909 ipaddr_t dst; 18910 ire_t *ire; 18911 ill_t *ill; 18912 conn_t *connp = tcp->tcp_connp; 18913 mblk_t *ire_fp_mp; 18914 tcp_stack_t *tcps = tcp->tcp_tcps; 18915 18916 if (mp != NULL) 18917 ipha = (ipha_t *)mp->b_rptr; 18918 else 18919 ipha = tcp->tcp_ipha; 18920 dst = ipha->ipha_dst; 18921 18922 if (!tcp_send_find_ire(tcp, &dst, &ire)) 18923 return (B_FALSE); 18924 18925 if ((ire->ire_flags & RTF_MULTIRT) || 18926 (ire->ire_stq == NULL) || 18927 (ire->ire_nce == NULL) || 18928 ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) || 18929 ((mp != NULL) && (ire->ire_max_frag < ntohs(ipha->ipha_length) || 18930 MBLKL(ire_fp_mp) > MBLKHEAD(mp)))) { 18931 TCP_STAT(tcps, tcp_ip_ire_send); 18932 IRE_REFRELE(ire); 18933 return (B_FALSE); 18934 } 18935 18936 ill = ire_to_ill(ire); 18937 if (connp->conn_outgoing_ill != NULL) { 18938 ill_t *conn_outgoing_ill = NULL; 18939 /* 18940 * Choose a good ill in the group to send the packets on. 18941 */ 18942 ire = conn_set_outgoing_ill(connp, ire, &conn_outgoing_ill); 18943 ill = ire_to_ill(ire); 18944 } 18945 ASSERT(ill != NULL); 18946 18947 if (!tcp->tcp_ire_ill_check_done) { 18948 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 18949 tcp->tcp_ire_ill_check_done = B_TRUE; 18950 } 18951 18952 *irep = ire; 18953 *illp = ill; 18954 18955 return (B_TRUE); 18956 } 18957 18958 static void 18959 tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp) 18960 { 18961 ipha_t *ipha; 18962 ipaddr_t src; 18963 ipaddr_t dst; 18964 uint32_t cksum; 18965 ire_t *ire; 18966 uint16_t *up; 18967 ill_t *ill; 18968 conn_t *connp = tcp->tcp_connp; 18969 uint32_t hcksum_txflags = 0; 18970 mblk_t *ire_fp_mp; 18971 uint_t ire_fp_mp_len; 18972 tcp_stack_t *tcps = tcp->tcp_tcps; 18973 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 18974 18975 ASSERT(DB_TYPE(mp) == M_DATA); 18976 18977 if (DB_CRED(mp) == NULL) 18978 mblk_setcred(mp, CONN_CRED(connp)); 18979 18980 ipha = (ipha_t *)mp->b_rptr; 18981 src = ipha->ipha_src; 18982 dst = ipha->ipha_dst; 18983 18984 /* 18985 * Drop off fast path for IPv6 and also if options are present or 18986 * we need to resolve a TS label. 
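     *
     * Roughly (sketch of the test below; the code is the authoritative
     * list), the fast path is taken only when all of these hold:
     *
     *	tcp->tcp_ipversion == IPV4_VERSION &&
     *	IPCL_IS_CONNECTED(connp) &&
     *	CONN_IS_LSO_MD_FASTPATH(connp) &&
     *	!(connp->conn_flags & IPCL_CHECK_POLICY) &&
     *	connp->conn_ulp_labeled &&		// TS label resolved
     *	simple 20-byte IPv4 header &&		// no IP options
     *	!IPP_ENABLED(IPP_LOCAL_OUT, ipst);
     *
     * everything else is handed to CALL_IP_WPUT() unchanged.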
18987 */ 18988 if (tcp->tcp_ipversion != IPV4_VERSION || 18989 !IPCL_IS_CONNECTED(connp) || 18990 !CONN_IS_LSO_MD_FASTPATH(connp) || 18991 (connp->conn_flags & IPCL_CHECK_POLICY) != 0 || 18992 !connp->conn_ulp_labeled || 18993 ipha->ipha_ident == IP_HDR_INCLUDED || 18994 ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION || 18995 IPP_ENABLED(IPP_LOCAL_OUT, ipst)) { 18996 if (tcp->tcp_snd_zcopy_aware) 18997 mp = tcp_zcopy_disable(tcp, mp); 18998 TCP_STAT(tcps, tcp_ip_send); 18999 CALL_IP_WPUT(connp, q, mp); 19000 return; 19001 } 19002 19003 if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) { 19004 if (tcp->tcp_snd_zcopy_aware) 19005 mp = tcp_zcopy_backoff(tcp, mp, 0); 19006 CALL_IP_WPUT(connp, q, mp); 19007 return; 19008 } 19009 ire_fp_mp = ire->ire_nce->nce_fp_mp; 19010 ire_fp_mp_len = MBLKL(ire_fp_mp); 19011 19012 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED); 19013 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1); 19014 #ifndef _BIG_ENDIAN 19015 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8); 19016 #endif 19017 19018 /* 19019 * Check to see if we need to re-enable LSO/MDT for this connection 19020 * because it was previously disabled due to changes in the ill; 19021 * note that by doing it here, this re-enabling only applies when 19022 * the packet is not dispatched through CALL_IP_WPUT(). 19023 * 19024 * That means for IPv4, it is worth re-enabling LSO/MDT for the fastpath 19025 * case, since that's how we ended up here. For IPv6, we do the 19026 * re-enabling work in ip_xmit_v6(), albeit indirectly via squeue. 19027 */ 19028 if (connp->conn_lso_ok && !tcp->tcp_lso && ILL_LSO_TCP_USABLE(ill)) { 19029 /* 19030 * Restore LSO for this connection, so that next time around 19031 * it is eligible to go through tcp_lsosend() path again. 19032 */ 19033 TCP_STAT(tcps, tcp_lso_enabled); 19034 tcp->tcp_lso = B_TRUE; 19035 ip1dbg(("tcp_send_data: reenabling LSO for connp %p on " 19036 "interface %s\n", (void *)connp, ill->ill_name)); 19037 } else if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) { 19038 /* 19039 * Restore MDT for this connection, so that next time around 19040 * it is eligible to go through tcp_multisend() path again. 19041 */ 19042 TCP_STAT(tcps, tcp_mdt_conn_resumed1); 19043 tcp->tcp_mdt = B_TRUE; 19044 ip1dbg(("tcp_send_data: reenabling MDT for connp %p on " 19045 "interface %s\n", (void *)connp, ill->ill_name)); 19046 } 19047 19048 if (tcp->tcp_snd_zcopy_aware) { 19049 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 || 19050 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0)) 19051 mp = tcp_zcopy_disable(tcp, mp); 19052 /* 19053 * we shouldn't need to reset ipha as the mp containing 19054 * ipha should never be a zero-copy mp. 19055 */ 19056 } 19057 19058 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) { 19059 ASSERT(ill->ill_hcksum_capab != NULL); 19060 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags; 19061 } 19062 19063 /* pseudo-header checksum (do it in parts for IP header checksum) */ 19064 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF); 19065 19066 ASSERT(ipha->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION); 19067 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 19068 19069 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up, 19070 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum); 19071 19072 /* Software checksum? 
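     *
     * DB_CKSUMFLAGS(mp) == 0 below means IP_CKSUM_XMIT_FAST computed
     * the TCP checksum in software rather than arming hardware offload.
     * The pseudo-header seed above is just the folded src/dst halves,
     * e.g. (hypothetical addresses) for src 10.0.0.1 and dst 10.0.0.2:
     *
     *	cksum = 0x0a00 + 0x0001 + 0x0a00 + 0x0002 = 0x1403
     *
     * the protocol and TCP length terms are folded in by the macro.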
 */
19073     if (DB_CKSUMFLAGS(mp) == 0) {
19074         TCP_STAT(tcps, tcp_out_sw_cksum);
19075         TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes,
19076             ntohs(ipha->ipha_length) - IP_SIMPLE_HDR_LENGTH);
19077     }
19078 
19079     ipha->ipha_fragment_offset_and_flags |=
19080         (uint32_t)htons(ire->ire_frag_flag);
19081 
19082     /* Calculate IP header checksum if hardware isn't capable */
19083     if (!(DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM)) {
19084         IP_HDR_CKSUM(ipha, cksum, ((uint32_t *)ipha)[0],
19085             ((uint16_t *)ipha)[4]);
19086     }
19087 
19088     ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
19089     mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
19090     bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
19091 
19092     UPDATE_OB_PKT_COUNT(ire);
19093     ire->ire_last_used_time = lbolt;
19094 
19095     BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
19096     BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
19097     UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
19098         ntohs(ipha->ipha_length));
19099 
19100     if (ILL_DLS_CAPABLE(ill)) {
19101         /*
19102          * Send the packet directly to DLD, where it may be queued
19103          * depending on the availability of transmit resources at
19104          * the media layer.
19105          */
19106         IP_DLS_ILL_TX(ill, ipha, mp, ipst);
19107     } else {
19108         ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
19109         DTRACE_PROBE4(ip4__physical__out__start,
19110             ill_t *, NULL, ill_t *, out_ill,
19111             ipha_t *, ipha, mblk_t *, mp);
19112         FW_HOOKS(ipst->ips_ip4_physical_out_event,
19113             ipst->ips_ipv4firewall_physical_out,
19114             NULL, out_ill, ipha, mp, mp, 0, ipst);
19115         DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
19116         if (mp != NULL)
19117             putnext(ire->ire_stq, mp);
19118     }
19119     IRE_REFRELE(ire);
19120 }
19121 
19122 /*
19123  * This handles the case when the receiver has shrunk its win. Per RFC 1122,
19124  * if the receiver shrinks the window, i.e. moves the right edge of the
19125  * window to the left, then we should not send new data, but should
19126  * retransmit the old unacked data between suna and suna + swnd. We might
19127  * have sent data that is now outside the new window; pretend we didn't send it.
19128  */
19129 static void
19130 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count)
19131 {
19132     uint32_t snxt = tcp->tcp_snxt;
19133     mblk_t *xmit_tail;
19134     int32_t offset;
19135 
19136     ASSERT(shrunk_count > 0);
19137 
19138     /* Pretend we didn't send the data outside the window */
19139     snxt -= shrunk_count;
19140 
19141     /* Get the mblk and the offset in it per the shrunk window */
19142     xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset);
19143 
19144     ASSERT(xmit_tail != NULL);
19145 
19146     /* Reset all the values per the now shrunk window */
19147     tcp->tcp_snxt = snxt;
19148     tcp->tcp_xmit_tail = xmit_tail;
19149     tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr - xmit_tail->b_rptr -
19150         offset;
19151     tcp->tcp_unsent += shrunk_count;
19152 
19153     if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0)
19154         /*
19155          * Make sure the timer is running so that we will probe a zero
19156          * window.
19157          */
19158         TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
19159 }
19160 
19161 
19162 /*
19163  * The TCP normal data output path.
19164  * NOTE: the logic of the fast path is duplicated from this function.
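 *
 * A worked example for tcp_process_shrunk_swnd() above (numbers
 * hypothetical): with tcp_snxt = 1000 and shrunk_count = 300, snxt is
 * rewound to 700, tcp_get_seg_mp() repositions tcp_xmit_tail to the
 * mblk and offset holding sequence 700, and the 300 rewound bytes are
 * added back to tcp_unsent so they are treated as never having been
 * sent.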
19165 */ 19166 static void 19167 tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent) 19168 { 19169 int len; 19170 mblk_t *local_time; 19171 mblk_t *mp1; 19172 uint32_t snxt; 19173 int tail_unsent; 19174 int tcpstate; 19175 int usable = 0; 19176 mblk_t *xmit_tail; 19177 queue_t *q = tcp->tcp_wq; 19178 int32_t mss; 19179 int32_t num_sack_blk = 0; 19180 int32_t tcp_hdr_len; 19181 int32_t tcp_tcp_hdr_len; 19182 int mdt_thres; 19183 int rc; 19184 tcp_stack_t *tcps = tcp->tcp_tcps; 19185 ip_stack_t *ipst; 19186 19187 tcpstate = tcp->tcp_state; 19188 if (mp == NULL) { 19189 /* 19190 * tcp_wput_data() with NULL mp should only be called when 19191 * there is unsent data. 19192 */ 19193 ASSERT(tcp->tcp_unsent > 0); 19194 /* Really tacky... but we need this for detached closes. */ 19195 len = tcp->tcp_unsent; 19196 goto data_null; 19197 } 19198 19199 #if CCS_STATS 19200 wrw_stats.tot.count++; 19201 wrw_stats.tot.bytes += msgdsize(mp); 19202 #endif 19203 ASSERT(mp->b_datap->db_type == M_DATA); 19204 /* 19205 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ, 19206 * or before a connection attempt has begun. 19207 */ 19208 if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT || 19209 (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19210 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19211 #ifdef DEBUG 19212 cmn_err(CE_WARN, 19213 "tcp_wput_data: data after ordrel, %s", 19214 tcp_display(tcp, NULL, 19215 DISP_ADDR_AND_PORT)); 19216 #else 19217 if (tcp->tcp_debug) { 19218 (void) strlog(TCP_MOD_ID, 0, 1, 19219 SL_TRACE|SL_ERROR, 19220 "tcp_wput_data: data after ordrel, %s\n", 19221 tcp_display(tcp, NULL, 19222 DISP_ADDR_AND_PORT)); 19223 } 19224 #endif /* DEBUG */ 19225 } 19226 if (tcp->tcp_snd_zcopy_aware && 19227 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) != 0) 19228 tcp_zcopy_notify(tcp); 19229 freemsg(mp); 19230 mutex_enter(&tcp->tcp_non_sq_lock); 19231 if (tcp->tcp_flow_stopped && 19232 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19233 tcp_clrqfull(tcp); 19234 } 19235 mutex_exit(&tcp->tcp_non_sq_lock); 19236 return; 19237 } 19238 19239 /* Strip empties */ 19240 for (;;) { 19241 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 19242 (uintptr_t)INT_MAX); 19243 len = (int)(mp->b_wptr - mp->b_rptr); 19244 if (len > 0) 19245 break; 19246 mp1 = mp; 19247 mp = mp->b_cont; 19248 freeb(mp1); 19249 if (!mp) { 19250 return; 19251 } 19252 } 19253 19254 /* If we are the first on the list ... */ 19255 if (tcp->tcp_xmit_head == NULL) { 19256 tcp->tcp_xmit_head = mp; 19257 tcp->tcp_xmit_tail = mp; 19258 tcp->tcp_xmit_tail_unsent = len; 19259 } else { 19260 /* If tiny tx and room in txq tail, pullup to save mblks. 
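         *
         * In sketch form, the test below is:
         *
         *	if (len < tcp_tx_pull_len &&	// small write
         *	    dp->db_ref == 1 &&		// tail dblk unshared
         *	    dp->db_lim - b_wptr >= len)	// room left in tail
         *		copy the bytes into tcp_xmit_last
         *	else
         *		chain mp after tcp_xmit_last
         *
         * trading one small bcopy for fewer mblks on the transmit list.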
 */
19261         struct datab *dp;
19262 
19263         mp1 = tcp->tcp_xmit_last;
19264         if (len < tcp_tx_pull_len &&
19265             (dp = mp1->b_datap)->db_ref == 1 &&
19266             dp->db_lim - mp1->b_wptr >= len) {
19267             ASSERT(len > 0);
19268             ASSERT(!mp1->b_cont);
19269             if (len == 1) {
19270                 *mp1->b_wptr++ = *mp->b_rptr;
19271             } else {
19272                 bcopy(mp->b_rptr, mp1->b_wptr, len);
19273                 mp1->b_wptr += len;
19274             }
19275             if (mp1 == tcp->tcp_xmit_tail)
19276                 tcp->tcp_xmit_tail_unsent += len;
19277             mp1->b_cont = mp->b_cont;
19278             if (tcp->tcp_snd_zcopy_aware &&
19279                 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
19280                 mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
19281             freeb(mp);
19282             mp = mp1;
19283         } else {
19284             tcp->tcp_xmit_last->b_cont = mp;
19285         }
19286         len += tcp->tcp_unsent;
19287     }
19288 
19289     /* Tack on however many more positive length mblks we have */
19290     if ((mp1 = mp->b_cont) != NULL) {
19291         do {
19292             int tlen;
19293             ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
19294                 (uintptr_t)INT_MAX);
19295             tlen = (int)(mp1->b_wptr - mp1->b_rptr);
19296             if (tlen <= 0) {
19297                 mp->b_cont = mp1->b_cont;
19298                 freeb(mp1);
19299             } else {
19300                 len += tlen;
19301                 mp = mp1;
19302             }
19303         } while ((mp1 = mp->b_cont) != NULL);
19304     }
19305     tcp->tcp_xmit_last = mp;
19306     tcp->tcp_unsent = len;
19307 
19308     if (urgent)
19309         usable = 1;
19310 
19311 data_null:
19312     snxt = tcp->tcp_snxt;
19313     xmit_tail = tcp->tcp_xmit_tail;
19314     tail_unsent = tcp->tcp_xmit_tail_unsent;
19315 
19316     /*
19317      * Note that tcp_mss has been adjusted to take into account the
19318      * timestamp option if applicable. Because SACK options do not
19319      * appear in every TCP segment and they are of variable length,
19320      * they cannot be included in tcp_mss. Thus we need to calculate
19321      * the actual segment length when we need to send a segment which
19322      * includes SACK options.
19323      */
19324     if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
19325         int32_t opt_len;
19326 
19327         num_sack_blk = MIN(tcp->tcp_max_sack_blk,
19328             tcp->tcp_num_sack_blk);
19329         opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN *
19330             2 + TCPOPT_HEADER_LEN;
19331         mss = tcp->tcp_mss - opt_len;
19332         tcp_hdr_len = tcp->tcp_hdr_len + opt_len;
19333         tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + opt_len;
19334     } else {
19335         mss = tcp->tcp_mss;
19336         tcp_hdr_len = tcp->tcp_hdr_len;
19337         tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len;
19338     }
19339 
19340     if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet &&
19341         (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) {
19342         SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle);
19343     }
19344     if (tcpstate == TCPS_SYN_RCVD) {
19345         /*
19346          * The three-way connection establishment handshake is not
19347          * complete yet. We want to queue the data for transmission
19348          * after entering ESTABLISHED state (RFC793). A jump to
19349          * the "done" label effectively leaves data on the queue.
19350          */
19351         goto done;
19352     } else {
19353         int usable_r;
19354 
19355         /*
19356          * In the special case when cwnd is zero, which can only
19357          * happen if the connection is ECN capable, return now.
19358          * New segments are sent using tcp_timer(). The timer
19359          * is set in tcp_rput_data().
19360          */
19361         if (tcp->tcp_cwnd == 0) {
19362             /*
19363              * Note that tcp_cwnd is 0 before the 3-way handshake is
19364              * finished.
19365              */
19366             ASSERT(tcp->tcp_ecn_ok ||
19367                 tcp->tcp_state < TCPS_ESTABLISHED);
19368             return;
19369         }
19370 
19371         /* NOTE: trouble if xmitting while SYN not acked?
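         *
         * The computation below reduces to (sketch; usable_r does
         * double duty in the code):
         *
         *	in_flight = snxt - suna;
         *	usable = swnd - in_flight;	// receiver's allowance
         *	if (swnd > cwnd)
         *		usable -= swnd - cwnd;	// congestion clamp
         *	usable = MIN(usable, unsent);
         *
         * e.g. with swnd 8192, cwnd 4096, 1000 bytes in flight and
         * 10000 unsent: usable = MIN(8192 - 1000 - 4096, 10000) = 3096.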
 */
19372         usable_r = snxt - tcp->tcp_suna;
19373         usable_r = tcp->tcp_swnd - usable_r;
19374 
19375         /*
19376          * Check if the receiver has shrunk the window. If
19377          * tcp_wput_data() with NULL mp is called, tcp_fin_sent
19378          * cannot be set as there is unsent data, so FIN cannot
19379          * be sent out. Otherwise, we need to take the FIN into
19380          * account as it consumes an "invisible" sequence number.
19381          */
19382         ASSERT(tcp->tcp_fin_sent == 0);
19383         if (usable_r < 0) {
19384             /*
19385              * The receiver has shrunk the window and we have sent
19386              * -usable_r bytes of data beyond the window; re-adjust.
19387              *
19388              * If TCP window scaling is enabled, there can be
19389              * round-down error as the advertised receive window
19390              * is actually right shifted n bits. This means that
19391              * the lower n bits of info is wiped out. It will look
19392              * like the window is shrunk. Do a check here to
19393              * see if the shrunk amount is actually within the
19394              * error in window calculation. If it is, just
19395              * return. Note that this check is inside the
19396              * shrunk window check. This makes sure that even
19397              * though tcp_process_shrunk_swnd() is not called,
19398              * we will stop further processing.
19399              */
19400             if ((-usable_r >> tcp->tcp_snd_ws) > 0) {
19401                 tcp_process_shrunk_swnd(tcp, -usable_r);
19402             }
19403             return;
19404         }
19405 
19406         /* usable = MIN(swnd, cwnd) - unacked_bytes */
19407         if (tcp->tcp_swnd > tcp->tcp_cwnd)
19408             usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd;
19409 
19410         /* usable = MIN(usable, unsent) */
19411         if (usable_r > len)
19412             usable_r = len;
19413 
19414         /* usable = MAX(usable, {1 for urgent, 0 for data}) */
19415         if (usable_r > 0) {
19416             usable = usable_r;
19417         } else {
19418             /* Bypass all other unnecessary processing. */
19419             goto done;
19420         }
19421     }
19422 
19423     local_time = (mblk_t *)lbolt;
19424 
19425     /*
19426      * "Our" Nagle Algorithm. This is not the same as in the old
19427      * BSD. This is more in line with the true intent of Nagle.
19428      *
19429      * The conditions are:
19430      * 1. The amount of unsent data (or amount of data which can be
19431      *    sent, whichever is smaller) is less than the Nagle limit.
19432      * 2. The last sent size is also less than the Nagle limit.
19433      * 3. There is unack'ed data.
19434      * 4. Urgent pointer is not set. Send urgent data ignoring the
19435      *    Nagle algorithm. This reduces the probability that urgent
19436      *    bytes get "merged" together.
19437      * 5. The app has not closed the connection. This eliminates the
19438      *    wait time of the receiving side waiting for the last piece of
19439      *    (small) data.
19440      *
19441      * If all are satisfied, exit without sending anything. Note
19442      * that the Nagle limit can be smaller than 1 MSS. The Nagle limit is
19443      * the smaller of 1 MSS and the global tcp_naglim_def (default
19444      * 4095).
19445      */
19446     if (usable < (int)tcp->tcp_naglim &&
19447         tcp->tcp_naglim > tcp->tcp_last_sent_len &&
19448         snxt != tcp->tcp_suna &&
19449         !(tcp->tcp_valid_bits & TCP_URG_VALID) &&
19450         !(tcp->tcp_valid_bits & TCP_FSS_VALID)) {
19451         goto done;
19452     }
19453 
19454     if (tcp->tcp_cork) {
19455         /*
19456          * If the tcp->tcp_cork option is set, then we have to force
19457          * TCP not to send partial segments (smaller than MSS bytes).
19458          * We calculate the usable amount now based on the full mss and
19459          * will save the rest of the remaining data for later.
19460          */
19461         if (usable < mss)
19462             goto done;
19463         usable = (usable / mss) * mss;
19464     }
19465 
19466     /* Update the latest receive window size in TCP header.
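     *
     * The advertised value is tcp_rwnd right-shifted by the receive
     * window scale; e.g. (hypothetical numbers) rwnd = 262144 with
     * rcv_ws = 4 puts 16384 in th_win, and the peer reconstructs
     * 16384 << 4 = 262144. The low rcv_ws bits are lost, which is the
     * scaling round-down error discussed earlier in this function.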
 */
19467     U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws,
19468         tcp->tcp_tcph->th_win);
19469 
19470     /*
19471      * Determine if it's worthwhile to attempt LSO or MDT, based on:
19472      *
19473      * 1. Simple TCP/IP{v4,v6} (no options).
19474      * 2. IPSEC/IPQoS processing is not needed for the TCP connection.
19475      * 3. The TCP connection is in ESTABLISHED state.
19476      * 4. The TCP is not detached.
19477      *
19478      * If any of the above conditions have changed during the
19479      * connection, stop using LSO/MDT and restore the stream head
19480      * parameters accordingly.
19481      */
19482     ipst = tcps->tcps_netstack->netstack_ip;
19483 
19484     if ((tcp->tcp_lso || tcp->tcp_mdt) &&
19485         ((tcp->tcp_ipversion == IPV4_VERSION &&
19486         tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
19487         (tcp->tcp_ipversion == IPV6_VERSION &&
19488         tcp->tcp_ip_hdr_len != IPV6_HDR_LEN) ||
19489         tcp->tcp_state != TCPS_ESTABLISHED ||
19490         TCP_IS_DETACHED(tcp) || !CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp) ||
19491         CONN_IPSEC_OUT_ENCAPSULATED(tcp->tcp_connp) ||
19492         IPP_ENABLED(IPP_LOCAL_OUT, ipst))) {
19493         if (tcp->tcp_lso) {
19494             tcp->tcp_connp->conn_lso_ok = B_FALSE;
19495             tcp->tcp_lso = B_FALSE;
19496         } else {
19497             tcp->tcp_connp->conn_mdt_ok = B_FALSE;
19498             tcp->tcp_mdt = B_FALSE;
19499         }
19500 
19501         /* Anything other than detached is considered pathological */
19502         if (!TCP_IS_DETACHED(tcp)) {
19503             if (tcp->tcp_lso)
19504                 TCP_STAT(tcps, tcp_lso_disabled);
19505             else
19506                 TCP_STAT(tcps, tcp_mdt_conn_halted1);
19507             (void) tcp_maxpsz_set(tcp, B_TRUE);
19508         }
19509     }
19510 
19511     /* Use MDT if sendable amount is greater than the threshold */
19512     if (tcp->tcp_mdt &&
19513         (mdt_thres = mss << tcp_mdt_smss_threshold, usable > mdt_thres) &&
19514         (tail_unsent > mdt_thres || (xmit_tail->b_cont != NULL &&
19515         MBLKL(xmit_tail->b_cont) > mdt_thres)) &&
19516         (tcp->tcp_valid_bits == 0 ||
19517         tcp->tcp_valid_bits == TCP_FSS_VALID)) {
19518         ASSERT(tcp->tcp_connp->conn_mdt_ok);
19519         rc = tcp_multisend(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len,
19520             num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
19521             local_time, mdt_thres);
19522     } else {
19523         rc = tcp_send(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len,
19524             num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail,
19525             local_time, INT_MAX);
19526     }
19527 
19528     /* Pretend that all we were trying to send really got sent */
19529     if (rc < 0 && tail_unsent < 0) {
19530         do {
19531             xmit_tail = xmit_tail->b_cont;
19532             xmit_tail->b_prev = local_time;
19533             ASSERT((uintptr_t)(xmit_tail->b_wptr -
19534                 xmit_tail->b_rptr) <= (uintptr_t)INT_MAX);
19535             tail_unsent += (int)(xmit_tail->b_wptr -
19536                 xmit_tail->b_rptr);
19537         } while (tail_unsent < 0);
19538     }
19539 done:;
19540     tcp->tcp_xmit_tail = xmit_tail;
19541     tcp->tcp_xmit_tail_unsent = tail_unsent;
19542     len = tcp->tcp_snxt - snxt;
19543     if (len) {
19544         /*
19545          * If new data was sent, we need to update the notsack
19546          * list, which is, after all, data blocks that have
19547          * not been sack'ed by the receiver. New data is
19548          * not sack'ed.
19549          */
19550         if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) {
19551             /* len is a negative value.
*/ 19552 tcp->tcp_pipe -= len; 19553 tcp_notsack_update(&(tcp->tcp_notsack_list), 19554 tcp->tcp_snxt, snxt, 19555 &(tcp->tcp_num_notsack_blk), 19556 &(tcp->tcp_cnt_notsack_list)); 19557 } 19558 tcp->tcp_snxt = snxt + tcp->tcp_fin_sent; 19559 tcp->tcp_rack = tcp->tcp_rnxt; 19560 tcp->tcp_rack_cnt = 0; 19561 if ((snxt + len) == tcp->tcp_suna) { 19562 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19563 } 19564 } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) { 19565 /* 19566 * Didn't send anything. Make sure the timer is running 19567 * so that we will probe a zero window. 19568 */ 19569 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19570 } 19571 /* Note that len is the amount we just sent but with a negative sign */ 19572 tcp->tcp_unsent += len; 19573 mutex_enter(&tcp->tcp_non_sq_lock); 19574 if (tcp->tcp_flow_stopped) { 19575 if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19576 tcp_clrqfull(tcp); 19577 } 19578 } else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) { 19579 tcp_setqfull(tcp); 19580 } 19581 mutex_exit(&tcp->tcp_non_sq_lock); 19582 } 19583 19584 /* 19585 * tcp_fill_header is called by tcp_send() and tcp_multisend() to fill the 19586 * outgoing TCP header with the template header, as well as other 19587 * options such as time-stamp, ECN and/or SACK. 19588 */ 19589 static void 19590 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk) 19591 { 19592 tcph_t *tcp_tmpl, *tcp_h; 19593 uint32_t *dst, *src; 19594 int hdrlen; 19595 19596 ASSERT(OK_32PTR(rptr)); 19597 19598 /* Template header */ 19599 tcp_tmpl = tcp->tcp_tcph; 19600 19601 /* Header of outgoing packet */ 19602 tcp_h = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 19603 19604 /* dst and src are opaque 32-bit fields, used for copying */ 19605 dst = (uint32_t *)rptr; 19606 src = (uint32_t *)tcp->tcp_iphc; 19607 hdrlen = tcp->tcp_hdr_len; 19608 19609 /* Fill time-stamp option if needed */ 19610 if (tcp->tcp_snd_ts_ok) { 19611 U32_TO_BE32((uint32_t)now, 19612 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4); 19613 U32_TO_BE32(tcp->tcp_ts_recent, 19614 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8); 19615 } else { 19616 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 19617 } 19618 19619 /* 19620 * Copy the template header; is this really more efficient than 19621 * calling bcopy()? For simple IPv4/TCP, it may be the case, 19622 * but perhaps not for other scenarios. 19623 */ 19624 dst[0] = src[0]; 19625 dst[1] = src[1]; 19626 dst[2] = src[2]; 19627 dst[3] = src[3]; 19628 dst[4] = src[4]; 19629 dst[5] = src[5]; 19630 dst[6] = src[6]; 19631 dst[7] = src[7]; 19632 dst[8] = src[8]; 19633 dst[9] = src[9]; 19634 if (hdrlen -= 40) { 19635 hdrlen >>= 2; 19636 dst += 10; 19637 src += 10; 19638 do { 19639 *dst++ = *src++; 19640 } while (--hdrlen); 19641 } 19642 19643 /* 19644 * Set the ECN info in the TCP header if it is not a zero 19645 * window probe. Zero window probe is only sent in 19646 * tcp_wput_data() and tcp_timer(). 
19647      */
19648     if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) {
19649         SET_ECT(tcp, rptr);
19650 
19651         if (tcp->tcp_ecn_echo_on)
19652             tcp_h->th_flags[0] |= TH_ECE;
19653         if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
19654             tcp_h->th_flags[0] |= TH_CWR;
19655             tcp->tcp_ecn_cwr_sent = B_TRUE;
19656         }
19657     }
19658 
19659     /* Fill in SACK options */
19660     if (num_sack_blk > 0) {
19661         uchar_t *wptr = rptr + tcp->tcp_hdr_len;
19662         sack_blk_t *tmp;
19663         int32_t i;
19664 
19665         wptr[0] = TCPOPT_NOP;
19666         wptr[1] = TCPOPT_NOP;
19667         wptr[2] = TCPOPT_SACK;
19668         wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk *
19669             sizeof (sack_blk_t);
19670         wptr += TCPOPT_REAL_SACK_LEN;
19671 
19672         tmp = tcp->tcp_sack_list;
19673         for (i = 0; i < num_sack_blk; i++) {
19674             U32_TO_BE32(tmp[i].begin, wptr);
19675             wptr += sizeof (tcp_seq);
19676             U32_TO_BE32(tmp[i].end, wptr);
19677             wptr += sizeof (tcp_seq);
19678         }
19679         tcp_h->th_offset_and_rsrvd[0] +=
19680             ((num_sack_blk * 2 + 1) << 4);
19681     }
19682 }
19683 
19684 /*
19685  * tcp_mdt_add_attrs() is called by tcp_multisend() in order to attach
19686  * the destination address and SAP attribute, and if necessary, the
19687  * hardware checksum offload attribute to a Multidata message.
19688  */
19689 static int
19690 tcp_mdt_add_attrs(multidata_t *mmd, const mblk_t *dlmp, const boolean_t hwcksum,
19691     const uint32_t start, const uint32_t stuff, const uint32_t end,
19692     const uint32_t flags, tcp_stack_t *tcps)
19693 {
19694     /* Add global destination address & SAP attribute */
19695     if (dlmp == NULL || !ip_md_addr_attr(mmd, NULL, dlmp)) {
19696         ip1dbg(("tcp_mdt_add_attrs: can't add global physical "
19697             "destination address+SAP\n"));
19698 
19699         if (dlmp != NULL)
19700             TCP_STAT(tcps, tcp_mdt_allocfail);
19701         return (-1);
19702     }
19703 
19704     /* Add global hwcksum attribute */
19705     if (hwcksum &&
19706         !ip_md_hcksum_attr(mmd, NULL, start, stuff, end, flags)) {
19707         ip1dbg(("tcp_mdt_add_attrs: can't add global hardware "
19708             "checksum attribute\n"));
19709 
19710         TCP_STAT(tcps, tcp_mdt_allocfail);
19711         return (-1);
19712     }
19713 
19714     return (0);
19715 }
19716 
19717 /*
19718  * Smaller and private version of pdescinfo_t used specifically for TCP,
19719  * which allows for only two payload spans per packet.
19720  */
19721 typedef struct tcp_pdescinfo_s PDESCINFO_STRUCT(2) tcp_pdescinfo_t;
19722 
19723 /*
19724  * tcp_multisend() is called by tcp_wput_data() for Multidata Transmit
19725  * scheme, and returns one of the following:
19726  *
19727  * -1 = failed allocation.
19728  *  0 = success; burst count reached, or usable send window is too small,
19729  *      and we'd rather wait until later before sending again.
19730 */ 19731 static int 19732 tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len, 19733 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable, 19734 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time, 19735 const int mdt_thres) 19736 { 19737 mblk_t *md_mp_head, *md_mp, *md_pbuf, *md_pbuf_nxt, *md_hbuf; 19738 multidata_t *mmd; 19739 uint_t obsegs, obbytes, hdr_frag_sz; 19740 uint_t cur_hdr_off, cur_pld_off, base_pld_off, first_snxt; 19741 int num_burst_seg, max_pld; 19742 pdesc_t *pkt; 19743 tcp_pdescinfo_t tcp_pkt_info; 19744 pdescinfo_t *pkt_info; 19745 int pbuf_idx, pbuf_idx_nxt; 19746 int seg_len, len, spill, af; 19747 boolean_t add_buffer, zcopy, clusterwide; 19748 boolean_t buf_trunked = B_FALSE; 19749 boolean_t rconfirm = B_FALSE; 19750 boolean_t done = B_FALSE; 19751 uint32_t cksum; 19752 uint32_t hwcksum_flags; 19753 ire_t *ire = NULL; 19754 ill_t *ill; 19755 ipha_t *ipha; 19756 ip6_t *ip6h; 19757 ipaddr_t src, dst; 19758 ill_zerocopy_capab_t *zc_cap = NULL; 19759 uint16_t *up; 19760 int err; 19761 conn_t *connp; 19762 mblk_t *mp, *mp1, *fw_mp_head = NULL; 19763 uchar_t *pld_start; 19764 tcp_stack_t *tcps = tcp->tcp_tcps; 19765 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 19766 19767 #ifdef _BIG_ENDIAN 19768 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 28) & 0x7) 19769 #else 19770 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 4) & 0x7) 19771 #endif 19772 19773 #define PREP_NEW_MULTIDATA() { \ 19774 mmd = NULL; \ 19775 md_mp = md_hbuf = NULL; \ 19776 cur_hdr_off = 0; \ 19777 max_pld = tcp->tcp_mdt_max_pld; \ 19778 pbuf_idx = pbuf_idx_nxt = -1; \ 19779 add_buffer = B_TRUE; \ 19780 zcopy = B_FALSE; \ 19781 } 19782 19783 #define PREP_NEW_PBUF() { \ 19784 md_pbuf = md_pbuf_nxt = NULL; \ 19785 pbuf_idx = pbuf_idx_nxt = -1; \ 19786 cur_pld_off = 0; \ 19787 first_snxt = *snxt; \ 19788 ASSERT(*tail_unsent > 0); \ 19789 base_pld_off = MBLKL(*xmit_tail) - *tail_unsent; \ 19790 } 19791 19792 ASSERT(mdt_thres >= mss); 19793 ASSERT(*usable > 0 && *usable > mdt_thres); 19794 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED); 19795 ASSERT(!TCP_IS_DETACHED(tcp)); 19796 ASSERT(tcp->tcp_valid_bits == 0 || 19797 tcp->tcp_valid_bits == TCP_FSS_VALID); 19798 ASSERT((tcp->tcp_ipversion == IPV4_VERSION && 19799 tcp->tcp_ip_hdr_len == IP_SIMPLE_HDR_LENGTH) || 19800 (tcp->tcp_ipversion == IPV6_VERSION && 19801 tcp->tcp_ip_hdr_len == IPV6_HDR_LEN)); 19802 19803 connp = tcp->tcp_connp; 19804 ASSERT(connp != NULL); 19805 ASSERT(CONN_IS_LSO_MD_FASTPATH(connp)); 19806 ASSERT(!CONN_IPSEC_OUT_ENCAPSULATED(connp)); 19807 19808 /* 19809 * Note that tcp will only declare at most 2 payload spans per 19810 * packet, which is much lower than the maximum allowable number 19811 * of packet spans per Multidata. For this reason, we use the 19812 * privately declared and smaller descriptor info structure, in 19813 * order to save some stack space. 19814 */ 19815 pkt_info = (pdescinfo_t *)&tcp_pkt_info; 19816 19817 af = (tcp->tcp_ipversion == IPV4_VERSION) ? AF_INET : AF_INET6; 19818 if (af == AF_INET) { 19819 dst = tcp->tcp_ipha->ipha_dst; 19820 src = tcp->tcp_ipha->ipha_src; 19821 ASSERT(!CLASSD(dst)); 19822 } 19823 ASSERT(af == AF_INET || 19824 !IN6_IS_ADDR_MULTICAST(&tcp->tcp_ip6h->ip6_dst)); 19825 19826 obsegs = obbytes = 0; 19827 num_burst_seg = tcp->tcp_snd_burst; 19828 md_mp_head = NULL; 19829 PREP_NEW_MULTIDATA(); 19830 19831 /* 19832 * Before we go on further, make sure there is an IRE that we can 19833 * use, and that the ILL supports MDT. 
Otherwise, there's no point 19834 * in proceeding any further, and we should just hand everything 19835 * off to the legacy path. 19836 */ 19837 if (!tcp_send_find_ire(tcp, (af == AF_INET) ? &dst : NULL, &ire)) 19838 goto legacy_send_no_md; 19839 19840 ASSERT(ire != NULL); 19841 ASSERT(af != AF_INET || ire->ire_ipversion == IPV4_VERSION); 19842 ASSERT(af == AF_INET || !IN6_IS_ADDR_V4MAPPED(&(ire->ire_addr_v6))); 19843 ASSERT(af == AF_INET || ire->ire_nce != NULL); 19844 ASSERT(!(ire->ire_type & IRE_BROADCAST)); 19845 /* 19846 * If we do support loopback for MDT (which requires modifications 19847 * to the receiving paths), the following assertions should go away, 19848 * and we would be sending the Multidata to loopback conn later on. 19849 */ 19850 ASSERT(!IRE_IS_LOCAL(ire)); 19851 ASSERT(ire->ire_stq != NULL); 19852 19853 ill = ire_to_ill(ire); 19854 ASSERT(ill != NULL); 19855 ASSERT(!ILL_MDT_CAPABLE(ill) || ill->ill_mdt_capab != NULL); 19856 19857 if (!tcp->tcp_ire_ill_check_done) { 19858 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 19859 tcp->tcp_ire_ill_check_done = B_TRUE; 19860 } 19861 19862 /* 19863 * If the underlying interface conditions have changed, or if the 19864 * new interface does not support MDT, go back to legacy path. 19865 */ 19866 if (!ILL_MDT_USABLE(ill) || (ire->ire_flags & RTF_MULTIRT) != 0) { 19867 /* don't go through this path anymore for this connection */ 19868 TCP_STAT(tcps, tcp_mdt_conn_halted2); 19869 tcp->tcp_mdt = B_FALSE; 19870 ip1dbg(("tcp_multisend: disabling MDT for connp %p on " 19871 "interface %s\n", (void *)connp, ill->ill_name)); 19872 /* IRE will be released prior to returning */ 19873 goto legacy_send_no_md; 19874 } 19875 19876 if (ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) 19877 zc_cap = ill->ill_zerocopy_capab; 19878 19879 /* 19880 * Check if we can take tcp fast-path. Note that "incomplete" 19881 * ire's (where the link-layer for next hop is not resolved 19882 * or where the fast-path header in nce_fp_mp is not available 19883 * yet) are sent down the legacy (slow) path. 19884 * NOTE: We should fix ip_xmit_v4 to handle M_MULTIDATA 19885 */ 19886 if (ire->ire_nce && ire->ire_nce->nce_state != ND_REACHABLE) { 19887 /* IRE will be released prior to returning */ 19888 goto legacy_send_no_md; 19889 } 19890 19891 /* go to legacy path if interface doesn't support zerocopy */ 19892 if (tcp->tcp_snd_zcopy_aware && do_tcpzcopy != 2 && 19893 (zc_cap == NULL || zc_cap->ill_zerocopy_flags == 0)) { 19894 /* IRE will be released prior to returning */ 19895 goto legacy_send_no_md; 19896 } 19897 19898 /* does the interface support hardware checksum offload? */ 19899 hwcksum_flags = 0; 19900 if (ILL_HCKSUM_CAPABLE(ill) && 19901 (ill->ill_hcksum_capab->ill_hcksum_txflags & 19902 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 | HCKSUM_INET_PARTIAL | 19903 HCKSUM_IPHDRCKSUM)) && dohwcksum) { 19904 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19905 HCKSUM_IPHDRCKSUM) 19906 hwcksum_flags = HCK_IPV4_HDRCKSUM; 19907 19908 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19909 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6)) 19910 hwcksum_flags |= HCK_FULLCKSUM; 19911 else if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19912 HCKSUM_INET_PARTIAL) 19913 hwcksum_flags |= HCK_PARTIALCKSUM; 19914 } 19915 19916 /* 19917 * Each header fragment consists of the leading extra space, 19918 * followed by the TCP/IP header, and the trailing extra space. 
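     *
     * For example (hypothetical sizes): with tcp_mdt_hdr_head = 16,
     * tcp_hdr_len = 40 and tcp_mdt_hdr_tail = 2,
     *
     *	hdr_frag_sz = roundup(16 + 40 + 2, 4) = roundup(58, 4) = 60
     *
     * so each per-packet header slot stays 32-bit aligned.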
19919 * We make sure that each header fragment begins on a 32-bit 19920 * aligned memory address (tcp_mdt_hdr_head is already 32-bit 19921 * aligned in tcp_mdt_update). 19922 */ 19923 hdr_frag_sz = roundup((tcp->tcp_mdt_hdr_head + tcp_hdr_len + 19924 tcp->tcp_mdt_hdr_tail), 4); 19925 19926 /* are we starting from the beginning of data block? */ 19927 if (*tail_unsent == 0) { 19928 *xmit_tail = (*xmit_tail)->b_cont; 19929 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= (uintptr_t)INT_MAX); 19930 *tail_unsent = (int)MBLKL(*xmit_tail); 19931 } 19932 19933 /* 19934 * Here we create one or more Multidata messages, each made up of 19935 * one header buffer and up to N payload buffers. This entire 19936 * operation is done within two loops: 19937 * 19938 * The outer loop mostly deals with creating the Multidata message, 19939 * as well as the header buffer that gets added to it. It also 19940 * links the Multidata messages together such that all of them can 19941 * be sent down to the lower layer in a single putnext call; this 19942 * linking behavior depends on the tcp_mdt_chain tunable. 19943 * 19944 * The inner loop takes an existing Multidata message, and adds 19945 * one or more (up to tcp_mdt_max_pld) payload buffers to it. It 19946 * packetizes those buffers by filling up the corresponding header 19947 * buffer fragments with the proper IP and TCP headers, and by 19948 * describing the layout of each packet in the packet descriptors 19949 * that get added to the Multidata. 19950 */ 19951 do { 19952 /* 19953 * If usable send window is too small, or data blocks in 19954 * transmit list are smaller than our threshold (i.e. app 19955 * performs large writes followed by small ones), we hand 19956 * off the control over to the legacy path. Note that we'll 19957 * get back the control once it encounters a large block. 19958 */ 19959 if (*usable < mss || (*tail_unsent <= mdt_thres && 19960 (*xmit_tail)->b_cont != NULL && 19961 MBLKL((*xmit_tail)->b_cont) <= mdt_thres)) { 19962 /* send down what we've got so far */ 19963 if (md_mp_head != NULL) { 19964 tcp_multisend_data(tcp, ire, ill, md_mp_head, 19965 obsegs, obbytes, &rconfirm); 19966 } 19967 /* 19968 * Pass control over to tcp_send(), but tell it to 19969 * return to us once a large-size transmission is 19970 * possible. 19971 */ 19972 TCP_STAT(tcps, tcp_mdt_legacy_small); 19973 if ((err = tcp_send(q, tcp, mss, tcp_hdr_len, 19974 tcp_tcp_hdr_len, num_sack_blk, usable, snxt, 19975 tail_unsent, xmit_tail, local_time, 19976 mdt_thres)) <= 0) { 19977 /* burst count reached, or alloc failed */ 19978 IRE_REFRELE(ire); 19979 return (err); 19980 } 19981 19982 /* tcp_send() may have sent everything, so check */ 19983 if (*usable <= 0) { 19984 IRE_REFRELE(ire); 19985 return (0); 19986 } 19987 19988 TCP_STAT(tcps, tcp_mdt_legacy_ret); 19989 /* 19990 * We may have delivered the Multidata, so make sure 19991 * to re-initialize before the next round. 19992 */ 19993 md_mp_head = NULL; 19994 obsegs = obbytes = 0; 19995 num_burst_seg = tcp->tcp_snd_burst; 19996 PREP_NEW_MULTIDATA(); 19997 19998 /* are we starting from the beginning of data block? */ 19999 if (*tail_unsent == 0) { 20000 *xmit_tail = (*xmit_tail)->b_cont; 20001 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20002 (uintptr_t)INT_MAX); 20003 *tail_unsent = (int)MBLKL(*xmit_tail); 20004 } 20005 } 20006 20007 /* 20008 * max_pld limits the number of mblks in tcp's transmit 20009 * queue that can be added to a Multidata message. 
Once
20010 * this counter reaches zero, no more additional mblks
20011 * can be added to it. What happens afterwards depends
20012 * on whether or not we are set to chain the Multidata
20013 * messages. If we are to link them together, reset
20014 * max_pld to its original value (tcp_mdt_max_pld) and
20015 * prepare to create a new Multidata message which will
20016 * get linked to md_mp_head. Else, leave it alone and
20017 * let the inner loop break on its own.
20018 */
20019 if (tcp_mdt_chain && max_pld == 0)
20020 PREP_NEW_MULTIDATA();
20021
20022 /* adding a payload buffer; re-initialize values */
20023 if (add_buffer)
20024 PREP_NEW_PBUF();
20025
20026 /*
20027 * If we don't have a Multidata, either because we just
20028 * (re)entered this outer loop, or after we branched off
20029 * to tcp_send above, set up the Multidata and header
20030 * buffer to be used.
20031 */
20032 if (md_mp == NULL) {
20033 int md_hbuflen;
20034 uint32_t start, stuff;
20035
20036 /*
20037 * Calculate a Multidata header buffer size large enough
20038 * to hold all of the headers that can possibly be
20039 * sent at this moment. We'd rather over-estimate
20040 * the size than run out of space; this is okay
20041 * since this buffer is small anyway.
20042 */
20043 md_hbuflen = (howmany(*usable, mss) + 1) * hdr_frag_sz;
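/*
 * Worked example of the sizing above (the numbers are
 * illustrative only): with *usable = 49640 bytes and mss =
 * 1460, howmany(49640, 1460) is 34, plus one for a possible
 * odd-sized trailing segment, giving 35 header fragments.
 * For IPv4 with tcp_hdr_len = 40 and 16/0 bytes of leading/
 * trailing extra space, hdr_frag_sz = roundup(16 + 40 + 0, 4)
 * = 56, so md_hbuflen = 35 * 56 = 1960 bytes, a small
 * allocation relative to the payload it describes.
 */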
20044
20045 /*
20046 * Start and stuff offset for partial hardware
20047 * checksum offload; these are currently for IPv4.
20048 * For full checksum offload, they are set to zero.
20049 */
20050 if ((hwcksum_flags & HCK_PARTIALCKSUM)) {
20051 if (af == AF_INET) {
20052 start = IP_SIMPLE_HDR_LENGTH;
20053 stuff = IP_SIMPLE_HDR_LENGTH +
20054 TCP_CHECKSUM_OFFSET;
20055 } else {
20056 start = IPV6_HDR_LEN;
20057 stuff = IPV6_HDR_LEN +
20058 TCP_CHECKSUM_OFFSET;
20059 }
20060 } else {
20061 start = stuff = 0;
20062 }
20063
20064 /*
20065 * Create the header buffer, Multidata, as well as
20066 * any necessary attributes (destination address,
20067 * SAP and hardware checksum offload) that should
20068 * be associated with the Multidata message.
20069 */
20070 ASSERT(cur_hdr_off == 0);
20071 if ((md_hbuf = allocb(md_hbuflen, BPRI_HI)) == NULL ||
20072 ((md_hbuf->b_wptr += md_hbuflen),
20073 (mmd = mmd_alloc(md_hbuf, &md_mp,
20074 KM_NOSLEEP)) == NULL) || (tcp_mdt_add_attrs(mmd,
20075 /* fastpath mblk */
20076 ire->ire_nce->nce_res_mp,
20077 /* hardware checksum enabled */
20078 (hwcksum_flags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)),
20079 /* hardware checksum offsets */
20080 start, stuff, 0,
20081 /* hardware checksum flag */
20082 hwcksum_flags, tcps) != 0)) {
20083 legacy_send:
20084 if (md_mp != NULL) {
20085 /* Unlink message from the chain */
20086 if (md_mp_head != NULL) {
20087 err = (intptr_t)rmvb(md_mp_head,
20088 md_mp);
20089 /*
20090 * We can't assert that rmvb
20091 * did not return -1, since we
20092 * may get here before linkb
20093 * happens. We do, however,
20094 * check if we just removed the
20095 * only element in the list.
20096 */
20097 if (err == 0)
20098 md_mp_head = NULL;
20099 }
20100 /* md_hbuf gets freed automatically */
20101 TCP_STAT(tcps, tcp_mdt_discarded);
20102 freeb(md_mp);
20103 } else {
20104 /* Either allocb or mmd_alloc failed */
20105 TCP_STAT(tcps, tcp_mdt_allocfail);
20106 if (md_hbuf != NULL)
20107 freeb(md_hbuf);
20108 }
20109
20110 /* send down what we've got so far */
20111 if (md_mp_head != NULL) {
20112 tcp_multisend_data(tcp, ire, ill,
20113 md_mp_head, obsegs, obbytes,
20114 &rconfirm);
20115 }
20116 legacy_send_no_md:
20117 if (ire != NULL)
20118 IRE_REFRELE(ire);
20119 /*
20120 * Too bad; let the legacy path handle this.
20121 * We specify INT_MAX for the threshold, since
20122 * we gave up on Multidata processing
20123 * and let the old path have it all.
20124 */
20125 TCP_STAT(tcps, tcp_mdt_legacy_all);
20126 return (tcp_send(q, tcp, mss, tcp_hdr_len,
20127 tcp_tcp_hdr_len, num_sack_blk, usable,
20128 snxt, tail_unsent, xmit_tail, local_time,
20129 INT_MAX));
20130 }
20131
20132 /* link to any existing ones, if applicable */
20133 TCP_STAT(tcps, tcp_mdt_allocd);
20134 if (md_mp_head == NULL) {
20135 md_mp_head = md_mp;
20136 } else if (tcp_mdt_chain) {
20137 TCP_STAT(tcps, tcp_mdt_linked);
20138 linkb(md_mp_head, md_mp);
20139 }
20140 }
20141
20142 ASSERT(md_mp_head != NULL);
20143 ASSERT(tcp_mdt_chain || md_mp_head->b_cont == NULL);
20144 ASSERT(md_mp != NULL && mmd != NULL);
20145 ASSERT(md_hbuf != NULL);
20146
20147 /*
20148 * Packetize the transmittable portion of the data block;
20149 * each data block is essentially added to the Multidata
20150 * as a payload buffer. We also deal with adding more
20151 * than one payload buffer, which happens when the remaining
20152 * packetized portion of the current payload buffer is less
20153 * than MSS, while the next data block in transmit queue
20154 * has enough data to make up for one. This "spillover"
20155 * case essentially creates a split-packet, where portions
20156 * of the packet's payload fragments may span across two
20157 * virtually discontiguous address blocks.
20158 */
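/*
 * Illustrative spillover case (example numbers): with mss =
 * 1460 and only *tail_unsent = 1000 bytes left in the current
 * payload buffer, spill = 460. Provided the next data block
 * holds at least 460 bytes and the window allows a full
 * segment, the packet descriptor declares two payload spans,
 * 1000 bytes from this buffer plus 460 from the next, so a
 * single 1460-byte segment straddles both buffers.
 */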
20159 seg_len = mss;
20160 do {
20161 len = seg_len;
20162
20163 ASSERT(len > 0);
20164 ASSERT(max_pld >= 0);
20165 ASSERT(!add_buffer || cur_pld_off == 0);
20166
20167 /*
20168 * First time around for this payload buffer; note
20169 * in the case of a spillover, the following has
20170 * been done prior to adding the split-packet
20171 * descriptor to Multidata, and we don't want to
20172 * repeat the process.
20173 */
20174 if (add_buffer) {
20175 ASSERT(mmd != NULL);
20176 ASSERT(md_pbuf == NULL);
20177 ASSERT(md_pbuf_nxt == NULL);
20178 ASSERT(pbuf_idx == -1 && pbuf_idx_nxt == -1);
20179
20180 /*
20181 * Have we reached the limit? We'd get to
20182 * this case when we're not chaining the
20183 * Multidata messages together, and since
20184 * we're done, terminate this loop.
20185 */
20186 if (max_pld == 0)
20187 break; /* done */
20188
20189 if ((md_pbuf = dupb(*xmit_tail)) == NULL) {
20190 TCP_STAT(tcps, tcp_mdt_allocfail);
20191 goto legacy_send; /* out_of_mem */
20192 }
20193
20194 if (IS_VMLOANED_MBLK(md_pbuf) && !zcopy &&
20195 zc_cap != NULL) {
20196 if (!ip_md_zcopy_attr(mmd, NULL,
20197 zc_cap->ill_zerocopy_flags)) {
20198 freeb(md_pbuf);
20199 TCP_STAT(tcps,
20200 tcp_mdt_allocfail);
20201 /* out_of_mem */
20202 goto legacy_send;
20203 }
20204 zcopy = B_TRUE;
20205 }
20206
20207 md_pbuf->b_rptr += base_pld_off;
20208
20209 /*
20210 * Add a payload buffer to the Multidata; this
20211 * operation must not fail, or otherwise our
20212 * logic in this routine is broken. There
20213 * is no memory allocation done by the
20214 * routine, so any returned failure simply
20215 * tells us that we've done something wrong.
20216 *
20217 * A failure tells us that either we're adding
20218 * the same payload buffer more than once, or
20219 * we're trying to add more buffers than
20220 * allowed (max_pld calculation is wrong).
20221 * None of the above cases should happen, and
20222 * we panic because there's either horrible
20223 * heap corruption or a programming mistake.
20224 */
20225 pbuf_idx = mmd_addpldbuf(mmd, md_pbuf);
20226 if (pbuf_idx < 0) {
20227 cmn_err(CE_PANIC, "tcp_multisend: "
20228 "payload buffer logic error "
20229 "detected for tcp %p mmd %p "
20230 "pbuf %p (%d)\n",
20231 (void *)tcp, (void *)mmd,
20232 (void *)md_pbuf, pbuf_idx);
20233 }
20234
20235 ASSERT(max_pld > 0);
20236 --max_pld;
20237 add_buffer = B_FALSE;
20238 }
20239
20240 ASSERT(md_mp_head != NULL);
20241 ASSERT(md_pbuf != NULL);
20242 ASSERT(md_pbuf_nxt == NULL);
20243 ASSERT(pbuf_idx != -1);
20244 ASSERT(pbuf_idx_nxt == -1);
20245 ASSERT(*usable > 0);
20246
20247 /*
20248 * We spillover to the next payload buffer only
20249 * if all of the following are true:
20250 *
20251 * 1. There is not enough data on the current
20252 * payload buffer to make up `len',
20253 * 2. We are allowed to send `len',
20254 * 3. The next payload buffer length is large
20255 * enough to accommodate `spill'.
20256 */
20257 if ((spill = len - *tail_unsent) > 0 &&
20258 *usable >= len &&
20259 MBLKL((*xmit_tail)->b_cont) >= spill &&
20260 max_pld > 0) {
20261 md_pbuf_nxt = dupb((*xmit_tail)->b_cont);
20262 if (md_pbuf_nxt == NULL) {
20263 TCP_STAT(tcps, tcp_mdt_allocfail);
20264 goto legacy_send; /* out_of_mem */
20265 }
20266
20267 if (IS_VMLOANED_MBLK(md_pbuf_nxt) && !zcopy &&
20268 zc_cap != NULL) {
20269 if (!ip_md_zcopy_attr(mmd, NULL,
20270 zc_cap->ill_zerocopy_flags)) {
20271 freeb(md_pbuf_nxt);
20272 TCP_STAT(tcps,
20273 tcp_mdt_allocfail);
20274 /* out_of_mem */
20275 goto legacy_send;
20276 }
20277 zcopy = B_TRUE;
20278 }
20279
20280 /*
20281 * See comments above on the first call to
20282 * mmd_addpldbuf for an explanation of the panic.
20283 */
20284 pbuf_idx_nxt = mmd_addpldbuf(mmd, md_pbuf_nxt);
20285 if (pbuf_idx_nxt < 0) {
20286 panic("tcp_multisend: "
20287 "next payload buffer logic error "
20288 "detected for tcp %p mmd %p "
20289 "pbuf %p (%d)\n",
20290 (void *)tcp, (void *)mmd,
20291 (void *)md_pbuf_nxt, pbuf_idx_nxt);
20292 }
20293
20294 ASSERT(max_pld > 0);
20295 --max_pld;
20296 } else if (spill > 0) {
20297 /*
20298 * If there's a spillover, but the following
20299 * xmit_tail couldn't give us enough octets
20300 * to reach "len", then stop the current
20301 * Multidata creation and let the legacy
20302 * tcp_send() path take over.
We don't want 20303 * to send the tiny segment as part of this 20304 * Multidata for performance reasons; instead, 20305 * we let the legacy path deal with grouping 20306 * it with the subsequent small mblks. 20307 */ 20308 if (*usable >= len && 20309 MBLKL((*xmit_tail)->b_cont) < spill) { 20310 max_pld = 0; 20311 break; /* done */ 20312 } 20313 20314 /* 20315 * We can't spillover, and we are near 20316 * the end of the current payload buffer, 20317 * so send what's left. 20318 */ 20319 ASSERT(*tail_unsent > 0); 20320 len = *tail_unsent; 20321 } 20322 20323 /* tail_unsent is negated if there is a spillover */ 20324 *tail_unsent -= len; 20325 *usable -= len; 20326 ASSERT(*usable >= 0); 20327 20328 if (*usable < mss) 20329 seg_len = *usable; 20330 /* 20331 * Sender SWS avoidance; see comments in tcp_send(); 20332 * everything else is the same, except that we only 20333 * do this here if there is no more data to be sent 20334 * following the current xmit_tail. We don't check 20335 * for 1-byte urgent data because we shouldn't get 20336 * here if TCP_URG_VALID is set. 20337 */ 20338 if (*usable > 0 && *usable < mss && 20339 ((md_pbuf_nxt == NULL && 20340 (*xmit_tail)->b_cont == NULL) || 20341 (md_pbuf_nxt != NULL && 20342 (*xmit_tail)->b_cont->b_cont == NULL)) && 20343 seg_len < (tcp->tcp_max_swnd >> 1) && 20344 (tcp->tcp_unsent - 20345 ((*snxt + len) - tcp->tcp_snxt)) > seg_len && 20346 !tcp->tcp_zero_win_probe) { 20347 if ((*snxt + len) == tcp->tcp_snxt && 20348 (*snxt + len) == tcp->tcp_suna) { 20349 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 20350 } 20351 done = B_TRUE; 20352 } 20353 20354 /* 20355 * Prime pump for IP's checksumming on our behalf; 20356 * include the adjustment for a source route if any. 20357 * Do this only for software/partial hardware checksum 20358 * offload, as this field gets zeroed out later for 20359 * the full hardware checksum offload case. 20360 */ 20361 if (!(hwcksum_flags & HCK_FULLCKSUM)) { 20362 cksum = len + tcp_tcp_hdr_len + tcp->tcp_sum; 20363 cksum = (cksum >> 16) + (cksum & 0xFFFF); 20364 U16_TO_ABE16(cksum, tcp->tcp_tcph->th_sum); 20365 } 20366 20367 U32_TO_ABE32(*snxt, tcp->tcp_tcph->th_seq); 20368 *snxt += len; 20369 20370 tcp->tcp_tcph->th_flags[0] = TH_ACK; 20371 /* 20372 * We set the PUSH bit only if TCP has no more buffered 20373 * data to be transmitted (or if sender SWS avoidance 20374 * takes place), as opposed to setting it for every 20375 * last packet in the burst. 20376 */ 20377 if (done || 20378 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) == 0) 20379 tcp->tcp_tcph->th_flags[0] |= TH_PUSH; 20380 20381 /* 20382 * Set FIN bit if this is our last segment; snxt 20383 * already includes its length, and it will not 20384 * be adjusted after this point. 20385 */ 20386 if (tcp->tcp_valid_bits == TCP_FSS_VALID && 20387 *snxt == tcp->tcp_fss) { 20388 if (!tcp->tcp_fin_acked) { 20389 tcp->tcp_tcph->th_flags[0] |= TH_FIN; 20390 BUMP_MIB(&tcps->tcps_mib, 20391 tcpOutControl); 20392 } 20393 if (!tcp->tcp_fin_sent) { 20394 tcp->tcp_fin_sent = B_TRUE; 20395 /* 20396 * tcp state must be ESTABLISHED 20397 * in order for us to get here in 20398 * the first place. 20399 */ 20400 tcp->tcp_state = TCPS_FIN_WAIT_1; 20401 20402 /* 20403 * Upon returning from this routine, 20404 * tcp_wput_data() will set tcp_snxt 20405 * to be equal to snxt + tcp_fin_sent. 20406 * This is essentially the same as 20407 * setting it to tcp_fss + 1. 
20408 */ 20409 } 20410 } 20411 20412 tcp->tcp_last_sent_len = (ushort_t)len; 20413 20414 len += tcp_hdr_len; 20415 if (tcp->tcp_ipversion == IPV4_VERSION) 20416 tcp->tcp_ipha->ipha_length = htons(len); 20417 else 20418 tcp->tcp_ip6h->ip6_plen = htons(len - 20419 ((char *)&tcp->tcp_ip6h[1] - 20420 tcp->tcp_iphc)); 20421 20422 pkt_info->flags = (PDESC_HBUF_REF | PDESC_PBUF_REF); 20423 20424 /* setup header fragment */ 20425 PDESC_HDR_ADD(pkt_info, 20426 md_hbuf->b_rptr + cur_hdr_off, /* base */ 20427 tcp->tcp_mdt_hdr_head, /* head room */ 20428 tcp_hdr_len, /* len */ 20429 tcp->tcp_mdt_hdr_tail); /* tail room */ 20430 20431 ASSERT(pkt_info->hdr_lim - pkt_info->hdr_base == 20432 hdr_frag_sz); 20433 ASSERT(MBLKIN(md_hbuf, 20434 (pkt_info->hdr_base - md_hbuf->b_rptr), 20435 PDESC_HDRSIZE(pkt_info))); 20436 20437 /* setup first payload fragment */ 20438 PDESC_PLD_INIT(pkt_info); 20439 PDESC_PLD_SPAN_ADD(pkt_info, 20440 pbuf_idx, /* index */ 20441 md_pbuf->b_rptr + cur_pld_off, /* start */ 20442 tcp->tcp_last_sent_len); /* len */ 20443 20444 /* create a split-packet in case of a spillover */ 20445 if (md_pbuf_nxt != NULL) { 20446 ASSERT(spill > 0); 20447 ASSERT(pbuf_idx_nxt > pbuf_idx); 20448 ASSERT(!add_buffer); 20449 20450 md_pbuf = md_pbuf_nxt; 20451 md_pbuf_nxt = NULL; 20452 pbuf_idx = pbuf_idx_nxt; 20453 pbuf_idx_nxt = -1; 20454 cur_pld_off = spill; 20455 20456 /* trim out first payload fragment */ 20457 PDESC_PLD_SPAN_TRIM(pkt_info, 0, spill); 20458 20459 /* setup second payload fragment */ 20460 PDESC_PLD_SPAN_ADD(pkt_info, 20461 pbuf_idx, /* index */ 20462 md_pbuf->b_rptr, /* start */ 20463 spill); /* len */ 20464 20465 if ((*xmit_tail)->b_next == NULL) { 20466 /* 20467 * Store the lbolt used for RTT 20468 * estimation. We can only record one 20469 * timestamp per mblk so we do it when 20470 * we reach the end of the payload 20471 * buffer. Also we only take a new 20472 * timestamp sample when the previous 20473 * timed data from the same mblk has 20474 * been ack'ed. 20475 */ 20476 (*xmit_tail)->b_prev = local_time; 20477 (*xmit_tail)->b_next = 20478 (mblk_t *)(uintptr_t)first_snxt; 20479 } 20480 20481 first_snxt = *snxt - spill; 20482 20483 /* 20484 * Advance xmit_tail; usable could be 0 by 20485 * the time we got here, but we made sure 20486 * above that we would only spillover to 20487 * the next data block if usable includes 20488 * the spilled-over amount prior to the 20489 * subtraction. Therefore, we are sure 20490 * that xmit_tail->b_cont can't be NULL. 20491 */ 20492 ASSERT((*xmit_tail)->b_cont != NULL); 20493 *xmit_tail = (*xmit_tail)->b_cont; 20494 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20495 (uintptr_t)INT_MAX); 20496 *tail_unsent = (int)MBLKL(*xmit_tail) - spill; 20497 } else { 20498 cur_pld_off += tcp->tcp_last_sent_len; 20499 } 20500 20501 /* 20502 * Fill in the header using the template header, and 20503 * add options such as time-stamp, ECN and/or SACK, 20504 * as needed. 20505 */ 20506 tcp_fill_header(tcp, pkt_info->hdr_rptr, 20507 (clock_t)local_time, num_sack_blk); 20508 20509 /* take care of some IP header businesses */ 20510 if (af == AF_INET) { 20511 ipha = (ipha_t *)pkt_info->hdr_rptr; 20512 20513 ASSERT(OK_32PTR((uchar_t *)ipha)); 20514 ASSERT(PDESC_HDRL(pkt_info) >= 20515 IP_SIMPLE_HDR_LENGTH); 20516 ASSERT(ipha->ipha_version_and_hdr_length == 20517 IP_SIMPLE_HDR_VERSION); 20518 20519 /* 20520 * Assign ident value for current packet; see 20521 * related comments in ip_wput_ire() about the 20522 * contract private interface with clustering 20523 * group. 
20524 */
20525 clusterwide = B_FALSE;
20526 if (cl_inet_ipident != NULL) {
20527 ASSERT(cl_inet_isclusterwide != NULL);
20528 if ((*cl_inet_isclusterwide)(IPPROTO_IP,
20529 AF_INET,
20530 (uint8_t *)(uintptr_t)src)) {
20531 ipha->ipha_ident =
20532 (*cl_inet_ipident)
20533 (IPPROTO_IP, AF_INET,
20534 (uint8_t *)(uintptr_t)src,
20535 (uint8_t *)(uintptr_t)dst);
20536 clusterwide = B_TRUE;
20537 }
20538 }
20539
20540 if (!clusterwide) {
20541 ipha->ipha_ident = (uint16_t)
20542 atomic_add_32_nv(
20543 &ire->ire_ident, 1);
20544 }
20545 #ifndef _BIG_ENDIAN
20546 ipha->ipha_ident = (ipha->ipha_ident << 8) |
20547 (ipha->ipha_ident >> 8);
20548 #endif
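/*
 * Example of the byte-swap above (little-endian hosts only):
 * atomic_add_32_nv() returns the ident in host order, e.g.
 * 0x1234; the swap stores 0x3412, whose in-memory byte layout
 * (0x12 then 0x34) is exactly the big-endian wire format that
 * ipha_ident, like every other IP header field, must carry.
 */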
20549 } else {
20550 ip6h = (ip6_t *)pkt_info->hdr_rptr;
20551
20552 ASSERT(OK_32PTR((uchar_t *)ip6h));
20553 ASSERT(IPVER(ip6h) == IPV6_VERSION);
20554 ASSERT(ip6h->ip6_nxt == IPPROTO_TCP);
20555 ASSERT(PDESC_HDRL(pkt_info) >=
20556 (IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET +
20557 TCP_CHECKSUM_SIZE));
20558 ASSERT(tcp->tcp_ipversion == IPV6_VERSION);
20559
20560 if (tcp->tcp_ip_forward_progress) {
20561 rconfirm = B_TRUE;
20562 tcp->tcp_ip_forward_progress = B_FALSE;
20563 }
20564 }
20565
20566 /* at least one payload span, and at most two */
20567 ASSERT(pkt_info->pld_cnt > 0 && pkt_info->pld_cnt < 3);
20568
20569 /* add the packet descriptor to Multidata */
20570 if ((pkt = mmd_addpdesc(mmd, pkt_info, &err,
20571 KM_NOSLEEP)) == NULL) {
20572 /*
20573 * Any failure other than ENOMEM indicates
20574 * that we have passed in invalid pkt_info
20575 * or parameters to mmd_addpdesc, which must
20576 * not happen.
20577 *
20578 * EINVAL is a result of failure on boundary
20579 * checks against the pkt_info contents. It
20580 * should not happen, and we panic because
20581 * there's either horrible heap corruption
20582 * or a programming mistake.
20583 */
20584 if (err != ENOMEM) {
20585 cmn_err(CE_PANIC, "tcp_multisend: "
20586 "pdesc logic error detected for "
20587 "tcp %p mmd %p pinfo %p (%d)\n",
20588 (void *)tcp, (void *)mmd,
20589 (void *)pkt_info, err);
20590 }
20591 TCP_STAT(tcps, tcp_mdt_addpdescfail);
20592 goto legacy_send; /* out_of_mem */
20593 }
20594 ASSERT(pkt != NULL);
20595
20596 /* calculate IP header and TCP checksums */
20597 if (af == AF_INET) {
20598 /* calculate pseudo-header checksum */
20599 cksum = (dst >> 16) + (dst & 0xFFFF) +
20600 (src >> 16) + (src & 0xFFFF);
20601
20602 /* offset for TCP header checksum */
20603 up = IPH_TCPH_CHECKSUMP(ipha,
20604 IP_SIMPLE_HDR_LENGTH);
20605 } else {
20606 up = (uint16_t *)&ip6h->ip6_src;
20607
20608 /* calculate pseudo-header checksum */
20609 cksum = up[0] + up[1] + up[2] + up[3] +
20610 up[4] + up[5] + up[6] + up[7] +
20611 up[8] + up[9] + up[10] + up[11] +
20612 up[12] + up[13] + up[14] + up[15];
20613
20614 /* Fold the initial sum */
20615 cksum = (cksum & 0xffff) + (cksum >> 16);
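/*
 * The fold above is standard one's-complement reduction:
 * carries above bit 15 are added back into the low 16 bits.
 * For example, a raw sum of 0x2ABCD folds to 0xABCD + 0x2 =
 * 0xABCF; further folding and the final complement happen
 * once the remaining checksum pieces (TCP length, payload)
 * are accumulated later.
 */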
20616
20617 up = (uint16_t *)(((uchar_t *)ip6h) +
20618 IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET);
20619 }
20620
20621 if (hwcksum_flags & HCK_FULLCKSUM) {
20622 /* clear checksum field for hardware */
20623 *up = 0;
20624 } else if (hwcksum_flags & HCK_PARTIALCKSUM) {
20625 uint32_t sum;
20626
20627 /* pseudo-header checksumming */
20628 sum = *up + cksum + IP_TCP_CSUM_COMP;
20629 sum = (sum & 0xFFFF) + (sum >> 16);
20630 *up = (sum & 0xFFFF) + (sum >> 16);
20631 } else {
20632 /* software checksumming */
20633 TCP_STAT(tcps, tcp_out_sw_cksum);
20634 TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes,
20635 tcp->tcp_hdr_len + tcp->tcp_last_sent_len);
20636 *up = IP_MD_CSUM(pkt, tcp->tcp_ip_hdr_len,
20637 cksum + IP_TCP_CSUM_COMP);
20638 if (*up == 0)
20639 *up = 0xFFFF;
20640 }
20641
20642 /* IPv4 header checksum */
20643 if (af == AF_INET) {
20644 ipha->ipha_fragment_offset_and_flags |=
20645 (uint32_t)htons(ire->ire_frag_flag);
20646
20647 if (hwcksum_flags & HCK_IPV4_HDRCKSUM) {
20648 ipha->ipha_hdr_checksum = 0;
20649 } else {
20650 IP_HDR_CKSUM(ipha, cksum,
20651 ((uint32_t *)ipha)[0],
20652 ((uint16_t *)ipha)[4]);
20653 }
20654 }
20655
20656 if ((af == AF_INET &&
20657 HOOKS4_INTERESTED_PHYSICAL_OUT(ipst)) ||
20658 (af == AF_INET6 &&
20659 HOOKS6_INTERESTED_PHYSICAL_OUT(ipst))) {
20660 /* build header (IP/TCP) mblk for this segment */
20661 if ((mp = dupb(md_hbuf)) == NULL)
20662 goto legacy_send;
20663
20664 mp->b_rptr = pkt_info->hdr_rptr;
20665 mp->b_wptr = pkt_info->hdr_wptr;
20666
20667 /* build payload mblk for this segment */
20668 if ((mp1 = dupb(*xmit_tail)) == NULL) {
20669 freemsg(mp);
20670 goto legacy_send;
20671 }
20672 mp1->b_wptr = md_pbuf->b_rptr + cur_pld_off;
20673 mp1->b_rptr = mp1->b_wptr -
20674 tcp->tcp_last_sent_len;
20675 linkb(mp, mp1);
20676
20677 pld_start = mp1->b_rptr;
20678
20679 if (af == AF_INET) {
20680 DTRACE_PROBE4(
20681 ip4__physical__out__start,
20682 ill_t *, NULL,
20683 ill_t *, ill,
20684 ipha_t *, ipha,
20685 mblk_t *, mp);
20686 FW_HOOKS(
20687 ipst->ips_ip4_physical_out_event,
20688 ipst->ips_ipv4firewall_physical_out,
20689 NULL, ill, ipha, mp, mp, 0, ipst);
20690 DTRACE_PROBE1(
20691 ip4__physical__out__end,
20692 mblk_t *, mp);
20693 } else {
20694 DTRACE_PROBE4(
20695 ip6__physical__out__start,
20696 ill_t *, NULL,
20697 ill_t *, ill,
20698 ip6_t *, ip6h,
20699 mblk_t *, mp);
20700 FW_HOOKS6(
20701 ipst->ips_ip6_physical_out_event,
20702 ipst->ips_ipv6firewall_physical_out,
20703 NULL, ill, ip6h, mp, mp, 0, ipst);
20704 DTRACE_PROBE1(
20705 ip6__physical__out__end,
20706 mblk_t *, mp);
20707 }
20708
20709 if (buf_trunked && mp != NULL) {
20710 /*
20711 * Need to pass it to normal path.
20712 */
20713 CALL_IP_WPUT(tcp->tcp_connp, q, mp);
20714 } else if (mp == NULL ||
20715 mp->b_rptr != pkt_info->hdr_rptr ||
20716 mp->b_wptr != pkt_info->hdr_wptr ||
20717 (mp1 = mp->b_cont) == NULL ||
20718 mp1->b_rptr != pld_start ||
20719 mp1->b_wptr != pld_start +
20720 tcp->tcp_last_sent_len ||
20721 mp1->b_cont != NULL) {
20722 /*
20723 * Need to pass all packets of this
20724 * buffer to normal path, either when
20725 * packet is blocked, or when boundary
20726 * of header buffer or payload buffer
20727 * has been changed by FW_HOOKS[6].
20728 */
20729 buf_trunked = B_TRUE;
20730 if (md_mp_head != NULL) {
20731 err = (intptr_t)rmvb(md_mp_head,
20732 md_mp);
20733 if (err == 0)
20734 md_mp_head = NULL;
20735 }
20736
20737 /* send down what we've got so far */
20738 if (md_mp_head != NULL) {
20739 tcp_multisend_data(tcp, ire,
20740 ill, md_mp_head, obsegs,
20741 obbytes, &rconfirm);
20742 }
20743 md_mp_head = NULL;
20744
20745 if (mp != NULL)
20746 CALL_IP_WPUT(tcp->tcp_connp,
20747 q, mp);
20748
20749 mp1 = fw_mp_head;
20750 do {
20751 mp = mp1;
20752 mp1 = mp1->b_next;
20753 mp->b_next = NULL;
20754 mp->b_prev = NULL;
20755 CALL_IP_WPUT(tcp->tcp_connp,
20756 q, mp);
20757 } while (mp1 != NULL);
20758
20759 fw_mp_head = NULL;
20760 } else {
20761 if (fw_mp_head == NULL)
20762 fw_mp_head = mp;
20763 else
20764 fw_mp_head->b_prev->b_next = mp;
20765 fw_mp_head->b_prev = mp;
20766 }
20767 }
20768
20769 /* advance header offset */
20770 cur_hdr_off += hdr_frag_sz;
20771
20772 obbytes += tcp->tcp_last_sent_len;
20773 ++obsegs;
20774 } while (!done && *usable > 0 && --num_burst_seg > 0 &&
20775 *tail_unsent > 0);
20776
20777 if ((*xmit_tail)->b_next == NULL) {
20778 /*
20779 * Store the lbolt used for RTT estimation. We can only
20780 * record one timestamp per mblk so we do it when we
20781 * reach the end of the payload buffer. Also we only
20782 * take a new timestamp sample when the previous timed
20783 * data from the same mblk has been ack'ed.
20784 */
20785 (*xmit_tail)->b_prev = local_time;
20786 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)first_snxt;
20787 }
20788
20789 ASSERT(*tail_unsent >= 0);
20790 if (*tail_unsent > 0) {
20791 /*
20792 * We got here because we broke out of the above
20793 * loop due to one of the following cases:
20794 *
20795 * 1. len < adjusted MSS (i.e. small),
20796 * 2. Sender SWS avoidance,
20797 * 3. max_pld is zero.
20798 *
20799 * We are done for this Multidata, so trim our
20800 * last payload buffer (if any) accordingly.
20801 */ 20802 if (md_pbuf != NULL) 20803 md_pbuf->b_wptr -= *tail_unsent; 20804 } else if (*usable > 0) { 20805 *xmit_tail = (*xmit_tail)->b_cont; 20806 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20807 (uintptr_t)INT_MAX); 20808 *tail_unsent = (int)MBLKL(*xmit_tail); 20809 add_buffer = B_TRUE; 20810 } 20811 20812 while (fw_mp_head) { 20813 mp = fw_mp_head; 20814 fw_mp_head = fw_mp_head->b_next; 20815 mp->b_prev = mp->b_next = NULL; 20816 freemsg(mp); 20817 } 20818 if (buf_trunked) { 20819 TCP_STAT(tcps, tcp_mdt_discarded); 20820 freeb(md_mp); 20821 buf_trunked = B_FALSE; 20822 } 20823 } while (!done && *usable > 0 && num_burst_seg > 0 && 20824 (tcp_mdt_chain || max_pld > 0)); 20825 20826 if (md_mp_head != NULL) { 20827 /* send everything down */ 20828 tcp_multisend_data(tcp, ire, ill, md_mp_head, obsegs, obbytes, 20829 &rconfirm); 20830 } 20831 20832 #undef PREP_NEW_MULTIDATA 20833 #undef PREP_NEW_PBUF 20834 #undef IPVER 20835 20836 IRE_REFRELE(ire); 20837 return (0); 20838 } 20839 20840 /* 20841 * A wrapper function for sending one or more Multidata messages down to 20842 * the module below ip; this routine does not release the reference of the 20843 * IRE (caller does that). This routine is analogous to tcp_send_data(). 20844 */ 20845 static void 20846 tcp_multisend_data(tcp_t *tcp, ire_t *ire, const ill_t *ill, mblk_t *md_mp_head, 20847 const uint_t obsegs, const uint_t obbytes, boolean_t *rconfirm) 20848 { 20849 uint64_t delta; 20850 nce_t *nce; 20851 tcp_stack_t *tcps = tcp->tcp_tcps; 20852 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 20853 20854 ASSERT(ire != NULL && ill != NULL); 20855 ASSERT(ire->ire_stq != NULL); 20856 ASSERT(md_mp_head != NULL); 20857 ASSERT(rconfirm != NULL); 20858 20859 /* adjust MIBs and IRE timestamp */ 20860 TCP_RECORD_TRACE(tcp, md_mp_head, TCP_TRACE_SEND_PKT); 20861 tcp->tcp_obsegs += obsegs; 20862 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataSegs, obsegs); 20863 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, obbytes); 20864 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out, obsegs); 20865 20866 if (tcp->tcp_ipversion == IPV4_VERSION) { 20867 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v4, obsegs); 20868 } else { 20869 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v6, obsegs); 20870 } 20871 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests, obsegs); 20872 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits, obsegs); 20873 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets, obbytes); 20874 20875 ire->ire_ob_pkt_count += obsegs; 20876 if (ire->ire_ipif != NULL) 20877 atomic_add_32(&ire->ire_ipif->ipif_ob_pkt_count, obsegs); 20878 ire->ire_last_used_time = lbolt; 20879 20880 /* send it down */ 20881 if (ILL_DLS_CAPABLE(ill)) { 20882 ill_dls_capab_t *ill_dls = ill->ill_dls_capab; 20883 ill_dls->ill_tx(ill_dls->ill_tx_handle, md_mp_head); 20884 } else { 20885 putnext(ire->ire_stq, md_mp_head); 20886 } 20887 20888 /* we're done for TCP/IPv4 */ 20889 if (tcp->tcp_ipversion == IPV4_VERSION) 20890 return; 20891 20892 nce = ire->ire_nce; 20893 20894 ASSERT(nce != NULL); 20895 ASSERT(!(nce->nce_flags & (NCE_F_NONUD|NCE_F_PERMANENT))); 20896 ASSERT(nce->nce_state != ND_INCOMPLETE); 20897 20898 /* reachability confirmation? 
*/ 20899 if (*rconfirm) { 20900 nce->nce_last = TICK_TO_MSEC(lbolt64); 20901 if (nce->nce_state != ND_REACHABLE) { 20902 mutex_enter(&nce->nce_lock); 20903 nce->nce_state = ND_REACHABLE; 20904 nce->nce_pcnt = ND_MAX_UNICAST_SOLICIT; 20905 mutex_exit(&nce->nce_lock); 20906 (void) untimeout(nce->nce_timeout_id); 20907 if (ip_debug > 2) { 20908 /* ip1dbg */ 20909 pr_addr_dbg("tcp_multisend_data: state " 20910 "for %s changed to REACHABLE\n", 20911 AF_INET6, &ire->ire_addr_v6); 20912 } 20913 } 20914 /* reset transport reachability confirmation */ 20915 *rconfirm = B_FALSE; 20916 } 20917 20918 delta = TICK_TO_MSEC(lbolt64) - nce->nce_last; 20919 ip1dbg(("tcp_multisend_data: delta = %" PRId64 20920 " ill_reachable_time = %d \n", delta, ill->ill_reachable_time)); 20921 20922 if (delta > (uint64_t)ill->ill_reachable_time) { 20923 mutex_enter(&nce->nce_lock); 20924 switch (nce->nce_state) { 20925 case ND_REACHABLE: 20926 case ND_STALE: 20927 /* 20928 * ND_REACHABLE is identical to ND_STALE in this 20929 * specific case. If reachable time has expired for 20930 * this neighbor (delta is greater than reachable 20931 * time), conceptually, the neighbor cache is no 20932 * longer in REACHABLE state, but already in STALE 20933 * state. So the correct transition here is to 20934 * ND_DELAY. 20935 */ 20936 nce->nce_state = ND_DELAY; 20937 mutex_exit(&nce->nce_lock); 20938 NDP_RESTART_TIMER(nce, 20939 ipst->ips_delay_first_probe_time); 20940 if (ip_debug > 3) { 20941 /* ip2dbg */ 20942 pr_addr_dbg("tcp_multisend_data: state " 20943 "for %s changed to DELAY\n", 20944 AF_INET6, &ire->ire_addr_v6); 20945 } 20946 break; 20947 case ND_DELAY: 20948 case ND_PROBE: 20949 mutex_exit(&nce->nce_lock); 20950 /* Timers have already started */ 20951 break; 20952 case ND_UNREACHABLE: 20953 /* 20954 * ndp timer has detected that this nce is 20955 * unreachable and initiated deleting this nce 20956 * and all its associated IREs. This is a race 20957 * where we found the ire before it was deleted 20958 * and have just sent out a packet using this 20959 * unreachable nce. 20960 */ 20961 mutex_exit(&nce->nce_lock); 20962 break; 20963 default: 20964 ASSERT(0); 20965 } 20966 } 20967 } 20968 20969 /* 20970 * Derived from tcp_send_data(). 
20971 */
20972 static void
20973 tcp_lsosend_data(tcp_t *tcp, mblk_t *mp, ire_t *ire, ill_t *ill, const int mss,
20974 int num_lso_seg)
20975 {
20976 ipha_t *ipha;
20977 mblk_t *ire_fp_mp;
20978 uint_t ire_fp_mp_len;
20979 uint32_t hcksum_txflags = 0;
20980 ipaddr_t src;
20981 ipaddr_t dst;
20982 uint32_t cksum;
20983 uint16_t *up;
20984 tcp_stack_t *tcps = tcp->tcp_tcps;
20985 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
20986
20987 ASSERT(DB_TYPE(mp) == M_DATA);
20988 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED);
20989 ASSERT(tcp->tcp_ipversion == IPV4_VERSION);
20990 ASSERT(tcp->tcp_connp != NULL);
20991 ASSERT(CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp));
20992
20993 ipha = (ipha_t *)mp->b_rptr;
20994 src = ipha->ipha_src;
20995 dst = ipha->ipha_dst;
20996
20997 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED);
20998 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident,
20999 num_lso_seg);
21000 #ifndef _BIG_ENDIAN
21001 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8);
21002 #endif
21003 if (tcp->tcp_snd_zcopy_aware) {
21004 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 ||
21005 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0))
21006 mp = tcp_zcopy_disable(tcp, mp);
21007 }
21008
21009 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) {
21010 ASSERT(ill->ill_hcksum_capab != NULL);
21011 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
21012 }
21013
21014 /*
21015 * Since the TCP checksum should be recalculated by h/w, we can just
21016 * zero the checksum field for HCK_FULLCKSUM, or calculate partial
21017 * pseudo-header checksum for HCK_PARTIALCKSUM.
21018 * The partial pseudo-header excludes the TCP length, which was
21019 * calculated in tcp_send(), so zero *up before further processing.
21020 */
21021 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF);
21022
21023 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
21024 *up = 0;
21025
21026 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up,
21027 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum);
21028
21029 /*
21030 * Set the LSO flag in DB_LSOFLAGS(mp) and record the mss in
21031 * DB_LSOMSS(mp).
21032 */
21032 DB_LSOFLAGS(mp) |= HW_LSO;
21033 DB_LSOMSS(mp) = mss;
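/*
 * Illustrative effect of the two lines above (example
 * numbers): a single 8760-byte M_DATA payload marked with
 * DB_LSOMSS = 1460 is cut by the NIC/driver into six wire
 * segments of 1460 bytes each, with the IP and TCP headers
 * replicated and checksums finished in hardware, so TCP only
 * builds one header for the whole burst.
 */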
21034
21035 ipha->ipha_fragment_offset_and_flags |=
21036 (uint32_t)htons(ire->ire_frag_flag);
21037
21038 ire_fp_mp = ire->ire_nce->nce_fp_mp;
21039 ire_fp_mp_len = MBLKL(ire_fp_mp);
21040 ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
21041 mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
21042 bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
21043
21044 UPDATE_OB_PKT_COUNT(ire);
21045 ire->ire_last_used_time = lbolt;
21046 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
21047 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
21048 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
21049 ntohs(ipha->ipha_length));
21050
21051 if (ILL_DLS_CAPABLE(ill)) {
21052 /*
21053 * Send the packet directly to DLD, where it may be queued
21054 * depending on the availability of transmit resources at
21055 * the media layer.
21056 */
21057 IP_DLS_ILL_TX(ill, ipha, mp, ipst);
21058 } else {
21059 ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
21060 DTRACE_PROBE4(ip4__physical__out__start,
21061 ill_t *, NULL, ill_t *, out_ill,
21062 ipha_t *, ipha, mblk_t *, mp);
21063 FW_HOOKS(ipst->ips_ip4_physical_out_event,
21064 ipst->ips_ipv4firewall_physical_out,
21065 NULL, out_ill, ipha, mp, mp, 0, ipst);
21066 DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
21067 if (mp != NULL)
21068 putnext(ire->ire_stq, mp);
21069 }
21070 }
21071
21072 /*
21073 * tcp_send() is called by tcp_wput_data() for the non-Multidata
21074 * transmission scheme, and returns one of the following:
21075 *
21076 * -1 = failed allocation.
21077 * 0 = success; burst count reached, or usable send window is too small,
21078 * and we'd rather wait until later before sending again.
21079 * 1 = success; we are called from tcp_multisend(), and both usable send
21080 * window and tail_unsent are greater than the MDT threshold, and thus
21081 * Multidata Transmit should be used instead.
21082 */
21083 static int
21084 tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
21085 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable,
21086 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
21087 const int mdt_thres)
21088 {
21089 int num_burst_seg = tcp->tcp_snd_burst;
21090 ire_t *ire = NULL;
21091 ill_t *ill = NULL;
21092 mblk_t *ire_fp_mp = NULL;
21093 uint_t ire_fp_mp_len = 0;
21094 int num_lso_seg = 1;
21095 uint_t lso_usable;
21096 boolean_t do_lso_send = B_FALSE;
21097 tcp_stack_t *tcps = tcp->tcp_tcps;
21098
21099 /*
21100 * Check LSO capability before any further work. A similar check
21101 * needs to be done in the for(;;) loop.
21102 * LSO will be deployed when there is more than one mss of available
21103 * data and a burst transmission is allowed.
21104 */
21105 if (tcp->tcp_lso &&
21106 (tcp->tcp_valid_bits == 0 ||
21107 tcp->tcp_valid_bits == TCP_FSS_VALID) &&
21108 num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
21109 /*
21110 * Try to find a usable IRE/ILL and do a basic check on the
21111 * ILL.
21112 */
21112 if (tcp_send_find_ire_ill(tcp, NULL, &ire, &ill)) {
21113 /*
21114 * Enable LSO with this transmission.
21115 * Since the IRE has been held in
21116 * tcp_send_find_ire_ill(), IRE_REFRELE(ire)
21117 * must be called before returning.
21118 */
21119 do_lso_send = B_TRUE;
21120 ire_fp_mp = ire->ire_nce->nce_fp_mp;
21121 ire_fp_mp_len = MBLKL(ire_fp_mp);
21122 /* Round up to multiple of 4 */
21123 ire_fp_mp_len = ((ire_fp_mp_len + 3) / 4) * 4;
21124 } else {
21125 do_lso_send = B_FALSE;
21126 ill = NULL;
21127 }
21128 }
21129
21130 for (;;) {
21131 struct datab *db;
21132 tcph_t *tcph;
21133 uint32_t sum;
21134 mblk_t *mp, *mp1;
21135 uchar_t *rptr;
21136 int len;
21137
21138 /*
21139 * If we're called by tcp_multisend(), and the amount of
21140 * sendable data as well as the size of current xmit_tail
21141 * is beyond the MDT threshold, return to the caller and
21142 * let the large data transmit be done using MDT.
21143 */
21144 if (*usable > 0 && *usable > mdt_thres &&
21145 (*tail_unsent > mdt_thres || (*tail_unsent == 0 &&
21146 MBLKL((*xmit_tail)->b_cont) > mdt_thres))) {
21147 ASSERT(tcp->tcp_mdt);
21148 return (1); /* success; do large send */
21149 }
21150
21151 if (num_burst_seg == 0)
21152 break; /* success; burst count reached */
21153
21154 /*
21155 * Calculate the maximum payload length we can send in *one*
21156 * time.
21157 */
21158 if (do_lso_send) {
21159 /*
21160 * Check whether we still need to do LSO.
21161 */
21162 if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
21163 lso_usable = MIN(tcp->tcp_lso_max, *usable);
21164 lso_usable = MIN(lso_usable,
21165 num_burst_seg * mss);
21166
21167 num_lso_seg = lso_usable / mss;
21168 if (lso_usable % mss) {
21169 num_lso_seg++;
21170 tcp->tcp_last_sent_len = (ushort_t)
21171 (lso_usable % mss);
21172 } else {
21173 tcp->tcp_last_sent_len = (ushort_t)mss;
21174 }
21175 } else {
21176 do_lso_send = B_FALSE;
21177 num_lso_seg = 1;
21178 lso_usable = mss;
21179 }
21180 }
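/*
 * Worked example for the calculation above (illustrative
 * numbers): with *usable = 10000, mss = 1460, num_burst_seg =
 * 10 and tcp_lso_max = 65535, lso_usable = MIN(65535, 10000)
 * = 10000, then MIN(10000, 10 * 1460 = 14600) = 10000. That
 * yields num_lso_seg = 10000 / 1460 = 6 with a 1240-byte
 * remainder, so seven segments go out in one LSO burst and
 * tcp_last_sent_len records the short 1240-byte tail.
 */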
21181
21182 ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);
21183
21184 /*
21185 * Adjust num_burst_seg here.
21186 */
21187 num_burst_seg -= num_lso_seg;
21188
21189 len = mss;
21190 if (len > *usable) {
21191 ASSERT(do_lso_send == B_FALSE);
21192
21193 len = *usable;
21194 if (len <= 0) {
21195 /* Terminate the loop */
21196 break; /* success; too small */
21197 }
21198 /*
21199 * Sender silly-window avoidance.
21200 * Ignore this if we are going to send a
21201 * zero window probe out.
21202 *
21203 * TODO: force data into microscopic window?
21204 * ==> (!pushed || (unsent > usable))
21205 */
21206 if (len < (tcp->tcp_max_swnd >> 1) &&
21207 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
21208 !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
21209 len == 1) && (! tcp->tcp_zero_win_probe)) {
21210 /*
21211 * If the retransmit timer is not running
21212 * we start it so that we will retransmit
21213 * in the case when the receiver has
21214 * decremented the window.
21215 */
21216 if (*snxt == tcp->tcp_snxt &&
21217 *snxt == tcp->tcp_suna) {
21218 /*
21219 * We are not supposed to send
21220 * anything. So let's wait a little
21221 * bit longer before breaking SWS
21222 * avoidance.
21223 *
21224 * What should the value be?
21225 * Suggestion: MAX(init rexmit time,
21226 * tcp->tcp_rto)
21227 */
21228 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
21229 }
21230 break; /* success; too small */
21231 }
21232 }
21233
21234 tcph = tcp->tcp_tcph;
21235
21236 /*
21237 * The reason to adjust len here is that we need to set flags
21238 * and calculate the checksum.
21239 */
21240 if (do_lso_send)
21241 len = lso_usable;
21242
21243 *usable -= len; /* Approximate - can be adjusted later */
21244 if (*usable > 0)
21245 tcph->th_flags[0] = TH_ACK;
21246 else
21247 tcph->th_flags[0] = (TH_ACK | TH_PUSH);
21248
21249 /*
21250 * Prime pump for IP's checksumming on our behalf.
21251 * Include the adjustment for a source route if any.
21252 */
21253 sum = len + tcp_tcp_hdr_len + tcp->tcp_sum;
21254 sum = (sum >> 16) + (sum & 0xFFFF);
21255 U16_TO_ABE16(sum, tcph->th_sum);
21256
21257 U32_TO_ABE32(*snxt, tcph->th_seq);
21258
21259 /*
21260 * Branch off to tcp_xmit_mp() if any of the VALID bits is
21261 * set. For the case when TCP_FSS_VALID is the only valid
21262 * bit (normal active close), branch off only when we think
21263 * that the FIN flag needs to be set. Note for this case,
21264 * that (snxt + len) may not reflect the actual seg_len,
21265 * as len may be further reduced in tcp_xmit_mp(). If len
21266 * gets modified, we will end up here again.
21267 */ 21268 if (tcp->tcp_valid_bits != 0 && 21269 (tcp->tcp_valid_bits != TCP_FSS_VALID || 21270 ((*snxt + len) == tcp->tcp_fss))) { 21271 uchar_t *prev_rptr; 21272 uint32_t prev_snxt = tcp->tcp_snxt; 21273 21274 if (*tail_unsent == 0) { 21275 ASSERT((*xmit_tail)->b_cont != NULL); 21276 *xmit_tail = (*xmit_tail)->b_cont; 21277 prev_rptr = (*xmit_tail)->b_rptr; 21278 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21279 (*xmit_tail)->b_rptr); 21280 } else { 21281 prev_rptr = (*xmit_tail)->b_rptr; 21282 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr - 21283 *tail_unsent; 21284 } 21285 mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL, 21286 *snxt, B_FALSE, (uint32_t *)&len, B_FALSE); 21287 /* Restore tcp_snxt so we get amount sent right. */ 21288 tcp->tcp_snxt = prev_snxt; 21289 if (prev_rptr == (*xmit_tail)->b_rptr) { 21290 /* 21291 * If the previous timestamp is still in use, 21292 * don't stomp on it. 21293 */ 21294 if ((*xmit_tail)->b_next == NULL) { 21295 (*xmit_tail)->b_prev = local_time; 21296 (*xmit_tail)->b_next = 21297 (mblk_t *)(uintptr_t)(*snxt); 21298 } 21299 } else 21300 (*xmit_tail)->b_rptr = prev_rptr; 21301 21302 if (mp == NULL) { 21303 if (ire != NULL) 21304 IRE_REFRELE(ire); 21305 return (-1); 21306 } 21307 mp1 = mp->b_cont; 21308 21309 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21310 tcp->tcp_last_sent_len = (ushort_t)len; 21311 while (mp1->b_cont) { 21312 *xmit_tail = (*xmit_tail)->b_cont; 21313 (*xmit_tail)->b_prev = local_time; 21314 (*xmit_tail)->b_next = 21315 (mblk_t *)(uintptr_t)(*snxt); 21316 mp1 = mp1->b_cont; 21317 } 21318 *snxt += len; 21319 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr; 21320 BUMP_LOCAL(tcp->tcp_obsegs); 21321 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21322 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21323 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21324 tcp_send_data(tcp, q, mp); 21325 continue; 21326 } 21327 21328 *snxt += len; /* Adjust later if we don't send all of len */ 21329 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21330 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21331 21332 if (*tail_unsent) { 21333 /* Are the bytes above us in flight? */ 21334 rptr = (*xmit_tail)->b_wptr - *tail_unsent; 21335 if (rptr != (*xmit_tail)->b_rptr) { 21336 *tail_unsent -= len; 21337 if (len <= mss) /* LSO is unusable */ 21338 tcp->tcp_last_sent_len = (ushort_t)len; 21339 len += tcp_hdr_len; 21340 if (tcp->tcp_ipversion == IPV4_VERSION) 21341 tcp->tcp_ipha->ipha_length = htons(len); 21342 else 21343 tcp->tcp_ip6h->ip6_plen = 21344 htons(len - 21345 ((char *)&tcp->tcp_ip6h[1] - 21346 tcp->tcp_iphc)); 21347 mp = dupb(*xmit_tail); 21348 if (mp == NULL) { 21349 if (ire != NULL) 21350 IRE_REFRELE(ire); 21351 return (-1); /* out_of_mem */ 21352 } 21353 mp->b_rptr = rptr; 21354 /* 21355 * If the old timestamp is no longer in use, 21356 * sample a new timestamp now. 
21357 */ 21358 if ((*xmit_tail)->b_next == NULL) { 21359 (*xmit_tail)->b_prev = local_time; 21360 (*xmit_tail)->b_next = 21361 (mblk_t *)(uintptr_t)(*snxt-len); 21362 } 21363 goto must_alloc; 21364 } 21365 } else { 21366 *xmit_tail = (*xmit_tail)->b_cont; 21367 ASSERT((uintptr_t)((*xmit_tail)->b_wptr - 21368 (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX); 21369 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21370 (*xmit_tail)->b_rptr); 21371 } 21372 21373 (*xmit_tail)->b_prev = local_time; 21374 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len); 21375 21376 *tail_unsent -= len; 21377 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21378 tcp->tcp_last_sent_len = (ushort_t)len; 21379 21380 len += tcp_hdr_len; 21381 if (tcp->tcp_ipversion == IPV4_VERSION) 21382 tcp->tcp_ipha->ipha_length = htons(len); 21383 else 21384 tcp->tcp_ip6h->ip6_plen = htons(len - 21385 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 21386 21387 mp = dupb(*xmit_tail); 21388 if (mp == NULL) { 21389 if (ire != NULL) 21390 IRE_REFRELE(ire); 21391 return (-1); /* out_of_mem */ 21392 } 21393 21394 len = tcp_hdr_len; 21395 /* 21396 * There are four reasons to allocate a new hdr mblk: 21397 * 1) The bytes above us are in use by another packet 21398 * 2) We don't have good alignment 21399 * 3) The mblk is being shared 21400 * 4) We don't have enough room for a header 21401 */ 21402 rptr = mp->b_rptr - len; 21403 if (!OK_32PTR(rptr) || 21404 ((db = mp->b_datap), db->db_ref != 2) || 21405 rptr < db->db_base + ire_fp_mp_len) { 21406 /* NOTE: we assume allocb returns an OK_32PTR */ 21407 21408 must_alloc:; 21409 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 21410 tcps->tcps_wroff_xtra + ire_fp_mp_len, BPRI_MED); 21411 if (mp1 == NULL) { 21412 freemsg(mp); 21413 if (ire != NULL) 21414 IRE_REFRELE(ire); 21415 return (-1); /* out_of_mem */ 21416 } 21417 mp1->b_cont = mp; 21418 mp = mp1; 21419 /* Leave room for Link Level header */ 21420 len = tcp_hdr_len; 21421 rptr = 21422 &mp->b_rptr[tcps->tcps_wroff_xtra + ire_fp_mp_len]; 21423 mp->b_wptr = &rptr[len]; 21424 } 21425 21426 /* 21427 * Fill in the header using the template header, and add 21428 * options such as time-stamp, ECN and/or SACK, as needed. 21429 */ 21430 tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk); 21431 21432 mp->b_rptr = rptr; 21433 21434 if (*tail_unsent) { 21435 int spill = *tail_unsent; 21436 21437 mp1 = mp->b_cont; 21438 if (mp1 == NULL) 21439 mp1 = mp; 21440 21441 /* 21442 * If we're a little short, tack on more mblks until 21443 * there is no more spillover. 21444 */ 21445 while (spill < 0) { 21446 mblk_t *nmp; 21447 int nmpsz; 21448 21449 nmp = (*xmit_tail)->b_cont; 21450 nmpsz = MBLKL(nmp); 21451 21452 /* 21453 * Excess data in mblk; can we split it? 21454 * If MDT is enabled for the connection, 21455 * keep on splitting as this is a transient 21456 * send path. 21457 */ 21458 if (!do_lso_send && !tcp->tcp_mdt && 21459 (spill + nmpsz > 0)) { 21460 /* 21461 * Don't split if stream head was 21462 * told to break up larger writes 21463 * into smaller ones. 21464 */ 21465 if (tcp->tcp_maxpsz > 0) 21466 break; 21467 21468 /* 21469 * Next mblk is less than SMSS/2 21470 * rounded up to nearest 64-byte; 21471 * let it get sent as part of the 21472 * next segment. 
21473 */
21474 if (tcp->tcp_localnet &&
21475 !tcp->tcp_cork &&
21476 (nmpsz < roundup((mss >> 1), 64)))
21477 break;
21478 }
21479
21480 *xmit_tail = nmp;
21481 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX);
21482 /* Stash for rtt use later */
21483 (*xmit_tail)->b_prev = local_time;
21484 (*xmit_tail)->b_next =
21485 (mblk_t *)(uintptr_t)(*snxt - len);
21486 mp1->b_cont = dupb(*xmit_tail);
21487 mp1 = mp1->b_cont;
21488
21489 spill += nmpsz;
21490 if (mp1 == NULL) {
21491 *tail_unsent = spill;
21492 freemsg(mp);
21493 if (ire != NULL)
21494 IRE_REFRELE(ire);
21495 return (-1); /* out_of_mem */
21496 }
21497 }
21498
21499 /* Trim back any surplus on the last mblk */
21500 if (spill >= 0) {
21501 mp1->b_wptr -= spill;
21502 *tail_unsent = spill;
21503 } else {
21504 /*
21505 * We did not send everything we could in
21506 * order to remain within the b_cont limit.
21507 */
21508 *usable -= spill;
21509 *snxt += spill;
21510 tcp->tcp_last_sent_len += spill;
21511 UPDATE_MIB(&tcps->tcps_mib,
21512 tcpOutDataBytes, spill);
21513 /*
21514 * Adjust the checksum
21515 */
21516 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len);
21517 sum += spill;
21518 sum = (sum >> 16) + (sum & 0xFFFF);
21519 U16_TO_ABE16(sum, tcph->th_sum);
21520 if (tcp->tcp_ipversion == IPV4_VERSION) {
21521 sum = ntohs(
21522 ((ipha_t *)rptr)->ipha_length) +
21523 spill;
21524 ((ipha_t *)rptr)->ipha_length =
21525 htons(sum);
21526 } else {
21527 sum = ntohs(
21528 ((ip6_t *)rptr)->ip6_plen) +
21529 spill;
21530 ((ip6_t *)rptr)->ip6_plen =
21531 htons(sum);
21532 }
21533 *tail_unsent = 0;
21534 }
21535 }
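/*
 * Example of the adjustment above (illustrative numbers): with
 * spill = -100, the segment carries 100 bytes fewer than len
 * assumed, so 100 bytes are returned to *usable, *snxt moves
 * back by 100, and the checksum and IP length fields are
 * reduced by the same 100 bytes, keeping the header consistent
 * with the payload that is actually transmitted.
 */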
21536 if (tcp->tcp_ip_forward_progress) {
21537 ASSERT(tcp->tcp_ipversion == IPV6_VERSION);
21538 *(uint32_t *)mp->b_rptr |= IP_FORWARD_PROG;
21539 tcp->tcp_ip_forward_progress = B_FALSE;
21540 }
21541
21542 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
21543 if (do_lso_send) {
21544 tcp_lsosend_data(tcp, mp, ire, ill, mss,
21545 num_lso_seg);
21546 tcp->tcp_obsegs += num_lso_seg;
21547
21548 TCP_STAT(tcps, tcp_lso_times);
21549 TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg);
21550 } else {
21551 tcp_send_data(tcp, q, mp);
21552 BUMP_LOCAL(tcp->tcp_obsegs);
21553 }
21554 }
21555
21556 if (ire != NULL)
21557 IRE_REFRELE(ire);
21558 return (0);
21559 }
21560
21561 /* Unlink and return any mblk that looks like it contains MDT info */
21562 static mblk_t *
21563 tcp_mdt_info_mp(mblk_t *mp)
21564 {
21565 mblk_t *prev_mp;
21566
21567 for (;;) {
21568 prev_mp = mp;
21569 /* no more to process? */
21570 if ((mp = mp->b_cont) == NULL)
21571 break;
21572
21573 switch (DB_TYPE(mp)) {
21574 case M_CTL:
21575 if (*(uint32_t *)mp->b_rptr != MDT_IOC_INFO_UPDATE)
21576 continue;
21577 ASSERT(prev_mp != NULL);
21578 prev_mp->b_cont = mp->b_cont;
21579 mp->b_cont = NULL;
21580 return (mp);
21581 default:
21582 break;
21583 }
21584 }
21585 return (mp);
21586 }
21587
21588 /* MDT info update routine, called when IP notifies us about MDT */
21589 static void
21590 tcp_mdt_update(tcp_t *tcp, ill_mdt_capab_t *mdt_capab, boolean_t first)
21591 {
21592 boolean_t prev_state;
21593 tcp_stack_t *tcps = tcp->tcp_tcps;
21594
21595 /*
21596 * IP is telling us to abort MDT on this connection? We know
21597 * this because the capability is only turned off when IP
21598 * encounters some pathological cases, e.g. a link-layer change
21599 * where the new driver doesn't support MDT, or in situations
21600 * where MDT usage on the link-layer has been switched off.
21601 * IP would not have sent us the initial MDT_IOC_INFO_UPDATE
21602 * if the link-layer doesn't support MDT, and if it does, it
21603 * will indicate that the feature is to be turned on.
21604 */
21605 prev_state = tcp->tcp_mdt;
21606 tcp->tcp_mdt = (mdt_capab->ill_mdt_on != 0);
21607 if (!tcp->tcp_mdt && !first) {
21608 TCP_STAT(tcps, tcp_mdt_conn_halted3);
21609 ip1dbg(("tcp_mdt_update: disabling MDT for connp %p\n",
21610 (void *)tcp->tcp_connp));
21611 }
21612
21613 /*
21614 * We currently only support MDT on simple TCP/{IPv4,IPv6},
21615 * so disable MDT otherwise. The checks are done here
21616 * and in tcp_wput_data().
21617 */
21618 if (tcp->tcp_mdt &&
21619 ((tcp->tcp_ipversion == IPV4_VERSION &&
21620 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
21621 (tcp->tcp_ipversion == IPV6_VERSION &&
21622 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)))
21623 tcp->tcp_mdt = B_FALSE;
21624
21625 if (tcp->tcp_mdt) {
21626 if (mdt_capab->ill_mdt_version != MDT_VERSION_2) {
21627 cmn_err(CE_NOTE, "tcp_mdt_update: unknown MDT "
21628 "version (%d), expected version is %d",
21629 mdt_capab->ill_mdt_version, MDT_VERSION_2);
21630 tcp->tcp_mdt = B_FALSE;
21631 return;
21632 }
21633
21634 /*
21635 * We need the driver to be able to handle at least three
21636 * spans per packet in order for tcp MDT to be utilized.
21637 * The first is for the header portion, while the rest are
21638 * needed to handle a packet that straddles across two
21639 * virtually non-contiguous buffers; a typical tcp packet
21640 * therefore consists of only two spans. Note that we take
21641 * a zero as "don't care".
21642 */
21643 if (mdt_capab->ill_mdt_span_limit > 0 &&
21644 mdt_capab->ill_mdt_span_limit < 3) {
21645 tcp->tcp_mdt = B_FALSE;
21646 return;
21647 }
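/*
 * To illustrate the span-limit requirement just checked: a
 * packet whose 1460-byte payload spills across two payload
 * buffers (say 1000 bytes in one and 460 in the next) needs
 * one span for its header fragment plus two payload spans,
 * i.e. three in total; a driver advertising a limit of 2
 * could never carry such a split packet.
 */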
link-layer change 21707 * where the new NIC/driver doesn't support LSO, or in situation 21708 * where LSO usage on the link-layer has been switched off. 21709 * IP would not have sent us the initial LSO_IOC_INFO_UPDATE 21710 * if the link-layer doesn't support LSO, and if it does, it 21711 * will indicate that the feature is to be turned on. 21712 */ 21713 tcp->tcp_lso = (lso_capab->ill_lso_on != 0); 21714 TCP_STAT(tcps, tcp_lso_enabled); 21715 21716 /* 21717 * We currently only support LSO on simple TCP/IPv4, 21718 * so disable LSO otherwise. The checks are done here 21719 * and in tcp_wput_data(). 21720 */ 21721 if (tcp->tcp_lso && 21722 (tcp->tcp_ipversion == IPV4_VERSION && 21723 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 21724 (tcp->tcp_ipversion == IPV6_VERSION)) { 21725 tcp->tcp_lso = B_FALSE; 21726 TCP_STAT(tcps, tcp_lso_disabled); 21727 } else { 21728 tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, 21729 lso_capab->ill_lso_max); 21730 } 21731 } 21732 21733 static void 21734 tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_lso_mdt) 21735 { 21736 conn_t *connp = tcp->tcp_connp; 21737 tcp_stack_t *tcps = tcp->tcp_tcps; 21738 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 21739 21740 ASSERT(ire != NULL); 21741 21742 /* 21743 * We may be in the fastpath here, and although we essentially do 21744 * similar checks as in ip_bind_connected{_v6}/ip_xxinfo_return, 21745 * we try to keep things as brief as possible. After all, these 21746 * are only best-effort checks, and we do more thorough ones prior 21747 * to calling tcp_send()/tcp_multisend(). 21748 */ 21749 if ((ipst->ips_ip_lso_outbound || ipst->ips_ip_multidata_outbound) && 21750 check_lso_mdt && !(ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) && 21751 ill != NULL && !CONN_IPSEC_OUT_ENCAPSULATED(connp) && 21752 !(ire->ire_flags & RTF_MULTIRT) && 21753 !IPP_ENABLED(IPP_LOCAL_OUT, ipst) && 21754 CONN_IS_LSO_MD_FASTPATH(connp)) { 21755 if (ipst->ips_ip_lso_outbound && ILL_LSO_CAPABLE(ill)) { 21756 /* Cache the result */ 21757 connp->conn_lso_ok = B_TRUE; 21758 21759 ASSERT(ill->ill_lso_capab != NULL); 21760 if (!ill->ill_lso_capab->ill_lso_on) { 21761 ill->ill_lso_capab->ill_lso_on = 1; 21762 ip1dbg(("tcp_ire_ill_check: connp %p enables " 21763 "LSO for interface %s\n", (void *)connp, 21764 ill->ill_name)); 21765 } 21766 tcp_lso_update(tcp, ill->ill_lso_capab); 21767 } else if (ipst->ips_ip_multidata_outbound && 21768 ILL_MDT_CAPABLE(ill)) { 21769 /* Cache the result */ 21770 connp->conn_mdt_ok = B_TRUE; 21771 21772 ASSERT(ill->ill_mdt_capab != NULL); 21773 if (!ill->ill_mdt_capab->ill_mdt_on) { 21774 ill->ill_mdt_capab->ill_mdt_on = 1; 21775 ip1dbg(("tcp_ire_ill_check: connp %p enables " 21776 "MDT for interface %s\n", (void *)connp, 21777 ill->ill_name)); 21778 } 21779 tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE); 21780 } 21781 } 21782 21783 /* 21784 * The goal is to reduce the number of generated tcp segments by 21785 * setting the maxpsz multiplier to 0; this will have an affect on 21786 * tcp_maxpsz_set(). With this behavior, tcp will pack more data 21787 * into each packet, up to SMSS bytes. Doing this reduces the number 21788 * of outbound segments and incoming ACKs, thus allowing for better 21789 * network and system performance. In contrast the legacy behavior 21790 * may result in sending less than SMSS size, because the last mblk 21791 * for some packets may have more data than needed to make up SMSS, 21792 * and the legacy code refused to "split" it. 
21793 * 21794 * We apply the new behavior in the following situations: 21795 * 21796 * 1) Loopback connections, 21797 * 2) Connections in which the remote peer is not on local subnet, 21798 * 3) Local subnet connections over the bge interface (see below). 21799 * 21800 * Ideally, we would like this behavior to apply for interfaces other 21801 * than bge. However, doing so would negatively impact drivers which 21802 * perform dynamic mapping and unmapping of DMA resources, which are 21803 * increased by setting the maxpsz multiplier to 0 (more mblks per 21804 * packet will be generated by tcp). The bge driver does not suffer 21805 * from this, as it copies the mblks into pre-mapped buffers, and 21806 * therefore does not require more I/O resources than before. 21807 * 21808 * Otherwise, this behavior is present on all network interfaces when 21809 * the destination endpoint is non-local, since reducing the number 21810 * of packets in general is good for the network. 21811 * 21812 * TODO We need to remove this hard-coded conditional for bge once 21813 * a better "self-tuning" mechanism, or a way to comprehend 21814 * the driver transmit strategy, is devised. Until the solution 21815 * is found and well understood, we live with this hack. 21816 */ 21817 if (!tcp_static_maxpsz && 21818 (tcp->tcp_loopback || !tcp->tcp_localnet || 21819 (ill->ill_name_length > 3 && bcmp(ill->ill_name, "bge", 3) == 0))) { 21820 /* override the default value */ 21821 tcp->tcp_maxpsz = 0; 21822 21823 ip3dbg(("tcp_ire_ill_check: connp %p tcp_maxpsz %d on " 21824 "interface %s\n", (void *)connp, tcp->tcp_maxpsz, 21825 ill != NULL ? ill->ill_name : ipif_loopback_name)); 21826 } 21827 21828 /* set the stream head parameters accordingly */ 21829 (void) tcp_maxpsz_set(tcp, B_TRUE); 21830 } 21831 21832 /* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */ 21833 static void 21834 tcp_wput_flush(tcp_t *tcp, mblk_t *mp) 21835 { 21836 uchar_t fval = *mp->b_rptr; 21837 mblk_t *tail; 21838 queue_t *q = tcp->tcp_wq; 21839 21840 /* TODO: How should flush interact with urgent data? */ 21841 if ((fval & FLUSHW) && tcp->tcp_xmit_head && 21842 !(tcp->tcp_valid_bits & TCP_URG_VALID)) { 21843 /* 21844 * Flush only data that has not yet been put on the wire. If 21845 * we flush data that we have already transmitted, life, as we 21846 * know it, may come to an end. 21847 */ 21848 tail = tcp->tcp_xmit_tail; 21849 tail->b_wptr -= tcp->tcp_xmit_tail_unsent; 21850 tcp->tcp_xmit_tail_unsent = 0; 21851 tcp->tcp_unsent = 0; 21852 if (tail->b_wptr != tail->b_rptr) 21853 tail = tail->b_cont; 21854 if (tail) { 21855 mblk_t **excess = &tcp->tcp_xmit_head; 21856 for (;;) { 21857 mblk_t *mp1 = *excess; 21858 if (mp1 == tail) 21859 break; 21860 tcp->tcp_xmit_tail = mp1; 21861 tcp->tcp_xmit_last = mp1; 21862 excess = &mp1->b_cont; 21863 } 21864 *excess = NULL; 21865 tcp_close_mpp(&tail); 21866 if (tcp->tcp_snd_zcopy_aware) 21867 tcp_zcopy_notify(tcp); 21868 } 21869 /* 21870 * We have no unsent data, so unsent must be less than 21871 * tcp_xmit_lowater, so re-enable flow. 21872 */ 21873 mutex_enter(&tcp->tcp_non_sq_lock); 21874 if (tcp->tcp_flow_stopped) { 21875 tcp_clrqfull(tcp); 21876 } 21877 mutex_exit(&tcp->tcp_non_sq_lock); 21878 } 21879 /* 21880 * TODO: you can't just flush these, you have to increase rwnd for one 21881 * thing. For another, how should urgent data interact? 
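 *
 * For reference, the FLUSHR handling below follows the usual STREAMS
 * convention: the lowest module clears FLUSHW and reflects the M_FLUSH
 * back upstream with qreply(). A minimal (hypothetical) user-level
 * trigger for this whole path is
 *
 *	(void) ioctl(fd, I_FLUSH, FLUSHRW);
 *
 * which the stream head converts into an M_FLUSH carrying FLUSHR and
 * FLUSHW in *b_rptr before it reaches tcp_wput_nondata().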
21882 */ 21883 if (fval & FLUSHR) { 21884 *mp->b_rptr = fval & ~FLUSHW; 21885 /* XXX */ 21886 qreply(q, mp); 21887 return; 21888 } 21889 freemsg(mp); 21890 } 21891 21892 /* 21893 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA 21894 * messages. 21895 */ 21896 static void 21897 tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp) 21898 { 21899 mblk_t *mp1; 21900 STRUCT_HANDLE(strbuf, sb); 21901 uint16_t port; 21902 queue_t *q = tcp->tcp_wq; 21903 in6_addr_t v6addr; 21904 ipaddr_t v4addr; 21905 uint32_t flowinfo = 0; 21906 int addrlen; 21907 21908 /* Make sure it is one of ours. */ 21909 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21910 case TI_GETMYNAME: 21911 case TI_GETPEERNAME: 21912 break; 21913 default: 21914 CALL_IP_WPUT(tcp->tcp_connp, q, mp); 21915 return; 21916 } 21917 switch (mi_copy_state(q, mp, &mp1)) { 21918 case -1: 21919 return; 21920 case MI_COPY_CASE(MI_COPY_IN, 1): 21921 break; 21922 case MI_COPY_CASE(MI_COPY_OUT, 1): 21923 /* Copy out the strbuf. */ 21924 mi_copyout(q, mp); 21925 return; 21926 case MI_COPY_CASE(MI_COPY_OUT, 2): 21927 /* All done. */ 21928 mi_copy_done(q, mp, 0); 21929 return; 21930 default: 21931 mi_copy_done(q, mp, EPROTO); 21932 return; 21933 } 21934 /* Check alignment of the strbuf */ 21935 if (!OK_32PTR(mp1->b_rptr)) { 21936 mi_copy_done(q, mp, EINVAL); 21937 return; 21938 } 21939 21940 STRUCT_SET_HANDLE(sb, ((struct iocblk *)mp->b_rptr)->ioc_flag, 21941 (void *)mp1->b_rptr); 21942 addrlen = tcp->tcp_family == AF_INET ? sizeof (sin_t) : sizeof (sin6_t); 21943 21944 if (STRUCT_FGET(sb, maxlen) < addrlen) { 21945 mi_copy_done(q, mp, EINVAL); 21946 return; 21947 } 21948 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21949 case TI_GETMYNAME: 21950 if (tcp->tcp_family == AF_INET) { 21951 if (tcp->tcp_ipversion == IPV4_VERSION) { 21952 v4addr = tcp->tcp_ipha->ipha_src; 21953 } else { 21954 /* can't return an address in this case */ 21955 v4addr = 0; 21956 } 21957 } else { 21958 /* tcp->tcp_family == AF_INET6 */ 21959 if (tcp->tcp_ipversion == IPV4_VERSION) { 21960 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 21961 &v6addr); 21962 } else { 21963 v6addr = tcp->tcp_ip6h->ip6_src; 21964 } 21965 } 21966 port = tcp->tcp_lport; 21967 break; 21968 case TI_GETPEERNAME: 21969 if (tcp->tcp_family == AF_INET) { 21970 if (tcp->tcp_ipversion == IPV4_VERSION) { 21971 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_remote_v6, 21972 v4addr); 21973 } else { 21974 /* can't return an address in this case */ 21975 v4addr = 0; 21976 } 21977 } else { 21978 /* tcp->tcp_family == AF_INET6) */ 21979 v6addr = tcp->tcp_remote_v6; 21980 if (tcp->tcp_ipversion == IPV6_VERSION) { 21981 /* 21982 * No flowinfo if tcp->tcp_ipversion is v4. 21983 * 21984 * flowinfo was already initialized to zero 21985 * where it was declared above, so only 21986 * set it if ipversion is v6. 
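 *
 * A usage note (an assumption about the layers above, not something
 * defined in this file): TI_GETMYNAME/TI_GETPEERNAME are the iocs
 * that sockfs and timod issue on behalf of getsockname(3SOCKET) and
 * getpeername(3SOCKET), so a sketch of the application-level path
 * into this code is simply
 *
 *	struct sockaddr_in6 sa;
 *	socklen_t len = sizeof (sa);
 *	(void) getpeername(fd, (struct sockaddr *)&sa, &len);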
21987 */ 21988 flowinfo = tcp->tcp_ip6h->ip6_vcf & 21989 ~IPV6_VERS_AND_FLOW_MASK; 21990 } 21991 } 21992 port = tcp->tcp_fport; 21993 break; 21994 default: 21995 mi_copy_done(q, mp, EPROTO); 21996 return; 21997 } 21998 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 21999 if (!mp1) 22000 return; 22001 22002 if (tcp->tcp_family == AF_INET) { 22003 sin_t *sin; 22004 22005 STRUCT_FSET(sb, len, (int)sizeof (sin_t)); 22006 sin = (sin_t *)mp1->b_rptr; 22007 mp1->b_wptr = (uchar_t *)&sin[1]; 22008 *sin = sin_null; 22009 sin->sin_family = AF_INET; 22010 sin->sin_addr.s_addr = v4addr; 22011 sin->sin_port = port; 22012 } else { 22013 /* tcp->tcp_family == AF_INET6 */ 22014 sin6_t *sin6; 22015 22016 STRUCT_FSET(sb, len, (int)sizeof (sin6_t)); 22017 sin6 = (sin6_t *)mp1->b_rptr; 22018 mp1->b_wptr = (uchar_t *)&sin6[1]; 22019 *sin6 = sin6_null; 22020 sin6->sin6_family = AF_INET6; 22021 sin6->sin6_flowinfo = flowinfo; 22022 sin6->sin6_addr = v6addr; 22023 sin6->sin6_port = port; 22024 } 22025 /* Copy out the address */ 22026 mi_copyout(q, mp); 22027 } 22028 22029 /* 22030 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL 22031 * messages. 22032 */ 22033 /* ARGSUSED */ 22034 static void 22035 tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2) 22036 { 22037 conn_t *connp = (conn_t *)arg; 22038 tcp_t *tcp = connp->conn_tcp; 22039 queue_t *q = tcp->tcp_wq; 22040 struct iocblk *iocp; 22041 tcp_stack_t *tcps = tcp->tcp_tcps; 22042 22043 ASSERT(DB_TYPE(mp) == M_IOCTL); 22044 /* 22045 * Try and ASSERT the minimum possible references on the 22046 * conn early enough. Since we are executing on write side, 22047 * the connection is obviously not detached and that means 22048 * there is a ref each for TCP and IP. Since we are behind 22049 * the squeue, the minimum references needed are 3. If the 22050 * conn is in classifier hash list, there should be an 22051 * extra ref for that (we check both the possibilities). 22052 */ 22053 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 22054 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 22055 22056 iocp = (struct iocblk *)mp->b_rptr; 22057 switch (iocp->ioc_cmd) { 22058 case TCP_IOC_DEFAULT_Q: 22059 /* Wants to be the default wq. */ 22060 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 22061 iocp->ioc_error = EPERM; 22062 iocp->ioc_count = 0; 22063 mp->b_datap->db_type = M_IOCACK; 22064 qreply(q, mp); 22065 return; 22066 } 22067 tcp_def_q_set(tcp, mp); 22068 return; 22069 case _SIOCSOCKFALLBACK: 22070 /* 22071 * Either sockmod is about to be popped and the socket 22072 * would now be treated as a plain stream, or a module 22073 * is about to be pushed so we could no longer use read- 22074 * side synchronous streams for fused loopback tcp. 22075 * Drain any queued data and disable direct sockfs 22076 * interface from now on. 22077 */ 22078 if (!tcp->tcp_issocket) { 22079 DB_TYPE(mp) = M_IOCNAK; 22080 iocp->ioc_error = EINVAL; 22081 } else { 22082 #ifdef _ILP32 22083 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 22084 #else 22085 tcp->tcp_acceptor_id = tcp->tcp_connp->conn_dev; 22086 #endif 22087 /* 22088 * Insert this socket into the acceptor hash. 22089 * We might need it for T_CONN_RES message 22090 */ 22091 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 22092 22093 if (tcp->tcp_fused) { 22094 /* 22095 * This is a fused loopback tcp; disable 22096 * read-side synchronous streams interface 22097 * and drain any queued data. 
It is okay 22098 * to do this for non-synchronous streams 22099 * fused tcp as well. 22100 */ 22101 tcp_fuse_disable_pair(tcp, B_FALSE); 22102 } 22103 tcp->tcp_issocket = B_FALSE; 22104 TCP_STAT(tcps, tcp_sock_fallback); 22105 22106 DB_TYPE(mp) = M_IOCACK; 22107 iocp->ioc_error = 0; 22108 } 22109 iocp->ioc_count = 0; 22110 iocp->ioc_rval = 0; 22111 qreply(q, mp); 22112 return; 22113 } 22114 CALL_IP_WPUT(connp, q, mp); 22115 } 22116 22117 /* 22118 * This routine is called by tcp_wput() to handle all TPI requests. 22119 */ 22120 /* ARGSUSED */ 22121 static void 22122 tcp_wput_proto(void *arg, mblk_t *mp, void *arg2) 22123 { 22124 conn_t *connp = (conn_t *)arg; 22125 tcp_t *tcp = connp->conn_tcp; 22126 union T_primitives *tprim = (union T_primitives *)mp->b_rptr; 22127 uchar_t *rptr; 22128 t_scalar_t type; 22129 int len; 22130 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 22131 22132 /* 22133 * Try and ASSERT the minimum possible references on the 22134 * conn early enough. Since we are executing on write side, 22135 * the connection is obviously not detached and that means 22136 * there is a ref each for TCP and IP. Since we are behind 22137 * the squeue, the minimum references needed are 3. If the 22138 * conn is in classifier hash list, there should be an 22139 * extra ref for that (we check both the possibilities). 22140 */ 22141 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 22142 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 22143 22144 rptr = mp->b_rptr; 22145 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 22146 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 22147 type = ((union T_primitives *)rptr)->type; 22148 if (type == T_EXDATA_REQ) { 22149 uint32_t msize = msgdsize(mp->b_cont); 22150 22151 len = msize - 1; 22152 if (len < 0) { 22153 freemsg(mp); 22154 return; 22155 } 22156 /* 22157 * Try to force urgent data out on the wire. 22158 * Even if we have unsent data this will 22159 * at least send the urgent flag. 22160 * XXX does not handle more flag correctly. 22161 */ 22162 len += tcp->tcp_unsent; 22163 len += tcp->tcp_snxt; 22164 tcp->tcp_urg = len; 22165 tcp->tcp_valid_bits |= TCP_URG_VALID; 22166 22167 /* Bypass tcp protocol for fused tcp loopback */ 22168 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 22169 return; 22170 } else if (type != T_DATA_REQ) { 22171 goto non_urgent_data; 22172 } 22173 /* TODO: options, flags, ... from user */ 22174 /* Set length to zero for reclamation below */ 22175 tcp_wput_data(tcp, mp->b_cont, B_TRUE); 22176 freeb(mp); 22177 return; 22178 } else { 22179 if (tcp->tcp_debug) { 22180 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 22181 "tcp_wput_proto, dropping one..."); 22182 } 22183 freemsg(mp); 22184 return; 22185 } 22186 22187 non_urgent_data: 22188 22189 switch ((int)tprim->type) { 22190 case T_SSL_PROXY_BIND_REQ: /* an SSL proxy endpoint bind request */ 22191 /* 22192 * save the kssl_ent_t from the next block, and convert this 22193 * back to a normal bind_req. 
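 *
 * A sketch of the message layout this case consumes (as read by the
 * code below):
 *
 *	mp:         M_PROTO, T_SSL_PROXY_BIND_REQ, rewritten in place
 *	            to T_BIND_REQ before falling into tcp_bind()
 *	mp->b_cont: at least sizeof (kssl_ent_t) bytes holding the
 *	            kssl entry, which we copy out, hold, and stash in
 *	            tcp_kssl_ent before freeing b_cont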
22194 */ 22195 if (mp->b_cont != NULL) { 22196 ASSERT(MBLKL(mp->b_cont) >= sizeof (kssl_ent_t)); 22197 22198 if (tcp->tcp_kssl_ent != NULL) { 22199 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 22200 KSSL_NO_PROXY); 22201 tcp->tcp_kssl_ent = NULL; 22202 } 22203 bcopy(mp->b_cont->b_rptr, &tcp->tcp_kssl_ent, 22204 sizeof (kssl_ent_t)); 22205 kssl_hold_ent(tcp->tcp_kssl_ent); 22206 freemsg(mp->b_cont); 22207 mp->b_cont = NULL; 22208 } 22209 tprim->type = T_BIND_REQ; 22210 22211 /* FALLTHROUGH */ 22212 case O_T_BIND_REQ: /* bind request */ 22213 case T_BIND_REQ: /* new semantics bind request */ 22214 tcp_bind(tcp, mp); 22215 break; 22216 case T_UNBIND_REQ: /* unbind request */ 22217 tcp_unbind(tcp, mp); 22218 break; 22219 case O_T_CONN_RES: /* old connection response XXX */ 22220 case T_CONN_RES: /* connection response */ 22221 tcp_accept(tcp, mp); 22222 break; 22223 case T_CONN_REQ: /* connection request */ 22224 tcp_connect(tcp, mp); 22225 break; 22226 case T_DISCON_REQ: /* disconnect request */ 22227 tcp_disconnect(tcp, mp); 22228 break; 22229 case T_CAPABILITY_REQ: 22230 tcp_capability_req(tcp, mp); /* capability request */ 22231 break; 22232 case T_INFO_REQ: /* information request */ 22233 tcp_info_req(tcp, mp); 22234 break; 22235 case T_SVR4_OPTMGMT_REQ: /* manage options req */ 22236 (void) svr4_optcom_req(tcp->tcp_wq, mp, cr, 22237 &tcp_opt_obj, B_TRUE); 22238 break; 22239 case T_OPTMGMT_REQ: 22240 /* 22241 * Note: no support for snmpcom_req() through new 22242 * T_OPTMGMT_REQ. See comments in ip.c 22243 */ 22244 /* Only IP is allowed to return meaningful value */ 22245 (void) tpi_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj, 22246 B_TRUE); 22247 break; 22248 22249 case T_UNITDATA_REQ: /* unitdata request */ 22250 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0); 22251 break; 22252 case T_ORDREL_REQ: /* orderly release req */ 22253 freemsg(mp); 22254 22255 if (tcp->tcp_fused) 22256 tcp_unfuse(tcp); 22257 22258 if (tcp_xmit_end(tcp) != 0) { 22259 /* 22260 * We were crossing FINs and got a reset from 22261 * the other side. Just ignore it. 22262 */ 22263 if (tcp->tcp_debug) { 22264 (void) strlog(TCP_MOD_ID, 0, 1, 22265 SL_ERROR|SL_TRACE, 22266 "tcp_wput_proto, T_ORDREL_REQ out of " 22267 "state %s", 22268 tcp_display(tcp, NULL, 22269 DISP_ADDR_AND_PORT)); 22270 } 22271 } 22272 break; 22273 case T_ADDR_REQ: 22274 tcp_addr_req(tcp, mp); 22275 break; 22276 default: 22277 if (tcp->tcp_debug) { 22278 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 22279 "tcp_wput_proto, bogus TPI msg, type %d", 22280 tprim->type); 22281 } 22282 /* 22283 * We used to M_ERROR. Sending TNOTSUPPORT gives the user a chance 22284 * to recover. 22285 */ 22286 tcp_err_ack(tcp, mp, TNOTSUPPORT, 0); 22287 break; 22288 } 22289 } 22290 22291 /* 22292 * The TCP write service routine should never be called... 22293 */ 22294 /* ARGSUSED */ 22295 static void 22296 tcp_wsrv(queue_t *q) 22297 { 22298 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 22299 22300 TCP_STAT(tcps, tcp_wsrv_called); 22301 } 22302 22303 /* Non-overlapping byte exchanger */ 22304 static void 22305 tcp_xchg(uchar_t *a, uchar_t *b, int len) 22306 { 22307 uchar_t uch; 22308 22309 while (len-- > 0) { 22310 uch = a[len]; 22311 a[len] = b[len]; 22312 b[len] = uch; 22313 } 22314 } 22315 22316 /* 22317 * Send out a control packet on the tcp connection specified. This routine 22318 * is typically called where we need a simple ACK or RST generated. 
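 *
 * For illustration, the push and delayed-ACK timers later in this
 * file emit a pure ACK with
 *
 *	tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
 *
 * i.e. seq is our next send sequence number, ack is the next receive
 * sequence number we expect, and ctl carries the TH_* flag bits.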
22319 */ 22320 static void 22321 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl) 22322 { 22323 uchar_t *rptr; 22324 tcph_t *tcph; 22325 ipha_t *ipha = NULL; 22326 ip6_t *ip6h = NULL; 22327 uint32_t sum; 22328 int tcp_hdr_len; 22329 int tcp_ip_hdr_len; 22330 mblk_t *mp; 22331 tcp_stack_t *tcps = tcp->tcp_tcps; 22332 22333 /* 22334 * Save sum for use in source route later. 22335 */ 22336 ASSERT(tcp != NULL); 22337 sum = tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 22338 tcp_hdr_len = tcp->tcp_hdr_len; 22339 tcp_ip_hdr_len = tcp->tcp_ip_hdr_len; 22340 22341 /* If a text string is passed in with the request, pass it to strlog. */ 22342 if (str != NULL && tcp->tcp_debug) { 22343 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22344 "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x", 22345 str, seq, ack, ctl); 22346 } 22347 mp = allocb(tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcps->tcps_wroff_xtra, 22348 BPRI_MED); 22349 if (mp == NULL) { 22350 return; 22351 } 22352 rptr = &mp->b_rptr[tcps->tcps_wroff_xtra]; 22353 mp->b_rptr = rptr; 22354 mp->b_wptr = &rptr[tcp_hdr_len]; 22355 bcopy(tcp->tcp_iphc, rptr, tcp_hdr_len); 22356 22357 if (tcp->tcp_ipversion == IPV4_VERSION) { 22358 ipha = (ipha_t *)rptr; 22359 ipha->ipha_length = htons(tcp_hdr_len); 22360 } else { 22361 ip6h = (ip6_t *)rptr; 22362 ASSERT(tcp != NULL); 22363 ip6h->ip6_plen = htons(tcp->tcp_hdr_len - 22364 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 22365 } 22366 tcph = (tcph_t *)&rptr[tcp_ip_hdr_len]; 22367 tcph->th_flags[0] = (uint8_t)ctl; 22368 if (ctl & TH_RST) { 22369 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22370 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22371 /* 22372 * Don't send TSopt w/ TH_RST packets per RFC 1323. 22373 */ 22374 if (tcp->tcp_snd_ts_ok && 22375 tcp->tcp_state > TCPS_SYN_SENT) { 22376 mp->b_wptr = &rptr[tcp_hdr_len - TCPOPT_REAL_TS_LEN]; 22377 *(mp->b_wptr) = TCPOPT_EOL; 22378 if (tcp->tcp_ipversion == IPV4_VERSION) { 22379 ipha->ipha_length = htons(tcp_hdr_len - 22380 TCPOPT_REAL_TS_LEN); 22381 } else { 22382 ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - 22383 TCPOPT_REAL_TS_LEN); 22384 } 22385 tcph->th_offset_and_rsrvd[0] -= (3 << 4); 22386 sum -= TCPOPT_REAL_TS_LEN; 22387 } 22388 } 22389 if (ctl & TH_ACK) { 22390 if (tcp->tcp_snd_ts_ok) { 22391 U32_TO_BE32(lbolt, 22392 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 22393 U32_TO_BE32(tcp->tcp_ts_recent, 22394 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 22395 } 22396 22397 /* Update the latest receive window size in TCP header. */ 22398 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 22399 tcph->th_win); 22400 tcp->tcp_rack = ack; 22401 tcp->tcp_rack_cnt = 0; 22402 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 22403 } 22404 BUMP_LOCAL(tcp->tcp_obsegs); 22405 U32_TO_BE32(seq, tcph->th_seq); 22406 U32_TO_BE32(ack, tcph->th_ack); 22407 /* 22408 * Include the adjustment for a source route if any. 22409 */ 22410 sum = (sum >> 16) + (sum & 0xFFFF); 22411 U16_TO_BE16(sum, tcph->th_sum); 22412 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 22413 tcp_send_data(tcp, tcp->tcp_wq, mp); 22414 } 22415 22416 /* 22417 * If this routine returns B_TRUE, TCP can generate a RST in response 22418 * to a segment. If it returns B_FALSE, TCP should not respond. 22419 */ 22420 static boolean_t 22421 tcp_send_rst_chk(tcp_stack_t *tcps) 22422 { 22423 clock_t now; 22424 22425 /* 22426 * TCP needs to protect itself from generating too many RSTs. 22427 * This can be a DoS attack by sending us random segments 22428 * soliciting RSTs. 
22429 * 22430 * What we do here is to have a limit of tcp_rst_sent_rate RSTs 22431 * in each 1-second interval. In this way, TCP still generates 22432 * RSTs in normal cases but when under attack, the impact is 22433 * limited. 22434 */ 22435 if (tcps->tcps_rst_sent_rate_enabled != 0) { 22436 now = lbolt; 22437 /* lbolt can wrap around. */ 22438 if ((tcps->tcps_last_rst_intrvl > now) || 22439 (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) > 22440 1*SECONDS)) { 22441 tcps->tcps_last_rst_intrvl = now; 22442 tcps->tcps_rst_cnt = 1; 22443 } else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) { 22444 return (B_FALSE); 22445 } 22446 } 22447 return (B_TRUE); 22448 } 22449 22450 /* 22451 * Send down the advice IP ioctl to tell IP to mark an IRE temporary. 22452 */ 22453 static void 22454 tcp_ip_ire_mark_advice(tcp_t *tcp) 22455 { 22456 mblk_t *mp; 22457 ipic_t *ipic; 22458 22459 if (tcp->tcp_ipversion == IPV4_VERSION) { 22460 mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN, 22461 &ipic); 22462 } else { 22463 mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN, 22464 &ipic); 22465 } 22466 if (mp == NULL) 22467 return; 22468 ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY; 22469 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 22470 } 22471 22472 /* 22473 * Return an IP advice ioctl mblk and set ipic to be the pointer 22474 * to the advice structure. 22475 */ 22476 static mblk_t * 22477 tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic) 22478 { 22479 struct iocblk *ioc; 22480 mblk_t *mp, *mp1; 22481 22482 mp = allocb(sizeof (ipic_t) + addr_len, BPRI_HI); 22483 if (mp == NULL) 22484 return (NULL); 22485 bzero(mp->b_rptr, sizeof (ipic_t) + addr_len); 22486 *ipic = (ipic_t *)mp->b_rptr; 22487 (*ipic)->ipic_cmd = IP_IOC_IRE_ADVISE_NO_REPLY; 22488 (*ipic)->ipic_addr_offset = sizeof (ipic_t); 22489 22490 bcopy(addr, *ipic + 1, addr_len); 22491 22492 (*ipic)->ipic_addr_length = addr_len; 22493 mp->b_wptr = &mp->b_rptr[sizeof (ipic_t) + addr_len]; 22494 22495 mp1 = mkiocb(IP_IOCTL); 22496 if (mp1 == NULL) { 22497 freemsg(mp); 22498 return (NULL); 22499 } 22500 mp1->b_cont = mp; 22501 ioc = (struct iocblk *)mp1->b_rptr; 22502 ioc->ioc_count = sizeof (ipic_t) + addr_len; 22503 22504 return (mp1); 22505 } 22506 22507 /* 22508 * Generate a reset based on an inbound packet; connp is set by the caller 22509 * when the RST is in response to an unexpected inbound packet for which 22510 * there is active tcp state in the system. 22511 * 22512 * IPSEC NOTE: Try to send the reply with the same protection as it came 22513 * in. We still have the ipsec_mp that the packet was attached to. Thus 22514 * the packet will go out at the same level of protection as it came in by 22515 * converting the IPSEC_IN to IPSEC_OUT. 
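 *
 * A sketch of that conversion (the mechanics live in ipsec_in_to_out(),
 * not here): the inbound chain arrives as [M_CTL ipsec_in_t] ->
 * [M_DATA packet]; we rewrite the data mblk in place into the RST,
 * swap addresses and ports, then recast the leading M_CTL from
 * IPSEC_IN to IPSEC_OUT so IP applies the matching outbound policy
 * when the reply is sent via CALL_IP_WPUT().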
22516 */ 22517 static void 22518 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq, 22519 uint32_t ack, int ctl, uint_t ip_hdr_len, zoneid_t zoneid, 22520 tcp_stack_t *tcps, conn_t *connp) 22521 { 22522 ipha_t *ipha = NULL; 22523 ip6_t *ip6h = NULL; 22524 ushort_t len; 22525 tcph_t *tcph; 22526 int i; 22527 mblk_t *ipsec_mp; 22528 boolean_t mctl_present; 22529 ipic_t *ipic; 22530 ipaddr_t v4addr; 22531 in6_addr_t v6addr; 22532 int addr_len; 22533 void *addr; 22534 queue_t *q = tcps->tcps_g_q; 22535 tcp_t *tcp; 22536 cred_t *cr; 22537 mblk_t *nmp; 22538 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 22539 22540 if (tcps->tcps_g_q == NULL) { 22541 /* 22542 * For non-zero stackids the default queue isn't created 22543 * until the first open, thus there can be a need to send 22544 * a reset before then. But we can't do that, hence we just 22545 * drop the packet. Later during boot, when the default queue 22546 * has been setup, a retransmitted packet from the peer 22547 * will result in a reset. 22548 */ 22549 ASSERT(tcps->tcps_netstack->netstack_stackid != 22550 GLOBAL_NETSTACKID); 22551 freemsg(mp); 22552 return; 22553 } 22554 22555 if (connp != NULL) 22556 tcp = connp->conn_tcp; 22557 else 22558 tcp = Q_TO_TCP(q); 22559 22560 if (!tcp_send_rst_chk(tcps)) { 22561 tcps->tcps_rst_unsent++; 22562 freemsg(mp); 22563 return; 22564 } 22565 22566 if (mp->b_datap->db_type == M_CTL) { 22567 ipsec_mp = mp; 22568 mp = mp->b_cont; 22569 mctl_present = B_TRUE; 22570 } else { 22571 ipsec_mp = mp; 22572 mctl_present = B_FALSE; 22573 } 22574 22575 if (str && q && tcps->tcps_dbg) { 22576 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22577 "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, " 22578 "flags 0x%x", 22579 str, seq, ack, ctl); 22580 } 22581 if (mp->b_datap->db_ref != 1) { 22582 mblk_t *mp1 = copyb(mp); 22583 freemsg(mp); 22584 mp = mp1; 22585 if (!mp) { 22586 if (mctl_present) 22587 freeb(ipsec_mp); 22588 return; 22589 } else { 22590 if (mctl_present) { 22591 ipsec_mp->b_cont = mp; 22592 } else { 22593 ipsec_mp = mp; 22594 } 22595 } 22596 } else if (mp->b_cont) { 22597 freemsg(mp->b_cont); 22598 mp->b_cont = NULL; 22599 } 22600 /* 22601 * We skip reversing source route here. 22602 * (for now we replace all IP options with EOL) 22603 */ 22604 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22605 ipha = (ipha_t *)mp->b_rptr; 22606 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++) 22607 mp->b_rptr[i] = IPOPT_EOL; 22608 /* 22609 * Make sure that src address isn't flagrantly invalid. 22610 * Not all broadcast address checking for the src address 22611 * is possible, since we don't know the netmask of the src 22612 * addr. No check for destination address is done, since 22613 * IP will not pass up a packet with a broadcast dest 22614 * address to TCP. Similar checks are done below for IPv6. 
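 *
 * Concretely, for v4 a source of 0.0.0.0, the limited broadcast
 * 255.255.255.255, or any 224/4 class-D multicast address
 * disqualifies the reply; the v6 checks below likewise reject
 * unspecified (::) and multicast (ff00::/8) sources.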
22615 */ 22616 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST || 22617 CLASSD(ipha->ipha_src)) { 22618 freemsg(ipsec_mp); 22619 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 22620 return; 22621 } 22622 } else { 22623 ip6h = (ip6_t *)mp->b_rptr; 22624 22625 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) || 22626 IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) { 22627 freemsg(ipsec_mp); 22628 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards); 22629 return; 22630 } 22631 22632 /* Remove any extension headers assuming partial overlay */ 22633 if (ip_hdr_len > IPV6_HDR_LEN) { 22634 uint8_t *to; 22635 22636 to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN; 22637 ovbcopy(ip6h, to, IPV6_HDR_LEN); 22638 mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN; 22639 ip_hdr_len = IPV6_HDR_LEN; 22640 ip6h = (ip6_t *)mp->b_rptr; 22641 ip6h->ip6_nxt = IPPROTO_TCP; 22642 } 22643 } 22644 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 22645 if (tcph->th_flags[0] & TH_RST) { 22646 freemsg(ipsec_mp); 22647 return; 22648 } 22649 tcph->th_offset_and_rsrvd[0] = (5 << 4); 22650 len = ip_hdr_len + sizeof (tcph_t); 22651 mp->b_wptr = &mp->b_rptr[len]; 22652 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22653 ipha->ipha_length = htons(len); 22654 /* Swap addresses */ 22655 v4addr = ipha->ipha_src; 22656 ipha->ipha_src = ipha->ipha_dst; 22657 ipha->ipha_dst = v4addr; 22658 ipha->ipha_ident = 0; 22659 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 22660 addr_len = IP_ADDR_LEN; 22661 addr = &v4addr; 22662 } else { 22663 /* No ip6i_t in this case */ 22664 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN); 22665 /* Swap addresses */ 22666 v6addr = ip6h->ip6_src; 22667 ip6h->ip6_src = ip6h->ip6_dst; 22668 ip6h->ip6_dst = v6addr; 22669 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit; 22670 addr_len = IPV6_ADDR_LEN; 22671 addr = &v6addr; 22672 } 22673 tcp_xchg(tcph->th_fport, tcph->th_lport, 2); 22674 U32_TO_BE32(ack, tcph->th_ack); 22675 U32_TO_BE32(seq, tcph->th_seq); 22676 U16_TO_BE16(0, tcph->th_win); 22677 U16_TO_BE16(sizeof (tcph_t), tcph->th_sum); 22678 tcph->th_flags[0] = (uint8_t)ctl; 22679 if (ctl & TH_RST) { 22680 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22681 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22682 } 22683 22684 /* IP trusts us to set up labels when required. 
*/ 22685 if (is_system_labeled() && (cr = DB_CRED(mp)) != NULL && 22686 crgetlabel(cr) != NULL) { 22687 int err, adjust; 22688 22689 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) 22690 err = tsol_check_label(cr, &mp, &adjust, 22691 tcp->tcp_connp->conn_mac_exempt, 22692 tcps->tcps_netstack->netstack_ip); 22693 else 22694 err = tsol_check_label_v6(cr, &mp, &adjust, 22695 tcp->tcp_connp->conn_mac_exempt, 22696 tcps->tcps_netstack->netstack_ip); 22697 if (mctl_present) 22698 ipsec_mp->b_cont = mp; 22699 else 22700 ipsec_mp = mp; 22701 if (err != 0) { 22702 freemsg(ipsec_mp); 22703 return; 22704 } 22705 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22706 ipha = (ipha_t *)mp->b_rptr; 22707 adjust += ntohs(ipha->ipha_length); 22708 ipha->ipha_length = htons(adjust); 22709 } else { 22710 ip6h = (ip6_t *)mp->b_rptr; 22711 } 22712 } 22713 22714 if (mctl_present) { 22715 ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr; 22716 22717 ASSERT(ii->ipsec_in_type == IPSEC_IN); 22718 if (!ipsec_in_to_out(ipsec_mp, ipha, ip6h)) { 22719 return; 22720 } 22721 } 22722 if (zoneid == ALL_ZONES) 22723 zoneid = GLOBAL_ZONEID; 22724 22725 /* Add the zoneid so ip_output routes it properly */ 22726 if ((nmp = ip_prepend_zoneid(ipsec_mp, zoneid, ipst)) == NULL) { 22727 freemsg(ipsec_mp); 22728 return; 22729 } 22730 ipsec_mp = nmp; 22731 22732 /* 22733 * NOTE: one might consider tracing a TCP packet here, but 22734 * this function has no active TCP state and no tcp structure 22735 * that has a trace buffer. If we traced here, we would have 22736 * to keep a local trace buffer in tcp_record_trace(). 22737 * 22738 * TSol note: The mblk that contains the incoming packet was 22739 * reused by tcp_xmit_listener_reset, so it already contains 22740 * the right credentials and we don't need to call mblk_setcred. 22741 * Also the conn's cred is not right since it is associated 22742 * with tcps_g_q. 22743 */ 22744 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, ipsec_mp); 22745 22746 /* 22747 * Tell IP to mark the IRE used for this destination temporary. 22748 * This way, we can limit our exposure to DoS attack because IP 22749 * creates an IRE for each destination. If there are too many, 22750 * the time to do any routing lookup will be extremely long. And 22751 * the lookup can be in interrupt context. 22752 * 22753 * Note that in normal circumstances, this marking should not 22754 * affect anything. It would be nice if only 1 message is 22755 * needed to inform IP that the IRE created for this RST should 22756 * not be added to the cache table. But there is currently 22757 * not such communication mechanism between TCP and IP. So 22758 * the best we can do now is to send the advice ioctl to IP 22759 * to mark the IRE temporary. 22760 */ 22761 if ((mp = tcp_ip_advise_mblk(addr, addr_len, &ipic)) != NULL) { 22762 ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY; 22763 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 22764 } 22765 } 22766 22767 /* 22768 * Initiate closedown sequence on an active connection. (May be called as 22769 * writer.) Return value zero for OK return, non-zero for error return. 
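 *
 * For illustration, the T_ORDREL_REQ arm of tcp_wput_proto() is a
 * typical caller:
 *
 *	if (tcp_xmit_end(tcp) != 0) {
 *		... FINs crossed; the failure is merely logged ...
 *	}
 *
 * A non-zero return only means the connection was not in a state
 * (TCPS_SYN_RCVD through TCPS_CLOSE_WAIT) from which a FIN may be
 * sent.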
22770 */ 22771 static int 22772 tcp_xmit_end(tcp_t *tcp) 22773 { 22774 ipic_t *ipic; 22775 mblk_t *mp; 22776 tcp_stack_t *tcps = tcp->tcp_tcps; 22777 22778 if (tcp->tcp_state < TCPS_SYN_RCVD || 22779 tcp->tcp_state > TCPS_CLOSE_WAIT) { 22780 /* 22781 * Invalid state, only states TCPS_SYN_RCVD, 22782 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid 22783 */ 22784 return (-1); 22785 } 22786 22787 tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent; 22788 tcp->tcp_valid_bits |= TCP_FSS_VALID; 22789 /* 22790 * If there is nothing more unsent, send the FIN now. 22791 * Otherwise, it will go out with the last segment. 22792 */ 22793 if (tcp->tcp_unsent == 0) { 22794 mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, 22795 tcp->tcp_fss, B_FALSE, NULL, B_FALSE); 22796 22797 if (mp) { 22798 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 22799 tcp_send_data(tcp, tcp->tcp_wq, mp); 22800 } else { 22801 /* 22802 * Couldn't allocate msg. Pretend we got it out. 22803 * Wait for rexmit timeout. 22804 */ 22805 tcp->tcp_snxt = tcp->tcp_fss + 1; 22806 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 22807 } 22808 22809 /* 22810 * If needed, update tcp_rexmit_snxt as tcp_snxt is 22811 * changed. 22812 */ 22813 if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) { 22814 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 22815 } 22816 } else { 22817 /* 22818 * If tcp->tcp_cork is set, then the data will not get sent, 22819 * so we have to check that and unset it first. 22820 */ 22821 if (tcp->tcp_cork) 22822 tcp->tcp_cork = B_FALSE; 22823 tcp_wput_data(tcp, NULL, B_FALSE); 22824 } 22825 22826 /* 22827 * If TCP does not get enough samples of RTT or tcp_rtt_updates 22828 * is 0, don't update the cache. 22829 */ 22830 if (tcps->tcps_rtt_updates == 0 || 22831 tcp->tcp_rtt_update < tcps->tcps_rtt_updates) 22832 return (0); 22833 22834 /* 22835 * NOTE: should not update if source routed, i.e. if tcp_remote is 22836 * different from the destination. 22837 */ 22838 if (tcp->tcp_ipversion == IPV4_VERSION) { 22839 if (tcp->tcp_remote != tcp->tcp_ipha->ipha_dst) { 22840 return (0); 22841 } 22842 mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN, 22843 &ipic); 22844 } else { 22845 if (!(IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6, 22846 &tcp->tcp_ip6h->ip6_dst))) { 22847 return (0); 22848 } 22849 mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN, 22850 &ipic); 22851 } 22852 22853 /* Record route attributes in the IRE for use by future connections. */ 22854 if (mp == NULL) 22855 return (0); 22856 22857 /* 22858 * We do not have a good algorithm to update ssthresh at this time. 22859 * So don't do any update. 22860 */ 22861 ipic->ipic_rtt = tcp->tcp_rtt_sa; 22862 ipic->ipic_rtt_sd = tcp->tcp_rtt_sd; 22863 22864 CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp); 22865 return (0); 22866 } 22867 22868 /* 22869 * Generate a "no listener here" RST in response to an "unknown" segment. 22870 * connp is set by caller when RST is in response to an unexpected 22871 * inbound packet for which there is active tcp state in the system. 22872 * Note that we are reusing the incoming mp to construct the outgoing RST. 
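 *
 * The seq/ack values follow the RFC 793 reset-generation rule, as
 * implemented by the two tcp_xmit_early_reset() calls below: if the
 * offending segment had ACK set, reply <SEQ=SEG.ACK><CTL=RST>;
 * otherwise reply <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>, with a
 * SYN counting as one unit of sequence space.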
22873 */ 22874 void 22875 tcp_xmit_listeners_reset(mblk_t *mp, uint_t ip_hdr_len, zoneid_t zoneid, 22876 tcp_stack_t *tcps, conn_t *connp) 22877 { 22878 uchar_t *rptr; 22879 uint32_t seg_len; 22880 tcph_t *tcph; 22881 uint32_t seg_seq; 22882 uint32_t seg_ack; 22883 uint_t flags; 22884 mblk_t *ipsec_mp; 22885 ipha_t *ipha; 22886 ip6_t *ip6h; 22887 boolean_t mctl_present = B_FALSE; 22888 boolean_t check = B_TRUE; 22889 boolean_t policy_present; 22890 ipsec_stack_t *ipss = tcps->tcps_netstack->netstack_ipsec; 22891 22892 TCP_STAT(tcps, tcp_no_listener); 22893 22894 ipsec_mp = mp; 22895 22896 if (mp->b_datap->db_type == M_CTL) { 22897 ipsec_in_t *ii; 22898 22899 mctl_present = B_TRUE; 22900 mp = mp->b_cont; 22901 22902 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 22903 ASSERT(ii->ipsec_in_type == IPSEC_IN); 22904 if (ii->ipsec_in_dont_check) { 22905 check = B_FALSE; 22906 if (!ii->ipsec_in_secure) { 22907 freeb(ipsec_mp); 22908 mctl_present = B_FALSE; 22909 ipsec_mp = mp; 22910 } 22911 } 22912 } 22913 22914 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22915 policy_present = ipss->ipsec_inbound_v4_policy_present; 22916 ipha = (ipha_t *)mp->b_rptr; 22917 ip6h = NULL; 22918 } else { 22919 policy_present = ipss->ipsec_inbound_v6_policy_present; 22920 ipha = NULL; 22921 ip6h = (ip6_t *)mp->b_rptr; 22922 } 22923 22924 if (check && policy_present) { 22925 /* 22926 * The conn_t parameter is NULL because we already know 22927 * nobody's home. 22928 */ 22929 ipsec_mp = ipsec_check_global_policy( 22930 ipsec_mp, (conn_t *)NULL, ipha, ip6h, mctl_present, 22931 tcps->tcps_netstack); 22932 if (ipsec_mp == NULL) 22933 return; 22934 } 22935 if (is_system_labeled() && !tsol_can_reply_error(mp)) { 22936 DTRACE_PROBE2( 22937 tx__ip__log__error__nolistener__tcp, 22938 char *, "Could not reply with RST to mp(1)", 22939 mblk_t *, mp); 22940 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n")); 22941 freemsg(ipsec_mp); 22942 return; 22943 } 22944 22945 rptr = mp->b_rptr; 22946 22947 tcph = (tcph_t *)&rptr[ip_hdr_len]; 22948 seg_seq = BE32_TO_U32(tcph->th_seq); 22949 seg_ack = BE32_TO_U32(tcph->th_ack); 22950 flags = tcph->th_flags[0]; 22951 22952 seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcph) + ip_hdr_len); 22953 if (flags & TH_RST) { 22954 freemsg(ipsec_mp); 22955 } else if (flags & TH_ACK) { 22956 tcp_xmit_early_reset("no tcp, reset", 22957 ipsec_mp, seg_ack, 0, TH_RST, ip_hdr_len, zoneid, tcps, 22958 connp); 22959 } else { 22960 if (flags & TH_SYN) { 22961 seg_len++; 22962 } else { 22963 /* 22964 * Here we violate the RFC. Note that a normal 22965 * TCP will never send a segment without the ACK 22966 * flag, except for RST or SYN segment. This 22967 * segment is neither. Just drop it on the 22968 * floor. 22969 */ 22970 freemsg(ipsec_mp); 22971 tcps->tcps_rst_unsent++; 22972 return; 22973 } 22974 22975 tcp_xmit_early_reset("no tcp, reset/ack", 22976 ipsec_mp, 0, seg_seq + seg_len, 22977 TH_RST | TH_ACK, ip_hdr_len, zoneid, tcps, connp); 22978 } 22979 } 22980 22981 /* 22982 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with 22983 * ip and tcp header ready to pass down to IP. If the mp passed in is 22984 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that 22985 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary 22986 * otherwise it will dup partial mblks.) 22987 * Otherwise, an appropriate ACK packet will be generated. This 22988 * routine is not usually called to send new data for the first time. 
It 22989 * is mostly called out of the timer for retransmits, and to generate ACKs. 22990 * 22991 * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will 22992 * be adjusted by *offset. And after dupb(), the offset and the ending mblk 22993 * of the original mblk chain will be returned in *offset and *end_mp. 22994 */ 22995 mblk_t * 22996 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset, 22997 mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len, 22998 boolean_t rexmit) 22999 { 23000 int data_length; 23001 int32_t off = 0; 23002 uint_t flags; 23003 mblk_t *mp1; 23004 mblk_t *mp2; 23005 uchar_t *rptr; 23006 tcph_t *tcph; 23007 int32_t num_sack_blk = 0; 23008 int32_t sack_opt_len = 0; 23009 tcp_stack_t *tcps = tcp->tcp_tcps; 23010 23011 /* Allocate for our maximum TCP header + link-level */ 23012 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 23013 tcps->tcps_wroff_xtra, BPRI_MED); 23014 if (!mp1) 23015 return (NULL); 23016 data_length = 0; 23017 23018 /* 23019 * Note that tcp_mss has been adjusted to take into account the 23020 * timestamp option if applicable. Because SACK options do not 23021 * appear in every TCP segment and they are of variable lengths, 23022 * they cannot be included in tcp_mss. Thus we need to calculate 23023 * the actual segment length when we need to send a segment which 23024 * includes SACK options. 23025 */ 23026 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 23027 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 23028 tcp->tcp_num_sack_blk); 23029 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 23030 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 23031 if (max_to_send + sack_opt_len > tcp->tcp_mss) 23032 max_to_send -= sack_opt_len; 23033 } 23034 23035 if (offset != NULL) { 23036 off = *offset; 23037 /* We use offset as an indicator that end_mp is not NULL. */ 23038 *end_mp = NULL; 23039 } 23040 for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) { 23041 /* This could be faster with cooperation from downstream */ 23042 if (mp2 != mp1 && !sendall && 23043 data_length + (int)(mp->b_wptr - mp->b_rptr) > 23044 max_to_send) 23045 /* 23046 * Don't send the next mblk since the whole mblk 23047 * does not fit. 23048 */ 23049 break; 23050 mp2->b_cont = dupb(mp); 23051 mp2 = mp2->b_cont; 23052 if (!mp2) { 23053 freemsg(mp1); 23054 return (NULL); 23055 } 23056 mp2->b_rptr += off; 23057 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 23058 (uintptr_t)INT_MAX); 23059 23060 data_length += (int)(mp2->b_wptr - mp2->b_rptr); 23061 if (data_length > max_to_send) { 23062 mp2->b_wptr -= data_length - max_to_send; 23063 data_length = max_to_send; 23064 off = mp2->b_wptr - mp->b_rptr; 23065 break; 23066 } else { 23067 off = 0; 23068 } 23069 } 23070 if (offset != NULL) { 23071 *offset = off; 23072 *end_mp = mp; 23073 } 23074 if (seg_len != NULL) { 23075 *seg_len = data_length; 23076 } 23077 23078 /* Update the latest receive window size in TCP header. */ 23079 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 23080 tcp->tcp_tcph->th_win); 23081 23082 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 23083 mp1->b_rptr = rptr; 23084 mp1->b_wptr = rptr + tcp->tcp_hdr_len + sack_opt_len; 23085 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 23086 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 23087 U32_TO_ABE32(seq, tcph->th_seq); 23088 23089 /* 23090 * Use tcp_unsent to determine if the PUSH bit should be used; this 23091 * assumes that this function was called from tcp_wput_data. 
Thus, when called 23092 * to retransmit data, the setting of the PUSH bit may appear 23093 * somewhat random in that it might get set when it should not. This 23094 * should not pose any performance issues. 23095 */ 23096 if (data_length != 0 && (tcp->tcp_unsent == 0 || 23097 tcp->tcp_unsent == data_length)) { 23098 flags = TH_ACK | TH_PUSH; 23099 } else { 23100 flags = TH_ACK; 23101 } 23102 23103 if (tcp->tcp_ecn_ok) { 23104 if (tcp->tcp_ecn_echo_on) 23105 flags |= TH_ECE; 23106 23107 /* 23108 * Only set ECT bit and ECN_CWR if a segment contains new data. 23109 * There is no TCP flow control for non-data segments, and 23110 * only data segments are transmitted reliably. 23111 */ 23112 if (data_length > 0 && !rexmit) { 23113 SET_ECT(tcp, rptr); 23114 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 23115 flags |= TH_CWR; 23116 tcp->tcp_ecn_cwr_sent = B_TRUE; 23117 } 23118 } 23119 } 23120 23121 if (tcp->tcp_valid_bits) { 23122 uint32_t u1; 23123 23124 if ((tcp->tcp_valid_bits & TCP_ISS_VALID) && 23125 seq == tcp->tcp_iss) { 23126 uchar_t *wptr; 23127 23128 /* 23129 * If TCP_ISS_VALID and the seq number is tcp_iss, 23130 * TCP can only be in SYN-SENT, SYN-RCVD or 23131 * FIN-WAIT-1 state. It can be FIN-WAIT-1 if 23132 * our SYN is not ack'ed but the app closes this 23133 * TCP connection. 23134 */ 23135 ASSERT(tcp->tcp_state == TCPS_SYN_SENT || 23136 tcp->tcp_state == TCPS_SYN_RCVD || 23137 tcp->tcp_state == TCPS_FIN_WAIT_1); 23138 23139 /* 23140 * Tack on the MSS option. It is always needed 23141 * for both active and passive open. 23142 * 23143 * MSS option value should be interface MTU - MIN 23144 * TCP/IP header according to RFC 793 as it means 23145 * the maximum segment size TCP can receive. But 23146 * to get around some broken middle boxes/end hosts 23147 * out there, we allow the option value to be the 23148 * same as the MSS option size on the peer side. 23149 * In this way, the other side will not send 23150 * anything larger than they can receive. 23151 * 23152 * Note that for SYN_SENT state, the ndd param 23153 * tcp_use_smss_as_mss_opt has no effect as we 23154 * don't know the peer's MSS option value. So 23155 * the only case we need to take care of is in 23156 * SYN_RCVD state, which is done later. 23157 */ 23158 wptr = mp1->b_wptr; 23159 wptr[0] = TCPOPT_MAXSEG; 23160 wptr[1] = TCPOPT_MAXSEG_LEN; 23161 wptr += 2; 23162 u1 = tcp->tcp_if_mtu - 23163 (tcp->tcp_ipversion == IPV4_VERSION ? 23164 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) - 23165 TCP_MIN_HEADER_LENGTH; 23166 U16_TO_BE16(u1, wptr); 23167 mp1->b_wptr = wptr + 2; 23168 /* Update the offset to cover the additional word */ 23169 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23170 23171 /* 23172 * Note that the following way of filling in 23173 * TCP options is not optimal. Some NOPs can 23174 * be saved. But there is no need at this time 23175 * to optimize it. When it is needed, we will 23176 * do it. 
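 *
 * For reference, the option block built below for a timestamped,
 * window-scaled, SACK-permitted SYN lays out as (hex bytes, per
 * RFC 793/1323/2018):
 *
 *	02 04 <mss16>			MSS
 *	01 01 08 0a <val32> <ecr32>	NOP NOP TSTAMP
 *	01 03 03 <shift>		NOP WSCALE
 *	01 01 04 02			NOP NOP SACK_PERMITTED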
23177 */ 23178 switch (tcp->tcp_state) { 23179 case TCPS_SYN_SENT: 23180 flags = TH_SYN; 23181 23182 if (tcp->tcp_snd_ts_ok) { 23183 uint32_t llbolt = (uint32_t)lbolt; 23184 23185 wptr = mp1->b_wptr; 23186 wptr[0] = TCPOPT_NOP; 23187 wptr[1] = TCPOPT_NOP; 23188 wptr[2] = TCPOPT_TSTAMP; 23189 wptr[3] = TCPOPT_TSTAMP_LEN; 23190 wptr += 4; 23191 U32_TO_BE32(llbolt, wptr); 23192 wptr += 4; 23193 ASSERT(tcp->tcp_ts_recent == 0); 23194 U32_TO_BE32(0L, wptr); 23195 mp1->b_wptr += TCPOPT_REAL_TS_LEN; 23196 tcph->th_offset_and_rsrvd[0] += 23197 (3 << 4); 23198 } 23199 23200 /* 23201 * Set up all the bits to tell other side 23202 * we are ECN capable. 23203 */ 23204 if (tcp->tcp_ecn_ok) { 23205 flags |= (TH_ECE | TH_CWR); 23206 } 23207 break; 23208 case TCPS_SYN_RCVD: 23209 flags |= TH_SYN; 23210 23211 /* 23212 * Reset the MSS option value to be SMSS 23213 * We should probably add back the bytes 23214 * for timestamp option and IPsec. We 23215 * don't do that as this is a workaround 23216 * for broken middle boxes/end hosts, it 23217 * is better for us to be more cautious. 23218 * They may not take these things into 23219 * account in their SMSS calculation. Thus 23220 * the peer's calculated SMSS may be smaller 23221 * than what it can be. This should be OK. 23222 */ 23223 if (tcps->tcps_use_smss_as_mss_opt) { 23224 u1 = tcp->tcp_mss; 23225 U16_TO_BE16(u1, wptr); 23226 } 23227 23228 /* 23229 * If the other side is ECN capable, reply 23230 * that we are also ECN capable. 23231 */ 23232 if (tcp->tcp_ecn_ok) 23233 flags |= TH_ECE; 23234 break; 23235 default: 23236 /* 23237 * The above ASSERT() makes sure that this 23238 * must be FIN-WAIT-1 state. Our SYN has 23239 * not been ack'ed so retransmit it. 23240 */ 23241 flags |= TH_SYN; 23242 break; 23243 } 23244 23245 if (tcp->tcp_snd_ws_ok) { 23246 wptr = mp1->b_wptr; 23247 wptr[0] = TCPOPT_NOP; 23248 wptr[1] = TCPOPT_WSCALE; 23249 wptr[2] = TCPOPT_WS_LEN; 23250 wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 23251 mp1->b_wptr += TCPOPT_REAL_WS_LEN; 23252 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23253 } 23254 23255 if (tcp->tcp_snd_sack_ok) { 23256 wptr = mp1->b_wptr; 23257 wptr[0] = TCPOPT_NOP; 23258 wptr[1] = TCPOPT_NOP; 23259 wptr[2] = TCPOPT_SACK_PERMITTED; 23260 wptr[3] = TCPOPT_SACK_OK_LEN; 23261 mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 23262 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23263 } 23264 23265 /* allocb() of adequate mblk assures space */ 23266 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 23267 (uintptr_t)INT_MAX); 23268 u1 = (int)(mp1->b_wptr - mp1->b_rptr); 23269 /* 23270 * Get IP set to checksum on our behalf 23271 * Include the adjustment for a source route if any. 23272 */ 23273 u1 += tcp->tcp_sum; 23274 u1 = (u1 >> 16) + (u1 & 0xFFFF); 23275 U16_TO_BE16(u1, tcph->th_sum); 23276 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23277 } 23278 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 23279 (seq + data_length) == tcp->tcp_fss) { 23280 if (!tcp->tcp_fin_acked) { 23281 flags |= TH_FIN; 23282 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23283 } 23284 if (!tcp->tcp_fin_sent) { 23285 tcp->tcp_fin_sent = B_TRUE; 23286 switch (tcp->tcp_state) { 23287 case TCPS_SYN_RCVD: 23288 case TCPS_ESTABLISHED: 23289 tcp->tcp_state = TCPS_FIN_WAIT_1; 23290 break; 23291 case TCPS_CLOSE_WAIT: 23292 tcp->tcp_state = TCPS_LAST_ACK; 23293 break; 23294 } 23295 if (tcp->tcp_suna == tcp->tcp_snxt) 23296 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 23297 tcp->tcp_snxt = tcp->tcp_fss + 1; 23298 } 23299 } 23300 /* 23301 * Note the trick here. u1 is unsigned. 
When tcp_urg 23302 * is smaller than seq, u1 will become a very huge value. 23303 * So the comparison will fail. Also note that tcp_urp 23304 * should be positive, see RFC 793 page 17. 23305 */ 23306 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION; 23307 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 && 23308 u1 < (uint32_t)(64 * 1024)) { 23309 flags |= TH_URG; 23310 BUMP_MIB(&tcps->tcps_mib, tcpOutUrg); 23311 U32_TO_ABE16(u1, tcph->th_urp); 23312 } 23313 } 23314 tcph->th_flags[0] = (uchar_t)flags; 23315 tcp->tcp_rack = tcp->tcp_rnxt; 23316 tcp->tcp_rack_cnt = 0; 23317 23318 if (tcp->tcp_snd_ts_ok) { 23319 if (tcp->tcp_state != TCPS_SYN_SENT) { 23320 uint32_t llbolt = (uint32_t)lbolt; 23321 23322 U32_TO_BE32(llbolt, 23323 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23324 U32_TO_BE32(tcp->tcp_ts_recent, 23325 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23326 } 23327 } 23328 23329 if (num_sack_blk > 0) { 23330 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23331 sack_blk_t *tmp; 23332 int32_t i; 23333 23334 wptr[0] = TCPOPT_NOP; 23335 wptr[1] = TCPOPT_NOP; 23336 wptr[2] = TCPOPT_SACK; 23337 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23338 sizeof (sack_blk_t); 23339 wptr += TCPOPT_REAL_SACK_LEN; 23340 23341 tmp = tcp->tcp_sack_list; 23342 for (i = 0; i < num_sack_blk; i++) { 23343 U32_TO_BE32(tmp[i].begin, wptr); 23344 wptr += sizeof (tcp_seq); 23345 U32_TO_BE32(tmp[i].end, wptr); 23346 wptr += sizeof (tcp_seq); 23347 } 23348 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) << 4); 23349 } 23350 ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX); 23351 data_length += (int)(mp1->b_wptr - rptr); 23352 if (tcp->tcp_ipversion == IPV4_VERSION) { 23353 ((ipha_t *)rptr)->ipha_length = htons(data_length); 23354 } else { 23355 ip6_t *ip6 = (ip6_t *)(rptr + 23356 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23357 sizeof (ip6i_t) : 0)); 23358 23359 ip6->ip6_plen = htons(data_length - 23360 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23361 } 23362 23363 /* 23364 * Prime pump for IP 23365 * Include the adjustment for a source route if any. 23366 */ 23367 data_length -= tcp->tcp_ip_hdr_len; 23368 data_length += tcp->tcp_sum; 23369 data_length = (data_length >> 16) + (data_length & 0xFFFF); 23370 U16_TO_ABE16(data_length, tcph->th_sum); 23371 if (tcp->tcp_ip_forward_progress) { 23372 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 23373 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 23374 tcp->tcp_ip_forward_progress = B_FALSE; 23375 } 23376 return (mp1); 23377 } 23378 23379 /* This function handles the push timeout. */ 23380 void 23381 tcp_push_timer(void *arg) 23382 { 23383 conn_t *connp = (conn_t *)arg; 23384 tcp_t *tcp = connp->conn_tcp; 23385 tcp_stack_t *tcps = tcp->tcp_tcps; 23386 23387 TCP_DBGSTAT(tcps, tcp_push_timer_cnt); 23388 23389 ASSERT(tcp->tcp_listener == NULL); 23390 23391 /* 23392 * We need to plug synchronous streams during our drain to prevent 23393 * a race with tcp_fuse_rrw() or tcp_fusion_rinfop(). 23394 */ 23395 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 23396 tcp->tcp_push_tid = 0; 23397 if ((tcp->tcp_rcv_list != NULL) && 23398 (tcp_rcv_drain(tcp->tcp_rq, tcp) == TH_ACK_NEEDED)) 23399 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 23400 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 23401 } 23402 23403 /* 23404 * This function handles delayed ACK timeout. 
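 *
 * A sketch of the policy implemented below: ACKs are normally
 * deferred until tcp_rack_cur_max segments have arrived or this
 * timer fires. If the timer fires while more than an MSS of data
 * remains unACKed by us, the deferral was evidently too aggressive
 * for this peer, so we fall back toward the RFC 1122 (4.2.3.2)
 * ack-every-other-segment behavior by clamping tcp_rack_cur_max to
 * 2 and ratcheting tcp_rack_abs_max down.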
23405 */ 23406 static void 23407 tcp_ack_timer(void *arg) 23408 { 23409 conn_t *connp = (conn_t *)arg; 23410 tcp_t *tcp = connp->conn_tcp; 23411 mblk_t *mp; 23412 tcp_stack_t *tcps = tcp->tcp_tcps; 23413 23414 TCP_DBGSTAT(tcps, tcp_ack_timer_cnt); 23415 23416 tcp->tcp_ack_tid = 0; 23417 23418 if (tcp->tcp_fused) 23419 return; 23420 23421 /* 23422 * Do not send ACK if there is no outstanding unack'ed data. 23423 */ 23424 if (tcp->tcp_rnxt == tcp->tcp_rack) { 23425 return; 23426 } 23427 23428 if ((tcp->tcp_rnxt - tcp->tcp_rack) > tcp->tcp_mss) { 23429 /* 23430 * Make sure we don't allow deferred ACKs to result in 23431 * timer-based ACKing. If we have held off an ACK 23432 * when there was more than an mss here, and the timer 23433 * goes off, we have to worry about the possibility 23434 * that the sender isn't doing slow-start, or is out 23435 * of step with us for some other reason. We fall 23436 * permanently back in the direction of 23437 * ACK-every-other-packet as suggested in RFC 1122. 23438 */ 23439 if (tcp->tcp_rack_abs_max > 2) 23440 tcp->tcp_rack_abs_max--; 23441 tcp->tcp_rack_cur_max = 2; 23442 } 23443 mp = tcp_ack_mp(tcp); 23444 23445 if (mp != NULL) { 23446 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 23447 BUMP_LOCAL(tcp->tcp_obsegs); 23448 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 23449 BUMP_MIB(&tcps->tcps_mib, tcpOutAckDelayed); 23450 tcp_send_data(tcp, tcp->tcp_wq, mp); 23451 } 23452 } 23453 23454 23455 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 23456 static mblk_t * 23457 tcp_ack_mp(tcp_t *tcp) 23458 { 23459 uint32_t seq_no; 23460 tcp_stack_t *tcps = tcp->tcp_tcps; 23461 23462 /* 23463 * There are a few cases to be considered while setting the sequence no. 23464 * Essentially, we can come here while processing an unacceptable pkt 23465 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 23466 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 23467 * If we are here for a zero window probe, stick with suna. In all 23468 * other cases, we check if suna + swnd encompasses snxt and set 23469 * the sequence number to snxt, if so. If snxt falls outside the 23470 * window (the receiver probably shrunk its window), we will go with 23471 * suna + swnd, otherwise the sequence no will be unacceptable to the 23472 * receiver. 23473 */ 23474 if (tcp->tcp_zero_win_probe) { 23475 seq_no = tcp->tcp_suna; 23476 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 23477 ASSERT(tcp->tcp_swnd == 0); 23478 seq_no = tcp->tcp_snxt; 23479 } else { 23480 seq_no = SEQ_GT(tcp->tcp_snxt, 23481 (tcp->tcp_suna + tcp->tcp_swnd)) ? 23482 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 23483 } 23484 23485 if (tcp->tcp_valid_bits) { 23486 /* 23487 * For the complex case where we have to send some 23488 * controls (FIN or SYN), let tcp_xmit_mp do it. 
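 *
 * (A control-only segment is requested by passing a NULL data mblk
 * and a zero length, as below; tcp_xmit_mp() then folds in whatever
 * SYN/FIN/urgent bits are still valid at this sequence number.)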
23489 */ 23490 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 23491 NULL, B_FALSE)); 23492 } else { 23493 /* Generate a simple ACK */ 23494 int data_length; 23495 uchar_t *rptr; 23496 tcph_t *tcph; 23497 mblk_t *mp1; 23498 int32_t tcp_hdr_len; 23499 int32_t tcp_tcp_hdr_len; 23500 int32_t num_sack_blk = 0; 23501 int32_t sack_opt_len; 23502 23503 /* 23504 * Allocate space for TCP + IP headers 23505 * and link-level header 23506 */ 23507 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 23508 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 23509 tcp->tcp_num_sack_blk); 23510 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 23511 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 23512 tcp_hdr_len = tcp->tcp_hdr_len + sack_opt_len; 23513 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + sack_opt_len; 23514 } else { 23515 tcp_hdr_len = tcp->tcp_hdr_len; 23516 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 23517 } 23518 mp1 = allocb(tcp_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 23519 if (!mp1) 23520 return (NULL); 23521 23522 /* Update the latest receive window size in TCP header. */ 23523 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 23524 tcp->tcp_tcph->th_win); 23525 /* copy in prototype TCP + IP header */ 23526 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 23527 mp1->b_rptr = rptr; 23528 mp1->b_wptr = rptr + tcp_hdr_len; 23529 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 23530 23531 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 23532 23533 /* Set the TCP sequence number. */ 23534 U32_TO_ABE32(seq_no, tcph->th_seq); 23535 23536 /* Set up the TCP flag field. */ 23537 tcph->th_flags[0] = (uchar_t)TH_ACK; 23538 if (tcp->tcp_ecn_echo_on) 23539 tcph->th_flags[0] |= TH_ECE; 23540 23541 tcp->tcp_rack = tcp->tcp_rnxt; 23542 tcp->tcp_rack_cnt = 0; 23543 23544 /* fill in timestamp option if in use */ 23545 if (tcp->tcp_snd_ts_ok) { 23546 uint32_t llbolt = (uint32_t)lbolt; 23547 23548 U32_TO_BE32(llbolt, 23549 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23550 U32_TO_BE32(tcp->tcp_ts_recent, 23551 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23552 } 23553 23554 /* Fill in SACK options */ 23555 if (num_sack_blk > 0) { 23556 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23557 sack_blk_t *tmp; 23558 int32_t i; 23559 23560 wptr[0] = TCPOPT_NOP; 23561 wptr[1] = TCPOPT_NOP; 23562 wptr[2] = TCPOPT_SACK; 23563 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23564 sizeof (sack_blk_t); 23565 wptr += TCPOPT_REAL_SACK_LEN; 23566 23567 tmp = tcp->tcp_sack_list; 23568 for (i = 0; i < num_sack_blk; i++) { 23569 U32_TO_BE32(tmp[i].begin, wptr); 23570 wptr += sizeof (tcp_seq); 23571 U32_TO_BE32(tmp[i].end, wptr); 23572 wptr += sizeof (tcp_seq); 23573 } 23574 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 23575 << 4); 23576 } 23577 23578 if (tcp->tcp_ipversion == IPV4_VERSION) { 23579 ((ipha_t *)rptr)->ipha_length = htons(tcp_hdr_len); 23580 } else { 23581 /* Check for ip6i_t header in sticky hdrs */ 23582 ip6_t *ip6 = (ip6_t *)(rptr + 23583 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23584 sizeof (ip6i_t) : 0)); 23585 23586 ip6->ip6_plen = htons(tcp_hdr_len - 23587 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23588 } 23589 23590 /* 23591 * Prime pump for checksum calculation in IP. Include the 23592 * adjustment for a source route if any. 
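 *
 * Worked example with made-up numbers: if tcp_tcp_hdr_len is 32 and
 * tcp_sum (the precomputed pseudo-header partial sum) is 0x1e8f3,
 * data_length below becomes 0x1e913, and folding the carry gives
 * (0x1 + 0xe913) = 0xe914, the 16-bit value primed into th_sum for
 * IP or the hardware to complete.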
23593 */ 23594 data_length = tcp_tcp_hdr_len + tcp->tcp_sum; 23595 data_length = (data_length >> 16) + (data_length & 0xFFFF); 23596 U16_TO_ABE16(data_length, tcph->th_sum); 23597 23598 if (tcp->tcp_ip_forward_progress) { 23599 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 23600 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 23601 tcp->tcp_ip_forward_progress = B_FALSE; 23602 } 23603 return (mp1); 23604 } 23605 } 23606 23607 /* 23608 * To create a temporary tcp structure for inserting into the bind hash list. 23609 * The parameter is assumed to be in network byte order, ready for use. 23610 */ 23611 /* ARGSUSED */ 23612 static tcp_t * 23613 tcp_alloc_temp_tcp(in_port_t port, tcp_stack_t *tcps) 23614 { 23615 conn_t *connp; 23616 tcp_t *tcp; 23617 23618 connp = ipcl_conn_create(IPCL_TCPCONN, KM_SLEEP, tcps->tcps_netstack); 23619 if (connp == NULL) 23620 return (NULL); 23621 23622 tcp = connp->conn_tcp; 23623 tcp->tcp_tcps = tcps; 23624 TCPS_REFHOLD(tcps); 23625 23626 /* 23627 * Only initialize the necessary info in those structures. Note 23628 * that since INADDR_ANY is all 0, we do not need to set 23629 * tcp_bound_source to INADDR_ANY here. 23630 */ 23631 tcp->tcp_state = TCPS_BOUND; 23632 tcp->tcp_lport = port; 23633 tcp->tcp_exclbind = 1; 23634 tcp->tcp_reserved_port = 1; 23635 23636 /* Just for place holding... */ 23637 tcp->tcp_ipversion = IPV4_VERSION; 23638 23639 return (tcp); 23640 } 23641 23642 /* 23643 * To remove a port range specified by lo_port and hi_port from the 23644 * reserved port ranges. This is one of the three public functions of 23645 * the reserved port interface. Note that a port range has to be removed 23646 * as a whole. Ports in a range cannot be removed individually. 23647 * 23648 * Params: 23649 * in_port_t lo_port: the beginning port of the reserved port range to 23650 * be deleted. 23651 * in_port_t hi_port: the ending port of the reserved port range to 23652 * be deleted. 23653 * 23654 * Return: 23655 * B_TRUE if the deletion is successful, B_FALSE otherwise. 23656 * 23657 * Assumes that nca is only for zoneid=0 23658 */ 23659 boolean_t 23660 tcp_reserved_port_del(in_port_t lo_port, in_port_t hi_port) 23661 { 23662 int i, j; 23663 int size; 23664 tcp_t **temp_tcp_array; 23665 tcp_t *tcp; 23666 tcp_stack_t *tcps; 23667 23668 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp; 23669 ASSERT(tcps != NULL); 23670 23671 rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER); 23672 23673 /* First make sure that the port range is indeed reserved. */ 23674 for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) { 23675 if (tcps->tcps_reserved_port[i].lo_port == lo_port) { 23676 hi_port = tcps->tcps_reserved_port[i].hi_port; 23677 temp_tcp_array = 23678 tcps->tcps_reserved_port[i].temp_tcp_array; 23679 break; 23680 } 23681 } 23682 if (i == tcps->tcps_reserved_port_array_size) { 23683 rw_exit(&tcps->tcps_reserved_port_lock); 23684 netstack_rele(tcps->tcps_netstack); 23685 return (B_FALSE); 23686 } 23687 23688 /* 23689 * Remove the range from the array. This simple loop is possible 23690 * because port ranges are inserted in ascending order. 23691 */ 23692 for (j = i; j < tcps->tcps_reserved_port_array_size - 1; j++) { 23693 tcps->tcps_reserved_port[j].lo_port = 23694 tcps->tcps_reserved_port[j+1].lo_port; 23695 tcps->tcps_reserved_port[j].hi_port = 23696 tcps->tcps_reserved_port[j+1].hi_port; 23697 tcps->tcps_reserved_port[j].temp_tcp_array = 23698 tcps->tcps_reserved_port[j+1].temp_tcp_array; 23699 } 23700 23701 /* Remove all the temporary tcp structures. 
*/ 23702 size = hi_port - lo_port + 1; 23703 while (size > 0) { 23704 tcp = temp_tcp_array[size - 1]; 23705 ASSERT(tcp != NULL); 23706 tcp_bind_hash_remove(tcp); 23707 CONN_DEC_REF(tcp->tcp_connp); 23708 size--; 23709 } 23710 kmem_free(temp_tcp_array, (hi_port - lo_port + 1) * sizeof (tcp_t *)); 23711 tcps->tcps_reserved_port_array_size--; 23712 rw_exit(&tcps->tcps_reserved_port_lock); 23713 netstack_rele(tcps->tcps_netstack); 23714 return (B_TRUE); 23715 } 23716 23717 /* 23718 * Macro to remove temporary tcp structure from the bind hash list. The 23719 * first parameter is the list of tcp to be removed. The second parameter 23720 * is the number of tcps in the array. 23721 */ 23722 #define TCP_TMP_TCP_REMOVE(tcp_array, num, tcps) \ 23723 { \ 23724 while ((num) > 0) { \ 23725 tcp_t *tcp = (tcp_array)[(num) - 1]; \ 23726 tf_t *tbf; \ 23727 tcp_t *tcpnext; \ 23728 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)]; \ 23729 mutex_enter(&tbf->tf_lock); \ 23730 tcpnext = tcp->tcp_bind_hash; \ 23731 if (tcpnext) { \ 23732 tcpnext->tcp_ptpbhn = \ 23733 tcp->tcp_ptpbhn; \ 23734 } \ 23735 *tcp->tcp_ptpbhn = tcpnext; \ 23736 mutex_exit(&tbf->tf_lock); \ 23737 kmem_free(tcp, sizeof (tcp_t)); \ 23738 (tcp_array)[(num) - 1] = NULL; \ 23739 (num)--; \ 23740 } \ 23741 } 23742 23743 /* 23744 * The public interface for other modules to call to reserve a port range 23745 * in TCP. The caller passes in how large a port range it wants. TCP 23746 * will try to find a range and return it via lo_port and hi_port. This is 23747 * used by NCA's nca_conn_init. 23748 * NCA can only be used in the global zone so this only affects the global 23749 * zone's ports. 23750 * 23751 * Params: 23752 * int size: the size of the port range to be reserved. 23753 * in_port_t *lo_port (referenced): returns the beginning port of the 23754 * reserved port range added. 23755 * in_port_t *hi_port (referenced): returns the ending port of the 23756 * reserved port range added. 23757 * 23758 * Return: 23759 * B_TRUE if the port reservation is successful, B_FALSE otherwise. 23760 * 23761 * Assumes that nca is only for zoneid=0 23762 */ 23763 boolean_t 23764 tcp_reserved_port_add(int size, in_port_t *lo_port, in_port_t *hi_port) 23765 { 23766 tcp_t *tcp; 23767 tcp_t *tmp_tcp; 23768 tcp_t **temp_tcp_array; 23769 tf_t *tbf; 23770 in_port_t net_port; 23771 in_port_t port; 23772 int32_t cur_size; 23773 int i, j; 23774 boolean_t used; 23775 tcp_rport_t tmp_ports[TCP_RESERVED_PORTS_ARRAY_MAX_SIZE]; 23776 zoneid_t zoneid = GLOBAL_ZONEID; 23777 tcp_stack_t *tcps; 23778 23779 /* Sanity check. */ 23780 if (size <= 0 || size > TCP_RESERVED_PORTS_RANGE_MAX) { 23781 return (B_FALSE); 23782 } 23783 23784 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp; 23785 ASSERT(tcps != NULL); 23786 23787 rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER); 23788 if (tcps->tcps_reserved_port_array_size == 23789 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE) { 23790 rw_exit(&tcps->tcps_reserved_port_lock); 23791 netstack_rele(tcps->tcps_netstack); 23792 return (B_FALSE); 23793 } 23794 23795 /* 23796 * Find the starting port to try. Since the port ranges are ordered 23797 * in the reserved port array, we can do a simple search here. 
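	 *
	 * A worked sketch of the gap search below (hypothetical port
	 * numbers, not from the source): suppose the array already holds
	 * [10240-10339] and [10500-10599] and size is 100.  The loop
	 * advances *lo_port past each existing range, so after the first
	 * iteration *lo_port is 10340; since 10500 - 10340 >= 100 the
	 * search settles on that gap, and the range handed back to the
	 * caller ends up being [10340-10439].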
23798 */ 23799 *lo_port = TCP_SMALLEST_RESERVED_PORT; 23800 *hi_port = TCP_LARGEST_RESERVED_PORT; 23801 for (i = 0; i < tcps->tcps_reserved_port_array_size; 23802 *lo_port = tcps->tcps_reserved_port[i].hi_port + 1, i++) { 23803 if (tcps->tcps_reserved_port[i].lo_port - *lo_port >= size) { 23804 *hi_port = tcps->tcps_reserved_port[i].lo_port - 1; 23805 break; 23806 } 23807 } 23808 /* No available port range. */ 23809 if (i == tcps->tcps_reserved_port_array_size && 23810 *hi_port - *lo_port < size) { 23811 rw_exit(&tcps->tcps_reserved_port_lock); 23812 netstack_rele(tcps->tcps_netstack); 23813 return (B_FALSE); 23814 } 23815 23816 temp_tcp_array = kmem_zalloc(size * sizeof (tcp_t *), KM_NOSLEEP); 23817 if (temp_tcp_array == NULL) { 23818 rw_exit(&tcps->tcps_reserved_port_lock); 23819 netstack_rele(tcps->tcps_netstack); 23820 return (B_FALSE); 23821 } 23822 23823 /* Go thru the port range to see if some ports are already bound. */ 23824 for (port = *lo_port, cur_size = 0; 23825 cur_size < size && port <= *hi_port; 23826 cur_size++, port++) { 23827 used = B_FALSE; 23828 net_port = htons(port); 23829 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(net_port)]; 23830 mutex_enter(&tbf->tf_lock); 23831 for (tcp = tbf->tf_tcp; tcp != NULL; 23832 tcp = tcp->tcp_bind_hash) { 23833 if (IPCL_ZONE_MATCH(tcp->tcp_connp, zoneid) && 23834 net_port == tcp->tcp_lport) { 23835 /* 23836 * A port is already bound. Search again 23837 * starting from port + 1. Release all 23838 * temporary tcps. 23839 */ 23840 mutex_exit(&tbf->tf_lock); 23841 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23842 tcps); 23843 *lo_port = port + 1; 23844 cur_size = -1; 23845 used = B_TRUE; 23846 break; 23847 } 23848 } 23849 if (!used) { 23850 if ((tmp_tcp = tcp_alloc_temp_tcp(net_port, tcps)) == 23851 NULL) { 23852 /* 23853 * Allocation failure. Just fail the request. 23854 * Need to remove all those temporary tcp 23855 * structures. 23856 */ 23857 mutex_exit(&tbf->tf_lock); 23858 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23859 tcps); 23860 rw_exit(&tcps->tcps_reserved_port_lock); 23861 kmem_free(temp_tcp_array, 23862 (hi_port - lo_port + 1) * 23863 sizeof (tcp_t *)); 23864 netstack_rele(tcps->tcps_netstack); 23865 return (B_FALSE); 23866 } 23867 temp_tcp_array[cur_size] = tmp_tcp; 23868 tcp_bind_hash_insert(tbf, tmp_tcp, B_TRUE); 23869 mutex_exit(&tbf->tf_lock); 23870 } 23871 } 23872 23873 /* 23874 * The current range is not large enough. We can actually do another 23875 * search if this search is done between 2 reserved port ranges. But 23876 * for first release, we just stop here and return saying that no port 23877 * range is available. 23878 */ 23879 if (cur_size < size) { 23880 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, tcps); 23881 rw_exit(&tcps->tcps_reserved_port_lock); 23882 kmem_free(temp_tcp_array, size * sizeof (tcp_t *)); 23883 netstack_rele(tcps->tcps_netstack); 23884 return (B_FALSE); 23885 } 23886 *hi_port = port - 1; 23887 23888 /* 23889 * Insert range into array in ascending order. Since this function 23890 * must not be called often, we choose to use the simplest method. 23891 * The above array should not consume excessive stack space as 23892 * the size must be very small. If in future releases, we find 23893 * that we should provide more reserved port ranges, this function 23894 * has to be modified to be more efficient. 
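	 *
	 * For example (hypothetical ranges): if the array holds
	 * A=[100-199] and C=[500-599] and the new range is B=[300-399],
	 * the loop below copies A into tmp_ports, notices B belongs
	 * before C, appends B, then copies C, and finally bcopy()s
	 * tmp_ports back over the array, yielding A, B, C in ascending
	 * order.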
	 */
	if (tcps->tcps_reserved_port_array_size == 0) {
		tcps->tcps_reserved_port[0].lo_port = *lo_port;
		tcps->tcps_reserved_port[0].hi_port = *hi_port;
		tcps->tcps_reserved_port[0].temp_tcp_array = temp_tcp_array;
	} else {
		for (i = 0, j = 0; i < tcps->tcps_reserved_port_array_size;
		    i++, j++) {
			if (*lo_port < tcps->tcps_reserved_port[i].lo_port &&
			    i == j) {
				tmp_ports[j].lo_port = *lo_port;
				tmp_ports[j].hi_port = *hi_port;
				tmp_ports[j].temp_tcp_array = temp_tcp_array;
				j++;
			}
			tmp_ports[j].lo_port =
			    tcps->tcps_reserved_port[i].lo_port;
			tmp_ports[j].hi_port =
			    tcps->tcps_reserved_port[i].hi_port;
			tmp_ports[j].temp_tcp_array =
			    tcps->tcps_reserved_port[i].temp_tcp_array;
		}
		if (j == i) {
			tmp_ports[j].lo_port = *lo_port;
			tmp_ports[j].hi_port = *hi_port;
			tmp_ports[j].temp_tcp_array = temp_tcp_array;
		}
		bcopy(tmp_ports, tcps->tcps_reserved_port, sizeof (tmp_ports));
	}
	tcps->tcps_reserved_port_array_size++;
	rw_exit(&tcps->tcps_reserved_port_lock);
	netstack_rele(tcps->tcps_netstack);
	return (B_TRUE);
}

/*
 * Check to see if a port is in any reserved port range.
 *
 * Params:
 *	in_port_t port: the port to be verified.
 *
 * Return:
 *	B_TRUE if the port is inside a reserved port range, B_FALSE otherwise.
 */
boolean_t
tcp_reserved_port_check(in_port_t port, tcp_stack_t *tcps)
{
	int i;

	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
		if (port >= tcps->tcps_reserved_port[i].lo_port &&
		    port <= tcps->tcps_reserved_port[i].hi_port) {
			rw_exit(&tcps->tcps_reserved_port_lock);
			return (B_TRUE);
		}
	}
	rw_exit(&tcps->tcps_reserved_port_lock);
	return (B_FALSE);
}

/*
 * To list all reserved port ranges.  This is the function to handle
 * ndd tcp_reserved_port_list.
 */
/* ARGSUSED */
static int
tcp_reserved_port_list(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
{
	int i;
	tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;

	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
	if (tcps->tcps_reserved_port_array_size > 0)
		(void) mi_mpprintf(mp, "The following ports are reserved:");
	else
		(void) mi_mpprintf(mp, "No port is reserved.");
	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
		(void) mi_mpprintf(mp, "%d-%d",
		    tcps->tcps_reserved_port[i].lo_port,
		    tcps->tcps_reserved_port[i].hi_port);
	}
	rw_exit(&tcps->tcps_reserved_port_lock);
	return (0);
}

/*
 * Hash list insertion routine for tcp_t structures.
 * Inserts entries with the ones bound to a specific IP address first
 * followed by those bound to INADDR_ANY.
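 *
 * For instance (hypothetical addresses, not from the source), if a
 * bucket holds
 *
 *	10.0.0.1:80 -> INADDR_ANY:80
 *
 * a newly inserted INADDR_ANY:80 entry is linked after the specific
 * bind but ahead of the existing INADDR_ANY entry, while a new
 * 10.0.0.2:80 entry goes at the head of the chain.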
23985 */ 23986 static void 23987 tcp_bind_hash_insert(tf_t *tbf, tcp_t *tcp, int caller_holds_lock) 23988 { 23989 tcp_t **tcpp; 23990 tcp_t *tcpnext; 23991 23992 if (tcp->tcp_ptpbhn != NULL) { 23993 ASSERT(!caller_holds_lock); 23994 tcp_bind_hash_remove(tcp); 23995 } 23996 tcpp = &tbf->tf_tcp; 23997 if (!caller_holds_lock) { 23998 mutex_enter(&tbf->tf_lock); 23999 } else { 24000 ASSERT(MUTEX_HELD(&tbf->tf_lock)); 24001 } 24002 tcpnext = tcpp[0]; 24003 if (tcpnext) { 24004 /* 24005 * If the new tcp bound to the INADDR_ANY address 24006 * and the first one in the list is not bound to 24007 * INADDR_ANY we skip all entries until we find the 24008 * first one bound to INADDR_ANY. 24009 * This makes sure that applications binding to a 24010 * specific address get preference over those binding to 24011 * INADDR_ANY. 24012 */ 24013 if (V6_OR_V4_INADDR_ANY(tcp->tcp_bound_source_v6) && 24014 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) { 24015 while ((tcpnext = tcpp[0]) != NULL && 24016 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) 24017 tcpp = &(tcpnext->tcp_bind_hash); 24018 if (tcpnext) 24019 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 24020 } else 24021 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 24022 } 24023 tcp->tcp_bind_hash = tcpnext; 24024 tcp->tcp_ptpbhn = tcpp; 24025 tcpp[0] = tcp; 24026 if (!caller_holds_lock) 24027 mutex_exit(&tbf->tf_lock); 24028 } 24029 24030 /* 24031 * Hash list removal routine for tcp_t structures. 24032 */ 24033 static void 24034 tcp_bind_hash_remove(tcp_t *tcp) 24035 { 24036 tcp_t *tcpnext; 24037 kmutex_t *lockp; 24038 tcp_stack_t *tcps = tcp->tcp_tcps; 24039 24040 if (tcp->tcp_ptpbhn == NULL) 24041 return; 24042 24043 /* 24044 * Extract the lock pointer in case there are concurrent 24045 * hash_remove's for this instance. 24046 */ 24047 ASSERT(tcp->tcp_lport != 0); 24048 lockp = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)].tf_lock; 24049 24050 ASSERT(lockp != NULL); 24051 mutex_enter(lockp); 24052 if (tcp->tcp_ptpbhn) { 24053 tcpnext = tcp->tcp_bind_hash; 24054 if (tcpnext) { 24055 tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn; 24056 tcp->tcp_bind_hash = NULL; 24057 } 24058 *tcp->tcp_ptpbhn = tcpnext; 24059 tcp->tcp_ptpbhn = NULL; 24060 } 24061 mutex_exit(lockp); 24062 } 24063 24064 24065 /* 24066 * Hash list lookup routine for tcp_t structures. 24067 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF. 24068 */ 24069 static tcp_t * 24070 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps) 24071 { 24072 tf_t *tf; 24073 tcp_t *tcp; 24074 24075 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 24076 mutex_enter(&tf->tf_lock); 24077 for (tcp = tf->tf_tcp; tcp != NULL; 24078 tcp = tcp->tcp_acceptor_hash) { 24079 if (tcp->tcp_acceptor_id == id) { 24080 CONN_INC_REF(tcp->tcp_connp); 24081 mutex_exit(&tf->tf_lock); 24082 return (tcp); 24083 } 24084 } 24085 mutex_exit(&tf->tf_lock); 24086 return (NULL); 24087 } 24088 24089 24090 /* 24091 * Hash list insertion routine for tcp_t structures. 
24092 */ 24093 void 24094 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp) 24095 { 24096 tf_t *tf; 24097 tcp_t **tcpp; 24098 tcp_t *tcpnext; 24099 tcp_stack_t *tcps = tcp->tcp_tcps; 24100 24101 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 24102 24103 if (tcp->tcp_ptpahn != NULL) 24104 tcp_acceptor_hash_remove(tcp); 24105 tcpp = &tf->tf_tcp; 24106 mutex_enter(&tf->tf_lock); 24107 tcpnext = tcpp[0]; 24108 if (tcpnext) 24109 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash; 24110 tcp->tcp_acceptor_hash = tcpnext; 24111 tcp->tcp_ptpahn = tcpp; 24112 tcpp[0] = tcp; 24113 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */ 24114 mutex_exit(&tf->tf_lock); 24115 } 24116 24117 /* 24118 * Hash list removal routine for tcp_t structures. 24119 */ 24120 static void 24121 tcp_acceptor_hash_remove(tcp_t *tcp) 24122 { 24123 tcp_t *tcpnext; 24124 kmutex_t *lockp; 24125 24126 /* 24127 * Extract the lock pointer in case there are concurrent 24128 * hash_remove's for this instance. 24129 */ 24130 lockp = tcp->tcp_acceptor_lockp; 24131 24132 if (tcp->tcp_ptpahn == NULL) 24133 return; 24134 24135 ASSERT(lockp != NULL); 24136 mutex_enter(lockp); 24137 if (tcp->tcp_ptpahn) { 24138 tcpnext = tcp->tcp_acceptor_hash; 24139 if (tcpnext) { 24140 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn; 24141 tcp->tcp_acceptor_hash = NULL; 24142 } 24143 *tcp->tcp_ptpahn = tcpnext; 24144 tcp->tcp_ptpahn = NULL; 24145 } 24146 mutex_exit(lockp); 24147 tcp->tcp_acceptor_lockp = NULL; 24148 } 24149 24150 /* ARGSUSED */ 24151 static int 24152 tcp_host_param_setvalue(queue_t *q, mblk_t *mp, char *value, caddr_t cp, int af) 24153 { 24154 int error = 0; 24155 int retval; 24156 char *end; 24157 tcp_hsp_t *hsp; 24158 tcp_hsp_t *hspprev; 24159 ipaddr_t addr = 0; /* Address we're looking for */ 24160 in6_addr_t v6addr; /* Address we're looking for */ 24161 uint32_t hash; /* Hash of that address */ 24162 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24163 24164 /* 24165 * If the following variables are still zero after parsing the input 24166 * string, the user didn't specify them and we don't change them in 24167 * the HSP. 
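	 *
	 * The string parsed below is an address followed by optional
	 * keyword/value pairs, e.g. (a hypothetical setting; keywords
	 * per the parser that follows):
	 *
	 *	192.168.1.5 mask 255.255.255.0 sendspace 65536 timestamp 1
	 *
	 * or "<addr> delete" to remove an entry.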
	 */

	ipaddr_t mask = 0;		/* Subnet mask */
	in6_addr_t v6mask;
	long sendspace = 0;		/* Send buffer size */
	long recvspace = 0;		/* Receive buffer size */
	long timestamp = 0;	/* Originate TCP TSTAMP option, 1 = yes */
	boolean_t delete = B_FALSE;	/* User asked to delete this HSP */

	rw_enter(&tcps->tcps_hsp_lock, RW_WRITER);

	/* Parse and validate address */
	if (af == AF_INET) {
		retval = inet_pton(af, value, &addr);
		if (retval == 1)
			IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
	} else if (af == AF_INET6) {
		retval = inet_pton(af, value, &v6addr);
	} else {
		error = EINVAL;
		goto done;
	}
	if (retval == 0) {
		error = EINVAL;
		goto done;
	}

	while ((*value) && *value != ' ')
		value++;

	/* Parse individual keywords, set variables if found */
	while (*value) {
		/* Skip leading blanks */

		while (*value == ' ' || *value == '\t')
			value++;

		/* If at end of string, we're done */

		if (!*value)
			break;

		/* We have a word, figure out what it is */

		if (strncmp("mask", value, 4) == 0) {
			value += 4;
			while (*value == ' ' || *value == '\t')
				value++;
			/* Parse subnet mask */
			if (af == AF_INET) {
				retval = inet_pton(af, value, &mask);
				if (retval == 1) {
					V4MASK_TO_V6(mask, v6mask);
				}
			} else if (af == AF_INET6) {
				retval = inet_pton(af, value, &v6mask);
			}
			if (retval != 1) {
				error = EINVAL;
				goto done;
			}
			while ((*value) && *value != ' ')
				value++;
		} else if (strncmp("sendspace", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &sendspace) != 0 ||
			    sendspace < TCP_XMIT_HIWATER ||
			    sendspace >= (1L<<30)) {
				error = EINVAL;
				goto done;
			}
			value = end;
		} else if (strncmp("recvspace", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &recvspace) != 0 ||
			    recvspace < TCP_RECV_HIWATER ||
			    recvspace >= (1L<<30)) {
				error = EINVAL;
				goto done;
			}
			value = end;
		} else if (strncmp("timestamp", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &timestamp) != 0 ||
			    timestamp < 0 || timestamp > 1) {
				error = EINVAL;
				goto done;
			}

			/*
			 * We increment timestamp so we know it's been set;
			 * this is undone when we put it in the HSP
			 */
			timestamp++;
			value = end;
		} else if (strncmp("delete", value, 6) == 0) {
			value += 6;
			delete = B_TRUE;
		} else {
			error = EINVAL;
			goto done;
		}
	}

	/* Hash address for lookup */

	hash = TCP_HSP_HASH(addr);

	if (delete) {
		/*
		 * Note that deletes don't return an error if the thing
		 * we're trying to delete isn't there.
24283 */ 24284 if (tcps->tcps_hsp_hash == NULL) 24285 goto done; 24286 hsp = tcps->tcps_hsp_hash[hash]; 24287 24288 if (hsp) { 24289 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24290 &v6addr)) { 24291 tcps->tcps_hsp_hash[hash] = hsp->tcp_hsp_next; 24292 mi_free((char *)hsp); 24293 } else { 24294 hspprev = hsp; 24295 while ((hsp = hsp->tcp_hsp_next) != NULL) { 24296 if (IN6_ARE_ADDR_EQUAL( 24297 &hsp->tcp_hsp_addr_v6, &v6addr)) { 24298 hspprev->tcp_hsp_next = 24299 hsp->tcp_hsp_next; 24300 mi_free((char *)hsp); 24301 break; 24302 } 24303 hspprev = hsp; 24304 } 24305 } 24306 } 24307 } else { 24308 /* 24309 * We're adding/modifying an HSP. If we haven't already done 24310 * so, allocate the hash table. 24311 */ 24312 24313 if (!tcps->tcps_hsp_hash) { 24314 tcps->tcps_hsp_hash = (tcp_hsp_t **) 24315 mi_zalloc(sizeof (tcp_hsp_t *) * TCP_HSP_HASH_SIZE); 24316 if (!tcps->tcps_hsp_hash) { 24317 error = EINVAL; 24318 goto done; 24319 } 24320 } 24321 24322 /* Get head of hash chain */ 24323 24324 hsp = tcps->tcps_hsp_hash[hash]; 24325 24326 /* Try to find pre-existing hsp on hash chain */ 24327 /* Doesn't handle CIDR prefixes. */ 24328 while (hsp) { 24329 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, &v6addr)) 24330 break; 24331 hsp = hsp->tcp_hsp_next; 24332 } 24333 24334 /* 24335 * If we didn't, create one with default values and put it 24336 * at head of hash chain 24337 */ 24338 24339 if (!hsp) { 24340 hsp = (tcp_hsp_t *)mi_zalloc(sizeof (tcp_hsp_t)); 24341 if (!hsp) { 24342 error = EINVAL; 24343 goto done; 24344 } 24345 hsp->tcp_hsp_next = tcps->tcps_hsp_hash[hash]; 24346 tcps->tcps_hsp_hash[hash] = hsp; 24347 } 24348 24349 /* Set values that the user asked us to change */ 24350 24351 hsp->tcp_hsp_addr_v6 = v6addr; 24352 if (IN6_IS_ADDR_V4MAPPED(&v6addr)) 24353 hsp->tcp_hsp_vers = IPV4_VERSION; 24354 else 24355 hsp->tcp_hsp_vers = IPV6_VERSION; 24356 hsp->tcp_hsp_subnet_v6 = v6mask; 24357 if (sendspace > 0) 24358 hsp->tcp_hsp_sendspace = sendspace; 24359 if (recvspace > 0) 24360 hsp->tcp_hsp_recvspace = recvspace; 24361 if (timestamp > 0) 24362 hsp->tcp_hsp_tstamp = timestamp - 1; 24363 } 24364 24365 done: 24366 rw_exit(&tcps->tcps_hsp_lock); 24367 return (error); 24368 } 24369 24370 /* Set callback routine passed to nd_load by tcp_param_register. */ 24371 /* ARGSUSED */ 24372 static int 24373 tcp_host_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 24374 { 24375 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET)); 24376 } 24377 /* ARGSUSED */ 24378 static int 24379 tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 24380 cred_t *cr) 24381 { 24382 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET6)); 24383 } 24384 24385 /* TCP host parameters report triggered via the Named Dispatch mechanism. 
*/ 24386 /* ARGSUSED */ 24387 static int 24388 tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 24389 { 24390 tcp_hsp_t *hsp; 24391 int i; 24392 char addrbuf[INET6_ADDRSTRLEN], subnetbuf[INET6_ADDRSTRLEN]; 24393 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24394 24395 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24396 (void) mi_mpprintf(mp, 24397 "Hash HSP " MI_COL_HDRPAD_STR 24398 "Address Subnet Mask Send Receive TStamp"); 24399 if (tcps->tcps_hsp_hash) { 24400 for (i = 0; i < TCP_HSP_HASH_SIZE; i++) { 24401 hsp = tcps->tcps_hsp_hash[i]; 24402 while (hsp) { 24403 if (hsp->tcp_hsp_vers == IPV4_VERSION) { 24404 (void) inet_ntop(AF_INET, 24405 &hsp->tcp_hsp_addr, 24406 addrbuf, sizeof (addrbuf)); 24407 (void) inet_ntop(AF_INET, 24408 &hsp->tcp_hsp_subnet, 24409 subnetbuf, sizeof (subnetbuf)); 24410 } else { 24411 (void) inet_ntop(AF_INET6, 24412 &hsp->tcp_hsp_addr_v6, 24413 addrbuf, sizeof (addrbuf)); 24414 (void) inet_ntop(AF_INET6, 24415 &hsp->tcp_hsp_subnet_v6, 24416 subnetbuf, sizeof (subnetbuf)); 24417 } 24418 (void) mi_mpprintf(mp, 24419 " %03d " MI_COL_PTRFMT_STR 24420 "%s %s %010d %010d %d", 24421 i, 24422 (void *)hsp, 24423 addrbuf, 24424 subnetbuf, 24425 hsp->tcp_hsp_sendspace, 24426 hsp->tcp_hsp_recvspace, 24427 hsp->tcp_hsp_tstamp); 24428 24429 hsp = hsp->tcp_hsp_next; 24430 } 24431 } 24432 } 24433 rw_exit(&tcps->tcps_hsp_lock); 24434 return (0); 24435 } 24436 24437 24438 /* Data for fast netmask macro used by tcp_hsp_lookup */ 24439 24440 static ipaddr_t netmasks[] = { 24441 IN_CLASSA_NET, IN_CLASSA_NET, IN_CLASSB_NET, 24442 IN_CLASSC_NET | IN_CLASSD_NET /* Class C,D,E */ 24443 }; 24444 24445 #define netmask(addr) (netmasks[(ipaddr_t)(addr) >> 30]) 24446 24447 /* 24448 * XXX This routine should go away and instead we should use the metrics 24449 * associated with the routes to determine the default sndspace and rcvspace. 24450 */ 24451 static tcp_hsp_t * 24452 tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *tcps) 24453 { 24454 tcp_hsp_t *hsp = NULL; 24455 24456 /* Quick check without acquiring the lock. */ 24457 if (tcps->tcps_hsp_hash == NULL) 24458 return (NULL); 24459 24460 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24461 24462 /* This routine finds the best-matching HSP for address addr. */ 24463 24464 if (tcps->tcps_hsp_hash) { 24465 int i; 24466 ipaddr_t srchaddr; 24467 tcp_hsp_t *hsp_net; 24468 24469 /* We do three passes: host, network, and subnet. */ 24470 24471 srchaddr = addr; 24472 24473 for (i = 1; i <= 3; i++) { 24474 /* Look for exact match on srchaddr */ 24475 24476 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH(srchaddr)]; 24477 while (hsp) { 24478 if (hsp->tcp_hsp_vers == IPV4_VERSION && 24479 hsp->tcp_hsp_addr == srchaddr) 24480 break; 24481 hsp = hsp->tcp_hsp_next; 24482 } 24483 ASSERT(hsp == NULL || 24484 hsp->tcp_hsp_vers == IPV4_VERSION); 24485 24486 /* 24487 * If this is the first pass: 24488 * If we found a match, great, return it. 24489 * If not, search for the network on the second pass. 24490 */ 24491 24492 if (i == 1) 24493 if (hsp) 24494 break; 24495 else 24496 { 24497 srchaddr = addr & netmask(addr); 24498 continue; 24499 } 24500 24501 /* 24502 * If this is the second pass: 24503 * If we found a match, but there's a subnet mask, 24504 * save the match but try again using the subnet 24505 * mask on the third pass. 24506 * Otherwise, return whatever we found. 
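			 *
			 * A worked pass sequence (hypothetical addresses):
			 * for addr 10.1.2.3 with no host HSP, pass 2 looks
			 * up the classful network 10.0.0.0 (netmask() gives
			 * 255.0.0.0 for a class A address).  If that HSP
			 * carries tcp_hsp_subnet 255.255.0.0, pass 3 then
			 * tries 10.1.0.0, falling back to the saved network
			 * HSP when no subnet entry exists.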
24507 */ 24508 24509 if (i == 2) { 24510 if (hsp && hsp->tcp_hsp_subnet) { 24511 hsp_net = hsp; 24512 srchaddr = addr & hsp->tcp_hsp_subnet; 24513 continue; 24514 } else { 24515 break; 24516 } 24517 } 24518 24519 /* 24520 * This must be the third pass. If we didn't find 24521 * anything, return the saved network HSP instead. 24522 */ 24523 24524 if (!hsp) 24525 hsp = hsp_net; 24526 } 24527 } 24528 24529 rw_exit(&tcps->tcps_hsp_lock); 24530 return (hsp); 24531 } 24532 24533 /* 24534 * XXX Equally broken as the IPv4 routine. Doesn't handle longest 24535 * match lookup. 24536 */ 24537 static tcp_hsp_t * 24538 tcp_hsp_lookup_ipv6(in6_addr_t *v6addr, tcp_stack_t *tcps) 24539 { 24540 tcp_hsp_t *hsp = NULL; 24541 24542 /* Quick check without acquiring the lock. */ 24543 if (tcps->tcps_hsp_hash == NULL) 24544 return (NULL); 24545 24546 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24547 24548 /* This routine finds the best-matching HSP for address addr. */ 24549 24550 if (tcps->tcps_hsp_hash) { 24551 int i; 24552 in6_addr_t v6srchaddr; 24553 tcp_hsp_t *hsp_net; 24554 24555 /* We do three passes: host, network, and subnet. */ 24556 24557 v6srchaddr = *v6addr; 24558 24559 for (i = 1; i <= 3; i++) { 24560 /* Look for exact match on srchaddr */ 24561 24562 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH( 24563 V4_PART_OF_V6(v6srchaddr))]; 24564 while (hsp) { 24565 if (hsp->tcp_hsp_vers == IPV6_VERSION && 24566 IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24567 &v6srchaddr)) 24568 break; 24569 hsp = hsp->tcp_hsp_next; 24570 } 24571 24572 /* 24573 * If this is the first pass: 24574 * If we found a match, great, return it. 24575 * If not, search for the network on the second pass. 24576 */ 24577 24578 if (i == 1) 24579 if (hsp) 24580 break; 24581 else { 24582 /* Assume a 64 bit mask */ 24583 v6srchaddr.s6_addr32[0] = 24584 v6addr->s6_addr32[0]; 24585 v6srchaddr.s6_addr32[1] = 24586 v6addr->s6_addr32[1]; 24587 v6srchaddr.s6_addr32[2] = 0; 24588 v6srchaddr.s6_addr32[3] = 0; 24589 continue; 24590 } 24591 24592 /* 24593 * If this is the second pass: 24594 * If we found a match, but there's a subnet mask, 24595 * save the match but try again using the subnet 24596 * mask on the third pass. 24597 * Otherwise, return whatever we found. 24598 */ 24599 24600 if (i == 2) { 24601 ASSERT(hsp == NULL || 24602 hsp->tcp_hsp_vers == IPV6_VERSION); 24603 if (hsp && 24604 !IN6_IS_ADDR_UNSPECIFIED( 24605 &hsp->tcp_hsp_subnet_v6)) { 24606 hsp_net = hsp; 24607 V6_MASK_COPY(*v6addr, 24608 hsp->tcp_hsp_subnet_v6, v6srchaddr); 24609 continue; 24610 } else { 24611 break; 24612 } 24613 } 24614 24615 /* 24616 * This must be the third pass. If we didn't find 24617 * anything, return the saved network HSP instead. 24618 */ 24619 24620 if (!hsp) 24621 hsp = hsp_net; 24622 } 24623 } 24624 24625 rw_exit(&tcps->tcps_hsp_lock); 24626 return (hsp); 24627 } 24628 24629 /* 24630 * Type three generator adapted from the random() function in 4.4 BSD: 24631 */ 24632 24633 /* 24634 * Copyright (c) 1983, 1993 24635 * The Regents of the University of California. All rights reserved. 24636 * 24637 * Redistribution and use in source and binary forms, with or without 24638 * modification, are permitted provided that the following conditions 24639 * are met: 24640 * 1. Redistributions of source code must retain the above copyright 24641 * notice, this list of conditions and the following disclaimer. 24642 * 2. 
Redistributions in binary form must reproduce the above copyright 24643 * notice, this list of conditions and the following disclaimer in the 24644 * documentation and/or other materials provided with the distribution. 24645 * 3. All advertising materials mentioning features or use of this software 24646 * must display the following acknowledgement: 24647 * This product includes software developed by the University of 24648 * California, Berkeley and its contributors. 24649 * 4. Neither the name of the University nor the names of its contributors 24650 * may be used to endorse or promote products derived from this software 24651 * without specific prior written permission. 24652 * 24653 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24654 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24655 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24656 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24657 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24658 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24659 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24660 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24661 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24662 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24663 * SUCH DAMAGE. 24664 */ 24665 24666 /* Type 3 -- x**31 + x**3 + 1 */ 24667 #define DEG_3 31 24668 #define SEP_3 3 24669 24670 24671 /* Protected by tcp_random_lock */ 24672 static int tcp_randtbl[DEG_3 + 1]; 24673 24674 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1]; 24675 static int *tcp_random_rptr = &tcp_randtbl[1]; 24676 24677 static int *tcp_random_state = &tcp_randtbl[1]; 24678 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1]; 24679 24680 kmutex_t tcp_random_lock; 24681 24682 void 24683 tcp_random_init(void) 24684 { 24685 int i; 24686 hrtime_t hrt; 24687 time_t wallclock; 24688 uint64_t result; 24689 24690 /* 24691 * Use high-res timer and current time for seed. Gethrtime() returns 24692 * a longlong, which may contain resolution down to nanoseconds. 24693 * The current time will either be a 32-bit or a 64-bit quantity. 24694 * XOR the two together in a 64-bit result variable. 24695 * Convert the result to a 32-bit value by multiplying the high-order 24696 * 32-bits by the low-order 32-bits. 24697 */ 24698 24699 hrt = gethrtime(); 24700 (void) drv_getparm(TIME, &wallclock); 24701 result = (uint64_t)wallclock ^ (uint64_t)hrt; 24702 mutex_enter(&tcp_random_lock); 24703 tcp_random_state[0] = ((result >> 32) & 0xffffffff) * 24704 (result & 0xffffffff); 24705 24706 for (i = 1; i < DEG_3; i++) 24707 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1] 24708 + 12345; 24709 tcp_random_fptr = &tcp_random_state[SEP_3]; 24710 tcp_random_rptr = &tcp_random_state[0]; 24711 mutex_exit(&tcp_random_lock); 24712 for (i = 0; i < 10 * DEG_3; i++) 24713 (void) tcp_random(); 24714 } 24715 24716 /* 24717 * tcp_random: Return a random number in the range [1 - (128K + 1)]. 24718 * This range is selected to be approximately centered on TCP_ISS / 2, 24719 * and easy to compute. We get this value by generating a 32-bit random 24720 * number, selecting out the high-order 17 bits, and then adding one so 24721 * that we never return zero. 
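 *
 * Worked arithmetic for the extraction below: a draw of 0x7fffffff
 * yields ((0x7fffffff >> 15) & 0x1ffff) + 1 = 0xffff + 1 = 65536,
 * and the largest possible result is 0x1ffff + 1 = 131072 (128K).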
24722 */ 24723 int 24724 tcp_random(void) 24725 { 24726 int i; 24727 24728 mutex_enter(&tcp_random_lock); 24729 *tcp_random_fptr += *tcp_random_rptr; 24730 24731 /* 24732 * The high-order bits are more random than the low-order bits, 24733 * so we select out the high-order 17 bits and add one so that 24734 * we never return zero. 24735 */ 24736 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1; 24737 if (++tcp_random_fptr >= tcp_random_end_ptr) { 24738 tcp_random_fptr = tcp_random_state; 24739 ++tcp_random_rptr; 24740 } else if (++tcp_random_rptr >= tcp_random_end_ptr) 24741 tcp_random_rptr = tcp_random_state; 24742 24743 mutex_exit(&tcp_random_lock); 24744 return (i); 24745 } 24746 24747 /* 24748 * XXX This will go away when TPI is extended to send 24749 * info reqs to sockfs/timod ..... 24750 * Given a queue, set the max packet size for the write 24751 * side of the queue below stream head. This value is 24752 * cached on the stream head. 24753 * Returns 1 on success, 0 otherwise. 24754 */ 24755 static int 24756 setmaxps(queue_t *q, int maxpsz) 24757 { 24758 struct stdata *stp; 24759 queue_t *wq; 24760 stp = STREAM(q); 24761 24762 /* 24763 * At this point change of a queue parameter is not allowed 24764 * when a multiplexor is sitting on top. 24765 */ 24766 if (stp->sd_flag & STPLEX) 24767 return (0); 24768 24769 claimstr(stp->sd_wrq); 24770 wq = stp->sd_wrq->q_next; 24771 ASSERT(wq != NULL); 24772 (void) strqset(wq, QMAXPSZ, 0, maxpsz); 24773 releasestr(stp->sd_wrq); 24774 return (1); 24775 } 24776 24777 static int 24778 tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp, 24779 int *t_errorp, int *sys_errorp) 24780 { 24781 int error; 24782 int is_absreq_failure; 24783 t_scalar_t *opt_lenp; 24784 t_scalar_t opt_offset; 24785 int prim_type; 24786 struct T_conn_req *tcreqp; 24787 struct T_conn_res *tcresp; 24788 cred_t *cr; 24789 24790 cr = DB_CREDDEF(mp, tcp->tcp_cred); 24791 24792 prim_type = ((union T_primitives *)mp->b_rptr)->type; 24793 ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES || 24794 prim_type == T_CONN_RES); 24795 24796 switch (prim_type) { 24797 case T_CONN_REQ: 24798 tcreqp = (struct T_conn_req *)mp->b_rptr; 24799 opt_offset = tcreqp->OPT_offset; 24800 opt_lenp = (t_scalar_t *)&tcreqp->OPT_length; 24801 break; 24802 case O_T_CONN_RES: 24803 case T_CONN_RES: 24804 tcresp = (struct T_conn_res *)mp->b_rptr; 24805 opt_offset = tcresp->OPT_offset; 24806 opt_lenp = (t_scalar_t *)&tcresp->OPT_length; 24807 break; 24808 } 24809 24810 *t_errorp = 0; 24811 *sys_errorp = 0; 24812 *do_disconnectp = 0; 24813 24814 error = tpi_optcom_buf(tcp->tcp_wq, mp, opt_lenp, 24815 opt_offset, cr, &tcp_opt_obj, 24816 NULL, &is_absreq_failure); 24817 24818 switch (error) { 24819 case 0: /* no error */ 24820 ASSERT(is_absreq_failure == 0); 24821 return (0); 24822 case ENOPROTOOPT: 24823 *t_errorp = TBADOPT; 24824 break; 24825 case EACCES: 24826 *t_errorp = TACCES; 24827 break; 24828 default: 24829 *t_errorp = TSYSERR; *sys_errorp = error; 24830 break; 24831 } 24832 if (is_absreq_failure != 0) { 24833 /* 24834 * The connection request should get the local ack 24835 * T_OK_ACK and then a T_DISCON_IND. 24836 */ 24837 *do_disconnectp = 1; 24838 } 24839 return (-1); 24840 } 24841 24842 /* 24843 * Split this function out so that if the secret changes, I'm okay. 24844 * 24845 * Initialize the tcp_iss_cookie and tcp_iss_key. 
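 *
 * For context (a summary, not code from this file): the key computed
 * here feeds the RFC 1948 scheme used by tcp_iss_init() when
 * tcps_strong_iss == 2, roughly
 *
 *	ISS = M + F(localaddr, localport, remoteaddr, remoteport, secret)
 *
 * where F is MD5 seeded with the cookie hashed below.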
 */

#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */

static void
tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
{
	struct {
		int32_t current_time;
		uint32_t randnum;
		uint16_t pad;
		uint8_t ether[6];
		uint8_t passwd[PASSWD_SIZE];
	} tcp_iss_cookie;
	time_t t;

	/*
	 * Start with the current absolute time.
	 */
	(void) drv_getparm(TIME, &t);
	tcp_iss_cookie.current_time = t;

	/*
	 * XXX - Need a more random number per RFC 1750, not this crap.
	 * OTOH, if what follows is pretty random, then I'm in better shape.
	 */
	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
	tcp_iss_cookie.pad = 0x365c;	/* Picked from HMAC pad values. */

	/*
	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
	 * as a good template.
	 */
	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));

	/*
	 * The pass-phrase.  Normally this is supplied by user-called NDD.
	 */
	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));

	/*
	 * See 4010593 if this section becomes a problem again,
	 * but the local ethernet address is useful here.
	 */
	(void) localetheraddr(NULL,
	    (struct ether_addr *)&tcp_iss_cookie.ether);

	/*
	 * Hash 'em all together.  The MD5Final is called per-connection.
	 */
	mutex_enter(&tcps->tcps_iss_key_lock);
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
	    sizeof (tcp_iss_cookie));
	mutex_exit(&tcps->tcps_iss_key_lock);
}

/*
 * Set the RFC 1948 pass phrase
 */
/* ARGSUSED */
static int
tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *cr)
{
	tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;

	/*
	 * Basically, value contains a new pass phrase.  Pass it along!
	 */
	tcp_iss_key_init((uint8_t *)value, strlen(value), tcps);
	return (0);
}

/* ARGSUSED */
static int
tcp_sack_info_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (tcp_sack_info_t));
	return (0);
}

/* ARGSUSED */
static int
tcp_iphc_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, TCP_MAX_COMBINED_HEADER_LENGTH);
	return (0);
}

/*
 * Make sure we wait until the default queue is set up, yet allow
 * tcp_g_q_create() to open a TCP stream.
 * We need to allow tcp_g_q_create() to do an open
 * of tcp, hence we compare curthread.
 * All others have to wait until the tcps_g_q has been
 * set up.
 */
void
tcp_g_q_setup(tcp_stack_t *tcps)
{
	mutex_enter(&tcps->tcps_g_q_lock);
	if (tcps->tcps_g_q != NULL) {
		mutex_exit(&tcps->tcps_g_q_lock);
		return;
	}
	if (tcps->tcps_g_q_creator == NULL) {
		/* This thread will set it up */
		tcps->tcps_g_q_creator = curthread;
		mutex_exit(&tcps->tcps_g_q_lock);
		tcp_g_q_create(tcps);
		mutex_enter(&tcps->tcps_g_q_lock);
		ASSERT(tcps->tcps_g_q_creator == curthread);
		tcps->tcps_g_q_creator = NULL;
		cv_signal(&tcps->tcps_g_q_cv);
		ASSERT(tcps->tcps_g_q != NULL);
		mutex_exit(&tcps->tcps_g_q_lock);
		return;
	}
	/* Everybody but the creator has to wait */
	if (tcps->tcps_g_q_creator != curthread) {
		while (tcps->tcps_g_q == NULL)
			cv_wait(&tcps->tcps_g_q_cv, &tcps->tcps_g_q_lock);
	}
	mutex_exit(&tcps->tcps_g_q_lock);
}

#define	IP	"ip"

#define	TCP6DEV		"/devices/pseudo/tcp6@0:tcp6"

/*
 * Create a default tcp queue here instead of in strplumb
 */
void
tcp_g_q_create(tcp_stack_t *tcps)
{
	int error;
	ldi_handle_t	lh = NULL;
	ldi_ident_t	li = NULL;
	int		rval;
	cred_t		*cr;
	major_t IP_MAJ;

#ifdef NS_DEBUG
	(void) printf("tcp_g_q_create()\n");
#endif

	IP_MAJ = ddi_name_to_major(IP);

	ASSERT(tcps->tcps_g_q_creator == curthread);

	error = ldi_ident_from_major(IP_MAJ, &li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: lyr ident get failed error %d\n",
		    error);
#endif
		return;
	}

	cr = zone_get_kcred(netstackid_to_zoneid(
	    tcps->tcps_netstack->netstack_stackid));
	ASSERT(cr != NULL);
	/*
	 * We set the tcp default queue to IPv6 because IPv4 falls
	 * back to IPv6 when it can't find a client, but
	 * IPv6 does not fall back to IPv4.
	 */
	error = ldi_open_by_name(TCP6DEV, FREAD|FWRITE, cr, &lh, li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: open of TCP6DEV failed error %d\n",
		    error);
#endif
		goto out;
	}

	/*
	 * This ioctl causes the tcp framework to cache a pointer to
	 * this stream, so we don't want to close the stream after
	 * this operation.
	 * Use the kernel credentials that are for the zone we're in.
	 */
	error = ldi_ioctl(lh, TCP_IOC_DEFAULT_Q,
	    (intptr_t)0, FKIOCTL, cr, &rval);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: ioctl TCP_IOC_DEFAULT_Q failed "
		    "error %d\n", error);
#endif
		goto out;
	}
	tcps->tcps_g_q_lh = lh;	/* For tcp_g_q_close */
	lh = NULL;
out:
	/* Close layered handles */
	if (li)
		ldi_ident_release(li);
	/* Keep cred around until _inactive needs it */
	tcps->tcps_g_q_cr = cr;
}

/*
 * We keep tcp_g_q set until all other tcp_t's in the zone
 * have gone away, and then when tcp_g_q_inactive() is called
 * we clear it.
 */
void
tcp_g_q_destroy(tcp_stack_t *tcps)
{
#ifdef NS_DEBUG
	(void) printf("tcp_g_q_destroy() for stack %d\n",
	    tcps->tcps_netstack->netstack_stackid);
#endif

	if (tcps->tcps_g_q == NULL) {
		return;	/* Nothing to cleanup */
	}
	/*
	 * Drop reference corresponding to the default queue.
	 * This reference was added from tcp_open when the default queue
	 * was created, hence we compensate for this extra drop in
	 * tcp_g_q_close. If the refcnt drops to zero here it means
	 * the default queue was the last one to be open, in which
	 * case, then tcp_g_q_inactive will be
	 * called as a result of the refrele.
	 */
	TCPS_REFRELE(tcps);
}

/*
 * Called when last tcp_t drops reference count using TCPS_REFRELE.
 * Run by tcp_g_q_inactive using a taskq.
 */
static void
tcp_g_q_close(void *arg)
{
	tcp_stack_t *tcps = arg;
	int error;
	ldi_handle_t	lh = NULL;
	ldi_ident_t	li = NULL;
	cred_t		*cr;
	major_t	IP_MAJ;

	IP_MAJ = ddi_name_to_major(IP);

#ifdef NS_DEBUG
	(void) printf("tcp_g_q_inactive() for stack %d refcnt %d\n",
	    tcps->tcps_netstack->netstack_stackid,
	    tcps->tcps_netstack->netstack_refcnt);
#endif
	lh = tcps->tcps_g_q_lh;
	if (lh == NULL)
		return;	/* Nothing to cleanup */

	ASSERT(tcps->tcps_refcnt == 1);
	ASSERT(tcps->tcps_g_q != NULL);

	error = ldi_ident_from_major(IP_MAJ, &li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_inactive: lyr ident get failed error %d\n",
		    error);
#endif
		return;
	}

	cr = tcps->tcps_g_q_cr;
	tcps->tcps_g_q_cr = NULL;
	ASSERT(cr != NULL);

	/*
	 * Make sure we can break the recursion when tcp_close decrements
	 * the reference count causing g_q_inactive to be called again.
	 */
	tcps->tcps_g_q_lh = NULL;

	/* close the default queue */
	(void) ldi_close(lh, FREAD|FWRITE, cr);
	/*
	 * At this point in time tcps and the rest of netstack_t might
	 * have been deleted.
	 */
	tcps = NULL;

	/* Close layered handles */
	ldi_ident_release(li);
	crfree(cr);
}

/*
 * Called when last tcp_t drops reference count using TCPS_REFRELE.
 *
 * Have to ensure that the ldi routines are not used by an
 * interrupt thread by using a taskq.
25143 */ 25144 void 25145 tcp_g_q_inactive(tcp_stack_t *tcps) 25146 { 25147 if (tcps->tcps_g_q_lh == NULL) 25148 return; /* Nothing to cleanup */ 25149 25150 ASSERT(tcps->tcps_refcnt == 0); 25151 TCPS_REFHOLD(tcps); /* Compensate for what g_q_destroy did */ 25152 25153 if (servicing_interrupt()) { 25154 (void) taskq_dispatch(tcp_taskq, tcp_g_q_close, 25155 (void *) tcps, TQ_SLEEP); 25156 } else { 25157 tcp_g_q_close(tcps); 25158 } 25159 } 25160 25161 /* 25162 * Called by IP when IP is loaded into the kernel 25163 */ 25164 void 25165 tcp_ddi_g_init(void) 25166 { 25167 tcp_timercache = kmem_cache_create("tcp_timercache", 25168 sizeof (tcp_timer_t) + sizeof (mblk_t), 0, 25169 NULL, NULL, NULL, NULL, NULL, 0); 25170 25171 tcp_sack_info_cache = kmem_cache_create("tcp_sack_info_cache", 25172 sizeof (tcp_sack_info_t), 0, 25173 tcp_sack_info_constructor, NULL, NULL, NULL, NULL, 0); 25174 25175 tcp_iphc_cache = kmem_cache_create("tcp_iphc_cache", 25176 TCP_MAX_COMBINED_HEADER_LENGTH, 0, 25177 tcp_iphc_constructor, NULL, NULL, NULL, NULL, 0); 25178 25179 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL); 25180 25181 /* Initialize the random number generator */ 25182 tcp_random_init(); 25183 25184 tcp_squeue_wput_proc = tcp_squeue_switch(tcp_squeue_wput); 25185 tcp_squeue_close_proc = tcp_squeue_switch(tcp_squeue_close); 25186 25187 /* A single callback independently of how many netstacks we have */ 25188 ip_squeue_init(tcp_squeue_add); 25189 25190 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics); 25191 25192 tcp_taskq = taskq_create("tcp_taskq", 1, minclsyspri, 1, 1, 25193 TASKQ_PREPOPULATE); 25194 25195 /* 25196 * We want to be informed each time a stack is created or 25197 * destroyed in the kernel, so we can maintain the 25198 * set of tcp_stack_t's. 25199 */ 25200 netstack_register(NS_TCP, tcp_stack_init, tcp_stack_shutdown, 25201 tcp_stack_fini); 25202 } 25203 25204 25205 /* 25206 * Initialize the TCP stack instance. 25207 */ 25208 static void * 25209 tcp_stack_init(netstackid_t stackid, netstack_t *ns) 25210 { 25211 tcp_stack_t *tcps; 25212 tcpparam_t *pa; 25213 int i; 25214 25215 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP); 25216 tcps->tcps_netstack = ns; 25217 25218 /* Initialize locks */ 25219 rw_init(&tcps->tcps_hsp_lock, NULL, RW_DEFAULT, NULL); 25220 mutex_init(&tcps->tcps_g_q_lock, NULL, MUTEX_DEFAULT, NULL); 25221 cv_init(&tcps->tcps_g_q_cv, NULL, CV_DEFAULT, NULL); 25222 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL); 25223 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 25224 rw_init(&tcps->tcps_reserved_port_lock, NULL, RW_DEFAULT, NULL); 25225 25226 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS; 25227 tcps->tcps_g_epriv_ports[0] = 2049; 25228 tcps->tcps_g_epriv_ports[1] = 4045; 25229 tcps->tcps_min_anonpriv_port = 512; 25230 25231 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) * 25232 TCP_BIND_FANOUT_SIZE, KM_SLEEP); 25233 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) * 25234 TCP_FANOUT_SIZE, KM_SLEEP); 25235 tcps->tcps_reserved_port = kmem_zalloc(sizeof (tcp_rport_t) * 25236 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE, KM_SLEEP); 25237 25238 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 25239 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL, 25240 MUTEX_DEFAULT, NULL); 25241 } 25242 25243 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 25244 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL, 25245 MUTEX_DEFAULT, NULL); 25246 } 25247 25248 /* TCP's IPsec code calls the packet dropper. 
*/ 25249 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement"); 25250 25251 pa = (tcpparam_t *)kmem_alloc(sizeof (lcl_tcp_param_arr), KM_SLEEP); 25252 tcps->tcps_params = pa; 25253 bcopy(lcl_tcp_param_arr, tcps->tcps_params, sizeof (lcl_tcp_param_arr)); 25254 25255 (void) tcp_param_register(&tcps->tcps_g_nd, tcps->tcps_params, 25256 A_CNT(lcl_tcp_param_arr), tcps); 25257 25258 /* 25259 * Note: To really walk the device tree you need the devinfo 25260 * pointer to your device which is only available after probe/attach. 25261 * The following is safe only because it uses ddi_root_node() 25262 */ 25263 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr, 25264 tcp_opt_obj.odb_opt_arr_cnt); 25265 25266 /* 25267 * Initialize RFC 1948 secret values. This will probably be reset once 25268 * by the boot scripts. 25269 * 25270 * Use NULL name, as the name is caught by the new lockstats. 25271 * 25272 * Initialize with some random, non-guessable string, like the global 25273 * T_INFO_ACK. 25274 */ 25275 25276 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack, 25277 sizeof (tcp_g_t_info_ack), tcps); 25278 25279 tcps->tcps_kstat = tcp_kstat2_init(stackid, &tcps->tcps_statistics); 25280 tcps->tcps_mibkp = tcp_kstat_init(stackid, tcps); 25281 25282 return (tcps); 25283 } 25284 25285 /* 25286 * Called when the IP module is about to be unloaded. 25287 */ 25288 void 25289 tcp_ddi_g_destroy(void) 25290 { 25291 tcp_g_kstat_fini(tcp_g_kstat); 25292 tcp_g_kstat = NULL; 25293 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics)); 25294 25295 mutex_destroy(&tcp_random_lock); 25296 25297 kmem_cache_destroy(tcp_timercache); 25298 kmem_cache_destroy(tcp_sack_info_cache); 25299 kmem_cache_destroy(tcp_iphc_cache); 25300 25301 netstack_unregister(NS_TCP); 25302 taskq_destroy(tcp_taskq); 25303 } 25304 25305 /* 25306 * Shut down the TCP stack instance. 25307 */ 25308 /* ARGSUSED */ 25309 static void 25310 tcp_stack_shutdown(netstackid_t stackid, void *arg) 25311 { 25312 tcp_stack_t *tcps = (tcp_stack_t *)arg; 25313 25314 tcp_g_q_destroy(tcps); 25315 } 25316 25317 /* 25318 * Free the TCP stack instance. 
 */
static void
tcp_stack_fini(netstackid_t stackid, void *arg)
{
	tcp_stack_t *tcps = (tcp_stack_t *)arg;
	int i;

	nd_free(&tcps->tcps_g_nd);
	kmem_free(tcps->tcps_params, sizeof (lcl_tcp_param_arr));
	tcps->tcps_params = NULL;
	kmem_free(tcps->tcps_wroff_xtra_param, sizeof (tcpparam_t));
	tcps->tcps_wroff_xtra_param = NULL;
	kmem_free(tcps->tcps_mdt_head_param, sizeof (tcpparam_t));
	tcps->tcps_mdt_head_param = NULL;
	kmem_free(tcps->tcps_mdt_tail_param, sizeof (tcpparam_t));
	tcps->tcps_mdt_tail_param = NULL;
	kmem_free(tcps->tcps_mdt_max_pbufs_param, sizeof (tcpparam_t));
	tcps->tcps_mdt_max_pbufs_param = NULL;

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
	}

	for (i = 0; i < TCP_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
	}

	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
	tcps->tcps_bind_fanout = NULL;

	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) * TCP_FANOUT_SIZE);
	tcps->tcps_acceptor_fanout = NULL;

	kmem_free(tcps->tcps_reserved_port, sizeof (tcp_rport_t) *
	    TCP_RESERVED_PORTS_ARRAY_MAX_SIZE);
	tcps->tcps_reserved_port = NULL;

	mutex_destroy(&tcps->tcps_iss_key_lock);
	rw_destroy(&tcps->tcps_hsp_lock);
	mutex_destroy(&tcps->tcps_g_q_lock);
	cv_destroy(&tcps->tcps_g_q_cv);
	mutex_destroy(&tcps->tcps_epriv_port_lock);
	rw_destroy(&tcps->tcps_reserved_port_lock);

	ip_drop_unregister(&tcps->tcps_dropper);

	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
	tcps->tcps_kstat = NULL;
	bzero(&tcps->tcps_statistics, sizeof (tcps->tcps_statistics));

	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
	tcps->tcps_mibkp = NULL;

	kmem_free(tcps, sizeof (*tcps));
}

/*
 * Generate ISS, taking into account that NDD changes may happen halfway
 * through.  (If the iss is not zero, set it.)
 */

static void
tcp_iss_init(tcp_t *tcp)
{
	MD5_CTX context;
	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
	uint32_t answer[4];
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tcps->tcps_iss_incr_extra += (ISS_INCR >> 1);
	tcp->tcp_iss = tcps->tcps_iss_incr_extra;
	switch (tcps->tcps_strong_iss) {
	case 2:
		mutex_enter(&tcps->tcps_iss_key_lock);
		context = tcps->tcps_iss_key;
		mutex_exit(&tcps->tcps_iss_key_lock);
		arg.ports = tcp->tcp_ports;
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src,
			    &arg.src);
			IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_dst,
			    &arg.dst);
		} else {
			arg.src = tcp->tcp_ip6h->ip6_src;
			arg.dst = tcp->tcp_ip6h->ip6_dst;
		}
		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
		MD5Final((uchar_t *)answer, &context);
		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
		/*
		 * Now that we've hashed into a unique per-connection sequence
		 * space, add a random increment per strong_iss == 1.  So I
		 * guess we'll have to...
25414 */ 25415 /* FALLTHRU */ 25416 case 1: 25417 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random(); 25418 break; 25419 default: 25420 tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 25421 break; 25422 } 25423 tcp->tcp_valid_bits = TCP_ISS_VALID; 25424 tcp->tcp_fss = tcp->tcp_iss - 1; 25425 tcp->tcp_suna = tcp->tcp_iss; 25426 tcp->tcp_snxt = tcp->tcp_iss + 1; 25427 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 25428 tcp->tcp_csuna = tcp->tcp_snxt; 25429 } 25430 25431 /* 25432 * Exported routine for extracting active tcp connection status. 25433 * 25434 * This is used by the Solaris Cluster Networking software to 25435 * gather a list of connections that need to be forwarded to 25436 * specific nodes in the cluster when configuration changes occur. 25437 * 25438 * The callback is invoked for each tcp_t structure. Returning 25439 * non-zero from the callback routine terminates the search. 25440 */ 25441 int 25442 cl_tcp_walk_list(int (*cl_callback)(cl_tcp_info_t *, void *), 25443 void *arg) 25444 { 25445 netstack_handle_t nh; 25446 netstack_t *ns; 25447 int ret = 0; 25448 25449 netstack_next_init(&nh); 25450 while ((ns = netstack_next(&nh)) != NULL) { 25451 ret = cl_tcp_walk_list_stack(cl_callback, arg, 25452 ns->netstack_tcp); 25453 netstack_rele(ns); 25454 } 25455 netstack_next_fini(&nh); 25456 return (ret); 25457 } 25458 25459 static int 25460 cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *), void *arg, 25461 tcp_stack_t *tcps) 25462 { 25463 tcp_t *tcp; 25464 cl_tcp_info_t cl_tcpi; 25465 connf_t *connfp; 25466 conn_t *connp; 25467 int i; 25468 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 25469 25470 ASSERT(callback != NULL); 25471 25472 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 25473 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 25474 connp = NULL; 25475 25476 while ((connp = 25477 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 25478 25479 tcp = connp->conn_tcp; 25480 cl_tcpi.cl_tcpi_version = CL_TCPI_V1; 25481 cl_tcpi.cl_tcpi_ipversion = tcp->tcp_ipversion; 25482 cl_tcpi.cl_tcpi_state = tcp->tcp_state; 25483 cl_tcpi.cl_tcpi_lport = tcp->tcp_lport; 25484 cl_tcpi.cl_tcpi_fport = tcp->tcp_fport; 25485 /* 25486 * The macros tcp_laddr and tcp_faddr give the IPv4 25487 * addresses. They are copied implicitly below as 25488 * mapped addresses. 25489 */ 25490 cl_tcpi.cl_tcpi_laddr_v6 = tcp->tcp_ip_src_v6; 25491 if (tcp->tcp_ipversion == IPV4_VERSION) { 25492 cl_tcpi.cl_tcpi_faddr = 25493 tcp->tcp_ipha->ipha_dst; 25494 } else { 25495 cl_tcpi.cl_tcpi_faddr_v6 = 25496 tcp->tcp_ip6h->ip6_dst; 25497 } 25498 25499 /* 25500 * If the callback returns non-zero 25501 * we terminate the traversal. 25502 */ 25503 if ((*callback)(&cl_tcpi, arg) != 0) { 25504 CONN_DEC_REF(tcp->tcp_connp); 25505 return (1); 25506 } 25507 } 25508 } 25509 25510 return (0); 25511 } 25512 25513 /* 25514 * Macros used for accessing the different types of sockaddr 25515 * structures inside a tcp_ioc_abort_conn_t. 
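 *
 * A minimal sketch of a hypothetical caller filling in a v4 request
 * (field names as used by the code below; values are illustrative):
 *
 *	tcp_ioc_abort_conn_t ac;
 *
 *	bzero(&ac, sizeof (ac));
 *	ac.ac_local.ss_family = ac.ac_remote.ss_family = AF_INET;
 *	TCP_AC_V4REMOTE(&ac) = htonl(0x0a000001);	(10.0.0.1)
 *	TCP_AC_V4RPORT(&ac) = htons(80);
 *	ac.ac_start = TCPS_SYN_SENT;
 *	ac.ac_end = TCPS_TIME_WAIT;
 *	ac.ac_zoneid = ALL_ZONES;
 *
 * Unset addresses and ports are left zero and act as wildcards.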
25516 */ 25517 #define TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local) 25518 #define TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote) 25519 #define TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr) 25520 #define TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr) 25521 #define TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port) 25522 #define TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port) 25523 #define TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local) 25524 #define TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote) 25525 #define TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr) 25526 #define TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr) 25527 #define TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port) 25528 #define TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port) 25529 25530 /* 25531 * Return the correct error code to mimic the behavior 25532 * of a connection reset. 25533 */ 25534 #define TCP_AC_GET_ERRCODE(state, err) { \ 25535 switch ((state)) { \ 25536 case TCPS_SYN_SENT: \ 25537 case TCPS_SYN_RCVD: \ 25538 (err) = ECONNREFUSED; \ 25539 break; \ 25540 case TCPS_ESTABLISHED: \ 25541 case TCPS_FIN_WAIT_1: \ 25542 case TCPS_FIN_WAIT_2: \ 25543 case TCPS_CLOSE_WAIT: \ 25544 (err) = ECONNRESET; \ 25545 break; \ 25546 case TCPS_CLOSING: \ 25547 case TCPS_LAST_ACK: \ 25548 case TCPS_TIME_WAIT: \ 25549 (err) = 0; \ 25550 break; \ 25551 default: \ 25552 (err) = ENXIO; \ 25553 } \ 25554 } 25555 25556 /* 25557 * Check if a tcp structure matches the info in acp. 25558 */ 25559 #define TCP_AC_ADDR_MATCH(acp, tcp) \ 25560 (((acp)->ac_local.ss_family == AF_INET) ? \ 25561 ((TCP_AC_V4LOCAL((acp)) == INADDR_ANY || \ 25562 TCP_AC_V4LOCAL((acp)) == (tcp)->tcp_ip_src) && \ 25563 (TCP_AC_V4REMOTE((acp)) == INADDR_ANY || \ 25564 TCP_AC_V4REMOTE((acp)) == (tcp)->tcp_remote) && \ 25565 (TCP_AC_V4LPORT((acp)) == 0 || \ 25566 TCP_AC_V4LPORT((acp)) == (tcp)->tcp_lport) && \ 25567 (TCP_AC_V4RPORT((acp)) == 0 || \ 25568 TCP_AC_V4RPORT((acp)) == (tcp)->tcp_fport) && \ 25569 (acp)->ac_start <= (tcp)->tcp_state && \ 25570 (acp)->ac_end >= (tcp)->tcp_state) : \ 25571 ((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) || \ 25572 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)), \ 25573 &(tcp)->tcp_ip_src_v6)) && \ 25574 (IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) || \ 25575 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)), \ 25576 &(tcp)->tcp_remote_v6)) && \ 25577 (TCP_AC_V6LPORT((acp)) == 0 || \ 25578 TCP_AC_V6LPORT((acp)) == (tcp)->tcp_lport) && \ 25579 (TCP_AC_V6RPORT((acp)) == 0 || \ 25580 TCP_AC_V6RPORT((acp)) == (tcp)->tcp_fport) && \ 25581 (acp)->ac_start <= (tcp)->tcp_state && \ 25582 (acp)->ac_end >= (tcp)->tcp_state)) 25583 25584 #define TCP_AC_MATCH(acp, tcp) \ 25585 (((acp)->ac_zoneid == ALL_ZONES || \ 25586 (acp)->ac_zoneid == tcp->tcp_connp->conn_zoneid) ? \ 25587 TCP_AC_ADDR_MATCH(acp, tcp) : 0) 25588 25589 /* 25590 * Build a message containing a tcp_ioc_abort_conn_t structure 25591 * which is filled in with information from acp and tp. 
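 *
 * The M_CTL message built below is laid out as:
 *
 *	+---------------------+---------------------------------------+
 *	| uint32_t            | tcp_ioc_abort_conn_t                  |
 *	| TCP_IOC_ABORT_CONN  | (acp with wildcards resolved from tp) |
 *	+---------------------+---------------------------------------+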
/*
 * Build a message containing a tcp_ioc_abort_conn_t structure
 * which is filled in with information from acp and tp.
 */
static mblk_t *
tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp)
{
	mblk_t *mp;
	tcp_ioc_abort_conn_t *tacp;

	mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO);
	if (mp == NULL)
		return (NULL);

	mp->b_datap->db_type = M_CTL;

	*((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN;
	tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr +
	    sizeof (uint32_t));

	tacp->ac_start = acp->ac_start;
	tacp->ac_end = acp->ac_end;
	tacp->ac_zoneid = acp->ac_zoneid;

	if (acp->ac_local.ss_family == AF_INET) {
		tacp->ac_local.ss_family = AF_INET;
		tacp->ac_remote.ss_family = AF_INET;
		TCP_AC_V4LOCAL(tacp) = tp->tcp_ip_src;
		TCP_AC_V4REMOTE(tacp) = tp->tcp_remote;
		TCP_AC_V4LPORT(tacp) = tp->tcp_lport;
		TCP_AC_V4RPORT(tacp) = tp->tcp_fport;
	} else {
		tacp->ac_local.ss_family = AF_INET6;
		tacp->ac_remote.ss_family = AF_INET6;
		TCP_AC_V6LOCAL(tacp) = tp->tcp_ip_src_v6;
		TCP_AC_V6REMOTE(tacp) = tp->tcp_remote_v6;
		TCP_AC_V6LPORT(tacp) = tp->tcp_lport;
		TCP_AC_V6RPORT(tacp) = tp->tcp_fport;
	}
	mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp);
	return (mp);
}

/*
 * Print a tcp_ioc_abort_conn_t structure.
 */
static void
tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp)
{
	char lbuf[128];
	char rbuf[128];
	sa_family_t af;
	in_port_t lport, rport;
	ushort_t logflags;

	af = acp->ac_local.ss_family;

	if (af == AF_INET) {
		(void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp),
		    lbuf, 128);
		(void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp),
		    rbuf, 128);
		lport = ntohs(TCP_AC_V4LPORT(acp));
		rport = ntohs(TCP_AC_V4RPORT(acp));
	} else {
		(void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp),
		    lbuf, 128);
		(void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp),
		    rbuf, 128);
		lport = ntohs(TCP_AC_V6LPORT(acp));
		rport = ntohs(TCP_AC_V6RPORT(acp));
	}

	logflags = SL_TRACE | SL_NOTE;
	/*
	 * Don't print this message to the console if the operation was done
	 * to a non-global zone.
	 */
	if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
		logflags |= SL_CONSOLE;
	(void) strlog(TCP_MOD_ID, 0, 1, logflags,
	    "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, "
	    "start = %d, end = %d\n", lbuf, lport, rbuf, rport,
	    acp->ac_start, acp->ac_end);
}
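/*
 * The strlog() record emitted above goes to the STREAMS log driver and
 * can be observed from userland with strace(1M) by selecting the TCP
 * module id (TCP_MOD_ID; 5105 in stock headers -- verify against the
 * definition in your build), e.g.:
 *
 *	# strace 5105 all all
 */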
/*
 * Called inside tcp_rput when a message built using
 * tcp_ioctl_abort_build_msg is put into a queue.
 * Note that when we get here there is no wildcard in acp any more.
 */
static void
tcp_ioctl_abort_handler(tcp_t *tcp, mblk_t *mp)
{
	tcp_ioc_abort_conn_t *acp;

	acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t));
	if (tcp->tcp_state <= acp->ac_end) {
		/*
		 * If we get here, we are already on the correct
		 * squeue. The ioctl was forwarded via
		 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn ->
		 * tcp_ioctl_abort -> squeue_fill (when the target tcp
		 * lives on a different squeue).
		 */
		int errcode;

		TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode);
		(void) tcp_clean_death(tcp, errcode, 26);
	}
	freemsg(mp);
}

/*
 * Abort all matching connections on a hash chain.
 */
static int
tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
    boolean_t exact, tcp_stack_t *tcps)
{
	int nmatch, err = 0;
	tcp_t *tcp;
	MBLKP mp, last, listhead = NULL;
	conn_t *tconnp;
	connf_t *connfp;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	connfp = &ipst->ips_ipcl_conn_fanout[index];

startover:
	nmatch = 0;

	mutex_enter(&connfp->connf_lock);
	for (tconnp = connfp->connf_head; tconnp != NULL;
	    tconnp = tconnp->conn_next) {
		tcp = tconnp->conn_tcp;
		if (TCP_AC_MATCH(acp, tcp)) {
			CONN_INC_REF(tcp->tcp_connp);
			mp = tcp_ioctl_abort_build_msg(acp, tcp);
			if (mp == NULL) {
				err = ENOMEM;
				CONN_DEC_REF(tcp->tcp_connp);
				break;
			}
			mp->b_prev = (mblk_t *)tcp;

			if (listhead == NULL) {
				listhead = mp;
				last = mp;
			} else {
				last->b_next = mp;
				last = mp;
			}
			nmatch++;
			if (exact)
				break;
		}

		/* Avoid holding lock for too long. */
		if (nmatch >= 500)
			break;
	}
	mutex_exit(&connfp->connf_lock);

	/* Pass mp into the correct tcp */
	while ((mp = listhead) != NULL) {
		listhead = listhead->b_next;
		tcp = (tcp_t *)mp->b_prev;
		mp->b_next = mp->b_prev = NULL;
		squeue_fill(tcp->tcp_connp->conn_sqp, mp,
		    tcp_input, tcp->tcp_connp, SQTAG_TCP_ABORT_BUCKET);
	}

	*count += nmatch;
	if (nmatch >= 500 && err == 0)
		goto startover;
	return (err);
}
/*
 * Abort all connections that match the attributes specified in acp.
 */
static int
tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp, tcp_stack_t *tcps)
{
	sa_family_t af;
	uint32_t ports;
	uint16_t *pports;
	int err = 0, count = 0;
	boolean_t exact = B_FALSE; /* set when there is no wildcard */
	int index = -1;
	ushort_t logflags;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	af = acp->ac_local.ss_family;

	if (af == AF_INET) {
		if (TCP_AC_V4REMOTE(acp) != INADDR_ANY &&
		    TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V4LPORT(acp);
			pports[0] = TCP_AC_V4RPORT(acp);
			index = IPCL_CONN_HASH(TCP_AC_V4REMOTE(acp),
			    ports, ipst);
			exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY);
		}
	} else {
		if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) &&
		    TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V6LPORT(acp);
			pports[0] = TCP_AC_V6RPORT(acp);
			index = IPCL_CONN_HASH_V6(TCP_AC_V6REMOTE(acp),
			    ports, ipst);
			exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp));
		}
	}

	/*
	 * For cases where remote addr, local port, and remote port are non-
	 * wildcards, tcp_ioctl_abort_bucket will only be called once.
	 */
	if (index != -1) {
		err = tcp_ioctl_abort_bucket(acp, index,
		    &count, exact, tcps);
	} else {
		/*
		 * loop through all entries for wildcard case
		 */
		for (index = 0;
		    index < ipst->ips_ipcl_conn_fanout_size;
		    index++) {
			err = tcp_ioctl_abort_bucket(acp, index,
			    &count, exact, tcps);
			if (err != 0)
				break;
		}
	}

	logflags = SL_TRACE | SL_NOTE;
	/*
	 * Don't print this message to the console if the operation was done
	 * to a non-global zone.
	 */
	if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
		logflags |= SL_CONSOLE;
	(void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: "
	    "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' '));
	if (err == 0 && count == 0)
		err = ENOENT;
	return (err);
}

/*
 * Process the TCP_IOC_ABORT_CONN ioctl request.
 */
static void
tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp)
{
	int err;
	IOCP iocp;
	MBLKP mp1;
	sa_family_t laf, raf;
	tcp_ioc_abort_conn_t *acp;
	zone_t *zptr;
	conn_t *connp = Q_TO_CONN(q);
	zoneid_t zoneid = connp->conn_zoneid;
	tcp_t *tcp = connp->conn_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	iocp = (IOCP)mp->b_rptr;

	if ((mp1 = mp->b_cont) == NULL ||
	    iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) {
		err = EINVAL;
		goto out;
	}

	/* check permissions */
	if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) {
		err = EPERM;
		goto out;
	}

	if (mp1->b_cont != NULL) {
		freemsg(mp1->b_cont);
		mp1->b_cont = NULL;
	}

	acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr;
	laf = acp->ac_local.ss_family;
	raf = acp->ac_remote.ss_family;

	/* check that a zone with the supplied zoneid exists */
	if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) {
		zptr = zone_find_by_id(zoneid);
		if (zptr != NULL) {
			zone_rele(zptr);
		} else {
			err = EINVAL;
			goto out;
		}
	}

	/*
	 * For exclusive stacks we set the zoneid to GLOBAL_ZONEID
	 * so that TCP operates as if in the global zone.
	 */
	if (tcps->tcps_netstack->netstack_stackid != GLOBAL_NETSTACKID)
		acp->ac_zoneid = GLOBAL_ZONEID;

	if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT ||
	    acp->ac_start > acp->ac_end || laf != raf ||
	    (laf != AF_INET && laf != AF_INET6)) {
		err = EINVAL;
		goto out;
	}

	tcp_ioctl_abort_dump(acp);
	err = tcp_ioctl_abort(acp, tcps);

out:
	if (mp1 != NULL) {
		freemsg(mp1);
		mp->b_cont = NULL;
	}

	if (err != 0)
		miocnak(q, mp, 0, err);
	else
		miocack(q, mp, 0, 0);
}
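#ifdef TCP_IOC_ABORT_CONN_EXAMPLE
/*
 * Illustrative userland sketch only (not compiled here): issuing
 * TCP_IOC_ABORT_CONN to abort every IPv4 connection to a given remote
 * port, in any state, in the caller's zone. Wildcard fields are left
 * zeroed. The function name and guard macro are hypothetical, header
 * paths may vary by release, and the caller needs the privilege
 * checked by secpolicy_ip_config() above.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <inet/tcp.h>
#include <stropts.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <zone.h>

static int
abort_remote_port(in_port_t port)
{
	tcp_ioc_abort_conn_t acp;
	struct strioctl ioc;
	int fd, ret;

	if ((fd = open("/dev/tcp", O_RDWR)) == -1)
		return (-1);
	(void) memset(&acp, 0, sizeof (acp));
	acp.ac_local.ss_family = AF_INET;
	acp.ac_remote.ss_family = AF_INET;
	((struct sockaddr_in *)&acp.ac_remote)->sin_port = htons(port);
	acp.ac_start = TCPS_SYN_SENT;	/* full valid state range */
	acp.ac_end = TCPS_TIME_WAIT;
	acp.ac_zoneid = getzoneid();
	ioc.ic_cmd = TCP_IOC_ABORT_CONN;
	ioc.ic_timout = -1;
	ioc.ic_len = sizeof (acp);
	ioc.ic_dp = (char *)&acp;
	ret = ioctl(fd, I_STR, &ioc);
	(void) close(fd);
	return (ret);
}
#endif	/* TCP_IOC_ABORT_CONN_EXAMPLE */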
/*
 * tcp_time_wait_processing() handles processing of incoming packets when
 * the tcp is in the TIME_WAIT state.
 * A TIME_WAIT tcp that has an associated open TCP stream is never put
 * on the time wait list.
 */
void
tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq,
    uint32_t seg_ack, int seg_len, tcph_t *tcph)
{
	int32_t bytes_acked;
	int32_t gap;
	int32_t rgap;
	tcp_opt_t tcpopt;
	uint_t flags;
	uint32_t new_swnd = 0;
	conn_t *connp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	BUMP_LOCAL(tcp->tcp_ibsegs);
	TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT);

	flags = (unsigned int)tcph->th_flags[0] & 0xFF;
	new_swnd = BE16_TO_U16(tcph->th_win) <<
	    ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws);
	if (tcp->tcp_snd_ts_ok) {
		if (!tcp_paws_check(tcp, tcph, &tcpopt)) {
			tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
			    tcp->tcp_rnxt, TH_ACK);
			goto done;
		}
	}
	gap = seg_seq - tcp->tcp_rnxt;
	rgap = tcp->tcp_rwnd - (gap + seg_len);
	if (gap < 0) {
		BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
		UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
		    (seg_len > -gap ? -gap : seg_len));
		seg_len += gap;
		if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
			if (flags & TH_RST) {
				goto done;
			}
			if ((flags & TH_FIN) && seg_len == -1) {
				/*
				 * When TCP receives a duplicate FIN in
				 * TIME_WAIT state, restart the 2 MSL timer.
				 * See page 73 in RFC 793. Make sure this TCP
				 * is already on the TIME_WAIT list. If not,
				 * just restart the timer.
				 */
				if (TCP_IS_DETACHED(tcp)) {
					if (tcp_time_wait_remove(tcp, NULL) ==
					    B_TRUE) {
						tcp_time_wait_append(tcp);
						TCP_DBGSTAT(tcps,
						    tcp_rput_time_wait);
					}
				} else {
					ASSERT(tcp != NULL);
					TCP_TIMER_RESTART(tcp,
					    tcps->tcps_time_wait_interval);
				}
				tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
				    tcp->tcp_rnxt, TH_ACK);
				goto done;
			}
			flags |= TH_ACK_NEEDED;
			seg_len = 0;
			goto process_ack;
		}

		/* Fix seg_seq, and chew the gap off the front. */
		seg_seq = tcp->tcp_rnxt;
	}

	if ((flags & TH_SYN) && gap > 0 && rgap < 0) {
		/*
		 * Make sure that when we accept the connection, we pick
		 * an ISS greater than (tcp_snxt + ISS_INCR/2) for the
		 * old connection.
		 *
		 * The next ISS generated is equal to tcp_iss_incr_extra
		 * + ISS_INCR/2 + other components depending on the
		 * value of tcp_strong_iss. We pre-calculate the new
		 * ISS here and compare with tcp_snxt to determine if
		 * we need to make adjustment to tcp_iss_incr_extra.
		 *
		 * The above calculation is ugly and is a
		 * waste of CPU cycles...
		 */
		uint32_t new_iss = tcps->tcps_iss_incr_extra;
		int32_t adj;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		switch (tcps->tcps_strong_iss) {
		case 2: {
			/* Add time and MD5 components. */
			uint32_t answer[4];
			struct {
				uint32_t ports;
				in6_addr_t src;
				in6_addr_t dst;
			} arg;
			MD5_CTX context;

			mutex_enter(&tcps->tcps_iss_key_lock);
			context = tcps->tcps_iss_key;
			mutex_exit(&tcps->tcps_iss_key_lock);
			arg.ports = tcp->tcp_ports;
			/* We use MAPPED addresses in tcp_iss_init */
			arg.src = tcp->tcp_ip_src_v6;
			if (tcp->tcp_ipversion == IPV4_VERSION) {
				IN6_IPADDR_TO_V4MAPPED(
				    tcp->tcp_ipha->ipha_dst,
				    &arg.dst);
			} else {
				arg.dst =
				    tcp->tcp_ip6h->ip6_dst;
			}
			MD5Update(&context, (uchar_t *)&arg,
			    sizeof (arg));
			MD5Final((uchar_t *)answer, &context);
			answer[0] ^= answer[1] ^ answer[2] ^ answer[3];
			new_iss += (gethrtime() >> ISS_NSEC_SHT) + answer[0];
			break;
		}
		case 1:
			/* Add time component and min random (i.e. 1). */
			new_iss += (gethrtime() >> ISS_NSEC_SHT) + 1;
			break;
		default:
			/* Add only time component. */
			new_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
			break;
		}
		if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) {
			/*
			 * New ISS not guaranteed to be ISS_INCR/2
			 * ahead of the current tcp_snxt, so add the
			 * difference to tcp_iss_incr_extra.
			 */
			tcps->tcps_iss_incr_extra += adj;
		}
		/*
		 * If tcp_clean_death() cannot perform the task now,
		 * drop the SYN packet and let the other side re-xmit.
		 * Otherwise pass the SYN packet back in, since the
		 * old tcp state has been cleaned up or freed.
		 */
		if (tcp_clean_death(tcp, 0, 27) == -1)
			goto done;
		/*
		 * We will come back to tcp_rput_data
		 * on the global queue. Packets destined
		 * for the global queue will be checked
		 * with global policy. But the policy for
		 * this packet has already been checked as
		 * this was destined for the detached
		 * connection. We need to bypass policy
		 * check this time by attaching a dummy
		 * ipsec_in with ipsec_in_dont_check set.
		 */
		connp = ipcl_classify(mp, tcp->tcp_connp->conn_zoneid, ipst);
		if (connp != NULL) {
			TCP_STAT(tcps, tcp_time_wait_syn_success);
			tcp_reinput(connp, mp, tcp->tcp_connp->conn_sqp);
			return;
		}
		goto done;
	}

	/*
	 * rgap is the remaining receive window relative to this segment:
	 * a negative value means that -rgap bytes of the segment extend
	 * past the right edge of the window and must be trimmed off.
	 */
	if (rgap < 0) {
		BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs);
		UPDATE_MIB(&tcps->tcps_mib, tcpInDataPastWinBytes, -rgap);
		/* Fix seg_len and make sure there is something left. */
		seg_len += rgap;
		if (seg_len <= 0) {
			if (flags & TH_RST) {
				goto done;
			}
			flags |= TH_ACK_NEEDED;
			seg_len = 0;
			goto process_ack;
		}
	}
	/*
	 * Check whether we can update tcp_ts_recent. This test is
	 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
	 * Extensions for High Performance: An Update", Internet Draft.
	 */
	if (tcp->tcp_snd_ts_ok &&
	    TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) &&
	    SEQ_LEQ(seg_seq, tcp->tcp_rack)) {
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
		tcp->tcp_last_rcv_lbolt = lbolt64;
	}

	if (seg_seq != tcp->tcp_rnxt && seg_len > 0) {
		/* Always ack out of order packets */
		flags |= TH_ACK_NEEDED;
		seg_len = 0;
	} else if (seg_len > 0) {
		BUMP_MIB(&tcps->tcps_mib, tcpInClosed);
		BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs);
		UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len);
	}
	if (flags & TH_RST) {
		(void) tcp_clean_death(tcp, 0, 28);
		goto done;
	}
	if (flags & TH_SYN) {
		tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1,
		    TH_RST|TH_ACK);
		/*
		 * Do not delete the TCP structure if it is in
		 * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13.
		 */
		goto done;
	}
process_ack:
	if (flags & TH_ACK) {
		bytes_acked = (int)(seg_ack - tcp->tcp_suna);
		if (bytes_acked <= 0) {
			if (bytes_acked == 0 && seg_len == 0 &&
			    new_swnd == tcp->tcp_swnd)
				BUMP_MIB(&tcps->tcps_mib, tcpInDupAck);
		} else {
			/* Acks something not sent */
			flags |= TH_ACK_NEEDED;
		}
	}
	if (flags & TH_ACK_NEEDED) {
		/*
		 * Time to send an ack for some reason.
		 */
		tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
		    tcp->tcp_rnxt, TH_ACK);
	}
done:
	if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) {
		DB_CKSUMSTART(mp) = 0;
		mp->b_datap->db_struioflag &= ~STRUIO_EAGER;
		TCP_STAT(tcps, tcp_time_wait_syn_fail);
	}
	freemsg(mp);
}

/*
 * Allocate a T_SVR4_OPTMGMT_REQ.
 * The caller needs to increment tcp_drop_opt_ack_cnt when sending these so
 * that tcp_rput_other can drop the acks.
 */
static mblk_t *
tcp_setsockopt_mp(int level, int cmd, char *opt, int optlen)
{
	mblk_t *mp;
	struct T_optmgmt_req *tor;
	struct opthdr *oh;
	uint_t size;
	char *optptr;

	size = sizeof (*tor) + sizeof (*oh) + optlen;
	mp = allocb(size, BPRI_MED);
	if (mp == NULL)
		return (NULL);

	mp->b_wptr += size;
	mp->b_datap->db_type = M_PROTO;
	tor = (struct T_optmgmt_req *)mp->b_rptr;
	tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
	tor->MGMT_flags = T_NEGOTIATE;
	tor->OPT_length = sizeof (*oh) + optlen;
	tor->OPT_offset = (t_scalar_t)sizeof (*tor);

	oh = (struct opthdr *)&tor[1];
	oh->level = level;
	oh->name = cmd;
	oh->len = optlen;
	if (optlen != 0) {
		optptr = (char *)&oh[1];
		bcopy(opt, optptr, optlen);
	}
	return (mp);
}
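#ifdef TCP_TW_GAP_EXAMPLE
/*
 * Illustrative sketch only of the gap/rgap arithmetic used by
 * tcp_time_wait_processing() above. Worked example: with rnxt = 1000,
 * rwnd = 4096 and a segment (seq 900, len 300), gap = -100 (100 bytes
 * already received) and rgap = 3896; trimming the gap leaves 200 fresh
 * bytes starting at seq 1000. Function name and guard macro are
 * hypothetical.
 */
static void
tcp_example_trim(uint32_t *seg_seq, int32_t *seg_len, uint32_t rnxt,
    uint32_t rwnd)
{
	int32_t gap = *seg_seq - rnxt;
	int32_t rgap = rwnd - (gap + *seg_len);

	if (gap < 0) {			/* duplicate prefix: chew it off */
		*seg_len += gap;
		*seg_seq = rnxt;
	}
	if (rgap < 0)			/* data past the window: trim tail */
		*seg_len += rgap;
}
#endif	/* TCP_TW_GAP_EXAMPLE */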
/*
 * TCP Timers Implementation.
 */
timeout_id_t
tcp_timeout(conn_t *connp, void (*f)(void *), clock_t tim)
{
	mblk_t *mp;
	tcp_timer_t *tcpt;
	tcp_t *tcp = connp->conn_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(connp->conn_sqp != NULL);

	TCP_DBGSTAT(tcps, tcp_timeout_calls);

	if (tcp->tcp_timercache == NULL) {
		mp = tcp_timermp_alloc(KM_NOSLEEP | KM_PANIC);
	} else {
		TCP_DBGSTAT(tcps, tcp_timeout_cached_alloc);
		mp = tcp->tcp_timercache;
		tcp->tcp_timercache = mp->b_next;
		mp->b_next = NULL;
		ASSERT(mp->b_wptr == NULL);
	}

	CONN_INC_REF(connp);
	tcpt = (tcp_timer_t *)mp->b_rptr;
	tcpt->connp = connp;
	tcpt->tcpt_proc = f;
	tcpt->tcpt_tid = timeout(tcp_timer_callback, mp, tim);
	return ((timeout_id_t)mp);
}

static void
tcp_timer_callback(void *arg)
{
	mblk_t *mp = (mblk_t *)arg;
	tcp_timer_t *tcpt;
	conn_t *connp;

	tcpt = (tcp_timer_t *)mp->b_rptr;
	connp = tcpt->connp;
	squeue_fill(connp->conn_sqp, mp,
	    tcp_timer_handler, connp, SQTAG_TCP_TIMER);
}

static void
tcp_timer_handler(void *arg, mblk_t *mp, void *arg2)
{
	tcp_timer_t *tcpt;
	conn_t *connp = (conn_t *)arg;
	tcp_t *tcp = connp->conn_tcp;

	tcpt = (tcp_timer_t *)mp->b_rptr;
	ASSERT(connp == tcpt->connp);
	ASSERT((squeue_t *)arg2 == connp->conn_sqp);

	/*
	 * If the TCP has reached the closed state, don't proceed any
	 * further. This TCP logically does not exist on the system.
	 * tcpt_proc could for example access queues that have already
	 * been qprocsoff'ed. Also see the comments at the start of
	 * tcp_input().
	 */
	if (tcp->tcp_state != TCPS_CLOSED) {
		(*tcpt->tcpt_proc)(connp);
	} else {
		tcp->tcp_timer_tid = 0;
	}
	tcp_timer_free(connp->conn_tcp, mp);
}

/*
 * There is a potential race between untimeout and the handler firing
 * at the same time: the mblk may be freed by the handler while we are
 * still trying to use it. But since both should execute on the same
 * squeue, this race should not occur.
 */
clock_t
tcp_timeout_cancel(conn_t *connp, timeout_id_t id)
{
	mblk_t *mp = (mblk_t *)id;
	tcp_timer_t *tcpt;
	clock_t delta;
	tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps;

	TCP_DBGSTAT(tcps, tcp_timeout_cancel_reqs);

	if (mp == NULL)
		return (-1);

	tcpt = (tcp_timer_t *)mp->b_rptr;
	ASSERT(tcpt->connp == connp);

	delta = untimeout(tcpt->tcpt_tid);

	if (delta >= 0) {
		TCP_DBGSTAT(tcps, tcp_timeout_canceled);
		tcp_timer_free(connp->conn_tcp, mp);
		CONN_DEC_REF(connp);
	}

	return (delta);
}
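#ifdef TCP_TIMER_EXAMPLE
/*
 * Illustrative sketch only: arming and disarming a TCP timer with the
 * interfaces above. tcp_example_fire, tcp_example_arm_and_cancel and
 * the guard macro are hypothetical; the callback always runs behind
 * the connection's squeue, single-threaded with other tcp processing.
 */
static void
tcp_example_fire(void *arg)
{
	conn_t *connp = arg;

	/* executes behind connp->conn_sqp; tcp state may be examined */
}

static void
tcp_example_arm_and_cancel(tcp_t *tcp)
{
	timeout_id_t tid;

	tid = tcp_timeout(tcp->tcp_connp, tcp_example_fire, 100);
	/* ... later, from the same squeue ... */
	if (tcp_timeout_cancel(tcp->tcp_connp, tid) >= 0) {
		/* timer had not fired; the conn_t reference was dropped */
	}
}
#endif	/* TCP_TIMER_EXAMPLE */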
/*
 * Allocate space for the timer event. The allocation looks like an mblk,
 * but it is not a proper mblk. To avoid confusion we set b_wptr to NULL.
 *
 * Dealing with failures: If we can't allocate from the timer cache we try
 * allocating from dblk caches using allocb_tryhard(). In this case b_wptr
 * points to b_rptr.
 * If we can't allocate anything using allocb_tryhard(), we perform a last
 * attempt and use kmem_alloc_tryhard(). In this case we set b_wptr to -1 and
 * save the actual allocation size in b_datap.
 */
mblk_t *
tcp_timermp_alloc(int kmflags)
{
	mblk_t *mp = (mblk_t *)kmem_cache_alloc(tcp_timercache,
	    kmflags & ~KM_PANIC);

	if (mp != NULL) {
		mp->b_next = mp->b_prev = NULL;
		mp->b_rptr = (uchar_t *)(&mp[1]);
		mp->b_wptr = NULL;
		mp->b_datap = NULL;
		mp->b_queue = NULL;
		mp->b_cont = NULL;
	} else if (kmflags & KM_PANIC) {
		/*
		 * Failed to allocate memory for the timer. Try allocating from
		 * dblk caches.
		 */
		/* ipclassifier calls this from a constructor - hence no tcps */
		TCP_G_STAT(tcp_timermp_allocfail);
		mp = allocb_tryhard(sizeof (tcp_timer_t));
		if (mp == NULL) {
			size_t size = 0;
			/*
			 * Memory is really low. Try tryhard allocation.
			 *
			 * ipclassifier calls this from a constructor -
			 * hence no tcps
			 */
			TCP_G_STAT(tcp_timermp_allocdblfail);
			mp = kmem_alloc_tryhard(sizeof (mblk_t) +
			    sizeof (tcp_timer_t), &size, kmflags);
			mp->b_rptr = (uchar_t *)(&mp[1]);
			mp->b_next = mp->b_prev = NULL;
			mp->b_wptr = (uchar_t *)-1;
			mp->b_datap = (dblk_t *)size;
			mp->b_queue = NULL;
			mp->b_cont = NULL;
		}
		ASSERT(mp->b_wptr != NULL);
	}
	/* ipclassifier calls this from a constructor - hence no tcps */
	TCP_G_DBGSTAT(tcp_timermp_alloced);

	return (mp);
}

/*
 * Free per-tcp timer cache.
 * It can only contain entries from tcp_timercache.
 */
void
tcp_timermp_free(tcp_t *tcp)
{
	mblk_t *mp;

	while ((mp = tcp->tcp_timercache) != NULL) {
		ASSERT(mp->b_wptr == NULL);
		tcp->tcp_timercache = tcp->tcp_timercache->b_next;
		kmem_cache_free(tcp_timercache, mp);
	}
}

/*
 * Free timer event. Put it on the per-tcp timer cache if there are not
 * too many events there already (currently at most two events are cached).
 * If the event is not allocated from the timer cache, free it right away.
 */
static void
tcp_timer_free(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1 = tcp->tcp_timercache;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	if (mp->b_wptr != NULL) {
		/*
		 * This allocation is not from a timer cache, free it right
		 * away.
		 */
		if (mp->b_wptr != (uchar_t *)-1)
			freeb(mp);
		else
			kmem_free(mp, (size_t)mp->b_datap);
	} else if (mp1 == NULL || mp1->b_next == NULL) {
		/* Cache this timer block for future allocations */
		mp->b_rptr = (uchar_t *)(&mp[1]);
		mp->b_next = mp1;
		tcp->tcp_timercache = mp;
	} else {
		kmem_cache_free(tcp_timercache, mp);
		TCP_DBGSTAT(tcps, tcp_timermp_freed);
	}
}

/*
 * End of TCP Timers implementation.
 */
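#ifdef TCP_TIMERMP_EXAMPLE
/*
 * Illustrative sketch only: how tcp_timer_free() above discriminates
 * the three allocation paths used by tcp_timermp_alloc(). Function name
 * and guard macro are hypothetical.
 */
static const char *
tcp_timermp_origin(const mblk_t *mp)
{
	if (mp->b_wptr == NULL)
		return ("tcp_timercache");	/* kmem cache entry */
	if (mp->b_wptr == (uchar_t *)-1)
		return ("kmem_alloc_tryhard");	/* raw kmem, size in b_datap */
	return ("allocb_tryhard");		/* real mblk, freed via freeb */
}
#endif	/* TCP_TIMERMP_EXAMPLE */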
/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q. Note, the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value which,
 * when checked outside the q's lock, is only an advisory check ...
 */

void
tcp_setqfull(tcp_t *tcp)
{
	queue_t *q = tcp->tcp_wq;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	if (!(q->q_flag & QFULL)) {
		mutex_enter(QLOCK(q));
		if (!(q->q_flag & QFULL)) {
			/* still need to set QFULL */
			q->q_flag |= QFULL;
			tcp->tcp_flow_stopped = B_TRUE;
			mutex_exit(QLOCK(q));
			TCP_STAT(tcps, tcp_flwctl_on);
		} else {
			mutex_exit(QLOCK(q));
		}
	}
}

void
tcp_clrqfull(tcp_t *tcp)
{
	queue_t *q = tcp->tcp_wq;

	if (q->q_flag & QFULL) {
		mutex_enter(QLOCK(q));
		if (q->q_flag & QFULL) {
			q->q_flag &= ~QFULL;
			tcp->tcp_flow_stopped = B_FALSE;
			mutex_exit(QLOCK(q));
			if (q->q_flag & QWANTW)
				qbackenable(q, 0);
		} else {
			mutex_exit(QLOCK(q));
		}
	}
}

/*
 * kstats related to squeues i.e. not per IP instance
 */
static void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t *ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

static void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

static void *
tcp_kstat2_init(netstackid_t stackid, tcp_stat_t *tcps_statisticsp)
{
	kstat_t *ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait",		KSTAT_DATA_UINT64 },
		{ "tcp_time_wait_syn",		KSTAT_DATA_UINT64 },
		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64 },
		{ "tcp_time_wait_syn_fail",	KSTAT_DATA_UINT64 },
		{ "tcp_reinput_syn",		KSTAT_DATA_UINT64 },
		{ "tcp_ip_output",		KSTAT_DATA_UINT64 },
		{ "tcp_detach_non_time_wait",	KSTAT_DATA_UINT64 },
		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64 },
		{ "tcp_time_wait_reap",		KSTAT_DATA_UINT64 },
		{ "tcp_clean_death_nondetached", KSTAT_DATA_UINT64 },
		{ "tcp_reinit_calls",		KSTAT_DATA_UINT64 },
		{ "tcp_eager_err1",		KSTAT_DATA_UINT64 },
		{ "tcp_eager_err2",		KSTAT_DATA_UINT64 },
		{ "tcp_eager_blowoff_calls",	KSTAT_DATA_UINT64 },
		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64 },
		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64 },
		{ "tcp_not_hard_bound",		KSTAT_DATA_UINT64 },
		{ "tcp_no_listener",		KSTAT_DATA_UINT64 },
		{ "tcp_found_eager",		KSTAT_DATA_UINT64 },
		{ "tcp_wrong_queue",		KSTAT_DATA_UINT64 },
		{ "tcp_found_eager_binding1",	KSTAT_DATA_UINT64 },
		{ "tcp_found_eager_bound1",	KSTAT_DATA_UINT64 },
		{ "tcp_eager_has_listener1",	KSTAT_DATA_UINT64 },
		{ "tcp_open_alloc",		KSTAT_DATA_UINT64 },
		{ "tcp_open_detached_alloc",	KSTAT_DATA_UINT64 },
		{ "tcp_rput_time_wait",		KSTAT_DATA_UINT64 },
		{ "tcp_listendrop",		KSTAT_DATA_UINT64 },
		{ "tcp_listendropq0",		KSTAT_DATA_UINT64 },
		{ "tcp_wrong_rq",		KSTAT_DATA_UINT64 },
		{ "tcp_rsrv_calls",		KSTAT_DATA_UINT64 },
		{ "tcp_eagerfree2",		KSTAT_DATA_UINT64 },
		{ "tcp_eagerfree3",		KSTAT_DATA_UINT64 },
		{ "tcp_eagerfree4",		KSTAT_DATA_UINT64 },
		{ "tcp_eagerfree5",		KSTAT_DATA_UINT64 },
		{ "tcp_timewait_syn_fail",	KSTAT_DATA_UINT64 },
		{ "tcp_listen_badflags",	KSTAT_DATA_UINT64 },
		{ "tcp_timeout_calls",		KSTAT_DATA_UINT64 },
		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64 },
		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64 },
		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_freed",		KSTAT_DATA_UINT64 },
		{ "tcp_push_timer_cnt",		KSTAT_DATA_UINT64 },
		{ "tcp_ack_timer_cnt",		KSTAT_DATA_UINT64 },
		{ "tcp_ire_null1",		KSTAT_DATA_UINT64 },
		{ "tcp_ire_null",		KSTAT_DATA_UINT64 },
		{ "tcp_ip_send",		KSTAT_DATA_UINT64 },
		{ "tcp_ip_ire_send",		KSTAT_DATA_UINT64 },
		{ "tcp_wsrv_called",		KSTAT_DATA_UINT64 },
		{ "tcp_flwctl_on",		KSTAT_DATA_UINT64 },
		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64 },
		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64 },
		{ "tcp_rput_v6_error",		KSTAT_DATA_UINT64 },
		{ "tcp_out_sw_cksum",		KSTAT_DATA_UINT64 },
		{ "tcp_out_sw_cksum_bytes",	KSTAT_DATA_UINT64 },
		{ "tcp_zcopy_on",		KSTAT_DATA_UINT64 },
		{ "tcp_zcopy_off",		KSTAT_DATA_UINT64 },
		{ "tcp_zcopy_backoff",		KSTAT_DATA_UINT64 },
		{ "tcp_zcopy_disable",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_pkt_out",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_pkt_out_v4",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_pkt_out_v6",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_discarded",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_conn_halted1",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_conn_halted2",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_conn_halted3",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_conn_resumed1",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_conn_resumed2",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_legacy_small",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_legacy_all",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_legacy_ret",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_allocfail",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_addpdescfail",	KSTAT_DATA_UINT64 },
		{ "tcp_mdt_allocd",		KSTAT_DATA_UINT64 },
		{ "tcp_mdt_linked",		KSTAT_DATA_UINT64 },
		{ "tcp_fusion_flowctl",		KSTAT_DATA_UINT64 },
		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64 },
		{ "tcp_fusion_urg",		KSTAT_DATA_UINT64 },
		{ "tcp_fusion_putnext",		KSTAT_DATA_UINT64 },
		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64 },
		{ "tcp_fusion_aborted",		KSTAT_DATA_UINT64 },
		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64 },
		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64 },
		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64 },
		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64 },
		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64 },
		{ "tcp_sock_fallback",		KSTAT_DATA_UINT64 },
		{ "tcp_lso_enabled",		KSTAT_DATA_UINT64 },
		{ "tcp_lso_disabled",		KSTAT_DATA_UINT64 },
		{ "tcp_lso_times",		KSTAT_DATA_UINT64 },
		{ "tcp_lso_pkt_out",		KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL, stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcps_statisticsp, sizeof (template));
	ksp->ks_data = (void *)tcps_statisticsp;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}
static void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * TCP Kstats implementation
 */
static void *
tcp_kstat_init(netstackid_t stackid, tcp_stack_t *tcps)
{
	kstat_t *ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
		{ "maxConn",		KSTAT_DATA_INT32, 0 },
		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
		{ "outAck",		KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
		{ "outControl",		KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	template.rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}
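/*
 * The "tcp"/"tcp" kstat of class "mib2" installed above carries the
 * same per-stack MIB counters that netstat -s -P tcp reports; it can
 * also be inspected directly with kstat(1M), e.g.:
 *
 *	$ kstat -m tcp -n tcp
 */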
static void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t *tcp;
	connf_t *connfp;
	conn_t *connp;
	int i;
	netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t *ns;
	tcp_stack_t *tcps;
	ip_stack_t *ipst;

	if ((kp == NULL) || (kp->ks_data == NULL))
		return (EIO);

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}
	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}

	tcpkp->activeOpens.value.ui32 = tcps->tcps_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcps->tcps_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcps->tcps_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcps->tcps_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcps->tcps_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcps->tcps_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 = tcps->tcps_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcps->tcps_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcps->tcps_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcps->tcps_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcps->tcps_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcps->tcps_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcps->tcps_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcps->tcps_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcps->tcps_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcps->tcps_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcps->tcps_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcps->tcps_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcps->tcps_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcps->tcps_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcps->tcps_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcps->tcps_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcps->tcps_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 =
	    tcps->tcps_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 =
	    tcps->tcps_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 =
	    tcps->tcps_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 =
	    tcps->tcps_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcps->tcps_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcps->tcps_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 =
	    tcps->tcps_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 =
	    tcps->tcps_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 =
	    tcps->tcps_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 =
	    tcps->tcps_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcps->tcps_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcps->tcps_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcps->tcps_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcps->tcps_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcps->tcps_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcps->tcps_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcps->tcps_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcps->tcps_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 =
	    tcps->tcps_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 =
	    tcps->tcps_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcps->tcps_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcps->tcps_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcps->tcps_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 =
	    tcps->tcps_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcps->tcps_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}
void
tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp)
{
	uint16_t hdr_len;
	ipha_t *ipha;
	uint8_t *nexthdrp;
	tcph_t *tcph;
	tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps;

	/* Already has an eager */
	if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) {
		TCP_STAT(tcps, tcp_reinput_syn);
		squeue_enter(connp->conn_sqp, mp, connp->conn_recv,
		    connp, SQTAG_TCP_REINPUT_EAGER);
		return;
	}

	switch (IPH_HDR_VERSION(mp->b_rptr)) {
	case IPV4_VERSION:
		ipha = (ipha_t *)mp->b_rptr;
		hdr_len = IPH_HDR_LENGTH(ipha);
		break;
	case IPV6_VERSION:
		if (!ip_hdr_length_nexthdr_v6(mp, (ip6_t *)mp->b_rptr,
		    &hdr_len, &nexthdrp)) {
			CONN_DEC_REF(connp);
			freemsg(mp);
			return;
		}
		break;
	}

	tcph = (tcph_t *)&mp->b_rptr[hdr_len];
	if ((tcph->th_flags[0] & (TH_SYN|TH_ACK|TH_RST|TH_URG)) == TH_SYN) {
		mp->b_datap->db_struioflag |= STRUIO_EAGER;
		DB_CKSUMSTART(mp) = (intptr_t)sqp;
	}

	squeue_fill(connp->conn_sqp, mp, connp->conn_recv, connp,
	    SQTAG_TCP_REINPUT);
}

static squeue_func_t
tcp_squeue_switch(int val)
{
	squeue_func_t rval = squeue_fill;

	switch (val) {
	case 1:
		rval = squeue_enter_nodrain;
		break;
	case 2:
		rval = squeue_enter;
		break;
	default:
		break;
	}
	return (rval);
}
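#ifdef TCP_SQUEUE_SWITCH_EXAMPLE
/*
 * Illustrative sketch only: resolving a squeue entry point from a
 * tunable once at initialization, the way tcp_squeue_switch() above is
 * meant to be used (0 -> squeue_fill, 1 -> squeue_enter_nodrain,
 * 2 -> squeue_enter). The variable, function and guard macro are
 * hypothetical.
 */
static squeue_func_t tcp_example_squeue_func;

static void
tcp_example_resolve_squeue_func(int tunable_val)
{
	tcp_example_squeue_func = tcp_squeue_switch(tunable_val);
}
#endif	/* TCP_SQUEUE_SWITCH_EXAMPLE */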
/*
 * This is called once for each squeue - globally for all stack
 * instances.
 */
static void
tcp_squeue_add(squeue_t *sqp)
{
	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
	    sizeof (tcp_squeue_priv_t), KM_SLEEP);

	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
	tcp_time_wait->tcp_time_wait_tid = timeout(tcp_time_wait_collector,
	    sqp, TCP_TIME_WAIT_DELAY);
	if (tcp_free_list_max_cnt == 0) {
		int tcp_ncpus = ((boot_max_ncpus == -1) ?
		    max_ncpus : boot_max_ncpus);

		/*
		 * Limit number of entries to 1% of available memory /
		 * tcp_ncpus.
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
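#ifdef TCP_FREE_LIST_EXAMPLE
/*
 * Illustrative sketch only: the arithmetic behind the
 * tcp_free_list_max_cnt default computed in tcp_squeue_add() above.
 * With 4 GB free (freemem * PAGESIZE), 8 CPUs and a (hypothetical)
 * 4 KB tcp_t, this yields (4G / 100) / (8 * 4K), roughly 1310 entries.
 * Function name and guard macro are hypothetical.
 */
static uint_t
tcp_example_free_list_max(uint64_t freebytes, int ncpus, size_t tcpsz)
{
	return ((uint_t)(freebytes / (ncpus * tcpsz * 100)));
}
#endif	/* TCP_FREE_LIST_EXAMPLE */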