/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
const char tcp_version[] = "%Z%%M%	%I%	%E% SMI";

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/vtrace.h>
#include <sys/kmem.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/dlpi.h>
#include <sys/multidata.h>
#include <sys/multidata_impl.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/zone.h>
#include <sys/sunldi.h>

#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/if.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ndp.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/ipdrop.h>
#include <inet/tcp_trace.h>

#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue.h>
#include <inet/kssl/ksslapi.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>

/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in the PSARC case directory)
 *
 * The entire tcp state is contained in the tcp_t and conn_t structures,
 * which are allocated in tandem using ipcl_conn_create() with
 * IPCL_CONNTCP passed as a flag.  We use 'conn_ref' and 'conn_lock' to
 * protect the references on the tcp_t.
 * The tcp_t structure is never compressed,
 * and packets always land on the correct TCP perimeter from the time the
 * eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for the detached state and no IPSEC checking
 * is required).  The global queue is still allocated to send out resets
 * for connections which have no listeners, and IP directly calls
 * tcp_xmit_listeners_reset(), which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads.  To access a tcp member, the thread should
 * always be behind the squeue (via squeue_enter, squeue_enter_nodrain, or
 * squeue_fill).  Since the squeues allow a direct function call, the caller
 * can pass any tcp function having the prototype of edesc_t as an argument
 * (different from the traditional STREAMS model where packets come in only
 * at designated entry points).  The functions that can be directly
 * called via squeue are listed before the usual function prototypes.
 *
 * Referencing:
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear while it is needed.  When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'.  One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which could
 * have dropped the last TCP reference.  So as long as the connection is
 * in the attached state, i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t.  The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash.  Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on the write side or by doing a classify on the read side and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill.  For the
 * read side, the classifier itself puts the reference under the fanout lock
 * to make sure that the tcp can't disappear before it gets processed.  The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it.  The
 * squeue assignment is done based on the CPU the application
 * is running on.  So for outbound connections, processing is always done
 * on the application CPU, which might be different from the CPU that the
 * NIC interrupts for incoming packets.  An optimal way would be to figure
 * out the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out, but the
 * fact is that in most cases the transmit is ACK driven, where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * transmitted.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter.  Read the meta comment on
 * top of tcp_conn_request().  But briefly, the squeue is picked by
 * ip_tcp_input()/ip_fanout_tcp_v6() based on the interrupted CPU.
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via the squeue to do the close and mark the tcp as detached if the
 * connection was in state TCPS_ESTABLISHED or greater.  In the latter case,
 * TCP keeps its reference but tcp_close() always drops IP's reference.  So
 * if the tcp was not killed, it is sitting in the time_wait list with 2
 * references - 1 for TCP and 1 because it is in the classifier's connected
 * hash.  This is the condition we use to determine that it is OK to clean
 * up the tcp outside of the squeue when the time wait expires (check the
 * ref under the fanout and conn_lock and if it is 2, remove it from the
 * fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under the squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set).  So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provision for (AF_INET, SOCK_STREAM) sockets which
 * can't have 'ipv6_recvpktinfo' set, and for these types of sockets, IP
 * will never send an M_CTL to TCP.  As such, ip_tcp_input(), which handles
 * all TCP packets from the wire, makes an IPCL_IS_TCP4_CONNECTED_NO_POLICY
 * check to send packets directly to tcp_rput_data via the squeue.  Everyone
 * else comes through tcp_input() on the read side.
 *
 * We also make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP.  This allows us to skip
 * putting the tcp in the acceptor hash since a sockfs listener can never
 * become an acceptor, and also avoid allocating a tcp_t for the acceptor
 * STREAM since the eager has already been allocated and the accept now
 * happens on the acceptor STREAM.  There is a big blob of comment on top
 * of tcp_conn_request explaining the new accept.  When the socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to the old
 * behaviour.  Once tcp_issocket is unset, it is never set again for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter,
 * all IPsec processing is deferred to IP, including checking new
 * connections and setting IPsec policies for new connections.  The
 * only exception is tcp_xmit_listeners_reset(), which is called
 * directly from IP and needs the policy check to see if a TH_RST
 * can be sent out.
 *
 * PFHooks notes :
 *
 * For the MDT case, one meta buffer contains multiple packets.  Mblks for
 * every packet are assembled and passed to the hooks.  When packets are
 * blocked, or the boundary of any packet is changed, the MDT processing
 * is stopped, and the packets of the meta buffer are sent to the IP path
 * one by one.
 */

extern major_t TCP6_MAJ;

/*
 * Values for squeue switch:
 * 1: squeue_enter_nodrain
 * 2: squeue_enter
 * 3: squeue_fill
 */
int tcp_squeue_close = 2;	/* Settable in /etc/system */
int tcp_squeue_wput = 2;

squeue_func_t tcp_squeue_close_proc;
squeue_func_t tcp_squeue_wput_proc;
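/*
 * Illustrative sketch (an assumption, not the original code): the
 * tcp_squeue_switch() helper declared later in this file presumably maps
 * the tunables above to the corresponding squeue entry functions, roughly:
 *
 *	static squeue_func_t
 *	tcp_squeue_switch(int val)
 *	{
 *		switch (val) {
 *		case 1:
 *			return (squeue_enter_nodrain);
 *		case 3:
 *			return (squeue_fill);
 *		default:
 *			return (squeue_enter);
 *		}
 *	}
 */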
/*
 * This controls how tiny a write must be before we try to copy it
 * into the mblk on the tail of the transmit queue.  Not much
 * speedup is observed for values larger than sixteen.  Zero will
 * disable the optimisation.
 */
int tcp_tx_pull_len = 16;

/*
 * TCP Statistics.
 *
 * How TCP statistics work.
 *
 * There are two types of statistics invoked by two macros.
 *
 * TCP_STAT(name) does non-atomic increment of a named stat counter. It is
 * supposed to be used in non MT-hot paths of the code.
 *
 * TCP_DBGSTAT(name) does atomic increment of a named stat counter. It is
 * supposed to be used for DEBUG purposes and may be used on a hot path.
 *
 * Both TCP_STAT and TCP_DBGSTAT counters are available using kstat
 * (use "kstat tcp" to get them).
 *
 * There is also an additional debugging facility that marks
 * tcp_clean_death() instances and saves them in the tcp_t structure. It
 * is triggered by the TCP_TAG_CLEAN_DEATH define. Also, there is a global
 * array of counters for tcp_clean_death() calls that counts the number of
 * times each tag was hit. It is triggered by the TCP_CLD_COUNTERS define.
 *
 * How to add new counters.
 *
 * 1) Add a field in the tcp_stat structure describing your counter.
 * 2) Add a line in the template in tcp_kstat2_init() with the name
 *    of the counter.
 *
 *    IMPORTANT!! - make sure that both are in sync !!
 * 3) Use either TCP_STAT or TCP_DBGSTAT with the name.
 *
 * Please avoid using private counters which are not kstat-exported.
 *
 * TCP_TAG_CLEAN_DEATH set to 1 enables tagging of tcp_clean_death()
 * instances in the tcp_t structure.
 *
 * TCP_MAX_CLEAN_DEATH_TAG is the maximum number of possible clean death
 * tags.
 */

#ifndef TCP_DEBUG_COUNTER
#ifdef DEBUG
#define	TCP_DEBUG_COUNTER 1
#else
#define	TCP_DEBUG_COUNTER 0
#endif
#endif

#define	TCP_CLD_COUNTERS 0

#define	TCP_TAG_CLEAN_DEATH 1
#define	TCP_MAX_CLEAN_DEATH_TAG 32

#ifdef lint
static int _lint_dummy_;
#endif

#if TCP_CLD_COUNTERS
static uint_t tcp_clean_death_stat[TCP_MAX_CLEAN_DEATH_TAG];
#define	TCP_CLD_STAT(x) tcp_clean_death_stat[x]++
#elif defined(lint)
#define	TCP_CLD_STAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_CLD_STAT(x)
#endif

#if TCP_DEBUG_COUNTER
#define	TCP_DBGSTAT(tcps, x) \
	atomic_add_64(&((tcps)->tcps_statistics.x.value.ui64), 1)
#define	TCP_G_DBGSTAT(x) \
	atomic_add_64(&(tcp_g_statistics.x.value.ui64), 1)
#elif defined(lint)
#define	TCP_DBGSTAT(tcps, x) ASSERT(_lint_dummy_ == 0);
#define	TCP_G_DBGSTAT(x) ASSERT(_lint_dummy_ == 0);
#else
#define	TCP_DBGSTAT(tcps, x)
#define	TCP_G_DBGSTAT(x)
#endif

#define	TCP_G_STAT(x)	(tcp_g_statistics.x.value.ui64++)

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;

/*
 * Call either ip_output or ip_output_v6. This replaces putnext() calls on
 * the tcp write side.
 */
#define	CALL_IP_WPUT(connp, q, mp) { \
	tcp_stack_t	*tcps; \
 \
	tcps = connp->conn_netstack->netstack_tcp; \
	ASSERT(((q)->q_flag & QREADR) == 0); \
	TCP_DBGSTAT(tcps, tcp_ip_output); \
	connp->conn_send(connp, (mp), (q), IP_WPUT); \
}

/* Macros for timestamp comparisons */
#define	TSTMP_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)
#define	TSTMP_LT(a, b)	((int32_t)((a)-(b)) < 0)
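/*
 * Worked example (illustrative, not part of the original source): the
 * signed 32-bit subtraction makes these comparisons robust across
 * timestamp wraparound.  With a = 0x00000002 and b = 0xfffffffe,
 * (int32_t)(a - b) == 4, so TSTMP_GEQ(a, b) holds even though a < b
 * when compared as unsigned values.
 */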
/*
 * Parameters for TCP Initial Send Sequence number (ISS) generation. When
 * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
 * by adding three components: a time component which grows by 1 every 4096
 * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
 * a per-connection component which grows by 125000 for every new
 * connection; and an "extra" component that grows by a random amount
 * centered approximately on 64000.  This causes the ISS generator to cycle
 * every 4.89 hours if no TCP connections are made, and faster if
 * connections are made.
 *
 * When tcp_strong_iss is set to 0, ISS is calculated by adding two
 * components: a time component which grows by 250000 every second; and
 * a per-connection component which grows by 125000 for every new
 * connection.
 *
 * A third method for generating the ISS, used when tcp_strong_iss is set
 * to 2, is prescribed by Steve Bellovin.  This involves adding time, the
 * 125000 per connection, and a one-way hash (MD5) of the connection ID
 * <sport, dport, src, dst>, a "truly" random (per RFC 1750) number, and
 * a console-entered password.
 */
#define	ISS_INCR	250000
#define	ISS_NSEC_SHT	12

static sin_t	sin_null;	/* Zero address for quick clears */
static sin6_t	sin6_null;	/* Zero address for quick clears */

/*
 * This implementation follows the 4.3BSD interpretation of the urgent
 * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
 * incompatible changes in protocols like telnet and rlogin.
 */
#define	TCP_OLD_URP_INTERPRETATION	1

#define	TCP_IS_DETACHED_NONEAGER(tcp) \
	(TCP_IS_DETACHED(tcp) && \
	    (!(tcp)->tcp_hard_binding))

/*
 * TCP reassembly macros. We hide starting and ending sequence numbers in
 * b_next and b_prev of messages on the reassembly queue.  The messages are
 * chained using b_cont.  These macros are used in tcp_reass() so we don't
 * have to see the ugly casts and assignments.
 */
#define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
#define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
					(mblk_t *)(uintptr_t)(u))
#define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
#define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
					(mblk_t *)(uintptr_t)(u))
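/*
 * Illustrative usage (not part of the original source): stashing the
 * sequence range of an out-of-order segment on the reassembly queue,
 * assuming 'mp' holds a segment starting at sequence 'start' that
 * carries 'len' bytes:
 *
 *	TCP_REASS_SET_SEQ(mp, start);
 *	TCP_REASS_SET_END(mp, start + len);
 *	ASSERT(TCP_REASS_END(mp) - TCP_REASS_SEQ(mp) == len);
 */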
/*
 * Implementation of TCP Timers.
 * =============================
 *
 * INTERFACE:
 *
 * There are two basic functions dealing with tcp timers:
 *
 *	timeout_id_t	tcp_timeout(connp, func, time)
 *	clock_t		tcp_timeout_cancel(connp, timeout_id)
 *	TCP_TIMER_RESTART(tcp, intvl)
 *
 * tcp_timeout() starts a timer for the 'tcp' instance arranging to call
 * 'func' after 'time' ticks have passed. The function called by timeout()
 * must adhere to the same restrictions as a driver soft interrupt handler
 * - it must not sleep or call other functions that might sleep. The value
 * returned is the opaque non-zero timeout identifier that can be passed to
 * tcp_timeout_cancel() to cancel the request. The call to tcp_timeout()
 * may fail in which case it returns zero. This is different from the
 * timeout(9F) function which never fails.
 *
 * The call-back function 'func' always receives 'connp' as its single
 * argument. It is always executed in the squeue corresponding to the tcp
 * structure. The tcp structure is guaranteed to be present at the time the
 * call-back is called.
 *
 * NOTE: The call-back function 'func' is never called if tcp is in
 * the TCPS_CLOSED state.
 *
 * tcp_timeout_cancel() attempts to cancel a pending tcp_timeout()
 * request. Locks acquired by the call-back routine should not be held
 * across the call to tcp_timeout_cancel() or a deadlock may result.
 *
 * tcp_timeout_cancel() returns -1 if it cannot cancel the timeout request.
 * Otherwise, it returns an integer value greater than or equal to 0. In
 * particular, if the call-back function is already placed on the squeue,
 * it cannot be canceled.
 *
 * NOTE: both tcp_timeout() and tcp_timeout_cancel() should always be called
 *	within squeue context corresponding to the tcp instance. Since the
 *	call-back is also called via the same squeue, there are no race
 *	conditions described in the untimeout(9F) manual page since all
 *	calls are strictly serialized.
 *
 * TCP_TIMER_RESTART() is a macro that attempts to cancel a pending timeout
 *	stored in tcp_timer_tid and starts a new one using
 *	MSEC_TO_TICK(intvl). It always uses the tcp_timer() function as a
 *	call-back and stores the return value of tcp_timeout() in the
 *	tcp->tcp_timer_tid field.
 *
 * NOTE: since the timeout cancellation is not guaranteed, the cancelled
 *	call-back may still be called, so it is possible tcp_timer() will be
 *	called several times. This should not be a problem since tcp_timer()
 *	should always check the tcp instance state.
 *
 *
 * IMPLEMENTATION:
 *
 * TCP timers are implemented using a three-stage process. The call to
 * tcp_timeout() uses the timeout(9F) function to call tcp_timer_callback()
 * when the timer expires. The tcp_timer_callback() arranges the call of
 * the tcp_timer_handler() function via the squeue corresponding to the tcp
 * instance. The tcp_timer_handler() calls the actual requested timeout
 * call-back and passes the tcp instance as an argument to it. Information
 * is passed between stages using the tcp_timer_t structure which contains
 * the connp pointer, the tcp call-back to call and the timeout id returned
 * by timeout(9F).
 *
 * The tcp_timer_t structure is not used directly, it is embedded in an
 * mblk_t-like structure that is used to enter a squeue. The mp->b_rptr of
 * this pseudo mblk points to the beginning of the tcp_timer_t structure.
 * tcp_timeout() returns a pointer to this mblk.
 *
 * The pseudo mblk is allocated from a special tcp_timer_cache kmem cache.
 * It looks like a normal mblk without an actual dblk attached to it.
 *
 * To optimize performance each tcp instance holds a small cache of timer
 * mblocks. In the current implementation it caches up to two timer mblocks
 * per tcp instance. The cache is preserved over tcp frees and is only
 * freed when the whole tcp structure is destroyed by its kmem destructor.
 * Since all tcp timer processing happens on a corresponding squeue, the
 * cache manipulation does not require any locks. Experiments show that the
 * majority of timer mblock allocations are satisfied from the tcp cache
 * and do not involve kmem calls.
 *
 * The tcp_timeout() places a refhold on the connp instance which
 * guarantees that it will be present at the time the call-back function
 * fires. The tcp_timer_handler() drops the reference after calling the
 * call-back, so the call-back function does not need to manipulate the
 * references explicitly.
 */
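/*
 * Illustrative sketch (an assumption, not the original macro): given the
 * description above, TCP_TIMER_RESTART() is presumably implemented along
 * these lines:
 *
 *	#define	TCP_TIMER_RESTART(tcp, intvl) {				\
 *		if ((tcp)->tcp_timer_tid != 0)				\
 *			(void) tcp_timeout_cancel((tcp)->tcp_connp,	\
 *			    (tcp)->tcp_timer_tid);			\
 *		(tcp)->tcp_timer_tid = tcp_timeout((tcp)->tcp_connp,	\
 *		    tcp_timer, MSEC_TO_TICK(intvl));			\
 *	}
 *
 * Because cancellation is not guaranteed once the call-back is on the
 * squeue, tcp_timer() itself must re-check the tcp instance state.
 */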
typedef struct tcp_timer_s {
	conn_t	*connp;
	void	(*tcpt_proc)(void *);
	timeout_id_t	tcpt_tid;
} tcp_timer_t;

static kmem_cache_t *tcp_timercache;
kmem_cache_t	*tcp_sack_info_cache;
kmem_cache_t	*tcp_iphc_cache;

/*
 * For scalability, we must not run a timer for every TCP connection
 * in TIME_WAIT state.  To see why, consider (for a time wait interval of
 * 4 minutes):
 *	1000 connections/sec * 240 seconds/time wait = 240,000 active conn's
 *
 * This list is ordered by time, so you need only delete from the head
 * until you get to entries which aren't old enough to delete yet.
 * The list consists of only the detached TIME_WAIT connections.
 *
 * Note that the timer (tcp_time_wait_expire) is started when the tcp_t
 * becomes detached TIME_WAIT (either by changing the state and already
 * being detached or the other way around). This means that the TIME_WAIT
 * state can be extended (up to doubled) if the connection doesn't become
 * detached for a long time.
 *
 * The list manipulations (including tcp_time_wait_next/prev)
 * are protected by the tcp_time_wait_lock. The content of the
 * detached TIME_WAIT connections is protected by the normal perimeters.
 *
 * This list is per squeue and squeues are shared across the tcp_stack_t's.
 * Things on tcp_time_wait_head remain associated with the tcp_stack_t
 * and conn_netstack.
 * The tcp_t's that are added to tcp_free_list are disassociated and
 * have NULL tcp_tcps and conn_netstack pointers.
 */
typedef struct tcp_squeue_priv_s {
	kmutex_t	tcp_time_wait_lock;
	timeout_id_t	tcp_time_wait_tid;
	tcp_t		*tcp_time_wait_head;
	tcp_t		*tcp_time_wait_tail;
	tcp_t		*tcp_free_list;
	uint_t		tcp_free_list_cnt;
} tcp_squeue_priv_t;

/*
 * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
 * Running it every 5 seconds seems to give the best results.
 */
#define	TCP_TIME_WAIT_DELAY drv_usectohz(5000000)

/*
 * To prevent hogging memory, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TCP_XMIT_LOWATER	4096
#define	TCP_XMIT_HIWATER	49152
#define	TCP_RECV_LOWATER	2048
#define	TCP_RECV_HIWATER	49152

/*
 * PAWS needs a timer for 24 days.  This is the number of ticks in 24 days.
 */
#define	PAWS_TIMEOUT	((clock_t)(24*24*60*60*hz))

#define	TIDUSZ	4096	/* transport interface data unit size */

/*
 * Bind hash list size and hash function.  It has to be a power of 2 for
 * hashing.
 */
#define	TCP_BIND_FANOUT_SIZE	512
#define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
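/*
 * Worked example (illustrative, not part of the original source): for a
 * socket bound to local port 8080, TCP_BIND_HASH(htons(8080)) yields
 * 8080 & 511 == 400, so the tcp_t hangs off bucket 400 of the 512-entry
 * bind fanout table.
 */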
/*
 * Size of the listen and acceptor hash lists.  It has to be a power of 2
 * for hashing.
 */
#define	TCP_FANOUT_SIZE		256

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid) \
		(((uint_t)(accid) >> 8) & (TCP_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid) \
		((uint_t)(accid) & (TCP_FANOUT_SIZE - 1))
#endif	/* _ILP32 */

#define	IP_ADDR_CACHE_SIZE	2048
#define	IP_ADDR_CACHE_HASH(faddr) \
	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))

/*
 * Hash for HSPs uses all 32 bits, since both networks and hosts are in
 * the table.
 */
#define	TCP_HSP_HASH_SIZE 256

#define	TCP_HSP_HASH(addr) \
	(((addr>>24) ^ (addr >>16) ^ \
	    (addr>>8) ^ (addr)) % TCP_HSP_HASH_SIZE)

/*
 * TCP options struct returned from tcp_parse_options.
 */
typedef struct tcp_opt_s {
	uint32_t	tcp_opt_mss;
	uint32_t	tcp_opt_wscale;
	uint32_t	tcp_opt_ts_val;
	uint32_t	tcp_opt_ts_ecr;
	tcp_t		*tcp;
} tcp_opt_t;

/*
 * RFC1323-recommended phrasing of TSTAMP option, for easier parsing
 */

#ifdef _BIG_ENDIAN
#define	TCPOPT_NOP_NOP_TSTAMP ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | \
	(TCPOPT_TSTAMP << 8) | 10)
#else
#define	TCPOPT_NOP_NOP_TSTAMP ((10 << 24) | (TCPOPT_TSTAMP << 16) | \
	(TCPOPT_NOP << 8) | TCPOPT_NOP)
#endif

/*
 * Flags returned from tcp_parse_options.
 */
#define	TCP_OPT_MSS_PRESENT	1
#define	TCP_OPT_WSCALE_PRESENT	2
#define	TCP_OPT_TSTAMP_PRESENT	4
#define	TCP_OPT_SACK_OK_PRESENT	8
#define	TCP_OPT_SACK_PRESENT	16

/* TCP option length */
#define	TCPOPT_NOP_LEN		1
#define	TCPOPT_MAXSEG_LEN	4
#define	TCPOPT_WS_LEN		3
#define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
#define	TCPOPT_TSTAMP_LEN	10
#define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
#define	TCPOPT_SACK_OK_LEN	2
#define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
#define	TCPOPT_REAL_SACK_LEN	4
#define	TCPOPT_MAX_SACK_LEN	36
#define	TCPOPT_HEADER_LEN	2

/* TCP cwnd burst factor. */
#define	TCP_CWND_INFINITE	65535
#define	TCP_CWND_SS		3
#define	TCP_CWND_NORMAL		5

/* Maximum TCP initial cwin (start/restart). */
#define	TCP_MAX_INIT_CWND	8

/*
 * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
 * either tcp_slow_start_initial or tcp_slow_start_after_idle
 * depending on the caller.  If the upper layer has not used the
 * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
 * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
 * If the upper layer has set tcp_init_cwnd, just use it to
 * calculate the tcp_cwnd.
 */
#define	SET_TCP_INIT_CWND(tcp, mss, def_max_init_cwnd) \
{ \
	if ((tcp)->tcp_init_cwnd == 0) { \
		(tcp)->tcp_cwnd = MIN(def_max_init_cwnd * (mss), \
		    MIN(4 * (mss), MAX(2 * (mss), 4380 / (mss) * (mss)))); \
	} else { \
		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss); \
	} \
	tcp->tcp_cwnd_cnt = 0; \
}
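/*
 * Worked example (illustrative, not part of the original source): with
 * no TCP_INIT_CWND override and mss = 1460, the RFC 3390 clamp gives
 *
 *	MIN(4 * 1460, MAX(2 * 1460, 4380 / 1460 * 1460)) = 4380
 *
 * i.e. an initial window of three full-sized segments, further capped
 * by def_max_init_cwnd * mss.
 */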
/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;

/* Host Specific Parameter structure */
typedef struct tcp_hsp {
	struct tcp_hsp	*tcp_hsp_next;
	in6_addr_t	tcp_hsp_addr_v6;
	in6_addr_t	tcp_hsp_subnet_v6;
	uint_t		tcp_hsp_vers;	/* IPV4_VERSION | IPV6_VERSION */
	int32_t		tcp_hsp_sendspace;
	int32_t		tcp_hsp_recvspace;
	int32_t		tcp_hsp_tstamp;
} tcp_hsp_t;
#define	tcp_hsp_addr	V4_PART_OF_V6(tcp_hsp_addr_v6)
#define	tcp_hsp_subnet	V4_PART_OF_V6(tcp_hsp_subnet_v6)

/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void		tcp_conn_request(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2);
void		tcp_accept_finish(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2);
static void	tcp_wput_proto(void *arg, mblk_t *mp, void *arg2);
void		tcp_input(void *arg, mblk_t *mp, void *arg2);
void		tcp_rput_data(void *arg, mblk_t *mp, void *arg2);
static void	tcp_close_output(void *arg, mblk_t *mp, void *arg2);
void		tcp_output(void *arg, mblk_t *mp, void *arg2);
static void	tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2);
static void	tcp_timer_handler(void *arg, mblk_t *mp, void *arg2);
static void	tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2);


/* Prototypes for TCP functions */
static void	tcp_random_init(void);
int		tcp_random(void);
static void	tcp_accept(tcp_t *tcp, mblk_t *mp);
static void	tcp_accept_swap(tcp_t *listener, tcp_t *acceptor,
		    tcp_t *eager);
static int	tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp);
static in_port_t tcp_bindi(tcp_t *tcp, in_port_t port,
    const in6_addr_t *laddr, int reuseaddr, boolean_t quick_connect,
    boolean_t bind_to_req_port_only, boolean_t user_specified);
static void	tcp_closei_local(tcp_t *tcp);
static void	tcp_close_detached(tcp_t *tcp);
static boolean_t tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph,
		    mblk_t *idmp, mblk_t **defermp);
static void	tcp_connect(tcp_t *tcp, mblk_t *mp);
static void	tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static void	tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo, uint_t srcid,
		    uint32_t scope_id);
static int	tcp_clean_death(tcp_t *tcp, int err, uint8_t tag);
static void	tcp_def_q_set(tcp_t *tcp, mblk_t *mp);
static void	tcp_disconnect(tcp_t *tcp, mblk_t *mp);
static char	*tcp_display(tcp_t *tcp, char *, char);
static boolean_t tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum);
static void	tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only);
static void	tcp_eager_unlink(tcp_t *tcp);
static void	tcp_err_ack(tcp_t *tcp, mblk_t *mp, int tlierr,
		    int unixerr);
static void	tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive,
		    int tlierr, int unixerr);
static int	tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
		    char *value, caddr_t cp, cred_t *cr);
static int	tcp_tpistate(tcp_t *tcp);
static void	tcp_bind_hash_insert(tf_t *tf, tcp_t *tcp,
    int caller_holds_lock);
static void	tcp_bind_hash_remove(tcp_t *tcp);
static tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *);
void		tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp);
static void	tcp_acceptor_hash_remove(tcp_t *tcp);
static void	tcp_capability_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_info_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req(tcp_t *tcp, mblk_t *mp);
static void	tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *mp);
void		tcp_g_q_setup(tcp_stack_t *);
void		tcp_g_q_create(tcp_stack_t *);
void		tcp_g_q_destroy(tcp_stack_t *);
static int	tcp_header_init_ipv4(tcp_t *tcp);
static int	tcp_header_init_ipv6(tcp_t *tcp);
int		tcp_init(tcp_t *tcp, queue_t *q);
static int	tcp_init_values(tcp_t *tcp);
static mblk_t	*tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic);
static mblk_t	*tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim,
		    t_scalar_t addr_length);
static void	tcp_ip_ire_mark_advice(tcp_t *tcp);
static void	tcp_ip_notify(tcp_t *tcp);
static mblk_t	*tcp_ire_mp(mblk_t *mp);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_keepalive_killer(void *arg);
static int	tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt);
static void	tcp_mss_set(tcp_t *tcp, uint32_t size, boolean_t do_ss);
static int	tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp,
		    int *do_disconnectp, int *t_errorp, int *sys_errorp);
static boolean_t tcp_allow_connopt_set(int level, int name);
int		tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr);
int		tcp_opt_set(queue_t *q, uint_t optset_context, int level,
		    int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
		    uchar_t *outvalp, void *thisdg_attrs, cred_t *cr,
		    mblk_t *mblk);
static void	tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha);
static int	tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly,
		    uchar_t *ptr, uint_t len);
static int	tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static boolean_t tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt,
		    tcp_stack_t *);
static int	tcp_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *);
static int	tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static void	tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_cnt);
static mblk_t	*tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start);
static void	tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);
static void	tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval,
		    tcp_t *thisstream, cred_t *cr);

static uint_t	tcp_rcv_drain(queue_t *q, tcp_t *tcp);
static void	tcp_sack_rxmit(tcp_t *tcp, uint_t *flags);
static boolean_t tcp_send_rst_chk(tcp_stack_t *);
static void	tcp_ss_rexmit(tcp_t *tcp);
static mblk_t	*tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp,
		    ip6_pkt_t *ipp);
static void	tcp_process_options(tcp_t *, tcph_t *);
static void	tcp_rput_common(tcp_t *tcp, mblk_t *mp);
static void	tcp_rsrv(queue_t *q);
static int	tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd);
static int	tcp_snmp_state(tcp_t *tcp);
static int	tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static int	tcp_host_param_set(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value,
		    caddr_t cp, cred_t *cr);
static int	tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp,
		    cred_t *cr);
static void	tcp_timer(void *arg);
static void	tcp_timer_callback(void *);
static in_port_t tcp_update_next_port(in_port_t port, const tcp_t *tcp,
    boolean_t random);
static in_port_t tcp_get_next_priv_port(const tcp_t *);
static void	tcp_wput_sock(queue_t *q, mblk_t *mp);
void		tcp_wput_accept(queue_t *q, mblk_t *mp);
static void	tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent);
static void	tcp_wput_flush(tcp_t *tcp, mblk_t *mp);
static void	tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp);
static int	tcp_send(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static int	tcp_multisend(queue_t *q, tcp_t *tcp, const int mss,
		    const int tcp_hdr_len, const int tcp_tcp_hdr_len,
		    const int num_sack_blk, int *usable, uint_t *snxt,
		    int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
		    const int mdt_thres);
static void	tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now,
		    int num_sack_blk);
static void	tcp_wsrv(queue_t *q);
static int	tcp_xmit_end(tcp_t *tcp);
static void	tcp_ack_timer(void *arg);
static mblk_t	*tcp_ack_mp(tcp_t *tcp);
static void	tcp_xmit_early_reset(char *str, mblk_t *mp,
		    uint32_t seq, uint32_t ack, int ctl, uint_t ip_hdr_len,
		    zoneid_t zoneid, tcp_stack_t *);
static void	tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq,
		    uint32_t ack, int ctl);
static tcp_hsp_t *tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *);
static tcp_hsp_t *tcp_hsp_lookup_ipv6(in6_addr_t *addr, tcp_stack_t *);
static int	setmaxps(queue_t *q, int maxpsz);
static void	tcp_set_rto(tcp_t *, time_t);
static boolean_t tcp_check_policy(tcp_t *, mblk_t *, ipha_t *, ip6_t *,
		    boolean_t, boolean_t);
static void	tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp,
		    boolean_t ipsec_mctl);
static mblk_t	*tcp_setsockopt_mp(int level, int cmd,
		    char *opt, int optlen);
static int	tcp_build_hdrs(queue_t *, tcp_t *);
static void	tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp,
		    uint32_t seg_seq, uint32_t seg_ack, int seg_len,
		    tcph_t *tcph);
boolean_t	tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp);
boolean_t	tcp_reserved_port_add(int, in_port_t *, in_port_t *);
boolean_t	tcp_reserved_port_del(in_port_t, in_port_t);
boolean_t	tcp_reserved_port_check(in_port_t, tcp_stack_t *);
static tcp_t	*tcp_alloc_temp_tcp(in_port_t, tcp_stack_t *);
static int	tcp_reserved_port_list(queue_t *, mblk_t *, caddr_t,
		    cred_t *);
static mblk_t	*tcp_mdt_info_mp(mblk_t *);
static void	tcp_mdt_update(tcp_t *, ill_mdt_capab_t *, boolean_t);
static int	tcp_mdt_add_attrs(multidata_t *, const mblk_t *,
		    const boolean_t, const uint32_t, const uint32_t,
		    const uint32_t, const uint32_t, tcp_stack_t *);
static void	tcp_multisend_data(tcp_t *, ire_t *, const ill_t *, mblk_t *,
		    const uint_t, const uint_t, boolean_t *);
static mblk_t	*tcp_lso_info_mp(mblk_t *);
static void	tcp_lso_update(tcp_t *, ill_lso_capab_t *);
static void	tcp_send_data(tcp_t *, queue_t *, mblk_t *);
extern mblk_t	*tcp_timermp_alloc(int);
extern void	tcp_timermp_free(tcp_t *);
static void	tcp_timer_free(tcp_t *tcp, mblk_t *mp);
static void	tcp_stop_lingering(tcp_t *tcp);
static void	tcp_close_linger_timeout(void *arg);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_shutdown(netstackid_t stackid, void *arg);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);
static void	*tcp_g_kstat_init(tcp_g_stat_t *);
static void	tcp_g_kstat_fini(kstat_t *);
static void	*tcp_kstat_init(netstackid_t, tcp_stack_t *);
static void	tcp_kstat_fini(netstackid_t, kstat_t *);
static void	*tcp_kstat2_init(netstackid_t, tcp_stat_t *);
static void	tcp_kstat2_fini(netstackid_t, kstat_t *);
static int	tcp_kstat_update(kstat_t *kp, int rw);
void		tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp);
static int	tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp,
		    tcph_t *tcph, uint_t ipvers, mblk_t *idmp);
static int	tcp_conn_create_v4(conn_t *lconnp, conn_t *connp,
		    ipha_t *ipha, tcph_t *tcph, mblk_t *idmp);
static squeue_func_t tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_close(queue_t *, int);
static int	tcpclose_accept(queue_t *);
static int	tcp_modclose(queue_t *);
static void	tcp_wput_mod(queue_t *, mblk_t *);

static void	tcp_squeue_add(squeue_t *);
static boolean_t tcp_zcopy_check(tcp_t *);
static void	tcp_zcopy_notify(tcp_t *);
static mblk_t	*tcp_zcopy_disable(tcp_t *, mblk_t *);
static mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, int);
static void	tcp_ire_ill_check(tcp_t *, ire_t *, ill_t *, boolean_t);

extern void	tcp_kssl_input(tcp_t *, mblk_t *);

void tcp_eager_kill(void *arg, mblk_t *mp, void *arg2);
void tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2);

/*
 * Routines related to the TCP_IOC_ABORT_CONN ioctl command.
 *
 * TCP_IOC_ABORT_CONN is a non-transparent ioctl command used for aborting
 * TCP connections. To invoke this ioctl, a tcp_ioc_abort_conn_t structure
 * (defined in tcp.h) needs to be filled in and passed into the kernel
 * via an I_STR ioctl command (see streamio(7I)). The tcp_ioc_abort_conn_t
 * structure contains the four-tuple of a TCP connection and a range of TCP
 * states (specified by ac_start and ac_end). The use of wildcard addresses
 * and ports is allowed. Connections with a matching four-tuple and a state
 * within the specified range will be aborted. The valid states for the
 * ac_start and ac_end fields are in the range TCPS_SYN_SENT to
 * TCPS_TIME_WAIT, inclusive.
 *
 * An application which has its connection aborted by this ioctl will
 * receive an error that is dependent on the connection state at the time
 * of the abort. If the connection state is < TCPS_TIME_WAIT, an
 * application should behave as though a RST packet has been received. If
 * the connection state is equal to TCPS_TIME_WAIT, the 2MSL timeout will
 * immediately be canceled by the kernel and all resources associated with
 * the connection will be freed.
 */
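/*
 * Illustrative userland usage (an assumption, not part of the original
 * source; the exact tcp_ioc_abort_conn_t field names live in tcp.h):
 *
 *	tcp_ioc_abort_conn_t ac;
 *	struct strioctl si;
 *
 *	bzero(&ac, sizeof (ac));
 *	(fill in the local/remote four-tuple; wildcards are allowed)
 *	ac.ac_start = TCPS_SYN_SENT;
 *	ac.ac_end = TCPS_TIME_WAIT;
 *	si.ic_cmd = TCP_IOC_ABORT_CONN;
 *	si.ic_timout = -1;
 *	si.ic_len = sizeof (ac);
 *	si.ic_dp = (char *)&ac;
 *	if (ioctl(fd, I_STR, &si) < 0)
 *		perror("TCP_IOC_ABORT_CONN");
 */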
static mblk_t	*tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *, tcp_t *);
static void	tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *);
static void	tcp_ioctl_abort_handler(tcp_t *, mblk_t *);
static int	tcp_ioctl_abort(tcp_ioc_abort_conn_t *, tcp_stack_t *tcps);
static void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
static int	tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *, int, int *,
    boolean_t, tcp_stack_t *);

static struct module_info tcp_rinfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo =  {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a module. It only allows SNMP requests
 * to pass through.
 */
struct qinit tcp_mod_rinit = {
	(pfi_t)putnext, NULL, tcp_open, ip_snmpmod_close, NULL, &tcp_rinfo,
};

struct qinit tcp_mod_winit = {
	(pfi_t)ip_snmpmod_wput, NULL, tcp_open, ip_snmpmod_close, NULL,
	&tcp_rinfo
};

/*
 * Entry points for TCP as a device. This is the normal case, which
 * supports the full TCP functionality.
 */
struct qinit tcp_rinit = {
	NULL, (pfi_t)tcp_rsrv, tcp_open, tcp_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since the eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcpclose_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_wput_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP loopback (read side only)
 */
struct qinit tcp_loopback_rinit = {
	(pfi_t)0, (pfi_t)tcp_rsrv, tcp_open, tcp_close, (pfi_t)0,
	&tcp_rinfo, NULL, tcp_fuse_rrw, tcp_fuse_rinfop, STRUIOT_STANDARD
};

struct streamtab tcpinfo = {
	&tcp_rinit, &tcp_winit
};

/*
 * Have to ensure that tcp_g_q_close is not done by an
 * interrupt thread.
 */
static taskq_t *tcp_taskq;

/*
 * TCP has a private interface for other kernel modules to reserve a
 * port range for them to use.  Once reserved, TCP will not use any ports
 * in the range.  This interface relies on the TCP_EXCLBIND feature.  If
 * the semantics of TCP_EXCLBIND are changed, the implementation of this
 * interface has to be verified.
 *
 * There can be TCP_RESERVED_PORTS_ARRAY_MAX_SIZE port ranges.  Each port
 * range can cover at most TCP_RESERVED_PORTS_RANGE_MAX ports.
 * A port range is [port a, port b] inclusive, and each port range is
 * between TCP_SMALLEST_RESERVED_PORT and TCP_LARGEST_RESERVED_PORT
 * inclusive.
 *
 * Note that the default anonymous port range starts from 32768.  There is
 * no port "collision" between that and the reserved port range.  If there
 * is port collision (because the default smallest anonymous port is
 * lowered or some apps specifically bind to ports in the reserved port
 * range), the system may not be able to reserve a port range even if there
 * are enough unbound ports, as a reserved port range contains consecutive
 * ports.
 */
#define	TCP_RESERVED_PORTS_ARRAY_MAX_SIZE	5
#define	TCP_RESERVED_PORTS_RANGE_MAX		1000
#define	TCP_SMALLEST_RESERVED_PORT		10240
#define	TCP_LARGEST_RESERVED_PORT		20480

/* Structure to represent those reserved port ranges. */
typedef struct tcp_rport_s {
	in_port_t	lo_port;
	in_port_t	hi_port;
	tcp_t		**temp_tcp_array;
} tcp_rport_t;

/* Settable only in /etc/system.  Move to ndd? */
boolean_t tcp_icmp_source_quench = B_FALSE;

/*
 * The following assumes that TPI alignment requirements stay on 32-bit
 * boundaries.
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))

/* Template for response to info request. */
static struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

static struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

#define	MS	1L
#define	SECONDS	(1000 * MS)
#define	MINUTES	(60 * SECONDS)
#define	HOURS	(60 * MINUTES)
#define	DAYS	(24 * HOURS)

#define	PARAM_MAX (~(uint32_t)0)

/* Max size IP datagram is 64k - 1 */
#define	TCP_MSS_MAX_IPV4 (IP_MAXPACKET - (sizeof (ipha_t) + sizeof (tcph_t)))
#define	TCP_MSS_MAX_IPV6 (IP_MAXPACKET - (sizeof (ip6_t) + sizeof (tcph_t)))
/* Max of the above */
#define	TCP_MSS_MAX	TCP_MSS_MAX_IPV4

/* Largest TCP port number */
#define	TCP_MAX_PORT	(64 * 1024 - 1)

/*
 * tcp_wroff_xtra is the extra space in front of TCP/IP header for link
 * layer header.  It has to be a multiple of 4.
 */
static tcpparam_t lcl_tcp_wroff_xtra_param = { 0, 256, 32, "tcp_wroff_xtra" };
#define	tcps_wroff_xtra	tcps_wroff_xtra_param->tcp_param_val

/*
 * All of these are alterable, within the min/max values given, at run time.
 * Note that the default value of "tcp_time_wait_interval" is four minutes,
 * per the TCP spec.
 */
/* BEGIN CSTYLED */
static tcpparam_t	lcl_tcp_param_arr[] = {
 /*min		max		value		name */
 { 1*SECONDS,	10*MINUTES,	1*MINUTES,	"tcp_time_wait_interval"},
 { 1,		PARAM_MAX,	128,		"tcp_conn_req_max_q" },
 { 0,		PARAM_MAX,	1024,		"tcp_conn_req_max_q0" },
 { 1,		1024,		1,		"tcp_conn_req_min" },
 { 0*MS,	20*SECONDS,	0*MS,		"tcp_conn_grace_period" },
 { 128,		(1<<30),	1024*1024,	"tcp_cwnd_max" },
 { 0,		10,		0,		"tcp_debug" },
 { 1024,	(32*1024),	1024,		"tcp_smallest_nonpriv_port"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_cinterval"},
 { 1*SECONDS,	PARAM_MAX,	3*MINUTES,	"tcp_ip_abort_linterval"},
 { 500*MS,	PARAM_MAX,	8*MINUTES,	"tcp_ip_abort_interval"},
 { 1*SECONDS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_cinterval"},
 { 500*MS,	PARAM_MAX,	10*SECONDS,	"tcp_ip_notify_interval"},
 { 1,		255,		64,		"tcp_ipv4_ttl"},
 { 10*SECONDS,	10*DAYS,	2*HOURS,	"tcp_keepalive_interval"},
 { 0,		100,		10,		"tcp_maxpsz_multiplier" },
 { 1,		TCP_MSS_MAX_IPV4, 536,		"tcp_mss_def_ipv4"},
 { 1,		TCP_MSS_MAX_IPV4, TCP_MSS_MAX_IPV4, "tcp_mss_max_ipv4"},
 { 1,		TCP_MSS_MAX,	108,		"tcp_mss_min"},
 { 1,		(64*1024)-1,	(4*1024)-1,	"tcp_naglim_def"},
 { 1*MS,	20*SECONDS,	3*SECONDS,	"tcp_rexmit_interval_initial"},
 { 1*MS,	2*HOURS,	60*SECONDS,	"tcp_rexmit_interval_max"},
 { 1*MS,	2*HOURS,	400*MS,		"tcp_rexmit_interval_min"},
 { 1*MS,	1*MINUTES,	100*MS,		"tcp_deferred_ack_interval" },
 { 0,		16,		0,		"tcp_snd_lowat_fraction" },
 { 0,		128000,		0,		"tcp_sth_rcv_hiwat" },
 { 0,		128000,		0,		"tcp_sth_rcv_lowat" },
 { 1,		10000,		3,		"tcp_dupack_fast_retransmit" },
 { 0,		1,		0,		"tcp_ignore_path_mtu" },
 { 1024,	TCP_MAX_PORT,	32*1024,	"tcp_smallest_anon_port"},
 { 1024,	TCP_MAX_PORT,	TCP_MAX_PORT,	"tcp_largest_anon_port"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_HIWATER, "tcp_xmit_hiwat"},
 { TCP_XMIT_LOWATER, (1<<30),	TCP_XMIT_LOWATER, "tcp_xmit_lowat"},
 { TCP_RECV_LOWATER, (1<<30),	TCP_RECV_HIWATER, "tcp_recv_hiwat"},
 { 1,		65536,		4,		"tcp_recv_hiwat_minmss"},
 { 1*SECONDS,	PARAM_MAX,	675*SECONDS,	"tcp_fin_wait_2_flush_interval"},
 { 0,		TCP_MSS_MAX,	64,		"tcp_co_min"},
 { 8192,	(1<<30),	1024*1024,	"tcp_max_buf"},
/*
 * Question:  What default value should I set for tcp_strong_iss?
 */
 { 0,		2,		1,		"tcp_strong_iss"},
 { 0,		65536,		20,		"tcp_rtt_updates"},
 { 0,		1,		1,		"tcp_wscale_always"},
 { 0,		1,		0,		"tcp_tstamp_always"},
 { 0,		1,		1,		"tcp_tstamp_if_wscale"},
 { 0*MS,	2*HOURS,	0*MS,		"tcp_rexmit_interval_extra"},
 { 0,		16,		2,		"tcp_deferred_acks_max"},
 { 1,		16384,		4,		"tcp_slow_start_after_idle"},
 { 1,		4,		4,		"tcp_slow_start_initial"},
 { 10*MS,	50*MS,		20*MS,		"tcp_co_timer_interval"},
 { 0,		2,		2,		"tcp_sack_permitted"},
 { 0,		1,		0,		"tcp_trace"},
 { 0,		1,		1,		"tcp_compression_enabled"},
 { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS, "tcp_ipv6_hoplimit"},
 { 1,		TCP_MSS_MAX_IPV6, 1220,		"tcp_mss_def_ipv6"},
 { 1,		TCP_MSS_MAX_IPV6, TCP_MSS_MAX_IPV6, "tcp_mss_max_ipv6"},
 { 0,		1,		0,		"tcp_rev_src_routes"},
 { 10*MS,	500*MS,		50*MS,		"tcp_local_dack_interval"},
 { 100*MS,	60*SECONDS,	1*SECONDS,	"tcp_ndd_get_info_interval"},
 { 0,		16,		8,		"tcp_local_dacks_max"},
 { 0,		2,		1,		"tcp_ecn_permitted"},
 { 0,		1,		1,		"tcp_rst_sent_rate_enabled"},
 { 0,		PARAM_MAX,	40,		"tcp_rst_sent_rate"},
 { 0,		100*MS,		50*MS,		"tcp_push_timer_interval"},
 { 0,		1,		0,		"tcp_use_smss_as_mss_opt"},
 { 0,		PARAM_MAX,	8*MINUTES,	"tcp_keepalive_abort_interval"},
};
/* END CSTYLED */

/*
 * tcp_mdt_hdr_{head,tail}_min are the leading and trailing spaces of
 * each header fragment in the header buffer.  Each parameter value has
 * to be a multiple of 4 (32-bit aligned).
 */
static tcpparam_t lcl_tcp_mdt_head_param =
	{ 32, 256, 32, "tcp_mdt_hdr_head_min" };
static tcpparam_t lcl_tcp_mdt_tail_param =
	{ 0, 256, 32, "tcp_mdt_hdr_tail_min" };
#define	tcps_mdt_hdr_head_min	tcps_mdt_head_param->tcp_param_val
#define	tcps_mdt_hdr_tail_min	tcps_mdt_tail_param->tcp_param_val

/*
 * tcp_mdt_max_pbufs is the upper limit value that tcp uses to figure out
 * the maximum number of payload buffers associated per Multidata.
 */
static tcpparam_t lcl_tcp_mdt_max_pbufs_param =
	{ 1, MULTIDATA_MAX_PBUFS, MULTIDATA_MAX_PBUFS, "tcp_mdt_max_pbufs" };
#define	tcps_mdt_max_pbufs	tcps_mdt_max_pbufs_param->tcp_param_val

/* Round up the value to the nearest mss. */
#define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))
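/*
 * Worked example (illustrative, not part of the original source):
 * MSS_ROUNDUP(5000, 1460) = ((5000 - 1) / 1460 + 1) * 1460 = 5840,
 * while an exact multiple is left unchanged:
 * MSS_ROUNDUP(2920, 1460) = 2920.
 */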
/*
 * Set ECN capable transport (ECT) code point in IP header.
 *
 * Note that there are 2 ECT code points '01' and '10', which are called
 * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
 * point ECT(0) for TCP as described in RFC 2481.
 */
#define	SET_ECT(tcp, iph) \
	if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
		/* We need to clear the code point first. */ \
		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
	} else { \
		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
	}

/*
 * The format argument to pass to tcp_display().
 * DISP_PORT_ONLY means that the returned string has only port info.
 * DISP_ADDR_AND_PORT means that the returned string also contains the
 * remote and local IP address.
 */
#define	DISP_PORT_ONLY		1
#define	DISP_ADDR_AND_PORT	2

#define	NDD_TOO_QUICK_MSG \
	"ndd get info rate too high for non-privileged users, try again " \
	"later.\n"
#define	NDD_OUT_OF_BUF_MSG	"<< Out of buffer >>\n"

#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)


/* Enable or disable b_cont M_MULTIDATA chaining for MDT. */
boolean_t tcp_mdt_chain = B_TRUE;

/*
 * MDT threshold in the form of effective send MSS multiplier; we take
 * the MDT path if the amount of unsent data exceeds the threshold value
 * (default threshold is 1*SMSS).
 */
uint_t tcp_mdt_smss_threshold = 1;

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD.  Otherwise, the per-connection behavior is
 * determined dynamically during tcp_adapt_ire(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;

/* Settable in /etc/system */
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
uint32_t tcp_random_anon_port = 1;

/*
 * To reach an eager in Q0 which can be dropped due to an incoming
 * new SYN request when Q0 is full, a new doubly linked list is
 * introduced.  This list allows us to select an eager from Q0 in O(1)
 * time.  This is needed to avoid spending too much time walking through
 * the long list of eagers in Q0 when tcp_drop_q0() is called.  Each member
 * of this new list has to be a member of Q0.
 * This list is headed by the listener's tcp_t.  When the list is empty,
 * both the pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0 -
 * of the listener's tcp_t point to the listener's tcp_t itself.
 *
 * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
 * in the list.  MAKE_UNDROPPABLE() takes the eager out of the list.
 * These macros do not affect the eager's membership in Q0.
 */


#define	MAKE_DROPPABLE(listener, eager) \
	if ((eager)->tcp_eager_next_drop_q0 == NULL) { \
		(listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
		    = (eager); \
		(eager)->tcp_eager_prev_drop_q0 = (listener); \
		(eager)->tcp_eager_next_drop_q0 = \
		    (listener)->tcp_eager_next_drop_q0; \
		(listener)->tcp_eager_next_drop_q0 = (eager); \
	}

#define	MAKE_UNDROPPABLE(eager) \
	if ((eager)->tcp_eager_next_drop_q0 != NULL) { \
		(eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0 \
		    = (eager)->tcp_eager_prev_drop_q0; \
		(eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0 \
		    = (eager)->tcp_eager_next_drop_q0; \
		(eager)->tcp_eager_prev_drop_q0 = NULL; \
		(eager)->tcp_eager_next_drop_q0 = NULL; \
	}
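/*
 * Illustrative usage (an assumption, not part of the original source):
 * MAKE_DROPPABLE() inserts the eager right after the listener in the
 * circular list, so tcp_drop_q0() can pick a victim from either end in
 * O(1) without walking Q0, e.g.:
 *
 *	eager = listener->tcp_eager_prev_drop_q0;
 *	if (eager != listener) {
 *		MAKE_UNDROPPABLE(eager);
 *		(drop the eager)
 *	}
 */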
/*
 * If tcp_drop_ack_unsent_cnt is greater than 0, when TCP receives more
 * than tcp_drop_ack_unsent_cnt number of ACKs which acknowledge unsent
 * data, TCP will not respond with an ACK.  RFC 793 requires that
 * TCP responds with an ACK for such a bogus ACK.  By not following
 * the RFC, we prevent TCP from getting into an ACK storm if somehow
 * an attacker successfully spoofs an acceptable segment to our
 * peer; or when our peer is "confused."
 */
uint32_t tcp_drop_ack_unsent_cnt = 10;

/*
 * Hook functions to enable cluster networking.
 * On non-clustered systems these vectors must always be NULL.
 */

void (*cl_inet_listen)(uint8_t protocol, sa_family_t addr_family,
			    uint8_t *laddrp, in_port_t lport) = NULL;
void (*cl_inet_unlisten)(uint8_t protocol, sa_family_t addr_family,
			    uint8_t *laddrp, in_port_t lport) = NULL;
void (*cl_inet_connect)(uint8_t protocol, sa_family_t addr_family,
			    uint8_t *laddrp, in_port_t lport,
			    uint8_t *faddrp, in_port_t fport) = NULL;
void (*cl_inet_disconnect)(uint8_t protocol, sa_family_t addr_family,
			    uint8_t *laddrp, in_port_t lport,
			    uint8_t *faddrp, in_port_t fport) = NULL;

/*
 * The following are defined in ip.c
 */
extern int (*cl_inet_isclusterwide)(uint8_t protocol,
			    sa_family_t addr_family, uint8_t *laddrp);
extern uint32_t (*cl_inet_ipident)(uint8_t protocol,
			    sa_family_t addr_family, uint8_t *laddrp,
			    uint8_t *faddrp);

#define	CL_INET_CONNECT(tcp)	{ \
	if (cl_inet_connect != NULL) { \
		/* \
		 * Running in cluster mode - register active connection \
		 * information \
		 */ \
		if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
			if ((tcp)->tcp_ipha->ipha_src != 0) { \
				(*cl_inet_connect)(IPPROTO_TCP, AF_INET,\
				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_src)),\
				    (in_port_t)(tcp)->tcp_lport, \
				    (uint8_t *)(&((tcp)->tcp_ipha->ipha_dst)),\
				    (in_port_t)(tcp)->tcp_fport); \
			} \
		} else { \
			if (!IN6_IS_ADDR_UNSPECIFIED( \
			    &(tcp)->tcp_ip6h->ip6_src)) {\
				(*cl_inet_connect)(IPPROTO_TCP, AF_INET6,\
				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_src)),\
				    (in_port_t)(tcp)->tcp_lport, \
				    (uint8_t *)(&((tcp)->tcp_ip6h->ip6_dst)),\
				    (in_port_t)(tcp)->tcp_fport); \
			} \
		} \
	} \
}

#define	CL_INET_DISCONNECT(tcp)	{ \
	if (cl_inet_disconnect != NULL) { \
		/* \
		 * Running in cluster mode - deregister active \
		 * connection information \
		 */ \
		if ((tcp)->tcp_ipversion == IPV4_VERSION) { \
			if ((tcp)->tcp_ip_src != 0) { \
				(*cl_inet_disconnect)(IPPROTO_TCP, \
				    AF_INET, \
				    (uint8_t *)(&((tcp)->tcp_ip_src)),\
				    (in_port_t)(tcp)->tcp_lport, \
				    (uint8_t *) \
				    (&((tcp)->tcp_ipha->ipha_dst)),\
				    (in_port_t)(tcp)->tcp_fport); \
			} \
		} else { \
			if (!IN6_IS_ADDR_UNSPECIFIED( \
			    &(tcp)->tcp_ip_src_v6)) { \
				(*cl_inet_disconnect)(IPPROTO_TCP, AF_INET6,\
				    (uint8_t *)(&((tcp)->tcp_ip_src_v6)),\
				    (in_port_t)(tcp)->tcp_lport, \
				    (uint8_t *) \
				    (&((tcp)->tcp_ip6h->ip6_dst)),\
				    (in_port_t)(tcp)->tcp_fport); \
			} \
		} \
	} \
}

/*
 * Cluster networking hook for traversing current connection list.
 * This routine is used to extract the current list of live connections
 * which must continue to be dispatched to this node.
 */
int cl_tcp_walk_list(int (*callback)(cl_tcp_info_t *, void *), void *arg);

static int cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *),
    void *arg, tcp_stack_t *tcps);

/*
 * Figure out the value of the window scale option.  Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
 */
static void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}
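/*
 * Worked example (illustrative, not part of the original source): with
 * tcp_rwnd = 1048576 (1 MB), the loop above shifts five times before the
 * value fits in the 16-bit window field (1048576 >> 5 == 32768, which is
 * <= TCP_MAXWIN), so tcp_rcv_ws is set to 5.
 */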
 */
static void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}

/*
 * Remove a connection from the list of detached TIME_WAIT connections.
 * It returns B_FALSE if it can't remove the connection from the list
 * as the connection has already been removed from the list due to an
 * earlier call to tcp_time_wait_remove(); otherwise it returns B_TRUE.
 */
static boolean_t
tcp_time_wait_remove(tcp_t *tcp, tcp_squeue_priv_t *tcp_time_wait)
{
	boolean_t locked = B_FALSE;

	if (tcp_time_wait == NULL) {
		tcp_time_wait = *((tcp_squeue_priv_t **)
		    squeue_getprivate(tcp->tcp_connp->conn_sqp,
		    SQPRIVATE_TCP));
		mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
		locked = B_TRUE;
	} else {
		ASSERT(MUTEX_HELD(&tcp_time_wait->tcp_time_wait_lock));
	}

	if (tcp->tcp_time_wait_expire == 0) {
		ASSERT(tcp->tcp_time_wait_next == NULL);
		ASSERT(tcp->tcp_time_wait_prev == NULL);
		if (locked)
			mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
		return (B_FALSE);
	}
	ASSERT(TCP_IS_DETACHED(tcp));
	ASSERT(tcp->tcp_state == TCPS_TIME_WAIT);

	if (tcp == tcp_time_wait->tcp_time_wait_head) {
		ASSERT(tcp->tcp_time_wait_prev == NULL);
		tcp_time_wait->tcp_time_wait_head = tcp->tcp_time_wait_next;
		if (tcp_time_wait->tcp_time_wait_head != NULL) {
			tcp_time_wait->tcp_time_wait_head->tcp_time_wait_prev =
			    NULL;
		} else {
			tcp_time_wait->tcp_time_wait_tail = NULL;
		}
	} else if (tcp == tcp_time_wait->tcp_time_wait_tail) {
		ASSERT(tcp != tcp_time_wait->tcp_time_wait_head);
		ASSERT(tcp->tcp_time_wait_next == NULL);
		tcp_time_wait->tcp_time_wait_tail = tcp->tcp_time_wait_prev;
		ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL);
		tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = NULL;
	} else {
		ASSERT(tcp->tcp_time_wait_prev->tcp_time_wait_next == tcp);
		ASSERT(tcp->tcp_time_wait_next->tcp_time_wait_prev == tcp);
		tcp->tcp_time_wait_prev->tcp_time_wait_next =
		    tcp->tcp_time_wait_next;
		tcp->tcp_time_wait_next->tcp_time_wait_prev =
		    tcp->tcp_time_wait_prev;
	}
	tcp->tcp_time_wait_next = NULL;
	tcp->tcp_time_wait_prev = NULL;
	tcp->tcp_time_wait_expire = 0;

	if (locked)
		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
	return (B_TRUE);
}

/*
 * Add a connection to the list of detached TIME_WAIT connections
 * and set its time to expire.
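 *
 * For example, with the default tcps_time_wait_interval of 60000 ms
 * (the tunable is ndd-settable, so treat the number as illustrative)
 * and hz = 100, the expiry below works out to
 * ddi_get_lbolt() + drv_usectohz(60000 * 1000) = lbolt + 6000 ticks.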
 */
static void
tcp_time_wait_append(tcp_t *tcp)
{
	tcp_stack_t *tcps = tcp->tcp_tcps;
	tcp_squeue_priv_t *tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(tcp->tcp_connp->conn_sqp,
	    SQPRIVATE_TCP));

	tcp_timers_stop(tcp);

	/* Freed above */
	ASSERT(tcp->tcp_timer_tid == 0);
	ASSERT(tcp->tcp_ack_tid == 0);

	/* must have happened at the time of detaching the tcp */
	ASSERT(tcp->tcp_ptpahn == NULL);
	ASSERT(tcp->tcp_flow_stopped == 0);
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);
	ASSERT(tcp->tcp_listener == NULL);

	tcp->tcp_time_wait_expire = ddi_get_lbolt();
	/*
	 * The value computed below in tcp->tcp_time_wait_expire may
	 * appear negative or wrap around. That is ok since our
	 * interest is only in the difference between the current lbolt
	 * value and tcp->tcp_time_wait_expire. But the value should not
	 * be zero, since it means the tcp is not in the TIME_WAIT list.
	 * The corresponding comparison in tcp_time_wait_collector() uses
	 * modular arithmetic.
	 */
	tcp->tcp_time_wait_expire +=
	    drv_usectohz(tcps->tcps_time_wait_interval * 1000);
	if (tcp->tcp_time_wait_expire == 0)
		tcp->tcp_time_wait_expire = 1;

	ASSERT(TCP_IS_DETACHED(tcp));
	ASSERT(tcp->tcp_state == TCPS_TIME_WAIT);
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	TCP_DBGSTAT(tcps, tcp_time_wait);

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	if (tcp_time_wait->tcp_time_wait_head == NULL) {
		ASSERT(tcp_time_wait->tcp_time_wait_tail == NULL);
		tcp_time_wait->tcp_time_wait_head = tcp;
	} else {
		ASSERT(tcp_time_wait->tcp_time_wait_tail != NULL);
		ASSERT(tcp_time_wait->tcp_time_wait_tail->tcp_state ==
		    TCPS_TIME_WAIT);
		tcp_time_wait->tcp_time_wait_tail->tcp_time_wait_next = tcp;
		tcp->tcp_time_wait_prev = tcp_time_wait->tcp_time_wait_tail;
	}
	tcp_time_wait->tcp_time_wait_tail = tcp;
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
}

/* ARGSUSED */
void
tcp_timewait_output(void *arg, mblk_t *mp, void *arg2)
{
	conn_t *connp = (conn_t *)arg;
	tcp_t *tcp = connp->conn_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp != NULL);
	if (tcp->tcp_state == TCPS_CLOSED) {
		return;
	}

	ASSERT((tcp->tcp_family == AF_INET &&
	    tcp->tcp_ipversion == IPV4_VERSION) ||
	    (tcp->tcp_family == AF_INET6 &&
	    (tcp->tcp_ipversion == IPV4_VERSION ||
	    tcp->tcp_ipversion == IPV6_VERSION)));
	ASSERT(!tcp->tcp_listener);

	TCP_STAT(tcps, tcp_time_wait_reap);
	ASSERT(TCP_IS_DETACHED(tcp));

	/*
	 * Because they have no upstream client to rebind or tcp_close()
	 * them later, we axe the connection here and now.
	 */
	tcp_close_detached(tcp);
}

/*
 * Remove cached/latched IPsec references.
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t *connp = tcp->tcp_connp;

	if (connp->conn_flags & IPCL_TCPCONN) {
		if (connp->conn_latch != NULL) {
			IPLATCH_REFRELE(connp->conn_latch,
			    connp->conn_netstack);
			connp->conn_latch = NULL;
		}
		if (connp->conn_policy != NULL) {
			IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
			connp->conn_policy = NULL;
		}
	}
}

/*
 * Cleanup before placing on the free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	char		*tcp_iphc;
	int		tcp_iphc_len;
	int		tcp_hdr_grown;
	tcp_sack_info_t	*tcp_sack_info;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);

	tcp_free(tcp);

	/* Release any SSL context */
	if (tcp->tcp_kssl_ent != NULL) {
		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
		tcp->tcp_kssl_ent = NULL;
	}

	if (tcp->tcp_kssl_ctx != NULL) {
		kssl_release_ctx(tcp->tcp_kssl_ctx);
		tcp->tcp_kssl_ctx = NULL;
	}
	tcp->tcp_kssl_pending = B_FALSE;

	conn_delete_ire(connp, NULL);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in the global hash list. We
	 * know the walkers can't get to this conn because we
	 * had set the CONDEMNED flag earlier and checked the reference
	 * under conn_lock, so a walker won't pick it, and when we
	 * do the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack and TCPS
	 * in which case it will cause the tcp_g_q_close and
	 * the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;
	TCPS_REFRELE(tcps);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_sack_info = tcp->tcp_sack_info;
	tcp_iphc = tcp->tcp_iphc;
	tcp_iphc_len = tcp->tcp_iphc_len;
	tcp_hdr_grown = tcp->tcp_hdr_grown;

	if (connp->conn_cred != NULL)
		crfree(connp->conn_cred);
	if (connp->conn_peercred != NULL)
		crfree(connp->conn_peercred);
	bzero(connp, sizeof (conn_t));
	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_sack_info = tcp_sack_info;
	tcp->tcp_iphc = tcp_iphc;
	tcp->tcp_iphc_len = tcp_iphc_len;
	tcp->tcp_hdr_grown = tcp_hdr_grown;

	tcp->tcp_connp = connp;

	connp->conn_tcp = tcp;
	connp->conn_flags = IPCL_TCPCONN;
	connp->conn_state_flags = CONN_INCIPIENT;
	connp->conn_ulp = IPPROTO_TCP;
	connp->conn_ref = 1;
}

/*
 * Blows away all tcps whose TIME_WAIT has expired. List traversal
 * is done forwards from the head.
 * This walks all stack instances since
 * tcp_time_wait remains global across all stacks.
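 *
 * The expiry test below, (now - tcp->tcp_time_wait_expire) < 0, relies
 * on signed two's complement wrap-around. For example, with a 32-bit
 * clock_t: if a tcp was stamped with tcp_time_wait_expire = 2147483600
 * (just below INT_MAX) and lbolt has since wrapped so that now is
 * -2147483640, the subtraction wraps back to +56 and the tcp is
 * correctly treated as 56 ticks past its expiry.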
 */
/* ARGSUSED */
void
tcp_time_wait_collector(void *arg)
{
	tcp_t *tcp;
	clock_t now;
	mblk_t *mp;
	conn_t *connp;
	kmutex_t *lock;
	boolean_t removed;

	squeue_t *sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp_time_wait->tcp_time_wait_tid = 0;

	if (tcp_time_wait->tcp_free_list != NULL &&
	    tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) {
		TCP_G_STAT(tcp_freelist_cleanup);
		while ((tcp = tcp_time_wait->tcp_free_list) != NULL) {
			tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
			tcp->tcp_time_wait_next = NULL;
			tcp_time_wait->tcp_free_list_cnt--;
			ASSERT(tcp->tcp_tcps == NULL);
			CONN_DEC_REF(tcp->tcp_connp);
		}
		ASSERT(tcp_time_wait->tcp_free_list_cnt == 0);
	}

	/*
	 * In order to reap time waits reliably, we should use a
	 * source of time that is not adjustable by the user -- hence
	 * the call to ddi_get_lbolt().
	 */
	now = ddi_get_lbolt();
	while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) {
		/*
		 * Compare times using modular arithmetic, since
		 * lbolt can wrap over.
		 */
		if ((now - tcp->tcp_time_wait_expire) < 0) {
			break;
		}

		removed = tcp_time_wait_remove(tcp, tcp_time_wait);
		ASSERT(removed);

		connp = tcp->tcp_connp;
		ASSERT(connp->conn_fanout != NULL);
		lock = &connp->conn_fanout->connf_lock;
		/*
		 * This is essentially a TW reclaim fast path optimization for
		 * performance where the timewait collector checks under the
		 * fanout lock (so that no one else can get access to the
		 * conn_t) that the refcnt is 2, i.e. one for TCP and one for
		 * the classifier hash list. If the ref count is indeed 2, we
		 * can just remove the conn under the fanout lock and avoid
		 * cleaning up the conn under the squeue, provided that
		 * clustering callbacks are not enabled. If clustering is
		 * enabled, we need to make the clustering callback before
		 * setting the CONDEMNED flag and after dropping all locks and
		 * so we forego this optimization and fall back to the slow
		 * path. Also please see the comments in tcp_closei_local
		 * regarding the refcnt logic.
		 *
		 * Since we are holding the tcp_time_wait_lock, it's better
		 * not to block on the fanout_lock because other connections
		 * can't add themselves to the time_wait list. So we do a
		 * tryenter instead of mutex_enter.
		 */
		if (mutex_tryenter(lock)) {
			mutex_enter(&connp->conn_lock);
			if ((connp->conn_ref == 2) &&
			    (cl_inet_disconnect == NULL)) {
				ipcl_hash_remove_locked(connp,
				    connp->conn_fanout);
				/*
				 * Set the CONDEMNED flag now itself so that
				 * the refcnt cannot increase due to any
				 * walker. But we have still not cleaned up
				 * conn_ire_cache.
This is still ok since 1798 * we are going to clean it up in tcp_cleanup 1799 * immediately and any interface unplumb 1800 * thread will wait till the ire is blown away 1801 */ 1802 connp->conn_state_flags |= CONN_CONDEMNED; 1803 mutex_exit(lock); 1804 mutex_exit(&connp->conn_lock); 1805 if (tcp_time_wait->tcp_free_list_cnt < 1806 tcp_free_list_max_cnt) { 1807 /* Add to head of tcp_free_list */ 1808 mutex_exit( 1809 &tcp_time_wait->tcp_time_wait_lock); 1810 tcp_cleanup(tcp); 1811 ASSERT(connp->conn_latch == NULL); 1812 ASSERT(connp->conn_policy == NULL); 1813 ASSERT(tcp->tcp_tcps == NULL); 1814 ASSERT(connp->conn_netstack == NULL); 1815 1816 mutex_enter( 1817 &tcp_time_wait->tcp_time_wait_lock); 1818 tcp->tcp_time_wait_next = 1819 tcp_time_wait->tcp_free_list; 1820 tcp_time_wait->tcp_free_list = tcp; 1821 tcp_time_wait->tcp_free_list_cnt++; 1822 continue; 1823 } else { 1824 /* Do not add to tcp_free_list */ 1825 mutex_exit( 1826 &tcp_time_wait->tcp_time_wait_lock); 1827 tcp_bind_hash_remove(tcp); 1828 conn_delete_ire(tcp->tcp_connp, NULL); 1829 tcp_ipsec_cleanup(tcp); 1830 CONN_DEC_REF(tcp->tcp_connp); 1831 } 1832 } else { 1833 CONN_INC_REF_LOCKED(connp); 1834 mutex_exit(lock); 1835 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1836 mutex_exit(&connp->conn_lock); 1837 /* 1838 * We can reuse the closemp here since conn has 1839 * detached (otherwise we wouldn't even be in 1840 * time_wait list). tcp_closemp_used can safely 1841 * be changed without taking a lock as no other 1842 * thread can concurrently access it at this 1843 * point in the connection lifecycle. 1844 */ 1845 1846 if (tcp->tcp_closemp.b_prev == NULL) 1847 tcp->tcp_closemp_used = B_TRUE; 1848 else 1849 cmn_err(CE_PANIC, 1850 "tcp_timewait_collector: " 1851 "concurrent use of tcp_closemp: " 1852 "connp %p tcp %p\n", (void *)connp, 1853 (void *)tcp); 1854 1855 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1856 mp = &tcp->tcp_closemp; 1857 squeue_fill(connp->conn_sqp, mp, 1858 tcp_timewait_output, connp, 1859 SQTAG_TCP_TIMEWAIT); 1860 } 1861 } else { 1862 mutex_enter(&connp->conn_lock); 1863 CONN_INC_REF_LOCKED(connp); 1864 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1865 mutex_exit(&connp->conn_lock); 1866 /* 1867 * We can reuse the closemp here since conn has 1868 * detached (otherwise we wouldn't even be in 1869 * time_wait list). tcp_closemp_used can safely 1870 * be changed without taking a lock as no other 1871 * thread can concurrently access it at this 1872 * point in the connection lifecycle. 1873 */ 1874 1875 if (tcp->tcp_closemp.b_prev == NULL) 1876 tcp->tcp_closemp_used = B_TRUE; 1877 else 1878 cmn_err(CE_PANIC, "tcp_timewait_collector: " 1879 "concurrent use of tcp_closemp: " 1880 "connp %p tcp %p\n", (void *)connp, 1881 (void *)tcp); 1882 1883 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 1884 mp = &tcp->tcp_closemp; 1885 squeue_fill(connp->conn_sqp, mp, 1886 tcp_timewait_output, connp, 0); 1887 } 1888 mutex_enter(&tcp_time_wait->tcp_time_wait_lock); 1889 } 1890 1891 if (tcp_time_wait->tcp_free_list != NULL) 1892 tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE; 1893 1894 tcp_time_wait->tcp_time_wait_tid = 1895 timeout(tcp_time_wait_collector, sqp, TCP_TIME_WAIT_DELAY); 1896 mutex_exit(&tcp_time_wait->tcp_time_wait_lock); 1897 } 1898 /* 1899 * Reply to a clients T_CONN_RES TPI message. This function 1900 * is used only for TLI/XTI listener. Sockfs sends T_CONN_RES 1901 * on the acceptor STREAM and processed in tcp_wput_accept(). 1902 * Read the block comment on top of tcp_conn_request(). 
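 *
 * For reference while reading the code below, the inbound request is a
 * standard TPI T_conn_res (a sketch of the _SUN_TPI_VERSION 2 layout;
 * see <sys/tihdr.h> for the authoritative definition):
 *
 *	struct T_conn_res {
 *		t_scalar_t	PRIM_type;	(T_CONN_RES)
 *		t_uscalar_t	ACCEPTOR_id;	(minor of the accepting
 *						 endpoint, or the listener's
 *						 own acceptor id)
 *		t_scalar_t	OPT_length;	(options to apply to eager)
 *		t_scalar_t	OPT_offset;
 *		t_scalar_t	SEQ_number;	(matches tcp_conn_req_seqnum
 *						 of exactly one eager)
 *	};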
1903 */ 1904 static void 1905 tcp_accept(tcp_t *listener, mblk_t *mp) 1906 { 1907 tcp_t *acceptor; 1908 tcp_t *eager; 1909 tcp_t *tcp; 1910 struct T_conn_res *tcr; 1911 t_uscalar_t acceptor_id; 1912 t_scalar_t seqnum; 1913 mblk_t *opt_mp = NULL; /* T_OPTMGMT_REQ messages */ 1914 mblk_t *ok_mp; 1915 mblk_t *mp1; 1916 tcp_stack_t *tcps = listener->tcp_tcps; 1917 1918 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) { 1919 tcp_err_ack(listener, mp, TPROTO, 0); 1920 return; 1921 } 1922 tcr = (struct T_conn_res *)mp->b_rptr; 1923 1924 /* 1925 * Under ILP32 the stream head points tcr->ACCEPTOR_id at the 1926 * read side queue of the streams device underneath us i.e. the 1927 * read side queue of 'ip'. Since we can't deference QUEUE_ptr we 1928 * look it up in the queue_hash. Under LP64 it sends down the 1929 * minor_t of the accepting endpoint. 1930 * 1931 * Once the acceptor/eager are modified (in tcp_accept_swap) the 1932 * fanout hash lock is held. 1933 * This prevents any thread from entering the acceptor queue from 1934 * below (since it has not been hard bound yet i.e. any inbound 1935 * packets will arrive on the listener or default tcp queue and 1936 * go through tcp_lookup). 1937 * The CONN_INC_REF will prevent the acceptor from closing. 1938 * 1939 * XXX It is still possible for a tli application to send down data 1940 * on the accepting stream while another thread calls t_accept. 1941 * This should not be a problem for well-behaved applications since 1942 * the T_OK_ACK is sent after the queue swapping is completed. 1943 * 1944 * If the accepting fd is the same as the listening fd, avoid 1945 * queue hash lookup since that will return an eager listener in a 1946 * already established state. 1947 */ 1948 acceptor_id = tcr->ACCEPTOR_id; 1949 mutex_enter(&listener->tcp_eager_lock); 1950 if (listener->tcp_acceptor_id == acceptor_id) { 1951 eager = listener->tcp_eager_next_q; 1952 /* only count how many T_CONN_INDs so don't count q0 */ 1953 if ((listener->tcp_conn_req_cnt_q != 1) || 1954 (eager->tcp_conn_req_seqnum != tcr->SEQ_number)) { 1955 mutex_exit(&listener->tcp_eager_lock); 1956 tcp_err_ack(listener, mp, TBADF, 0); 1957 return; 1958 } 1959 if (listener->tcp_conn_req_cnt_q0 != 0) { 1960 /* Throw away all the eagers on q0. */ 1961 tcp_eager_cleanup(listener, 1); 1962 } 1963 if (listener->tcp_syn_defense) { 1964 listener->tcp_syn_defense = B_FALSE; 1965 if (listener->tcp_ip_addr_cache != NULL) { 1966 kmem_free(listener->tcp_ip_addr_cache, 1967 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 1968 listener->tcp_ip_addr_cache = NULL; 1969 } 1970 } 1971 /* 1972 * Transfer tcp_conn_req_max to the eager so that when 1973 * a disconnect occurs we can revert the endpoint to the 1974 * listen state. 1975 */ 1976 eager->tcp_conn_req_max = listener->tcp_conn_req_max; 1977 ASSERT(listener->tcp_conn_req_cnt_q0 == 0); 1978 /* 1979 * Get a reference on the acceptor just like the 1980 * tcp_acceptor_hash_lookup below. 1981 */ 1982 acceptor = listener; 1983 CONN_INC_REF(acceptor->tcp_connp); 1984 } else { 1985 acceptor = tcp_acceptor_hash_lookup(acceptor_id, tcps); 1986 if (acceptor == NULL) { 1987 if (listener->tcp_debug) { 1988 (void) strlog(TCP_MOD_ID, 0, 1, 1989 SL_ERROR|SL_TRACE, 1990 "tcp_accept: did not find acceptor 0x%x\n", 1991 acceptor_id); 1992 } 1993 mutex_exit(&listener->tcp_eager_lock); 1994 tcp_err_ack(listener, mp, TPROVMISMATCH, 0); 1995 return; 1996 } 1997 /* 1998 * Verify acceptor state. The acceptable states for an acceptor 1999 * include TCPS_IDLE and TCPS_BOUND. 
2000 */ 2001 switch (acceptor->tcp_state) { 2002 case TCPS_IDLE: 2003 /* FALLTHRU */ 2004 case TCPS_BOUND: 2005 break; 2006 default: 2007 CONN_DEC_REF(acceptor->tcp_connp); 2008 mutex_exit(&listener->tcp_eager_lock); 2009 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2010 return; 2011 } 2012 } 2013 2014 /* The listener must be in TCPS_LISTEN */ 2015 if (listener->tcp_state != TCPS_LISTEN) { 2016 CONN_DEC_REF(acceptor->tcp_connp); 2017 mutex_exit(&listener->tcp_eager_lock); 2018 tcp_err_ack(listener, mp, TOUTSTATE, 0); 2019 return; 2020 } 2021 2022 /* 2023 * Rendezvous with an eager connection request packet hanging off 2024 * 'tcp' that has the 'seqnum' tag. We tagged the detached open 2025 * tcp structure when the connection packet arrived in 2026 * tcp_conn_request(). 2027 */ 2028 seqnum = tcr->SEQ_number; 2029 eager = listener; 2030 do { 2031 eager = eager->tcp_eager_next_q; 2032 if (eager == NULL) { 2033 CONN_DEC_REF(acceptor->tcp_connp); 2034 mutex_exit(&listener->tcp_eager_lock); 2035 tcp_err_ack(listener, mp, TBADSEQ, 0); 2036 return; 2037 } 2038 } while (eager->tcp_conn_req_seqnum != seqnum); 2039 mutex_exit(&listener->tcp_eager_lock); 2040 2041 /* 2042 * At this point, both acceptor and listener have 2 ref 2043 * that they begin with. Acceptor has one additional ref 2044 * we placed in lookup while listener has 3 additional 2045 * ref for being behind the squeue (tcp_accept() is 2046 * done on listener's squeue); being in classifier hash; 2047 * and eager's ref on listener. 2048 */ 2049 ASSERT(listener->tcp_connp->conn_ref >= 5); 2050 ASSERT(acceptor->tcp_connp->conn_ref >= 3); 2051 2052 /* 2053 * The eager at this point is set in its own squeue and 2054 * could easily have been killed (tcp_accept_finish will 2055 * deal with that) because of a TH_RST so we can only 2056 * ASSERT for a single ref. 2057 */ 2058 ASSERT(eager->tcp_connp->conn_ref >= 1); 2059 2060 /* Pre allocate the stroptions mblk also */ 2061 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 2062 if (opt_mp == NULL) { 2063 CONN_DEC_REF(acceptor->tcp_connp); 2064 CONN_DEC_REF(eager->tcp_connp); 2065 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2066 return; 2067 } 2068 DB_TYPE(opt_mp) = M_SETOPTS; 2069 opt_mp->b_wptr += sizeof (struct stroptions); 2070 2071 /* 2072 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 2073 * from listener to acceptor. The message is chained on opt_mp 2074 * which will be sent onto eager's squeue. 2075 */ 2076 if (listener->tcp_bound_if != 0) { 2077 /* allocate optmgmt req */ 2078 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2079 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 2080 sizeof (int)); 2081 if (mp1 != NULL) 2082 linkb(opt_mp, mp1); 2083 } 2084 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 2085 uint_t on = 1; 2086 2087 /* allocate optmgmt req */ 2088 mp1 = tcp_setsockopt_mp(IPPROTO_IPV6, 2089 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 2090 if (mp1 != NULL) 2091 linkb(opt_mp, mp1); 2092 } 2093 2094 /* Re-use mp1 to hold a copy of mp, in case reallocb fails */ 2095 if ((mp1 = copymsg(mp)) == NULL) { 2096 CONN_DEC_REF(acceptor->tcp_connp); 2097 CONN_DEC_REF(eager->tcp_connp); 2098 freemsg(opt_mp); 2099 tcp_err_ack(listener, mp, TSYSERR, ENOMEM); 2100 return; 2101 } 2102 2103 tcr = (struct T_conn_res *)mp1->b_rptr; 2104 2105 /* 2106 * This is an expanded version of mi_tpi_ok_ack_alloc() 2107 * which allocates a larger mblk and appends the new 2108 * local address to the ok_ack. The address is copied by 2109 * soaccept() for getsockname(). 
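	 *
	 * The resulting ok_mp is therefore laid out roughly as below (an
	 * illustrative sketch for the AF_INET case; the sin6_t case only
	 * differs in the size and content of the trailing address):
	 *
	 *	+------------------+------------------------------+
	 *	| struct T_ok_ack  | sin_t (local address + port) |
	 *	+------------------+------------------------------+
	 *	b_rptr                                            b_wptr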
2110 */ 2111 { 2112 int extra; 2113 2114 extra = (eager->tcp_family == AF_INET) ? 2115 sizeof (sin_t) : sizeof (sin6_t); 2116 2117 /* 2118 * Try to re-use mp, if possible. Otherwise, allocate 2119 * an mblk and return it as ok_mp. In any case, mp 2120 * is no longer usable upon return. 2121 */ 2122 if ((ok_mp = mi_tpi_ok_ack_alloc_extra(mp, extra)) == NULL) { 2123 CONN_DEC_REF(acceptor->tcp_connp); 2124 CONN_DEC_REF(eager->tcp_connp); 2125 freemsg(opt_mp); 2126 /* Original mp has been freed by now, so use mp1 */ 2127 tcp_err_ack(listener, mp1, TSYSERR, ENOMEM); 2128 return; 2129 } 2130 2131 mp = NULL; /* We should never use mp after this point */ 2132 2133 switch (extra) { 2134 case sizeof (sin_t): { 2135 sin_t *sin = (sin_t *)ok_mp->b_wptr; 2136 2137 ok_mp->b_wptr += extra; 2138 sin->sin_family = AF_INET; 2139 sin->sin_port = eager->tcp_lport; 2140 sin->sin_addr.s_addr = 2141 eager->tcp_ipha->ipha_src; 2142 break; 2143 } 2144 case sizeof (sin6_t): { 2145 sin6_t *sin6 = (sin6_t *)ok_mp->b_wptr; 2146 2147 ok_mp->b_wptr += extra; 2148 sin6->sin6_family = AF_INET6; 2149 sin6->sin6_port = eager->tcp_lport; 2150 if (eager->tcp_ipversion == IPV4_VERSION) { 2151 sin6->sin6_flowinfo = 0; 2152 IN6_IPADDR_TO_V4MAPPED( 2153 eager->tcp_ipha->ipha_src, 2154 &sin6->sin6_addr); 2155 } else { 2156 ASSERT(eager->tcp_ip6h != NULL); 2157 sin6->sin6_flowinfo = 2158 eager->tcp_ip6h->ip6_vcf & 2159 ~IPV6_VERS_AND_FLOW_MASK; 2160 sin6->sin6_addr = 2161 eager->tcp_ip6h->ip6_src; 2162 } 2163 sin6->sin6_scope_id = 0; 2164 sin6->__sin6_src_id = 0; 2165 break; 2166 } 2167 default: 2168 break; 2169 } 2170 ASSERT(ok_mp->b_wptr <= ok_mp->b_datap->db_lim); 2171 } 2172 2173 /* 2174 * If there are no options we know that the T_CONN_RES will 2175 * succeed. However, we can't send the T_OK_ACK upstream until 2176 * the tcp_accept_swap is done since it would be dangerous to 2177 * let the application start using the new fd prior to the swap. 2178 */ 2179 tcp_accept_swap(listener, acceptor, eager); 2180 2181 /* 2182 * tcp_accept_swap unlinks eager from listener but does not drop 2183 * the eager's reference on the listener. 2184 */ 2185 ASSERT(eager->tcp_listener == NULL); 2186 ASSERT(listener->tcp_connp->conn_ref >= 5); 2187 2188 /* 2189 * The eager is now associated with its own queue. Insert in 2190 * the hash so that the connection can be reused for a future 2191 * T_CONN_RES. 2192 */ 2193 tcp_acceptor_hash_insert(acceptor_id, eager); 2194 2195 /* 2196 * We now do the processing of options with T_CONN_RES. 2197 * We delay till now since we wanted to have queue to pass to 2198 * option processing routines that points back to the right 2199 * instance structure which does not happen until after 2200 * tcp_accept_swap(). 2201 * 2202 * Note: 2203 * The sanity of the logic here assumes that whatever options 2204 * are appropriate to inherit from listner=>eager are done 2205 * before this point, and whatever were to be overridden (or not) 2206 * in transfer logic from eager=>acceptor in tcp_accept_swap(). 2207 * [ Warning: acceptor endpoint can have T_OPTMGMT_REQ done to it 2208 * before its ACCEPTOR_id comes down in T_CONN_RES ] 2209 * This may not be true at this point in time but can be fixed 2210 * independently. This option processing code starts with 2211 * the instantiated acceptor instance and the final queue at 2212 * this point. 
2213 */ 2214 2215 if (tcr->OPT_length != 0) { 2216 /* Options to process */ 2217 int t_error = 0; 2218 int sys_error = 0; 2219 int do_disconnect = 0; 2220 2221 if (tcp_conprim_opt_process(eager, mp1, 2222 &do_disconnect, &t_error, &sys_error) < 0) { 2223 eager->tcp_accept_error = 1; 2224 if (do_disconnect) { 2225 /* 2226 * An option failed which does not allow 2227 * connection to be accepted. 2228 * 2229 * We allow T_CONN_RES to succeed and 2230 * put a T_DISCON_IND on the eager queue. 2231 */ 2232 ASSERT(t_error == 0 && sys_error == 0); 2233 eager->tcp_send_discon_ind = 1; 2234 } else { 2235 ASSERT(t_error != 0); 2236 freemsg(ok_mp); 2237 /* 2238 * Original mp was either freed or set 2239 * to ok_mp above, so use mp1 instead. 2240 */ 2241 tcp_err_ack(listener, mp1, t_error, sys_error); 2242 goto finish; 2243 } 2244 } 2245 /* 2246 * Most likely success in setting options (except if 2247 * eager->tcp_send_discon_ind set). 2248 * mp1 option buffer represented by OPT_length/offset 2249 * potentially modified and contains results of setting 2250 * options at this point 2251 */ 2252 } 2253 2254 /* We no longer need mp1, since all options processing has passed */ 2255 freemsg(mp1); 2256 2257 putnext(listener->tcp_rq, ok_mp); 2258 2259 mutex_enter(&listener->tcp_eager_lock); 2260 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 2261 tcp_t *tail; 2262 mblk_t *conn_ind; 2263 2264 /* 2265 * This path should not be executed if listener and 2266 * acceptor streams are the same. 2267 */ 2268 ASSERT(listener != acceptor); 2269 2270 tcp = listener->tcp_eager_prev_q0; 2271 /* 2272 * listener->tcp_eager_prev_q0 points to the TAIL of the 2273 * deferred T_conn_ind queue. We need to get to the head of 2274 * the queue in order to send up T_conn_ind the same order as 2275 * how the 3WHS is completed. 2276 */ 2277 while (tcp != listener) { 2278 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0) 2279 break; 2280 else 2281 tcp = tcp->tcp_eager_prev_q0; 2282 } 2283 ASSERT(tcp != listener); 2284 conn_ind = tcp->tcp_conn.tcp_eager_conn_ind; 2285 ASSERT(conn_ind != NULL); 2286 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 2287 2288 /* Move from q0 to q */ 2289 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 2290 listener->tcp_conn_req_cnt_q0--; 2291 listener->tcp_conn_req_cnt_q++; 2292 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 2293 tcp->tcp_eager_prev_q0; 2294 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 2295 tcp->tcp_eager_next_q0; 2296 tcp->tcp_eager_prev_q0 = NULL; 2297 tcp->tcp_eager_next_q0 = NULL; 2298 tcp->tcp_conn_def_q0 = B_FALSE; 2299 2300 /* Make sure the tcp isn't in the list of droppables */ 2301 ASSERT(tcp->tcp_eager_next_drop_q0 == NULL && 2302 tcp->tcp_eager_prev_drop_q0 == NULL); 2303 2304 /* 2305 * Insert at end of the queue because sockfs sends 2306 * down T_CONN_RES in chronological order. Leaving 2307 * the older conn indications at front of the queue 2308 * helps reducing search time. 2309 */ 2310 tail = listener->tcp_eager_last_q; 2311 if (tail != NULL) 2312 tail->tcp_eager_next_q = tcp; 2313 else 2314 listener->tcp_eager_next_q = tcp; 2315 listener->tcp_eager_last_q = tcp; 2316 tcp->tcp_eager_next_q = NULL; 2317 mutex_exit(&listener->tcp_eager_lock); 2318 putnext(tcp->tcp_rq, conn_ind); 2319 } else { 2320 mutex_exit(&listener->tcp_eager_lock); 2321 } 2322 2323 /* 2324 * Done with the acceptor - free it 2325 * 2326 * Note: from this point on, no access to listener should be made 2327 * as listener can be equal to acceptor. 
2328 */ 2329 finish: 2330 ASSERT(acceptor->tcp_detached); 2331 ASSERT(tcps->tcps_g_q != NULL); 2332 acceptor->tcp_rq = tcps->tcps_g_q; 2333 acceptor->tcp_wq = WR(tcps->tcps_g_q); 2334 (void) tcp_clean_death(acceptor, 0, 2); 2335 CONN_DEC_REF(acceptor->tcp_connp); 2336 2337 /* 2338 * In case we already received a FIN we have to make tcp_rput send 2339 * the ordrel_ind. This will also send up a window update if the window 2340 * has opened up. 2341 * 2342 * In the normal case of a successful connection acceptance 2343 * we give the O_T_BIND_REQ to the read side put procedure as an 2344 * indication that this was just accepted. This tells tcp_rput to 2345 * pass up any data queued in tcp_rcv_list. 2346 * 2347 * In the fringe case where options sent with T_CONN_RES failed and 2348 * we required, we would be indicating a T_DISCON_IND to blow 2349 * away this connection. 2350 */ 2351 2352 /* 2353 * XXX: we currently have a problem if XTI application closes the 2354 * acceptor stream in between. This problem exists in on10-gate also 2355 * and is well know but nothing can be done short of major rewrite 2356 * to fix it. Now it is possible to take care of it by assigning TLI/XTI 2357 * eager same squeue as listener (we can distinguish non socket 2358 * listeners at the time of handling a SYN in tcp_conn_request) 2359 * and do most of the work that tcp_accept_finish does here itself 2360 * and then get behind the acceptor squeue to access the acceptor 2361 * queue. 2362 */ 2363 /* 2364 * We already have a ref on tcp so no need to do one before squeue_fill 2365 */ 2366 squeue_fill(eager->tcp_connp->conn_sqp, opt_mp, 2367 tcp_accept_finish, eager->tcp_connp, SQTAG_TCP_ACCEPT_FINISH); 2368 } 2369 2370 /* 2371 * Swap information between the eager and acceptor for a TLI/XTI client. 2372 * The sockfs accept is done on the acceptor stream and control goes 2373 * through tcp_wput_accept() and tcp_accept()/tcp_accept_swap() is not 2374 * called. In either case, both the eager and listener are in their own 2375 * perimeter (squeue) and the code has to deal with potential race. 2376 * 2377 * See the block comment on top of tcp_accept() and tcp_wput_accept(). 2378 */ 2379 static void 2380 tcp_accept_swap(tcp_t *listener, tcp_t *acceptor, tcp_t *eager) 2381 { 2382 conn_t *econnp, *aconnp; 2383 2384 ASSERT(eager->tcp_rq == listener->tcp_rq); 2385 ASSERT(eager->tcp_detached && !acceptor->tcp_detached); 2386 ASSERT(!eager->tcp_hard_bound); 2387 ASSERT(!TCP_IS_SOCKET(acceptor)); 2388 ASSERT(!TCP_IS_SOCKET(eager)); 2389 ASSERT(!TCP_IS_SOCKET(listener)); 2390 2391 acceptor->tcp_detached = B_TRUE; 2392 /* 2393 * To permit stream re-use by TLI/XTI, the eager needs a copy of 2394 * the acceptor id. 2395 */ 2396 eager->tcp_acceptor_id = acceptor->tcp_acceptor_id; 2397 2398 /* remove eager from listen list... */ 2399 mutex_enter(&listener->tcp_eager_lock); 2400 tcp_eager_unlink(eager); 2401 ASSERT(eager->tcp_eager_next_q == NULL && 2402 eager->tcp_eager_last_q == NULL); 2403 ASSERT(eager->tcp_eager_next_q0 == NULL && 2404 eager->tcp_eager_prev_q0 == NULL); 2405 mutex_exit(&listener->tcp_eager_lock); 2406 eager->tcp_rq = acceptor->tcp_rq; 2407 eager->tcp_wq = acceptor->tcp_wq; 2408 2409 econnp = eager->tcp_connp; 2410 aconnp = acceptor->tcp_connp; 2411 2412 eager->tcp_rq->q_ptr = econnp; 2413 eager->tcp_wq->q_ptr = econnp; 2414 2415 /* 2416 * In the TLI/XTI loopback case, we are inside the listener's squeue, 2417 * which might be a different squeue from our peer TCP instance. 
 * For TCP Fusion, the peer expects that whenever tcp_detached is
 * clear, our TCP queues point to the acceptor's queues. Thus, use
 * membar_producer() to ensure that the assignments of tcp_rq/tcp_wq
 * above reach global visibility prior to the clearing of tcp_detached.
 */
	membar_producer();
	eager->tcp_detached = B_FALSE;

	ASSERT(eager->tcp_ack_tid == 0);

	econnp->conn_dev = aconnp->conn_dev;
	if (eager->tcp_cred != NULL)
		crfree(eager->tcp_cred);
	eager->tcp_cred = econnp->conn_cred = aconnp->conn_cred;
	ASSERT(econnp->conn_netstack == aconnp->conn_netstack);
	ASSERT(eager->tcp_tcps == acceptor->tcp_tcps);

	aconnp->conn_cred = NULL;

	econnp->conn_zoneid = aconnp->conn_zoneid;
	econnp->conn_allzones = aconnp->conn_allzones;

	econnp->conn_mac_exempt = aconnp->conn_mac_exempt;
	aconnp->conn_mac_exempt = B_FALSE;

	ASSERT(aconnp->conn_peercred == NULL);

	/* Do the IPC initialization */
	CONN_INC_REF(econnp);

	econnp->conn_multicast_loop = aconnp->conn_multicast_loop;
	econnp->conn_af_isv6 = aconnp->conn_af_isv6;
	econnp->conn_pkt_isv6 = aconnp->conn_pkt_isv6;
	econnp->conn_ulp = aconnp->conn_ulp;

	/* Done with the old IPC. Drop its ref on its connp */
	CONN_DEC_REF(aconnp);
}


/*
 * Adapt to the information, such as rtt and rtt_sd, provided from the
 * ire cached in conn_cache_ire. If no ire is cached, do an ire lookup.
 *
 * Checks for multicast and broadcast destination address.
 * Returns zero on failure; non-zero if ok.
 *
 * Note that the MSS calculation here is based on the info given in
 * the IRE. We do not do any calculation based on TCP options. They
 * will be handled in tcp_rput_other() and tcp_rput_data() when TCP
 * knows which options to use.
 *
 * Note on how TCP gets its parameters for a connection.
 *
 * When a tcp_t structure is allocated, it gets all the default parameters.
 * In tcp_adapt_ire(), it gets those metric parameters, like rtt, rtt_sd,
 * spipe, rpipe, ... from the route metrics. The route metric overrides the
 * default. But if there is an associated tcp_host_param, it will override
 * the metrics.
 *
 * An incoming SYN with a multicast or broadcast destination address is
 * dropped in one of two places.
 *
 * 1. If the packet was received over the wire it is dropped in
 * ip_rput_process_broadcast()
 *
 * 2. If the packet was received through internal IP loopback, i.e. the packet
 * was generated and received on the same machine, it is dropped in
 * ip_wput_local()
 *
 * An incoming SYN with a multicast or broadcast source address is always
 * dropped in tcp_adapt_ire. The same logic in tcp_adapt_ire also serves to
 * reject an attempt to connect to a broadcast or multicast (destination)
 * address.
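 *
 * In other words, the effective precedence for each such parameter is
 * (a condensed sketch of the logic in this function, including the
 * tcp_hsp_lookup() block near its end, not a separate code path):
 *
 *	value = default from tcp_init_values();
 *	if (route metric is set)	value = ire_uinfo->iulp_<metric>;
 *	if (tcp_host_param entry found)	value = hsp->tcp_hsp_<param>;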
2492 */ 2493 static int 2494 tcp_adapt_ire(tcp_t *tcp, mblk_t *ire_mp) 2495 { 2496 tcp_hsp_t *hsp; 2497 ire_t *ire; 2498 ire_t *sire = NULL; 2499 iulp_t *ire_uinfo = NULL; 2500 uint32_t mss_max; 2501 uint32_t mss; 2502 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 2503 conn_t *connp = tcp->tcp_connp; 2504 boolean_t ire_cacheable = B_FALSE; 2505 zoneid_t zoneid = connp->conn_zoneid; 2506 int match_flags = MATCH_IRE_RECURSIVE | MATCH_IRE_DEFAULT | 2507 MATCH_IRE_SECATTR; 2508 ts_label_t *tsl = crgetlabel(CONN_CRED(connp)); 2509 ill_t *ill = NULL; 2510 boolean_t incoming = (ire_mp == NULL); 2511 tcp_stack_t *tcps = tcp->tcp_tcps; 2512 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 2513 2514 ASSERT(connp->conn_ire_cache == NULL); 2515 2516 if (tcp->tcp_ipversion == IPV4_VERSION) { 2517 2518 if (CLASSD(tcp->tcp_connp->conn_rem)) { 2519 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 2520 return (0); 2521 } 2522 /* 2523 * If IP_NEXTHOP is set, then look for an IRE_CACHE 2524 * for the destination with the nexthop as gateway. 2525 * ire_ctable_lookup() is used because this particular 2526 * ire, if it exists, will be marked private. 2527 * If that is not available, use the interface ire 2528 * for the nexthop. 2529 * 2530 * TSol: tcp_update_label will detect label mismatches based 2531 * only on the destination's label, but that would not 2532 * detect label mismatches based on the security attributes 2533 * of routes or next hop gateway. Hence we need to pass the 2534 * label to ire_ftable_lookup below in order to locate the 2535 * right prefix (and/or) ire cache. Similarly we also need 2536 * pass the label to the ire_cache_lookup below to locate 2537 * the right ire that also matches on the label. 2538 */ 2539 if (tcp->tcp_connp->conn_nexthop_set) { 2540 ire = ire_ctable_lookup(tcp->tcp_connp->conn_rem, 2541 tcp->tcp_connp->conn_nexthop_v4, 0, NULL, zoneid, 2542 tsl, MATCH_IRE_MARK_PRIVATE_ADDR | MATCH_IRE_GW, 2543 ipst); 2544 if (ire == NULL) { 2545 ire = ire_ftable_lookup( 2546 tcp->tcp_connp->conn_nexthop_v4, 2547 0, 0, IRE_INTERFACE, NULL, NULL, zoneid, 0, 2548 tsl, match_flags, ipst); 2549 if (ire == NULL) 2550 return (0); 2551 } else { 2552 ire_uinfo = &ire->ire_uinfo; 2553 } 2554 } else { 2555 ire = ire_cache_lookup(tcp->tcp_connp->conn_rem, 2556 zoneid, tsl, ipst); 2557 if (ire != NULL) { 2558 ire_cacheable = B_TRUE; 2559 ire_uinfo = (ire_mp != NULL) ? 2560 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2561 &ire->ire_uinfo; 2562 2563 } else { 2564 if (ire_mp == NULL) { 2565 ire = ire_ftable_lookup( 2566 tcp->tcp_connp->conn_rem, 2567 0, 0, 0, NULL, &sire, zoneid, 0, 2568 tsl, (MATCH_IRE_RECURSIVE | 2569 MATCH_IRE_DEFAULT), ipst); 2570 if (ire == NULL) 2571 return (0); 2572 ire_uinfo = (sire != NULL) ? 2573 &sire->ire_uinfo : 2574 &ire->ire_uinfo; 2575 } else { 2576 ire = (ire_t *)ire_mp->b_rptr; 2577 ire_uinfo = 2578 &((ire_t *) 2579 ire_mp->b_rptr)->ire_uinfo; 2580 } 2581 } 2582 } 2583 ASSERT(ire != NULL); 2584 2585 if ((ire->ire_src_addr == INADDR_ANY) || 2586 (ire->ire_type & IRE_BROADCAST)) { 2587 /* 2588 * ire->ire_mp is non null when ire_mp passed in is used 2589 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2590 */ 2591 if (ire->ire_mp == NULL) 2592 ire_refrele(ire); 2593 if (sire != NULL) 2594 ire_refrele(sire); 2595 return (0); 2596 } 2597 2598 if (tcp->tcp_ipha->ipha_src == INADDR_ANY) { 2599 ipaddr_t src_addr; 2600 2601 /* 2602 * ip_bind_connected() has stored the correct source 2603 * address in conn_src. 
2604 */ 2605 src_addr = tcp->tcp_connp->conn_src; 2606 tcp->tcp_ipha->ipha_src = src_addr; 2607 /* 2608 * Copy of the src addr. in tcp_t is needed 2609 * for the lookup funcs. 2610 */ 2611 IN6_IPADDR_TO_V4MAPPED(src_addr, &tcp->tcp_ip_src_v6); 2612 } 2613 /* 2614 * Set the fragment bit so that IP will tell us if the MTU 2615 * should change. IP tells us the latest setting of 2616 * ip_path_mtu_discovery through ire_frag_flag. 2617 */ 2618 if (ipst->ips_ip_path_mtu_discovery) { 2619 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 2620 htons(IPH_DF); 2621 } 2622 /* 2623 * If ire_uinfo is NULL, this is the IRE_INTERFACE case 2624 * for IP_NEXTHOP. No cache ire has been found for the 2625 * destination and we are working with the nexthop's 2626 * interface ire. Since we need to forward all packets 2627 * to the nexthop first, we "blindly" set tcp_localnet 2628 * to false, eventhough the destination may also be 2629 * onlink. 2630 */ 2631 if (ire_uinfo == NULL) 2632 tcp->tcp_localnet = 0; 2633 else 2634 tcp->tcp_localnet = (ire->ire_gateway_addr == 0); 2635 } else { 2636 /* 2637 * For incoming connection ire_mp = NULL 2638 * For outgoing connection ire_mp != NULL 2639 * Technically we should check conn_incoming_ill 2640 * when ire_mp is NULL and conn_outgoing_ill when 2641 * ire_mp is non-NULL. But this is performance 2642 * critical path and for IPV*_BOUND_IF, outgoing 2643 * and incoming ill are always set to the same value. 2644 */ 2645 ill_t *dst_ill = NULL; 2646 ipif_t *dst_ipif = NULL; 2647 2648 ASSERT(connp->conn_outgoing_ill == connp->conn_incoming_ill); 2649 2650 if (connp->conn_outgoing_ill != NULL) { 2651 /* Outgoing or incoming path */ 2652 int err; 2653 2654 dst_ill = conn_get_held_ill(connp, 2655 &connp->conn_outgoing_ill, &err); 2656 if (err == ILL_LOOKUP_FAILED || dst_ill == NULL) { 2657 ip1dbg(("tcp_adapt_ire: ill_lookup failed\n")); 2658 return (0); 2659 } 2660 match_flags |= MATCH_IRE_ILL; 2661 dst_ipif = dst_ill->ill_ipif; 2662 } 2663 ire = ire_ctable_lookup_v6(&tcp->tcp_connp->conn_remv6, 2664 0, 0, dst_ipif, zoneid, tsl, match_flags, ipst); 2665 2666 if (ire != NULL) { 2667 ire_cacheable = B_TRUE; 2668 ire_uinfo = (ire_mp != NULL) ? 2669 &((ire_t *)ire_mp->b_rptr)->ire_uinfo: 2670 &ire->ire_uinfo; 2671 } else { 2672 if (ire_mp == NULL) { 2673 ire = ire_ftable_lookup_v6( 2674 &tcp->tcp_connp->conn_remv6, 2675 0, 0, 0, dst_ipif, &sire, zoneid, 2676 0, tsl, match_flags, ipst); 2677 if (ire == NULL) { 2678 if (dst_ill != NULL) 2679 ill_refrele(dst_ill); 2680 return (0); 2681 } 2682 ire_uinfo = (sire != NULL) ? &sire->ire_uinfo : 2683 &ire->ire_uinfo; 2684 } else { 2685 ire = (ire_t *)ire_mp->b_rptr; 2686 ire_uinfo = 2687 &((ire_t *)ire_mp->b_rptr)->ire_uinfo; 2688 } 2689 } 2690 if (dst_ill != NULL) 2691 ill_refrele(dst_ill); 2692 2693 ASSERT(ire != NULL); 2694 ASSERT(ire_uinfo != NULL); 2695 2696 if (IN6_IS_ADDR_UNSPECIFIED(&ire->ire_src_addr_v6) || 2697 IN6_IS_ADDR_MULTICAST(&ire->ire_addr_v6)) { 2698 /* 2699 * ire->ire_mp is non null when ire_mp passed in is used 2700 * ire->ire_mp is set in ip_bind_insert_ire[_v6](). 2701 */ 2702 if (ire->ire_mp == NULL) 2703 ire_refrele(ire); 2704 if (sire != NULL) 2705 ire_refrele(sire); 2706 return (0); 2707 } 2708 2709 if (IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) { 2710 in6_addr_t src_addr; 2711 2712 /* 2713 * ip_bind_connected_v6() has stored the correct source 2714 * address per IPv6 addr. selection policy in 2715 * conn_src_v6. 
2716 */ 2717 src_addr = tcp->tcp_connp->conn_srcv6; 2718 2719 tcp->tcp_ip6h->ip6_src = src_addr; 2720 /* 2721 * Copy of the src addr. in tcp_t is needed 2722 * for the lookup funcs. 2723 */ 2724 tcp->tcp_ip_src_v6 = src_addr; 2725 ASSERT(IN6_ARE_ADDR_EQUAL(&tcp->tcp_ip6h->ip6_src, 2726 &connp->conn_srcv6)); 2727 } 2728 tcp->tcp_localnet = 2729 IN6_IS_ADDR_UNSPECIFIED(&ire->ire_gateway_addr_v6); 2730 } 2731 2732 /* 2733 * This allows applications to fail quickly when connections are made 2734 * to dead hosts. Hosts can be labeled dead by adding a reject route 2735 * with both the RTF_REJECT and RTF_PRIVATE flags set. 2736 */ 2737 if ((ire->ire_flags & RTF_REJECT) && 2738 (ire->ire_flags & RTF_PRIVATE)) 2739 goto error; 2740 2741 /* 2742 * Make use of the cached rtt and rtt_sd values to calculate the 2743 * initial RTO. Note that they are already initialized in 2744 * tcp_init_values(). 2745 * If ire_uinfo is NULL, i.e., we do not have a cache ire for 2746 * IP_NEXTHOP, but instead are using the interface ire for the 2747 * nexthop, then we do not use the ire_uinfo from that ire to 2748 * do any initializations. 2749 */ 2750 if (ire_uinfo != NULL) { 2751 if (ire_uinfo->iulp_rtt != 0) { 2752 clock_t rto; 2753 2754 tcp->tcp_rtt_sa = ire_uinfo->iulp_rtt; 2755 tcp->tcp_rtt_sd = ire_uinfo->iulp_rtt_sd; 2756 rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 2757 tcps->tcps_rexmit_interval_extra + 2758 (tcp->tcp_rtt_sa >> 5); 2759 2760 if (rto > tcps->tcps_rexmit_interval_max) { 2761 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 2762 } else if (rto < tcps->tcps_rexmit_interval_min) { 2763 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 2764 } else { 2765 tcp->tcp_rto = rto; 2766 } 2767 } 2768 if (ire_uinfo->iulp_ssthresh != 0) 2769 tcp->tcp_cwnd_ssthresh = ire_uinfo->iulp_ssthresh; 2770 else 2771 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 2772 if (ire_uinfo->iulp_spipe > 0) { 2773 tcp->tcp_xmit_hiwater = MIN(ire_uinfo->iulp_spipe, 2774 tcps->tcps_max_buf); 2775 if (tcps->tcps_snd_lowat_fraction != 0) 2776 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2777 tcps->tcps_snd_lowat_fraction; 2778 (void) tcp_maxpsz_set(tcp, B_TRUE); 2779 } 2780 /* 2781 * Note that up till now, acceptor always inherits receive 2782 * window from the listener. But if there is a metrics 2783 * associated with a host, we should use that instead of 2784 * inheriting it from listener. Thus we need to pass this 2785 * info back to the caller. 2786 */ 2787 if (ire_uinfo->iulp_rpipe > 0) { 2788 tcp->tcp_rwnd = MIN(ire_uinfo->iulp_rpipe, 2789 tcps->tcps_max_buf); 2790 } 2791 2792 if (ire_uinfo->iulp_rtomax > 0) { 2793 tcp->tcp_second_timer_threshold = 2794 ire_uinfo->iulp_rtomax; 2795 } 2796 2797 /* 2798 * Use the metric option settings, iulp_tstamp_ok and 2799 * iulp_wscale_ok, only for active open. What this means 2800 * is that if the other side uses timestamp or window 2801 * scale option, TCP will also use those options. That 2802 * is for passive open. If the application sets a 2803 * large window, window scale is enabled regardless of 2804 * the value in iulp_wscale_ok. This is the behavior 2805 * since 2.6. So we keep it. 2806 * The only case left in passive open processing is the 2807 * check for SACK. 2808 * For ECN, it should probably be like SACK. But the 2809 * current value is binary, so we treat it like the other 2810 * cases. The metric only controls active open.For passive 2811 * open, the ndd param, tcp_ecn_permitted, controls the 2812 * behavior. 
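		 *
		 * To illustrate the RTO seeding at the top of this block
		 * with purely made-up numbers: given cached iulp_rtt = 800
		 * and iulp_rtt_sd = 200, and tcps_rexmit_interval_extra = 0,
		 *
		 *	rto = (800 >> 3) + 200 + 0 + (800 >> 5)
		 *	    = 100 + 200 + 25 = 325
		 *
		 * which is then clamped to [tcps_rexmit_interval_min,
		 * tcps_rexmit_interval_max] before being stored in tcp_rto.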
		 */
		if (!tcp_detached) {
			/*
			 * The if check means that the following can only
			 * be turned on by the metrics only IRE, but not off.
			 */
			if (ire_uinfo->iulp_tstamp_ok)
				tcp->tcp_snd_ts_ok = B_TRUE;
			if (ire_uinfo->iulp_wscale_ok)
				tcp->tcp_snd_ws_ok = B_TRUE;
			if (ire_uinfo->iulp_sack == 2)
				tcp->tcp_snd_sack_ok = B_TRUE;
			if (ire_uinfo->iulp_ecn_ok)
				tcp->tcp_ecn_ok = B_TRUE;
		} else {
			/*
			 * Passive open.
			 *
			 * As above, the if check means that SACK can only be
			 * turned on by the metrics only IRE.
			 */
			if (ire_uinfo->iulp_sack > 0) {
				tcp->tcp_snd_sack_ok = B_TRUE;
			}
		}
	}


	/*
	 * XXX: Note that currently, ire_max_frag can be as small as 68
	 * because of PMTUd. So tcp_mss may go negative if the combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(ire->ire_max_frag != 0);
	mss = tcp->tcp_if_mtu = ire->ire_max_frag;
	if (tcp->tcp_ipp_fields & IPPF_USE_MIN_MTU) {
		if (tcp->tcp_ipp_use_min_mtu == IPV6_USE_MIN_MTU_NEVER) {
			mss = MIN(mss, IPV6_MIN_MTU);
		}
	}

	/* Sanity check for MSS value. */
	if (tcp->tcp_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipversion == IPV6_VERSION &&
	    (ire->ire_frag_flag & IPH_FRAG_HDR)) {
		/*
		 * After receiving an ICMPv6 "packet too big" message with an
		 * MTU < 1280, and for multirouted IPv6 packets, the IP layer
		 * will insert an 8-byte fragment header in every packet; we
		 * reduce the MSS by that amount here.
		 */
		mss -= sizeof (ip6_frag_t);
	}

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
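	 *
	 * Conceptually (a sketch of the RFC 1948 scheme, not the exact
	 * tcp_iss_init() internals) the ISS is derived as:
	 *
	 *	ISS = time_component +
	 *	    MD5(tcp_lport, tcp_fport, local address, remote address,
	 *	    per-boot secret)
	 *
	 * so that the sequence spaces of distinct connection IDs are
	 * uncorrelated while each individual connection's ISS still
	 * advances over time.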
2897 */ 2898 2899 tcp_iss_init(tcp); 2900 2901 if (ire->ire_type & (IRE_LOOPBACK | IRE_LOCAL)) 2902 tcp->tcp_loopback = B_TRUE; 2903 2904 if (tcp->tcp_ipversion == IPV4_VERSION) { 2905 hsp = tcp_hsp_lookup(tcp->tcp_remote, tcps); 2906 } else { 2907 hsp = tcp_hsp_lookup_ipv6(&tcp->tcp_remote_v6, tcps); 2908 } 2909 2910 if (hsp != NULL) { 2911 /* Only modify if we're going to make them bigger */ 2912 if (hsp->tcp_hsp_sendspace > tcp->tcp_xmit_hiwater) { 2913 tcp->tcp_xmit_hiwater = hsp->tcp_hsp_sendspace; 2914 if (tcps->tcps_snd_lowat_fraction != 0) 2915 tcp->tcp_xmit_lowater = tcp->tcp_xmit_hiwater / 2916 tcps->tcps_snd_lowat_fraction; 2917 } 2918 2919 if (hsp->tcp_hsp_recvspace > tcp->tcp_rwnd) { 2920 tcp->tcp_rwnd = hsp->tcp_hsp_recvspace; 2921 } 2922 2923 /* Copy timestamp flag only for active open */ 2924 if (!tcp_detached) 2925 tcp->tcp_snd_ts_ok = hsp->tcp_hsp_tstamp; 2926 } 2927 2928 if (sire != NULL) 2929 IRE_REFRELE(sire); 2930 2931 /* 2932 * If we got an IRE_CACHE and an ILL, go through their properties; 2933 * otherwise, this is deferred until later when we have an IRE_CACHE. 2934 */ 2935 if (tcp->tcp_loopback || 2936 (ire_cacheable && (ill = ire_to_ill(ire)) != NULL)) { 2937 /* 2938 * For incoming, see if this tcp may be MDT-capable. For 2939 * outgoing, this process has been taken care of through 2940 * tcp_rput_other. 2941 */ 2942 tcp_ire_ill_check(tcp, ire, ill, incoming); 2943 tcp->tcp_ire_ill_check_done = B_TRUE; 2944 } 2945 2946 mutex_enter(&connp->conn_lock); 2947 /* 2948 * Make sure that conn is not marked incipient 2949 * for incoming connections. A blind 2950 * removal of incipient flag is cheaper than 2951 * check and removal. 2952 */ 2953 connp->conn_state_flags &= ~CONN_INCIPIENT; 2954 2955 /* 2956 * Must not cache forwarding table routes 2957 * or recache an IRE after the conn_t has 2958 * had conn_ire_cache cleared and is flagged 2959 * unusable, (see the CONN_CACHE_IRE() macro). 2960 */ 2961 if (ire_cacheable && CONN_CACHE_IRE(connp)) { 2962 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 2963 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 2964 connp->conn_ire_cache = ire; 2965 IRE_UNTRACE_REF(ire); 2966 rw_exit(&ire->ire_bucket->irb_lock); 2967 mutex_exit(&connp->conn_lock); 2968 return (1); 2969 } 2970 rw_exit(&ire->ire_bucket->irb_lock); 2971 } 2972 mutex_exit(&connp->conn_lock); 2973 2974 if (ire->ire_mp == NULL) 2975 ire_refrele(ire); 2976 return (1); 2977 2978 error: 2979 if (ire->ire_mp == NULL) 2980 ire_refrele(ire); 2981 if (sire != NULL) 2982 ire_refrele(sire); 2983 return (0); 2984 } 2985 2986 /* 2987 * tcp_bind is called (holding the writer lock) by tcp_wput_proto to process a 2988 * O_T_BIND_REQ/T_BIND_REQ message. 
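 *
 * For reference while reading the parsing below, the request is a
 * standard TPI T_bind_req (a sketch; see <sys/tihdr.h> for the
 * authoritative definition):
 *
 *	struct T_bind_req {
 *		t_scalar_t	PRIM_type;	(O_T_BIND_REQ or T_BIND_REQ)
 *		t_scalar_t	ADDR_length;	(0, sizeof (sin_t) or
 *						 sizeof (sin6_t))
 *		t_scalar_t	ADDR_offset;
 *		t_uscalar_t	CONIND_number;	(requested listen backlog)
 *	};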
2989 */ 2990 static void 2991 tcp_bind(tcp_t *tcp, mblk_t *mp) 2992 { 2993 sin_t *sin; 2994 sin6_t *sin6; 2995 mblk_t *mp1; 2996 in_port_t requested_port; 2997 in_port_t allocated_port; 2998 struct T_bind_req *tbr; 2999 boolean_t bind_to_req_port_only; 3000 boolean_t backlog_update = B_FALSE; 3001 boolean_t user_specified; 3002 in6_addr_t v6addr; 3003 ipaddr_t v4addr; 3004 uint_t origipversion; 3005 int err; 3006 queue_t *q = tcp->tcp_wq; 3007 conn_t *connp; 3008 mlp_type_t addrtype, mlptype; 3009 zone_t *zone; 3010 cred_t *cr; 3011 in_port_t mlp_port; 3012 tcp_stack_t *tcps = tcp->tcp_tcps; 3013 3014 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 3015 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) { 3016 if (tcp->tcp_debug) { 3017 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3018 "tcp_bind: bad req, len %u", 3019 (uint_t)(mp->b_wptr - mp->b_rptr)); 3020 } 3021 tcp_err_ack(tcp, mp, TPROTO, 0); 3022 return; 3023 } 3024 /* Make sure the largest address fits */ 3025 mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1); 3026 if (mp1 == NULL) { 3027 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3028 return; 3029 } 3030 mp = mp1; 3031 tbr = (struct T_bind_req *)mp->b_rptr; 3032 if (tcp->tcp_state >= TCPS_BOUND) { 3033 if ((tcp->tcp_state == TCPS_BOUND || 3034 tcp->tcp_state == TCPS_LISTEN) && 3035 tcp->tcp_conn_req_max != tbr->CONIND_number && 3036 tbr->CONIND_number > 0) { 3037 /* 3038 * Handle listen() increasing CONIND_number. 3039 * This is more "liberal" then what the TPI spec 3040 * requires but is needed to avoid a t_unbind 3041 * when handling listen() since the port number 3042 * might be "stolen" between the unbind and bind. 3043 */ 3044 backlog_update = B_TRUE; 3045 goto do_bind; 3046 } 3047 if (tcp->tcp_debug) { 3048 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3049 "tcp_bind: bad state, %d", tcp->tcp_state); 3050 } 3051 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 3052 return; 3053 } 3054 origipversion = tcp->tcp_ipversion; 3055 3056 switch (tbr->ADDR_length) { 3057 case 0: /* request for a generic port */ 3058 tbr->ADDR_offset = sizeof (struct T_bind_req); 3059 if (tcp->tcp_family == AF_INET) { 3060 tbr->ADDR_length = sizeof (sin_t); 3061 sin = (sin_t *)&tbr[1]; 3062 *sin = sin_null; 3063 sin->sin_family = AF_INET; 3064 mp->b_wptr = (uchar_t *)&sin[1]; 3065 tcp->tcp_ipversion = IPV4_VERSION; 3066 IN6_IPADDR_TO_V4MAPPED(INADDR_ANY, &v6addr); 3067 } else { 3068 ASSERT(tcp->tcp_family == AF_INET6); 3069 tbr->ADDR_length = sizeof (sin6_t); 3070 sin6 = (sin6_t *)&tbr[1]; 3071 *sin6 = sin6_null; 3072 sin6->sin6_family = AF_INET6; 3073 mp->b_wptr = (uchar_t *)&sin6[1]; 3074 tcp->tcp_ipversion = IPV6_VERSION; 3075 V6_SET_ZERO(v6addr); 3076 } 3077 requested_port = 0; 3078 break; 3079 3080 case sizeof (sin_t): /* Complete IPv4 address */ 3081 sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset, 3082 sizeof (sin_t)); 3083 if (sin == NULL || !OK_32PTR((char *)sin)) { 3084 if (tcp->tcp_debug) { 3085 (void) strlog(TCP_MOD_ID, 0, 1, 3086 SL_ERROR|SL_TRACE, 3087 "tcp_bind: bad address parameter, " 3088 "offset %d, len %d", 3089 tbr->ADDR_offset, tbr->ADDR_length); 3090 } 3091 tcp_err_ack(tcp, mp, TPROTO, 0); 3092 return; 3093 } 3094 /* 3095 * With sockets sockfs will accept bogus sin_family in 3096 * bind() and replace it with the family used in the socket 3097 * call. 
3098 */ 3099 if (sin->sin_family != AF_INET || 3100 tcp->tcp_family != AF_INET) { 3101 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3102 return; 3103 } 3104 requested_port = ntohs(sin->sin_port); 3105 tcp->tcp_ipversion = IPV4_VERSION; 3106 v4addr = sin->sin_addr.s_addr; 3107 IN6_IPADDR_TO_V4MAPPED(v4addr, &v6addr); 3108 break; 3109 3110 case sizeof (sin6_t): /* Complete IPv6 address */ 3111 sin6 = (sin6_t *)mi_offset_param(mp, 3112 tbr->ADDR_offset, sizeof (sin6_t)); 3113 if (sin6 == NULL || !OK_32PTR((char *)sin6)) { 3114 if (tcp->tcp_debug) { 3115 (void) strlog(TCP_MOD_ID, 0, 1, 3116 SL_ERROR|SL_TRACE, 3117 "tcp_bind: bad IPv6 address parameter, " 3118 "offset %d, len %d", tbr->ADDR_offset, 3119 tbr->ADDR_length); 3120 } 3121 tcp_err_ack(tcp, mp, TSYSERR, EINVAL); 3122 return; 3123 } 3124 if (sin6->sin6_family != AF_INET6 || 3125 tcp->tcp_family != AF_INET6) { 3126 tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT); 3127 return; 3128 } 3129 requested_port = ntohs(sin6->sin6_port); 3130 tcp->tcp_ipversion = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) ? 3131 IPV4_VERSION : IPV6_VERSION; 3132 v6addr = sin6->sin6_addr; 3133 break; 3134 3135 default: 3136 if (tcp->tcp_debug) { 3137 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 3138 "tcp_bind: bad address length, %d", 3139 tbr->ADDR_length); 3140 } 3141 tcp_err_ack(tcp, mp, TBADADDR, 0); 3142 return; 3143 } 3144 tcp->tcp_bound_source_v6 = v6addr; 3145 3146 /* Check for change in ipversion */ 3147 if (origipversion != tcp->tcp_ipversion) { 3148 ASSERT(tcp->tcp_family == AF_INET6); 3149 err = tcp->tcp_ipversion == IPV6_VERSION ? 3150 tcp_header_init_ipv6(tcp) : tcp_header_init_ipv4(tcp); 3151 if (err) { 3152 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3153 return; 3154 } 3155 } 3156 3157 /* 3158 * Initialize family specific fields. Copy of the src addr. 3159 * in tcp_t is needed for the lookup funcs. 3160 */ 3161 if (tcp->tcp_ipversion == IPV6_VERSION) { 3162 tcp->tcp_ip6h->ip6_src = v6addr; 3163 } else { 3164 IN6_V4MAPPED_TO_IPADDR(&v6addr, tcp->tcp_ipha->ipha_src); 3165 } 3166 tcp->tcp_ip_src_v6 = v6addr; 3167 3168 /* 3169 * For O_T_BIND_REQ: 3170 * Verify that the target port/addr is available, or choose 3171 * another. 3172 * For T_BIND_REQ: 3173 * Verify that the target port/addr is available or fail. 3174 * In both cases when it succeeds the tcp is inserted in the 3175 * bind hash table. This ensures that the operation is atomic 3176 * under the lock on the hash bucket. 3177 */ 3178 bind_to_req_port_only = requested_port != 0 && 3179 tbr->PRIM_type != O_T_BIND_REQ; 3180 /* 3181 * Get a valid port (within the anonymous range and should not 3182 * be a privileged one) to use if the user has not given a port. 3183 * If multiple threads are here, they may all start with 3184 * with the same initial port. But, it should be fine as long as 3185 * tcp_bindi will ensure that no two threads will be assigned 3186 * the same port. 3187 * 3188 * NOTE: XXX If a privileged process asks for an anonymous port, we 3189 * still check for ports only in the range > tcp_smallest_non_priv_port, 3190 * unless TCP_ANONPRIVBIND option is set. 3191 */ 3192 mlptype = mlptSingle; 3193 mlp_port = requested_port; 3194 if (requested_port == 0) { 3195 requested_port = tcp->tcp_anon_priv_bind ? 
3196 tcp_get_next_priv_port(tcp) : 3197 tcp_update_next_port(tcps->tcps_next_port_to_try, 3198 tcp, B_TRUE); 3199 if (requested_port == 0) { 3200 tcp_err_ack(tcp, mp, TNOADDR, 0); 3201 return; 3202 } 3203 user_specified = B_FALSE; 3204 3205 /* 3206 * If the user went through one of the RPC interfaces to create 3207 * this socket and RPC is MLP in this zone, then give him an 3208 * anonymous MLP. 3209 */ 3210 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3211 connp = tcp->tcp_connp; 3212 if (connp->conn_anon_mlp && is_system_labeled()) { 3213 zone = crgetzone(cr); 3214 addrtype = tsol_mlp_addr_type(zone->zone_id, 3215 IPV6_VERSION, &v6addr, 3216 tcps->tcps_netstack->netstack_ip); 3217 if (addrtype == mlptSingle) { 3218 tcp_err_ack(tcp, mp, TNOADDR, 0); 3219 return; 3220 } 3221 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3222 PMAPPORT, addrtype); 3223 mlp_port = PMAPPORT; 3224 } 3225 } else { 3226 int i; 3227 boolean_t priv = B_FALSE; 3228 3229 /* 3230 * If the requested_port is in the well-known privileged range, 3231 * verify that the stream was opened by a privileged user. 3232 * Note: No locks are held when inspecting tcp_g_*epriv_ports 3233 * but instead the code relies on: 3234 * - the fact that the address of the array and its size never 3235 * change 3236 * - the atomic assignment of the elements of the array 3237 */ 3238 cr = DB_CREDDEF(mp, tcp->tcp_cred); 3239 if (requested_port < tcps->tcps_smallest_nonpriv_port) { 3240 priv = B_TRUE; 3241 } else { 3242 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 3243 if (requested_port == 3244 tcps->tcps_g_epriv_ports[i]) { 3245 priv = B_TRUE; 3246 break; 3247 } 3248 } 3249 } 3250 if (priv) { 3251 if (secpolicy_net_privaddr(cr, requested_port) != 0) { 3252 if (tcp->tcp_debug) { 3253 (void) strlog(TCP_MOD_ID, 0, 1, 3254 SL_ERROR|SL_TRACE, 3255 "tcp_bind: no priv for port %d", 3256 requested_port); 3257 } 3258 tcp_err_ack(tcp, mp, TACCES, 0); 3259 return; 3260 } 3261 } 3262 user_specified = B_TRUE; 3263 3264 connp = tcp->tcp_connp; 3265 if (is_system_labeled()) { 3266 zone = crgetzone(cr); 3267 addrtype = tsol_mlp_addr_type(zone->zone_id, 3268 IPV6_VERSION, &v6addr, 3269 tcps->tcps_netstack->netstack_ip); 3270 if (addrtype == mlptSingle) { 3271 tcp_err_ack(tcp, mp, TNOADDR, 0); 3272 return; 3273 } 3274 mlptype = tsol_mlp_port_type(zone, IPPROTO_TCP, 3275 requested_port, addrtype); 3276 } 3277 } 3278 3279 if (mlptype != mlptSingle) { 3280 if (secpolicy_net_bindmlp(cr) != 0) { 3281 if (tcp->tcp_debug) { 3282 (void) strlog(TCP_MOD_ID, 0, 1, 3283 SL_ERROR|SL_TRACE, 3284 "tcp_bind: no priv for multilevel port %d", 3285 requested_port); 3286 } 3287 tcp_err_ack(tcp, mp, TACCES, 0); 3288 return; 3289 } 3290 3291 /* 3292 * If we're specifically binding a shared IP address and the 3293 * port is MLP on shared addresses, then check to see if this 3294 * zone actually owns the MLP. Reject if not. 3295 */ 3296 if (mlptype == mlptShared && addrtype == mlptShared) { 3297 /* 3298 * No need to handle exclusive-stack zones since 3299 * ALL_ZONES only applies to the shared stack. 3300
3300 */ 3301 zoneid_t mlpzone; 3302 3303 mlpzone = tsol_mlp_findzone(IPPROTO_TCP, 3304 htons(mlp_port)); 3305 if (connp->conn_zoneid != mlpzone) { 3306 if (tcp->tcp_debug) { 3307 (void) strlog(TCP_MOD_ID, 0, 1, 3308 SL_ERROR|SL_TRACE, 3309 "tcp_bind: attempt to bind port " 3310 "%d on shared addr in zone %d " 3311 "(should be %d)", 3312 mlp_port, connp->conn_zoneid, 3313 mlpzone); 3314 } 3315 tcp_err_ack(tcp, mp, TACCES, 0); 3316 return; 3317 } 3318 } 3319 3320 if (!user_specified) { 3321 err = tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3322 requested_port, B_TRUE); 3323 if (err != 0) { 3324 if (tcp->tcp_debug) { 3325 (void) strlog(TCP_MOD_ID, 0, 1, 3326 SL_ERROR|SL_TRACE, 3327 "tcp_bind: cannot establish anon " 3328 "MLP for port %d", 3329 requested_port); 3330 } 3331 tcp_err_ack(tcp, mp, TSYSERR, err); 3332 return; 3333 } 3334 connp->conn_anon_port = B_TRUE; 3335 } 3336 connp->conn_mlp_type = mlptype; 3337 } 3338 3339 allocated_port = tcp_bindi(tcp, requested_port, &v6addr, 3340 tcp->tcp_reuseaddr, B_FALSE, bind_to_req_port_only, user_specified); 3341 3342 if (allocated_port == 0) { 3343 connp->conn_mlp_type = mlptSingle; 3344 if (connp->conn_anon_port) { 3345 connp->conn_anon_port = B_FALSE; 3346 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3347 requested_port, B_FALSE); 3348 } 3349 if (bind_to_req_port_only) { 3350 if (tcp->tcp_debug) { 3351 (void) strlog(TCP_MOD_ID, 0, 1, 3352 SL_ERROR|SL_TRACE, 3353 "tcp_bind: requested addr busy"); 3354 } 3355 tcp_err_ack(tcp, mp, TADDRBUSY, 0); 3356 } else { 3357 /* If we are out of ports, fail the bind. */ 3358 if (tcp->tcp_debug) { 3359 (void) strlog(TCP_MOD_ID, 0, 1, 3360 SL_ERROR|SL_TRACE, 3361 "tcp_bind: out of ports?"); 3362 } 3363 tcp_err_ack(tcp, mp, TNOADDR, 0); 3364 } 3365 return; 3366 } 3367 ASSERT(tcp->tcp_state == TCPS_BOUND); 3368 do_bind: 3369 if (!backlog_update) { 3370 if (tcp->tcp_family == AF_INET) 3371 sin->sin_port = htons(allocated_port); 3372 else 3373 sin6->sin6_port = htons(allocated_port); 3374 } 3375 if (tcp->tcp_family == AF_INET) { 3376 if (tbr->CONIND_number != 0) { 3377 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3378 sizeof (sin_t)); 3379 } else { 3380 /* Just verify the local IP address */ 3381 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, IP_ADDR_LEN); 3382 } 3383 } else { 3384 if (tbr->CONIND_number != 0) { 3385 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3386 sizeof (sin6_t)); 3387 } else { 3388 /* Just verify the local IP address */ 3389 mp1 = tcp_ip_bind_mp(tcp, tbr->PRIM_type, 3390 IPV6_ADDR_LEN); 3391 } 3392 } 3393 if (mp1 == NULL) { 3394 if (connp->conn_anon_port) { 3395 connp->conn_anon_port = B_FALSE; 3396 (void) tsol_mlp_anon(zone, mlptype, connp->conn_ulp, 3397 requested_port, B_FALSE); 3398 } 3399 connp->conn_mlp_type = mlptSingle; 3400 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 3401 return; 3402 } 3403 3404 tbr->PRIM_type = T_BIND_ACK; 3405 mp->b_datap->db_type = M_PCPROTO; 3406 3407 /* Chain in the reply mp for tcp_rput() */ 3408 mp1->b_cont = mp; 3409 mp = mp1; 3410 3411 tcp->tcp_conn_req_max = tbr->CONIND_number; 3412 if (tcp->tcp_conn_req_max) { 3413 if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min) 3414 tcp->tcp_conn_req_max = tcps->tcps_conn_req_min; 3415 if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q) 3416 tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q; 3417 /* 3418 * If this is a listener, do not reset the eager list 3419 * and other stuffs. Note that we don't check if the 3420 * existing eager list meets the new tcp_conn_req_max 3421 * requirement. 
3422 */ 3423 if (tcp->tcp_state != TCPS_LISTEN) { 3424 tcp->tcp_state = TCPS_LISTEN; 3425 /* Initialize the chain. Don't need the eager_lock */ 3426 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 3427 tcp->tcp_eager_next_drop_q0 = tcp; 3428 tcp->tcp_eager_prev_drop_q0 = tcp; 3429 tcp->tcp_second_ctimer_threshold = 3430 tcps->tcps_ip_abort_linterval; 3431 } 3432 } 3433 3434 /* 3435 * We can call ip_bind directly which returns a T_BIND_ACK mp. The 3436 * processing continues in tcp_rput_other(). 3437 */ 3438 if (tcp->tcp_family == AF_INET6) { 3439 ASSERT(tcp->tcp_connp->conn_af_isv6); 3440 mp = ip_bind_v6(q, mp, tcp->tcp_connp, &tcp->tcp_sticky_ipp); 3441 } else { 3442 ASSERT(!tcp->tcp_connp->conn_af_isv6); 3443 mp = ip_bind_v4(q, mp, tcp->tcp_connp); 3444 } 3445 /* 3446 * If the bind cannot complete immediately 3447 * IP will arrange to call tcp_rput_other 3448 * when the bind completes. 3449 */ 3450 if (mp != NULL) { 3451 tcp_rput_other(tcp, mp); 3452 } else { 3453 /* 3454 * Bind will be resumed later. Need to ensure 3455 * that conn doesn't disappear when that happens. 3456 * This will be decremented in ip_resume_tcp_bind(). 3457 */ 3458 CONN_INC_REF(tcp->tcp_connp); 3459 } 3460 } 3461 3462 3463 /* 3464 * If the "bind_to_req_port_only" parameter is set, if the requested port 3465 * number is available, return it, If not return 0 3466 * 3467 * If "bind_to_req_port_only" parameter is not set and 3468 * If the requested port number is available, return it. If not, return 3469 * the first anonymous port we happen across. If no anonymous ports are 3470 * available, return 0. addr is the requested local address, if any. 3471 * 3472 * In either case, when succeeding update the tcp_t to record the port number 3473 * and insert it in the bind hash table. 3474 * 3475 * Note that TCP over IPv4 and IPv6 sockets can use the same port number 3476 * without setting SO_REUSEADDR. This is needed so that they 3477 * can be viewed as two independent transport protocols. 3478 */ 3479 static in_port_t 3480 tcp_bindi(tcp_t *tcp, in_port_t port, const in6_addr_t *laddr, 3481 int reuseaddr, boolean_t quick_connect, 3482 boolean_t bind_to_req_port_only, boolean_t user_specified) 3483 { 3484 /* number of times we have run around the loop */ 3485 int count = 0; 3486 /* maximum number of times to run around the loop */ 3487 int loopmax; 3488 conn_t *connp = tcp->tcp_connp; 3489 zoneid_t zoneid = connp->conn_zoneid; 3490 tcp_stack_t *tcps = tcp->tcp_tcps; 3491 3492 /* 3493 * Lookup for free addresses is done in a loop and "loopmax" 3494 * influences how long we spin in the loop 3495 */ 3496 if (bind_to_req_port_only) { 3497 /* 3498 * If the requested port is busy, don't bother to look 3499 * for a new one. Setting loop maximum count to 1 has 3500 * that effect. 3501 */ 3502 loopmax = 1; 3503 } else { 3504 /* 3505 * If the requested port is busy, look for a free one 3506 * in the anonymous port range. 3507 * Set loopmax appropriately so that one does not look 3508 * forever in the case all of the anonymous ports are in use. 
3509 */ 3510 if (tcp->tcp_anon_priv_bind) { 3511 /* 3512 * loopmax = 3513 * (IPPORT_RESERVED-1) - tcp_min_anonpriv_port + 1 3514 */ 3515 loopmax = IPPORT_RESERVED - 3516 tcps->tcps_min_anonpriv_port; 3517 } else { 3518 loopmax = (tcps->tcps_largest_anon_port - 3519 tcps->tcps_smallest_anon_port + 1); 3520 } 3521 } 3522 do { 3523 uint16_t lport; 3524 tf_t *tbf; 3525 tcp_t *ltcp; 3526 conn_t *lconnp; 3527 3528 lport = htons(port); 3529 3530 /* 3531 * Ensure that the tcp_t is not currently in the bind hash. 3532 * Hold the lock on the hash bucket to ensure that 3533 * the duplicate check plus the insertion is an atomic 3534 * operation. 3535 * 3536 * This function does an inline lookup on the bind hash list 3537 * Make sure that we access only members of tcp_t 3538 * and that we don't look at tcp_tcp, since we are not 3539 * doing a CONN_INC_REF. 3540 */ 3541 tcp_bind_hash_remove(tcp); 3542 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(lport)]; 3543 mutex_enter(&tbf->tf_lock); 3544 for (ltcp = tbf->tf_tcp; ltcp != NULL; 3545 ltcp = ltcp->tcp_bind_hash) { 3546 boolean_t not_socket; 3547 boolean_t exclbind; 3548 3549 if (lport != ltcp->tcp_lport) 3550 continue; 3551 3552 lconnp = ltcp->tcp_connp; 3553 3554 /* 3555 * On a labeled system, we must treat bindings to ports 3556 * on shared IP addresses by sockets with MAC exemption 3557 * privilege as being in all zones, as there's 3558 * otherwise no way to identify the right receiver. 3559 */ 3560 if (!IPCL_ZONE_MATCH(ltcp->tcp_connp, zoneid) && 3561 !lconnp->conn_mac_exempt && 3562 !connp->conn_mac_exempt) 3563 continue; 3564 3565 /* 3566 * If TCP_EXCLBIND is set for either the bound or 3567 * binding endpoint, the semantics of bind 3568 * is changed according to the following. 3569 * 3570 * spec = specified address (v4 or v6) 3571 * unspec = unspecified address (v4 or v6) 3572 * A = specified addresses are different for endpoints 3573 * 3574 * bound bind to allowed 3575 * ------------------------------------- 3576 * unspec unspec no 3577 * unspec spec no 3578 * spec unspec no 3579 * spec spec yes if A 3580 * 3581 * For labeled systems, SO_MAC_EXEMPT behaves the same 3582 * as TCP_EXCLBIND, except that zoneid is ignored. 3583 * 3584 * Note: 3585 * 3586 * 1. Because of TLI semantics, an endpoint can go 3587 * back from, say TCP_ESTABLISHED to TCPS_LISTEN or 3588 * TCPS_BOUND, depending on whether it is originally 3589 * a listener or not. That is why we need to check 3590 * for states greater than or equal to TCPS_BOUND 3591 * here. 3592 * 3593 * 2. Ideally, we should only check for state equals 3594 * to TCPS_LISTEN. And the following check should be 3595 * added. 3596 * 3597 * if (ltcp->tcp_state == TCPS_LISTEN || 3598 * !reuseaddr || !ltcp->tcp_reuseaddr) { 3599 * ... 3600 * } 3601 * 3602 * The semantics will be changed to this. If the 3603 * endpoint on the list is in state not equal to 3604 * TCPS_LISTEN and both endpoints have SO_REUSEADDR 3605 * set, let the bind succeed. 3606 * 3607 * Because of (1), we cannot do that for TLI 3608 * endpoints. But we can do that for socket endpoints. 3609 * If in future, we can change this going back 3610 * semantics, we can use the above check for TLI also. 
3611 */ 3612 not_socket = !(TCP_IS_SOCKET(ltcp) && 3613 TCP_IS_SOCKET(tcp)); 3614 exclbind = ltcp->tcp_exclbind || tcp->tcp_exclbind; 3615 3616 if (lconnp->conn_mac_exempt || connp->conn_mac_exempt || 3617 (exclbind && (not_socket || 3618 ltcp->tcp_state <= TCPS_ESTABLISHED))) { 3619 if (V6_OR_V4_INADDR_ANY( 3620 ltcp->tcp_bound_source_v6) || 3621 V6_OR_V4_INADDR_ANY(*laddr) || 3622 IN6_ARE_ADDR_EQUAL(laddr, 3623 <cp->tcp_bound_source_v6)) { 3624 break; 3625 } 3626 continue; 3627 } 3628 3629 /* 3630 * Check ipversion to allow IPv4 and IPv6 sockets to 3631 * have disjoint port number spaces, if *_EXCLBIND 3632 * is not set and only if the application binds to a 3633 * specific port. We use the same autoassigned port 3634 * number space for IPv4 and IPv6 sockets. 3635 */ 3636 if (tcp->tcp_ipversion != ltcp->tcp_ipversion && 3637 bind_to_req_port_only) 3638 continue; 3639 3640 /* 3641 * Ideally, we should make sure that the source 3642 * address, remote address, and remote port in the 3643 * four tuple for this tcp-connection is unique. 3644 * However, trying to find out the local source 3645 * address would require too much code duplication 3646 * with IP, since IP needs needs to have that code 3647 * to support userland TCP implementations. 3648 */ 3649 if (quick_connect && 3650 (ltcp->tcp_state > TCPS_LISTEN) && 3651 ((tcp->tcp_fport != ltcp->tcp_fport) || 3652 !IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6, 3653 <cp->tcp_remote_v6))) 3654 continue; 3655 3656 if (!reuseaddr) { 3657 /* 3658 * No socket option SO_REUSEADDR. 3659 * If existing port is bound to 3660 * a non-wildcard IP address 3661 * and the requesting stream is 3662 * bound to a distinct 3663 * different IP addresses 3664 * (non-wildcard, also), keep 3665 * going. 3666 */ 3667 if (!V6_OR_V4_INADDR_ANY(*laddr) && 3668 !V6_OR_V4_INADDR_ANY( 3669 ltcp->tcp_bound_source_v6) && 3670 !IN6_ARE_ADDR_EQUAL(laddr, 3671 <cp->tcp_bound_source_v6)) 3672 continue; 3673 if (ltcp->tcp_state >= TCPS_BOUND) { 3674 /* 3675 * This port is being used and 3676 * its state is >= TCPS_BOUND, 3677 * so we can't bind to it. 3678 */ 3679 break; 3680 } 3681 } else { 3682 /* 3683 * socket option SO_REUSEADDR is set on the 3684 * binding tcp_t. 3685 * 3686 * If two streams are bound to 3687 * same IP address or both addr 3688 * and bound source are wildcards 3689 * (INADDR_ANY), we want to stop 3690 * searching. 3691 * We have found a match of IP source 3692 * address and source port, which is 3693 * refused regardless of the 3694 * SO_REUSEADDR setting, so we break. 3695 */ 3696 if (IN6_ARE_ADDR_EQUAL(laddr, 3697 <cp->tcp_bound_source_v6) && 3698 (ltcp->tcp_state == TCPS_LISTEN || 3699 ltcp->tcp_state == TCPS_BOUND)) 3700 break; 3701 } 3702 } 3703 if (ltcp != NULL) { 3704 /* The port number is busy */ 3705 mutex_exit(&tbf->tf_lock); 3706 } else { 3707 /* 3708 * This port is ours. Insert in fanout and mark as 3709 * bound to prevent others from getting the port 3710 * number. 3711 */ 3712 tcp->tcp_state = TCPS_BOUND; 3713 tcp->tcp_lport = htons(port); 3714 *(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport; 3715 3716 ASSERT(&tcps->tcps_bind_fanout[TCP_BIND_HASH( 3717 tcp->tcp_lport)] == tbf); 3718 tcp_bind_hash_insert(tbf, tcp, 1); 3719 3720 mutex_exit(&tbf->tf_lock); 3721 3722 /* 3723 * We don't want tcp_next_port_to_try to "inherit" 3724 * a port number supplied by the user in a bind. 3725 */ 3726 if (user_specified) 3727 return (port); 3728 3729 /* 3730 * This is the only place where tcp_next_port_to_try 3731 * is updated. 
After the update, it may or may not 3732 * be in the valid range. 3733 */ 3734 if (!tcp->tcp_anon_priv_bind) 3735 tcps->tcps_next_port_to_try = port + 1; 3736 return (port); 3737 } 3738 3739 if (tcp->tcp_anon_priv_bind) { 3740 port = tcp_get_next_priv_port(tcp); 3741 } else { 3742 if (count == 0 && user_specified) { 3743 /* 3744 * We may have to return an anonymous port. So 3745 * get one to start with. 3746 */ 3747 port = 3748 tcp_update_next_port( 3749 tcps->tcps_next_port_to_try, 3750 tcp, B_TRUE); 3751 user_specified = B_FALSE; 3752 } else { 3753 port = tcp_update_next_port(port + 1, tcp, 3754 B_FALSE); 3755 } 3756 } 3757 if (port == 0) 3758 break; 3759 3760 /* 3761 * Don't let this loop run forever in the case where 3762 * all of the anonymous ports are in use. 3763 */ 3764 } while (++count < loopmax); 3765 return (0); 3766 } 3767 3768 /* 3769 * tcp_clean_death / tcp_close_detached must not be called more than once 3770 * on a tcp. Thus every function that potentially calls tcp_clean_death 3771 * must check for the tcp state before calling tcp_clean_death. 3772 * E.g. tcp_input, tcp_rput_data, tcp_eager_kill, tcp_clean_death_wrapper, 3773 * tcp_timer_handler, all check for the tcp state. 3774 */ 3775 /* ARGSUSED */ 3776 void 3777 tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2) 3778 { 3779 tcp_t *tcp = ((conn_t *)arg)->conn_tcp; 3780 3781 freemsg(mp); 3782 if (tcp->tcp_state > TCPS_BOUND) 3783 (void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT, 5); 3784 } 3785 3786 /* 3787 * We are dying for some reason. Try to do it gracefully. (May be called 3788 * as writer.) 3789 * 3790 * Return -1 if the structure was not cleaned up (if the cleanup had to be 3791 * done by a service procedure). 3792 * TBD - Should the return value distinguish between the tcp_t being 3793 * freed and it being reinitialized? 3794 */ 3795 static int 3796 tcp_clean_death(tcp_t *tcp, int err, uint8_t tag) 3797 { 3798 mblk_t *mp; 3799 queue_t *q; 3800 tcp_stack_t *tcps = tcp->tcp_tcps; 3801 3802 TCP_CLD_STAT(tag); 3803 3804 #if TCP_TAG_CLEAN_DEATH 3805 tcp->tcp_cleandeathtag = tag; 3806 #endif 3807 3808 if (tcp->tcp_fused) 3809 tcp_unfuse(tcp); 3810 3811 if (tcp->tcp_linger_tid != 0 && 3812 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 3813 tcp_stop_lingering(tcp); 3814 } 3815 3816 ASSERT(tcp != NULL); 3817 ASSERT((tcp->tcp_family == AF_INET && 3818 tcp->tcp_ipversion == IPV4_VERSION) || 3819 (tcp->tcp_family == AF_INET6 && 3820 (tcp->tcp_ipversion == IPV4_VERSION || 3821 tcp->tcp_ipversion == IPV6_VERSION))); 3822 3823 if (TCP_IS_DETACHED(tcp)) { 3824 if (tcp->tcp_hard_binding) { 3825 /* 3826 * It's an eager that we are dealing with. We close the 3827 * eager but in case a conn_ind has already gone to the 3828 * listener, let tcp_accept_finish() send a discon_ind 3829 * to the listener and drop the last reference. If the 3830 * listener doesn't even know about the eager, i.e. the 3831 * conn_ind hasn't gone up, blow away the eager and drop 3832 * the last reference as well. If the conn_ind has gone 3833 * up, state should be BOUND. tcp_accept_finish 3834 * will figure out that the connection has received a 3835 * RST and will send a DISCON_IND to the application. 3836
3836 */ 3837 tcp_closei_local(tcp); 3838 if (!tcp->tcp_tconnind_started) { 3839 CONN_DEC_REF(tcp->tcp_connp); 3840 } else { 3841 tcp->tcp_state = TCPS_BOUND; 3842 } 3843 } else { 3844 tcp_close_detached(tcp); 3845 } 3846 return (0); 3847 } 3848 3849 TCP_STAT(tcps, tcp_clean_death_nondetached); 3850 3851 /* 3852 * If T_ORDREL_IND has not been sent yet (done when service routine 3853 * is run) postpone cleaning up the endpoint until service routine 3854 * has sent up the T_ORDREL_IND. Avoid clearing out an existing 3855 * client_errno since tcp_close uses the client_errno field. 3856 */ 3857 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 3858 if (err != 0) 3859 tcp->tcp_client_errno = err; 3860 3861 tcp->tcp_deferred_clean_death = B_TRUE; 3862 return (-1); 3863 } 3864 3865 q = tcp->tcp_rq; 3866 3867 /* Trash all inbound data */ 3868 flushq(q, FLUSHALL); 3869 3870 /* 3871 * If we are at least part way open and there is error 3872 * (err==0 implies no error) 3873 * notify our client by a T_DISCON_IND. 3874 */ 3875 if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) { 3876 if (tcp->tcp_state >= TCPS_ESTABLISHED && 3877 !TCP_IS_SOCKET(tcp)) { 3878 /* 3879 * Send M_FLUSH according to TPI. Because sockets will 3880 * (and must) ignore FLUSHR we do that only for TPI 3881 * endpoints and sockets in STREAMS mode. 3882 */ 3883 (void) putnextctl1(q, M_FLUSH, FLUSHR); 3884 } 3885 if (tcp->tcp_debug) { 3886 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR, 3887 "tcp_clean_death: discon err %d", err); 3888 } 3889 mp = mi_tpi_discon_ind(NULL, err, 0); 3890 if (mp != NULL) { 3891 putnext(q, mp); 3892 } else { 3893 if (tcp->tcp_debug) { 3894 (void) strlog(TCP_MOD_ID, 0, 1, 3895 SL_ERROR|SL_TRACE, 3896 "tcp_clean_death, sending M_ERROR"); 3897 } 3898 (void) putnextctl1(q, M_ERROR, EPROTO); 3899 } 3900 if (tcp->tcp_state <= TCPS_SYN_RCVD) { 3901 /* SYN_SENT or SYN_RCVD */ 3902 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 3903 } else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) { 3904 /* ESTABLISHED or CLOSE_WAIT */ 3905 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 3906 } 3907 } 3908 3909 tcp_reinit(tcp); 3910 return (-1); 3911 } 3912 3913 /* 3914 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout 3915 * to expire, stop the wait and finish the close. 3916 */ 3917 static void 3918 tcp_stop_lingering(tcp_t *tcp) 3919 { 3920 clock_t delta = 0; 3921 tcp_stack_t *tcps = tcp->tcp_tcps; 3922 3923 tcp->tcp_linger_tid = 0; 3924 if (tcp->tcp_state > TCPS_LISTEN) { 3925 tcp_acceptor_hash_remove(tcp); 3926 mutex_enter(&tcp->tcp_non_sq_lock); 3927 if (tcp->tcp_flow_stopped) { 3928 tcp_clrqfull(tcp); 3929 } 3930 mutex_exit(&tcp->tcp_non_sq_lock); 3931 3932 if (tcp->tcp_timer_tid != 0) { 3933 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 3934 tcp->tcp_timer_tid = 0; 3935 } 3936 /* 3937 * Need to cancel those timers which will not be used when 3938 * TCP is detached. This has to be done before the tcp_wq 3939 * is set to the global queue. 3940 */ 3941 tcp_timers_stop(tcp); 3942 3943 3944 tcp->tcp_detached = B_TRUE; 3945 ASSERT(tcps->tcps_g_q != NULL); 3946 tcp->tcp_rq = tcps->tcps_g_q; 3947 tcp->tcp_wq = WR(tcps->tcps_g_q); 3948 3949 if (tcp->tcp_state == TCPS_TIME_WAIT) { 3950 tcp_time_wait_append(tcp); 3951 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 3952 goto finish; 3953 } 3954 3955 /* 3956 * If delta is zero the timer event wasn't executed and was 3957 * successfully canceled. In this case we need to restart it 3958 * with the minimal delta possible. 
3959 */ 3960 if (delta >= 0) { 3961 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 3962 delta ? delta : 1); 3963 } 3964 } else { 3965 tcp_closei_local(tcp); 3966 CONN_DEC_REF(tcp->tcp_connp); 3967 } 3968 finish: 3969 /* Signal closing thread that it can complete close */ 3970 mutex_enter(&tcp->tcp_closelock); 3971 tcp->tcp_detached = B_TRUE; 3972 ASSERT(tcps->tcps_g_q != NULL); 3973 tcp->tcp_rq = tcps->tcps_g_q; 3974 tcp->tcp_wq = WR(tcps->tcps_g_q); 3975 tcp->tcp_closed = 1; 3976 cv_signal(&tcp->tcp_closecv); 3977 mutex_exit(&tcp->tcp_closelock); 3978 } 3979 3980 /* 3981 * Handle lingering timeouts. This function is called when the SO_LINGER timeout 3982 * expires. 3983 */ 3984 static void 3985 tcp_close_linger_timeout(void *arg) 3986 { 3987 conn_t *connp = (conn_t *)arg; 3988 tcp_t *tcp = connp->conn_tcp; 3989 3990 tcp->tcp_client_errno = ETIMEDOUT; 3991 tcp_stop_lingering(tcp); 3992 } 3993 3994 static int 3995 tcp_close(queue_t *q, int flags) 3996 { 3997 conn_t *connp = Q_TO_CONN(q); 3998 tcp_t *tcp = connp->conn_tcp; 3999 mblk_t *mp = &tcp->tcp_closemp; 4000 boolean_t conn_ioctl_cleanup_reqd = B_FALSE; 4001 boolean_t linger_interrupted = B_FALSE; 4002 mblk_t *bp; 4003 4004 ASSERT(WR(q)->q_next == NULL); 4005 ASSERT(connp->conn_ref >= 2); 4006 ASSERT((connp->conn_flags & IPCL_TCPMOD) == 0); 4007 4008 /* 4009 * We are being closed as /dev/tcp or /dev/tcp6. 4010 * 4011 * Mark the conn as closing. ill_pending_mp_add will not 4012 * add any mp to the pending mp list, after this conn has 4013 * started closing. Same for sq_pending_mp_add 4014 */ 4015 mutex_enter(&connp->conn_lock); 4016 connp->conn_state_flags |= CONN_CLOSING; 4017 if (connp->conn_oper_pending_ill != NULL) 4018 conn_ioctl_cleanup_reqd = B_TRUE; 4019 CONN_INC_REF_LOCKED(connp); 4020 mutex_exit(&connp->conn_lock); 4021 tcp->tcp_closeflags = (uint8_t)flags; 4022 ASSERT(connp->conn_ref >= 3); 4023 4024 /* 4025 * tcp_closemp_used is used below without any protection of a lock 4026 * as we don't expect any one else to use it concurrently at this 4027 * point otherwise it would be a major defect. 4028 */ 4029 4030 if (mp->b_prev == NULL) 4031 tcp->tcp_closemp_used = B_TRUE; 4032 else 4033 cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: " 4034 "connp %p tcp %p\n", (void *)connp, (void *)tcp); 4035 4036 TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15); 4037 4038 (*tcp_squeue_close_proc)(connp->conn_sqp, mp, 4039 tcp_close_output, connp, SQTAG_IP_TCP_CLOSE); 4040 4041 mutex_enter(&tcp->tcp_closelock); 4042 while (!tcp->tcp_closed) { 4043 if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) { 4044 /* 4045 * We got interrupted. Check if we are lingering, 4046 * if yes, post a message to stop and wait until 4047 * tcp_closed is set. If we aren't lingering, 4048 * just go back around. 4049 */ 4050 if (tcp->tcp_linger && 4051 tcp->tcp_lingertime > 0 && 4052 !linger_interrupted) { 4053 mutex_exit(&tcp->tcp_closelock); 4054 /* Entering squeue, bump ref count. */ 4055 CONN_INC_REF(connp); 4056 bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL); 4057 squeue_enter(connp->conn_sqp, bp, 4058 tcp_linger_interrupted, connp, 4059 SQTAG_IP_TCP_CLOSE); 4060 linger_interrupted = B_TRUE; 4061 mutex_enter(&tcp->tcp_closelock); 4062 } 4063 } 4064 } 4065 mutex_exit(&tcp->tcp_closelock); 4066 4067 /* 4068 * In the case of listener streams that have eagers in the q or q0 4069 * we wait for the eagers to drop their reference to us. tcp_rq and 4070 * tcp_wq of the eagers point to our queues. 
By waiting for the 4071 * refcnt to drop to 1, we are sure that the eagers have cleaned 4072 * up their queue pointers and also dropped their references to us. 4073 */ 4074 if (tcp->tcp_wait_for_eagers) { 4075 mutex_enter(&connp->conn_lock); 4076 while (connp->conn_ref != 1) { 4077 cv_wait(&connp->conn_cv, &connp->conn_lock); 4078 } 4079 mutex_exit(&connp->conn_lock); 4080 } 4081 /* 4082 * ioctl cleanup. The mp is queued in the 4083 * ill_pending_mp or in the sq_pending_mp. 4084 */ 4085 if (conn_ioctl_cleanup_reqd) 4086 conn_ioctl_cleanup(connp); 4087 4088 qprocsoff(q); 4089 inet_minor_free(ip_minor_arena, connp->conn_dev); 4090 4091 tcp->tcp_cpid = -1; 4092 4093 /* 4094 * Drop IP's reference on the conn. This is the last reference 4095 * on the connp if the state was less than established. If the 4096 * connection has gone into timewait state, then we will have 4097 * one ref for the TCP and one more ref (total of two) for the 4098 * classifier connected hash list (a timewait connection stays 4099 * in the connected hash till closed). 4100 * 4101 * We can't assert the references because there might be other 4102 * transient reference places because of some walkers or queued 4103 * packets in squeue for the timewait state. 4104 */ 4105 CONN_DEC_REF(connp); 4106 q->q_ptr = WR(q)->q_ptr = NULL; 4107 return (0); 4108 } 4109 4110 static int 4111 tcpclose_accept(queue_t *q) 4112 { 4113 ASSERT(WR(q)->q_qinfo == &tcp_acceptor_winit); 4114 4115 /* 4116 * We had opened an acceptor STREAM for sockfs which is 4117 * now being closed due to some error. 4118 */ 4119 qprocsoff(q); 4120 inet_minor_free(ip_minor_arena, (dev_t)q->q_ptr); 4121 q->q_ptr = WR(q)->q_ptr = NULL; 4122 return (0); 4123 } 4124 4125 /* 4126 * Called by tcp_close() routine via squeue when lingering is 4127 * interrupted by a signal. 4128 */ 4129 4130 /* ARGSUSED */ 4131 static void 4132 tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2) 4133 { 4134 conn_t *connp = (conn_t *)arg; 4135 tcp_t *tcp = connp->conn_tcp; 4136 4137 freeb(mp); 4138 if (tcp->tcp_linger_tid != 0 && 4139 TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) { 4140 tcp_stop_lingering(tcp); 4141 tcp->tcp_client_errno = EINTR; 4142 } 4143 } 4144 4145 /* 4146 * Called by the streams close routine via squeues when our client blows off 4147 * her descriptor; we take this to mean: "close the stream state NOW, close the 4148 * tcp connection politely." When SO_LINGER is set (with a non-zero linger time 4149 * and it is not a nonblocking socket), this routine sleeps until the FIN is 4150 * acked. 4151 * 4152 * NOTE: tcp_close potentially returns an error when lingering. 4153 * However, the stream head currently does not pass these errors 4154 * to the application. 4.4BSD only returns EINTR and EWOULDBLOCK 4155 * errors to the application (from tsleep()) and not errors 4156 * like ECONNRESET caused by receiving a reset packet. 4157
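 *
 * A hedged userland sketch of the lingering close described above
 * (hypothetical example): with a non-zero l_linger the close()
 * sleeps until the FIN is acked or the timer fires, while
 * l_linger == 0 aborts the connection with a RST instead:
 *
 *	struct linger lg;
 *	lg.l_onoff = 1;
 *	lg.l_linger = 10;	seconds to wait for the FIN ack
 *	(void) setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg,
 *	    sizeof (lg));
 *	(void) close(fd);	may sleep up to 10 seconds here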
4157 */ 4158 4159 /* ARGSUSED */ 4160 static void 4161 tcp_close_output(void *arg, mblk_t *mp, void *arg2) 4162 { 4163 char *msg; 4164 conn_t *connp = (conn_t *)arg; 4165 tcp_t *tcp = connp->conn_tcp; 4166 clock_t delta = 0; 4167 tcp_stack_t *tcps = tcp->tcp_tcps; 4168 4169 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 4170 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 4171 4172 /* Cancel any pending timeout */ 4173 if (tcp->tcp_ordrelid != 0) { 4174 if (tcp->tcp_timeout) { 4175 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ordrelid); 4176 } 4177 tcp->tcp_ordrelid = 0; 4178 tcp->tcp_timeout = B_FALSE; 4179 } 4180 4181 mutex_enter(&tcp->tcp_eager_lock); 4182 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) { 4183 /* Cleanup for listener */ 4184 tcp_eager_cleanup(tcp, 0); 4185 tcp->tcp_wait_for_eagers = 1; 4186 } 4187 mutex_exit(&tcp->tcp_eager_lock); 4188 4189 connp->conn_mdt_ok = B_FALSE; 4190 tcp->tcp_mdt = B_FALSE; 4191 4192 connp->conn_lso_ok = B_FALSE; 4193 tcp->tcp_lso = B_FALSE; 4194 4195 msg = NULL; 4196 switch (tcp->tcp_state) { 4197 case TCPS_CLOSED: 4198 case TCPS_IDLE: 4199 case TCPS_BOUND: 4200 case TCPS_LISTEN: 4201 break; 4202 case TCPS_SYN_SENT: 4203 msg = "tcp_close, during connect"; 4204 break; 4205 case TCPS_SYN_RCVD: 4206 /* 4207 * Close during the connect 3-way handshake 4208 * but here there may or may not be pending data 4209 * already on queue. Process almost same as in 4210 * the ESTABLISHED state. 4211 */ 4212 /* FALLTHRU */ 4213 default: 4214 if (tcp->tcp_fused) 4215 tcp_unfuse(tcp); 4216 4217 /* 4218 * If SO_LINGER has set a zero linger time, abort the 4219 * connection with a reset. 4220 */ 4221 if (tcp->tcp_linger && tcp->tcp_lingertime == 0) { 4222 msg = "tcp_close, zero lingertime"; 4223 break; 4224 } 4225 4226 ASSERT(tcp->tcp_hard_bound || tcp->tcp_hard_binding); 4227 /* 4228 * Abort connection if there is unread data queued. 4229 */ 4230 if (tcp->tcp_rcv_list || tcp->tcp_reass_head) { 4231 msg = "tcp_close, unread data"; 4232 break; 4233 } 4234 /* 4235 * tcp_hard_bound is now cleared thus all packets go through 4236 * tcp_lookup. This fact is used by tcp_detach below. 4237 * 4238 * We have done a qwait() above which could have possibly 4239 * drained more messages in turn causing transition to a 4240 * different state. Check whether we have to do the rest 4241 * of the processing or not. 4242 */ 4243 if (tcp->tcp_state <= TCPS_LISTEN) 4244 break; 4245 4246 /* 4247 * Transmit the FIN before detaching the tcp_t. 4248 * After tcp_detach returns this queue/perimeter 4249 * no longer owns the tcp_t thus others can modify it. 4250 */ 4251 (void) tcp_xmit_end(tcp); 4252 4253 /* 4254 * If lingering on close then wait until the fin is acked, 4255 * the SO_LINGER time passes, or a reset is sent/received. 4256 */ 4257 if (tcp->tcp_linger && tcp->tcp_lingertime > 0 && 4258 !(tcp->tcp_fin_acked) && 4259 tcp->tcp_state >= TCPS_ESTABLISHED) { 4260 if (tcp->tcp_closeflags & (FNDELAY|FNONBLOCK)) { 4261 tcp->tcp_client_errno = EWOULDBLOCK; 4262 } else if (tcp->tcp_client_errno == 0) { 4263 4264 ASSERT(tcp->tcp_linger_tid == 0); 4265 4266 tcp->tcp_linger_tid = TCP_TIMER(tcp, 4267 tcp_close_linger_timeout, 4268 tcp->tcp_lingertime * hz); 4269 4270 /* tcp_close_linger_timeout will finish close */ 4271 if (tcp->tcp_linger_tid == 0) 4272 tcp->tcp_client_errno = ENOSR; 4273 else 4274 return; 4275 } 4276 4277 /* 4278 * Check if we need to detach or just close 4279 * the instance. 
4280 */ 4281 if (tcp->tcp_state <= TCPS_LISTEN) 4282 break; 4283 } 4284 4285 /* 4286 * Make sure that no other thread will access the tcp_rq of 4287 * this instance (through lookups etc.) as tcp_rq will go 4288 * away shortly. 4289 */ 4290 tcp_acceptor_hash_remove(tcp); 4291 4292 mutex_enter(&tcp->tcp_non_sq_lock); 4293 if (tcp->tcp_flow_stopped) { 4294 tcp_clrqfull(tcp); 4295 } 4296 mutex_exit(&tcp->tcp_non_sq_lock); 4297 4298 if (tcp->tcp_timer_tid != 0) { 4299 delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4300 tcp->tcp_timer_tid = 0; 4301 } 4302 /* 4303 * Need to cancel those timers which will not be used when 4304 * TCP is detached. This has to be done before the tcp_wq 4305 * is set to the global queue. 4306 */ 4307 tcp_timers_stop(tcp); 4308 4309 tcp->tcp_detached = B_TRUE; 4310 if (tcp->tcp_state == TCPS_TIME_WAIT) { 4311 tcp_time_wait_append(tcp); 4312 TCP_DBGSTAT(tcps, tcp_detach_time_wait); 4313 ASSERT(connp->conn_ref >= 3); 4314 goto finish; 4315 } 4316 4317 /* 4318 * If delta is zero the timer event wasn't executed and was 4319 * successfully canceled. In this case we need to restart it 4320 * with the minimal delta possible. 4321 */ 4322 if (delta >= 0) 4323 tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer, 4324 delta ? delta : 1); 4325 4326 ASSERT(connp->conn_ref >= 3); 4327 goto finish; 4328 } 4329 4330 /* Detach did not complete. Still need to remove q from stream. */ 4331 if (msg) { 4332 if (tcp->tcp_state == TCPS_ESTABLISHED || 4333 tcp->tcp_state == TCPS_CLOSE_WAIT) 4334 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 4335 if (tcp->tcp_state == TCPS_SYN_SENT || 4336 tcp->tcp_state == TCPS_SYN_RCVD) 4337 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 4338 tcp_xmit_ctl(msg, tcp, tcp->tcp_snxt, 0, TH_RST); 4339 } 4340 4341 tcp_closei_local(tcp); 4342 CONN_DEC_REF(connp); 4343 ASSERT(connp->conn_ref >= 2); 4344 4345 finish: 4346 /* 4347 * Although packets are always processed on the correct 4348 * tcp's perimeter and access is serialized via squeue's, 4349 * IP still needs a queue when sending packets in time_wait 4350 * state so use WR(tcps_g_q) till ip_output() can be 4351 * changed to deal with just connp. For read side, we 4352 * could have set tcp_rq to NULL but there are some cases 4353 * in tcp_rput_data() from early days of this code which 4354 * do a putnext without checking if tcp is closed. Those 4355 * need to be identified before both tcp_rq and tcp_wq 4356 * can be set to NULL and tcps_g_q can disappear forever. 4357 */ 4358 mutex_enter(&tcp->tcp_closelock); 4359 /* 4360 * Don't change the queues in the case of a listener that has 4361 * eagers in its q or q0. It could surprise the eagers. 4362 * Instead wait for the eagers outside the squeue. 4363 */ 4364 if (!tcp->tcp_wait_for_eagers) { 4365 tcp->tcp_detached = B_TRUE; 4366 /* 4367 * When default queue is closing we set tcps_g_q to NULL 4368 * after the close is done. 4369 */ 4370 ASSERT(tcps->tcps_g_q != NULL); 4371 tcp->tcp_rq = tcps->tcps_g_q; 4372 tcp->tcp_wq = WR(tcps->tcps_g_q); 4373 } 4374 4375 /* Signal tcp_close() to finish closing. */ 4376 tcp->tcp_closed = 1; 4377 cv_signal(&tcp->tcp_closecv); 4378 mutex_exit(&tcp->tcp_closelock); 4379 } 4380 4381 4382 /* 4383 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp. 4384 * Some stream heads get upset if they see these later on as anything but NULL. 
4385 */ 4386 static void 4387 tcp_close_mpp(mblk_t **mpp) 4388 { 4389 mblk_t *mp; 4390 4391 if ((mp = *mpp) != NULL) { 4392 do { 4393 mp->b_next = NULL; 4394 mp->b_prev = NULL; 4395 } while ((mp = mp->b_cont) != NULL); 4396 4397 mp = *mpp; 4398 *mpp = NULL; 4399 freemsg(mp); 4400 } 4401 } 4402 4403 /* Do detached close. */ 4404 static void 4405 tcp_close_detached(tcp_t *tcp) 4406 { 4407 if (tcp->tcp_fused) 4408 tcp_unfuse(tcp); 4409 4410 /* 4411 * Clustering code serializes TCP disconnect callbacks and 4412 * cluster tcp list walks by blocking a TCP disconnect callback 4413 * if a cluster tcp list walk is in progress. This ensures 4414 * accurate accounting of TCPs in the cluster code even though 4415 * the TCP list walk itself is not atomic. 4416 */ 4417 tcp_closei_local(tcp); 4418 CONN_DEC_REF(tcp->tcp_connp); 4419 } 4420 4421 /* 4422 * Stop all TCP timers, and free the timer mblks if requested. 4423 */ 4424 void 4425 tcp_timers_stop(tcp_t *tcp) 4426 { 4427 if (tcp->tcp_timer_tid != 0) { 4428 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid); 4429 tcp->tcp_timer_tid = 0; 4430 } 4431 if (tcp->tcp_ka_tid != 0) { 4432 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ka_tid); 4433 tcp->tcp_ka_tid = 0; 4434 } 4435 if (tcp->tcp_ack_tid != 0) { 4436 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 4437 tcp->tcp_ack_tid = 0; 4438 } 4439 if (tcp->tcp_push_tid != 0) { 4440 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid); 4441 tcp->tcp_push_tid = 0; 4442 } 4443 } 4444 4445 /* 4446 * The tcp_t is going away. Remove it from all lists and set it 4447 * to TCPS_CLOSED. The freeing up of memory is deferred until 4448 * tcp_inactive. This is needed since a thread in tcp_rput might have 4449 * done a CONN_INC_REF on this structure before it was removed from the 4450 * hashes. 4451 */ 4452 static void 4453 tcp_closei_local(tcp_t *tcp) 4454 { 4455 ire_t *ire; 4456 conn_t *connp = tcp->tcp_connp; 4457 tcp_stack_t *tcps = tcp->tcp_tcps; 4458 4459 if (!TCP_IS_SOCKET(tcp)) 4460 tcp_acceptor_hash_remove(tcp); 4461 4462 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 4463 tcp->tcp_ibsegs = 0; 4464 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 4465 tcp->tcp_obsegs = 0; 4466 4467 /* 4468 * If we are an eager connection hanging off a listener that 4469 * hasn't formally accepted the connection yet, get off his 4470 * list and blow off any data that we have accumulated. 4471 */ 4472 if (tcp->tcp_listener != NULL) { 4473 tcp_t *listener = tcp->tcp_listener; 4474 mutex_enter(&listener->tcp_eager_lock); 4475 /* 4476 * tcp_tconnind_started == B_TRUE means that the 4477 * conn_ind has already gone to listener. At 4478 * this point, eager will be closed but we 4479 * leave it in listeners eager list so that 4480 * if listener decides to close without doing 4481 * accept, we can clean this up. In tcp_wput_accept 4482 * we take care of the case of accept on closed 4483 * eager. 
4484 */ 4485 if (!tcp->tcp_tconnind_started) { 4486 tcp_eager_unlink(tcp); 4487 mutex_exit(&listener->tcp_eager_lock); 4488 /* 4489 * We don't want to have any pointers to the 4490 * listener queue, after we have released our 4491 * reference on the listener 4492 */ 4493 ASSERT(tcps->tcps_g_q != NULL); 4494 tcp->tcp_rq = tcps->tcps_g_q; 4495 tcp->tcp_wq = WR(tcps->tcps_g_q); 4496 CONN_DEC_REF(listener->tcp_connp); 4497 } else { 4498 mutex_exit(&listener->tcp_eager_lock); 4499 } 4500 } 4501 4502 /* Stop all the timers */ 4503 tcp_timers_stop(tcp); 4504 4505 if (tcp->tcp_state == TCPS_LISTEN) { 4506 if (tcp->tcp_ip_addr_cache) { 4507 kmem_free((void *)tcp->tcp_ip_addr_cache, 4508 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 4509 tcp->tcp_ip_addr_cache = NULL; 4510 } 4511 } 4512 mutex_enter(&tcp->tcp_non_sq_lock); 4513 if (tcp->tcp_flow_stopped) 4514 tcp_clrqfull(tcp); 4515 mutex_exit(&tcp->tcp_non_sq_lock); 4516 4517 tcp_bind_hash_remove(tcp); 4518 /* 4519 * If the tcp_time_wait_collector (which runs outside the squeue) 4520 * is trying to remove this tcp from the time wait list, we will 4521 * block in tcp_time_wait_remove while trying to acquire the 4522 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also 4523 * requires the ipcl_hash_remove to be ordered after the 4524 * tcp_time_wait_remove for the refcnt checks to work correctly. 4525 */ 4526 if (tcp->tcp_state == TCPS_TIME_WAIT) 4527 (void) tcp_time_wait_remove(tcp, NULL); 4528 CL_INET_DISCONNECT(tcp); 4529 ipcl_hash_remove(connp); 4530 4531 /* 4532 * Delete the cached ire in conn_ire_cache and also mark 4533 * the conn as CONDEMNED 4534 */ 4535 mutex_enter(&connp->conn_lock); 4536 connp->conn_state_flags |= CONN_CONDEMNED; 4537 ire = connp->conn_ire_cache; 4538 connp->conn_ire_cache = NULL; 4539 mutex_exit(&connp->conn_lock); 4540 if (ire != NULL) 4541 IRE_REFRELE_NOTR(ire); 4542 4543 /* Need to cleanup any pending ioctls */ 4544 ASSERT(tcp->tcp_time_wait_next == NULL); 4545 ASSERT(tcp->tcp_time_wait_prev == NULL); 4546 ASSERT(tcp->tcp_time_wait_expire == 0); 4547 tcp->tcp_state = TCPS_CLOSED; 4548 4549 /* Release any SSL context */ 4550 if (tcp->tcp_kssl_ent != NULL) { 4551 kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY); 4552 tcp->tcp_kssl_ent = NULL; 4553 } 4554 if (tcp->tcp_kssl_ctx != NULL) { 4555 kssl_release_ctx(tcp->tcp_kssl_ctx); 4556 tcp->tcp_kssl_ctx = NULL; 4557 } 4558 tcp->tcp_kssl_pending = B_FALSE; 4559 4560 tcp_ipsec_cleanup(tcp); 4561 } 4562 4563 /* 4564 * tcp is dying (called from ipcl_conn_destroy and error cases). 4565 * Free the tcp_t in either case. 
4566 */ 4567 void 4568 tcp_free(tcp_t *tcp) 4569 { 4570 mblk_t *mp; 4571 ip6_pkt_t *ipp; 4572 4573 ASSERT(tcp != NULL); 4574 ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL); 4575 4576 tcp->tcp_rq = NULL; 4577 tcp->tcp_wq = NULL; 4578 4579 tcp_close_mpp(&tcp->tcp_xmit_head); 4580 tcp_close_mpp(&tcp->tcp_reass_head); 4581 if (tcp->tcp_rcv_list != NULL) { 4582 /* Free b_next chain */ 4583 tcp_close_mpp(&tcp->tcp_rcv_list); 4584 } 4585 if ((mp = tcp->tcp_urp_mp) != NULL) { 4586 freemsg(mp); 4587 } 4588 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 4589 freemsg(mp); 4590 } 4591 4592 if (tcp->tcp_fused_sigurg_mp != NULL) { 4593 freeb(tcp->tcp_fused_sigurg_mp); 4594 tcp->tcp_fused_sigurg_mp = NULL; 4595 } 4596 4597 if (tcp->tcp_sack_info != NULL) { 4598 if (tcp->tcp_notsack_list != NULL) { 4599 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 4600 } 4601 bzero(tcp->tcp_sack_info, sizeof (tcp_sack_info_t)); 4602 } 4603 4604 if (tcp->tcp_hopopts != NULL) { 4605 mi_free(tcp->tcp_hopopts); 4606 tcp->tcp_hopopts = NULL; 4607 tcp->tcp_hopoptslen = 0; 4608 } 4609 ASSERT(tcp->tcp_hopoptslen == 0); 4610 if (tcp->tcp_dstopts != NULL) { 4611 mi_free(tcp->tcp_dstopts); 4612 tcp->tcp_dstopts = NULL; 4613 tcp->tcp_dstoptslen = 0; 4614 } 4615 ASSERT(tcp->tcp_dstoptslen == 0); 4616 if (tcp->tcp_rtdstopts != NULL) { 4617 mi_free(tcp->tcp_rtdstopts); 4618 tcp->tcp_rtdstopts = NULL; 4619 tcp->tcp_rtdstoptslen = 0; 4620 } 4621 ASSERT(tcp->tcp_rtdstoptslen == 0); 4622 if (tcp->tcp_rthdr != NULL) { 4623 mi_free(tcp->tcp_rthdr); 4624 tcp->tcp_rthdr = NULL; 4625 tcp->tcp_rthdrlen = 0; 4626 } 4627 ASSERT(tcp->tcp_rthdrlen == 0); 4628 4629 ipp = &tcp->tcp_sticky_ipp; 4630 if (ipp->ipp_fields & (IPPF_HOPOPTS | IPPF_RTDSTOPTS | IPPF_DSTOPTS | 4631 IPPF_RTHDR)) 4632 ip6_pkt_free(ipp); 4633 4634 /* 4635 * Free memory associated with the tcp/ip header template. 4636 */ 4637 4638 if (tcp->tcp_iphc != NULL) 4639 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4640 4641 /* 4642 * Following is really a blowing away a union. 4643 * It happens to have exactly two members of identical size 4644 * the following code is enough. 4645 */ 4646 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 4647 4648 if (tcp->tcp_tracebuf != NULL) { 4649 kmem_free(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 4650 tcp->tcp_tracebuf = NULL; 4651 } 4652 } 4653 4654 4655 /* 4656 * Put a connection confirmation message upstream built from the 4657 * address information within 'iph' and 'tcph'. Report our success or failure. 4658 */ 4659 static boolean_t 4660 tcp_conn_con(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph, mblk_t *idmp, 4661 mblk_t **defermp) 4662 { 4663 sin_t sin; 4664 sin6_t sin6; 4665 mblk_t *mp; 4666 char *optp = NULL; 4667 int optlen = 0; 4668 cred_t *cr; 4669 4670 if (defermp != NULL) 4671 *defermp = NULL; 4672 4673 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) { 4674 /* 4675 * Return in T_CONN_CON results of option negotiation through 4676 * the T_CONN_REQ. Note: If there is an real end-to-end option 4677 * negotiation, then what is received from remote end needs 4678 * to be taken into account but there is no such thing (yet?) 4679 * in our TCP/IP. 4680 * Note: We do not use mi_offset_param() here as 4681 * tcp_opts_conn_req contents do not directly come from 4682 * an application and are either generated in kernel or 4683 * from user input that was already verified. 
4684 */ 4685 mp = tcp->tcp_conn.tcp_opts_conn_req; 4686 optp = (char *)(mp->b_rptr + 4687 ((struct T_conn_req *)mp->b_rptr)->OPT_offset); 4688 optlen = (int) 4689 ((struct T_conn_req *)mp->b_rptr)->OPT_length; 4690 } 4691 4692 if (IPH_HDR_VERSION(iphdr) == IPV4_VERSION) { 4693 ipha_t *ipha = (ipha_t *)iphdr; 4694 4695 /* packet is IPv4 */ 4696 if (tcp->tcp_family == AF_INET) { 4697 sin = sin_null; 4698 sin.sin_addr.s_addr = ipha->ipha_src; 4699 sin.sin_port = *(uint16_t *)tcph->th_lport; 4700 sin.sin_family = AF_INET; 4701 mp = mi_tpi_conn_con(NULL, (char *)&sin, 4702 (int)sizeof (sin_t), optp, optlen); 4703 } else { 4704 sin6 = sin6_null; 4705 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4706 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4707 sin6.sin6_family = AF_INET6; 4708 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4709 (int)sizeof (sin6_t), optp, optlen); 4710 4711 } 4712 } else { 4713 ip6_t *ip6h = (ip6_t *)iphdr; 4714 4715 ASSERT(IPH_HDR_VERSION(iphdr) == IPV6_VERSION); 4716 ASSERT(tcp->tcp_family == AF_INET6); 4717 sin6 = sin6_null; 4718 sin6.sin6_addr = ip6h->ip6_src; 4719 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4720 sin6.sin6_family = AF_INET6; 4721 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4722 mp = mi_tpi_conn_con(NULL, (char *)&sin6, 4723 (int)sizeof (sin6_t), optp, optlen); 4724 } 4725 4726 if (!mp) 4727 return (B_FALSE); 4728 4729 if ((cr = DB_CRED(idmp)) != NULL) { 4730 mblk_setcred(mp, cr); 4731 DB_CPID(mp) = DB_CPID(idmp); 4732 } 4733 4734 if (defermp == NULL) 4735 putnext(tcp->tcp_rq, mp); 4736 else 4737 *defermp = mp; 4738 4739 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 4740 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 4741 return (B_TRUE); 4742 } 4743 4744 /* 4745 * Defense for the SYN attack - 4746 * 1. When q0 is full, drop from the tail (tcp_eager_prev_drop_q0) the oldest 4747 * one from the list of droppable eagers. This list is a subset of q0. 4748 * see comments before the definition of MAKE_DROPPABLE(). 4749 * 2. Don't drop a SYN request before its first timeout. This gives every 4750 * request at least til the first timeout to complete its 3-way handshake. 4751 * 3. Maintain tcp_syn_rcvd_timeout as an accurate count of how many 4752 * requests currently on the queue that has timed out. This will be used 4753 * as an indicator of whether an attack is under way, so that appropriate 4754 * actions can be taken. (It's incremented in tcp_timer() and decremented 4755 * either when eager goes into ESTABLISHED, or gets freed up.) 4756 * 4. The current threshold is - # of timeout > q0len/4 => SYN alert on 4757 * # of timeout drops back to <= q0len/32 => SYN alert off 4758 */ 4759 static boolean_t 4760 tcp_drop_q0(tcp_t *tcp) 4761 { 4762 tcp_t *eager; 4763 mblk_t *mp; 4764 tcp_stack_t *tcps = tcp->tcp_tcps; 4765 4766 ASSERT(MUTEX_HELD(&tcp->tcp_eager_lock)); 4767 ASSERT(tcp->tcp_eager_next_q0 != tcp->tcp_eager_prev_q0); 4768 4769 /* Pick oldest eager from the list of droppable eagers */ 4770 eager = tcp->tcp_eager_prev_drop_q0; 4771 4772 /* If list is empty. return B_FALSE */ 4773 if (eager == tcp) { 4774 return (B_FALSE); 4775 } 4776 4777 /* If allocated, the mp will be freed in tcp_clean_death_wrapper() */ 4778 if ((mp = allocb(0, BPRI_HI)) == NULL) 4779 return (B_FALSE); 4780 4781 /* 4782 * Take this eager out from the list of droppable eagers since we are 4783 * going to drop it. 
4784 */ 4785 MAKE_UNDROPPABLE(eager); 4786 4787 if (tcp->tcp_debug) { 4788 (void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE, 4789 "tcp_drop_q0: listen half-open queue (max=%d) overflow" 4790 " (%d pending) on %s, drop one", tcps->tcps_conn_req_max_q0, 4791 tcp->tcp_conn_req_cnt_q0, 4792 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 4793 } 4794 4795 BUMP_MIB(&tcps->tcps_mib, tcpHalfOpenDrop); 4796 4797 /* Put a reference on the conn as we are enqueueing it in the sqeue */ 4798 CONN_INC_REF(eager->tcp_connp); 4799 4800 /* Mark the IRE created for this SYN request temporary */ 4801 tcp_ip_ire_mark_advice(eager); 4802 squeue_fill(eager->tcp_connp->conn_sqp, mp, 4803 tcp_clean_death_wrapper, eager->tcp_connp, SQTAG_TCP_DROP_Q0); 4804 4805 return (B_TRUE); 4806 } 4807 4808 int 4809 tcp_conn_create_v6(conn_t *lconnp, conn_t *connp, mblk_t *mp, 4810 tcph_t *tcph, uint_t ipvers, mblk_t *idmp) 4811 { 4812 tcp_t *ltcp = lconnp->conn_tcp; 4813 tcp_t *tcp = connp->conn_tcp; 4814 mblk_t *tpi_mp; 4815 ipha_t *ipha; 4816 ip6_t *ip6h; 4817 sin6_t sin6; 4818 in6_addr_t v6dst; 4819 int err; 4820 int ifindex = 0; 4821 cred_t *cr; 4822 tcp_stack_t *tcps = tcp->tcp_tcps; 4823 4824 if (ipvers == IPV4_VERSION) { 4825 ipha = (ipha_t *)mp->b_rptr; 4826 4827 connp->conn_send = ip_output; 4828 connp->conn_recv = tcp_input; 4829 4830 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6); 4831 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6); 4832 4833 sin6 = sin6_null; 4834 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &sin6.sin6_addr); 4835 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &v6dst); 4836 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4837 sin6.sin6_family = AF_INET6; 4838 sin6.__sin6_src_id = ip_srcid_find_addr(&v6dst, 4839 lconnp->conn_zoneid, tcps->tcps_netstack); 4840 if (tcp->tcp_recvdstaddr) { 4841 sin6_t sin6d; 4842 4843 sin6d = sin6_null; 4844 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, 4845 &sin6d.sin6_addr); 4846 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4847 sin6d.sin6_family = AF_INET; 4848 tpi_mp = mi_tpi_extconn_ind(NULL, 4849 (char *)&sin6d, sizeof (sin6_t), 4850 (char *)&tcp, 4851 (t_scalar_t)sizeof (intptr_t), 4852 (char *)&sin6d, sizeof (sin6_t), 4853 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4854 } else { 4855 tpi_mp = mi_tpi_conn_ind(NULL, 4856 (char *)&sin6, sizeof (sin6_t), 4857 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4858 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4859 } 4860 } else { 4861 ip6h = (ip6_t *)mp->b_rptr; 4862 4863 connp->conn_send = ip_output_v6; 4864 connp->conn_recv = tcp_input; 4865 4866 connp->conn_srcv6 = ip6h->ip6_dst; 4867 connp->conn_remv6 = ip6h->ip6_src; 4868 4869 /* db_cksumstuff is set at ip_fanout_tcp_v6 */ 4870 ifindex = (int)DB_CKSUMSTUFF(mp); 4871 DB_CKSUMSTUFF(mp) = 0; 4872 4873 sin6 = sin6_null; 4874 sin6.sin6_addr = ip6h->ip6_src; 4875 sin6.sin6_port = *(uint16_t *)tcph->th_lport; 4876 sin6.sin6_family = AF_INET6; 4877 sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK; 4878 sin6.__sin6_src_id = ip_srcid_find_addr(&ip6h->ip6_dst, 4879 lconnp->conn_zoneid, tcps->tcps_netstack); 4880 4881 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4882 /* Pass up the scope_id of remote addr */ 4883 sin6.sin6_scope_id = ifindex; 4884 } else { 4885 sin6.sin6_scope_id = 0; 4886 } 4887 if (tcp->tcp_recvdstaddr) { 4888 sin6_t sin6d; 4889 4890 sin6d = sin6_null; 4891 sin6.sin6_addr = ip6h->ip6_dst; 4892 sin6d.sin6_port = *(uint16_t *)tcph->th_fport; 4893 sin6d.sin6_family = AF_INET; 4894 tpi_mp = mi_tpi_extconn_ind(NULL, 4895 (char *)&sin6d, sizeof (sin6_t), 4896 (char *)&tcp, 
(t_scalar_t)sizeof (intptr_t), 4897 (char *)&sin6d, sizeof (sin6_t), 4898 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4899 } else { 4900 tpi_mp = mi_tpi_conn_ind(NULL, 4901 (char *)&sin6, sizeof (sin6_t), 4902 (char *)&tcp, (t_scalar_t)sizeof (intptr_t), 4903 (t_scalar_t)ltcp->tcp_conn_req_seqnum); 4904 } 4905 } 4906 4907 if (tpi_mp == NULL) 4908 return (ENOMEM); 4909 4910 connp->conn_fport = *(uint16_t *)tcph->th_lport; 4911 connp->conn_lport = *(uint16_t *)tcph->th_fport; 4912 connp->conn_flags |= (IPCL_TCP6|IPCL_EAGER); 4913 connp->conn_fully_bound = B_FALSE; 4914 4915 if (tcps->tcps_trace) 4916 tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP); 4917 4918 /* Inherit information from the "parent" */ 4919 tcp->tcp_ipversion = ltcp->tcp_ipversion; 4920 tcp->tcp_family = ltcp->tcp_family; 4921 tcp->tcp_wq = ltcp->tcp_wq; 4922 tcp->tcp_rq = ltcp->tcp_rq; 4923 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 4924 tcp->tcp_detached = B_TRUE; 4925 if ((err = tcp_init_values(tcp)) != 0) { 4926 freemsg(tpi_mp); 4927 return (err); 4928 } 4929 4930 if (ipvers == IPV4_VERSION) { 4931 if ((err = tcp_header_init_ipv4(tcp)) != 0) { 4932 freemsg(tpi_mp); 4933 return (err); 4934 } 4935 ASSERT(tcp->tcp_ipha != NULL); 4936 } else { 4937 /* ifindex must be already set */ 4938 ASSERT(ifindex != 0); 4939 4940 if (ltcp->tcp_bound_if != 0) { 4941 /* 4942 * Set newtcp's bound_if equal to 4943 * listener's value. If ifindex is 4944 * not the same as ltcp->tcp_bound_if, 4945 * it must be a packet for the ipmp group 4946 * of interfaces 4947 */ 4948 tcp->tcp_bound_if = ltcp->tcp_bound_if; 4949 } else if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) { 4950 tcp->tcp_bound_if = ifindex; 4951 } 4952 4953 tcp->tcp_ipv6_recvancillary = ltcp->tcp_ipv6_recvancillary; 4954 tcp->tcp_recvifindex = 0; 4955 tcp->tcp_recvhops = 0xffffffffU; 4956 ASSERT(tcp->tcp_ip6h != NULL); 4957 } 4958 4959 tcp->tcp_lport = ltcp->tcp_lport; 4960 4961 if (ltcp->tcp_ipversion == tcp->tcp_ipversion) { 4962 if (tcp->tcp_iphc_len != ltcp->tcp_iphc_len) { 4963 /* 4964 * Listener had options of some sort; eager inherits. 4965 * Free up the eager template and allocate one 4966 * of the right size. 
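 *
 * (tcp_iphc is the combined IP+TCP header template; tcp_hdr_grown
 * records whether it came from kmem_zalloc() rather than the
 * fixed-size tcp_iphc_cache, which is what decides how the old
 * template is freed just below.)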
4967 */ 4968 if (tcp->tcp_hdr_grown) { 4969 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 4970 } else { 4971 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 4972 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 4973 } 4974 tcp->tcp_iphc = kmem_zalloc(ltcp->tcp_iphc_len, 4975 KM_NOSLEEP); 4976 if (tcp->tcp_iphc == NULL) { 4977 tcp->tcp_iphc_len = 0; 4978 freemsg(tpi_mp); 4979 return (ENOMEM); 4980 } 4981 tcp->tcp_iphc_len = ltcp->tcp_iphc_len; 4982 tcp->tcp_hdr_grown = B_TRUE; 4983 } 4984 tcp->tcp_hdr_len = ltcp->tcp_hdr_len; 4985 tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len; 4986 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 4987 tcp->tcp_ip6_hops = ltcp->tcp_ip6_hops; 4988 tcp->tcp_ip6_vcf = ltcp->tcp_ip6_vcf; 4989 4990 /* 4991 * Copy the IP+TCP header template from listener to eager 4992 */ 4993 bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len); 4994 if (tcp->tcp_ipversion == IPV6_VERSION) { 4995 if (((ip6i_t *)(tcp->tcp_iphc))->ip6i_nxt == 4996 IPPROTO_RAW) { 4997 tcp->tcp_ip6h = 4998 (ip6_t *)(tcp->tcp_iphc + 4999 sizeof (ip6i_t)); 5000 } else { 5001 tcp->tcp_ip6h = 5002 (ip6_t *)(tcp->tcp_iphc); 5003 } 5004 tcp->tcp_ipha = NULL; 5005 } else { 5006 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 5007 tcp->tcp_ip6h = NULL; 5008 } 5009 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + 5010 tcp->tcp_ip_hdr_len); 5011 } else { 5012 /* 5013 * only valid case when ipversion of listener and 5014 * eager differ is when listener is IPv6 and 5015 * eager is IPv4. 5016 * Eager header template has been initialized to the 5017 * maximum v4 header sizes, which includes space for 5018 * TCP and IP options. 5019 */ 5020 ASSERT((ltcp->tcp_ipversion == IPV6_VERSION) && 5021 (tcp->tcp_ipversion == IPV4_VERSION)); 5022 ASSERT(tcp->tcp_iphc_len >= 5023 TCP_MAX_COMBINED_HEADER_LENGTH); 5024 tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len; 5025 /* copy IP header fields individually */ 5026 tcp->tcp_ipha->ipha_ttl = 5027 ltcp->tcp_ip6h->ip6_hops; 5028 bcopy(ltcp->tcp_tcph->th_lport, 5029 tcp->tcp_tcph->th_lport, sizeof (ushort_t)); 5030 } 5031 5032 bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t)); 5033 bcopy(tcp->tcp_tcph->th_fport, &tcp->tcp_fport, 5034 sizeof (in_port_t)); 5035 5036 if (ltcp->tcp_lport == 0) { 5037 tcp->tcp_lport = *(in_port_t *)tcph->th_fport; 5038 bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, 5039 sizeof (in_port_t)); 5040 } 5041 5042 if (tcp->tcp_ipversion == IPV4_VERSION) { 5043 ASSERT(ipha != NULL); 5044 tcp->tcp_ipha->ipha_dst = ipha->ipha_src; 5045 tcp->tcp_ipha->ipha_src = ipha->ipha_dst; 5046 5047 /* Source routing option copyover (reverse it) */ 5048 if (tcps->tcps_rev_src_routes) 5049 tcp_opt_reverse(tcp, ipha); 5050 } else { 5051 ASSERT(ip6h != NULL); 5052 tcp->tcp_ip6h->ip6_dst = ip6h->ip6_src; 5053 tcp->tcp_ip6h->ip6_src = ip6h->ip6_dst; 5054 } 5055 5056 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 5057 ASSERT(!tcp->tcp_tconnind_started); 5058 /* 5059 * If the SYN contains a credential, it's a loopback packet; attach 5060 * the credential to the TPI message. 
	 */
	if ((cr = DB_CRED(idmp)) != NULL) {
		mblk_setcred(tpi_mp, cr);
		DB_CPID(tpi_mp) = DB_CPID(idmp);
	}
	tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp;

	/* Inherit the listener's SSL protection state */
	if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) {
		kssl_hold_ent(tcp->tcp_kssl_ent);
		tcp->tcp_kssl_pending = B_TRUE;
	}

	return (0);
}
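
/*
 * Illustrative sketch (example only; not part of the original source):
 * the conn-indication builders above pass the eager's address as an
 * opaque, sizeof (intptr_t)-byte blob inside the T_CONN_IND, and the
 * acceptor later recovers the pointer verbatim. Conceptually the round
 * trip is just a byte copy in and out of the message:
 */
static void *
eager_handle_roundtrip_sketch(void *eager)
{
	char buf[sizeof (intptr_t)];	/* stands in for the TPI blob */
	void *out;

	bcopy(&eager, buf, sizeof (eager));	/* pack into the message */
	bcopy(buf, &out, sizeof (out));		/* unpack on the accept side */
	return (out);				/* same eager pointer */
}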

int
tcp_conn_create_v4(conn_t *lconnp, conn_t *connp, ipha_t *ipha,
    tcph_t *tcph, mblk_t *idmp)
{
	tcp_t	*ltcp = lconnp->conn_tcp;
	tcp_t	*tcp = connp->conn_tcp;
	sin_t	sin;
	mblk_t	*tpi_mp = NULL;
	int	err;
	cred_t	*cr;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	sin = sin_null;
	sin.sin_addr.s_addr = ipha->ipha_src;
	sin.sin_port = *(uint16_t *)tcph->th_lport;
	sin.sin_family = AF_INET;
	if (ltcp->tcp_recvdstaddr) {
		sin_t	sind;

		sind = sin_null;
		sind.sin_addr.s_addr = ipha->ipha_dst;
		sind.sin_port = *(uint16_t *)tcph->th_fport;
		sind.sin_family = AF_INET;
		tpi_mp = mi_tpi_extconn_ind(NULL,
		    (char *)&sind, sizeof (sin_t), (char *)&tcp,
		    (t_scalar_t)sizeof (intptr_t), (char *)&sind,
		    sizeof (sin_t), (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	} else {
		tpi_mp = mi_tpi_conn_ind(NULL,
		    (char *)&sin, sizeof (sin_t),
		    (char *)&tcp, (t_scalar_t)sizeof (intptr_t),
		    (t_scalar_t)ltcp->tcp_conn_req_seqnum);
	}

	if (tpi_mp == NULL) {
		return (ENOMEM);
	}

	connp->conn_flags |= (IPCL_TCP4|IPCL_EAGER);
	connp->conn_send = ip_output;
	connp->conn_recv = tcp_input;
	connp->conn_fully_bound = B_FALSE;

	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &connp->conn_srcv6);
	IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &connp->conn_remv6);
	connp->conn_fport = *(uint16_t *)tcph->th_lport;
	connp->conn_lport = *(uint16_t *)tcph->th_fport;

	if (tcps->tcps_trace) {
		tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_NOSLEEP);
	}

	/* Inherit information from the "parent" */
	tcp->tcp_ipversion = ltcp->tcp_ipversion;
	tcp->tcp_family = ltcp->tcp_family;
	tcp->tcp_wq = ltcp->tcp_wq;
	tcp->tcp_rq = ltcp->tcp_rq;
	tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	tcp->tcp_detached = B_TRUE;
	if ((err = tcp_init_values(tcp)) != 0) {
		freemsg(tpi_mp);
		return (err);
	}

	/*
	 * Make sure that the eager's tcp template has enough space to
	 * copy the IPv4 listener's tcp template. Since the conn_t structure
	 * is preserved and tcp_iphc_len is also preserved, an eager conn_t
	 * may have a tcp_template of total length
	 * TCP_MAX_COMBINED_HEADER_LENGTH or more (in case of re-allocation
	 * of a conn_t with a tcp-IPv6 template with extension headers or
	 * with an ip6i_t struct). Note that the bcopy() below copies the
	 * listener tcp's hdr_len, which cannot be greater than
	 * TCP_MAX_COMBINED_HEADER_LENGTH as this listener must be an IPv4
	 * listener.
	 */
	ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH);
	ASSERT(ltcp->tcp_hdr_len <= TCP_MAX_COMBINED_HEADER_LENGTH);

	tcp->tcp_hdr_len = ltcp->tcp_hdr_len;
	tcp->tcp_ip_hdr_len = ltcp->tcp_ip_hdr_len;
	tcp->tcp_tcp_hdr_len = ltcp->tcp_tcp_hdr_len;
	tcp->tcp_ttl = ltcp->tcp_ttl;
	tcp->tcp_tos = ltcp->tcp_tos;

	/* Copy the IP+TCP header template from listener to eager */
	bcopy(ltcp->tcp_iphc, tcp->tcp_iphc, ltcp->tcp_hdr_len);
	tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc;
	tcp->tcp_ip6h = NULL;
	tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc +
	    tcp->tcp_ip_hdr_len);

	/* Initialize the IP addresses and ports */
	tcp->tcp_ipha->ipha_dst = ipha->ipha_src;
	tcp->tcp_ipha->ipha_src = ipha->ipha_dst;
	bcopy(tcph->th_lport, tcp->tcp_tcph->th_fport, sizeof (in_port_t));
	bcopy(tcph->th_fport, tcp->tcp_tcph->th_lport, sizeof (in_port_t));

	/* Source routing option copyover (reverse it) */
	if (tcps->tcps_rev_src_routes)
		tcp_opt_reverse(tcp, ipha);

	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);
	ASSERT(!tcp->tcp_tconnind_started);

	/*
	 * If the SYN contains a credential, it's a loopback packet; attach
	 * the credential to the TPI message.
	 */
	if ((cr = DB_CRED(idmp)) != NULL) {
		mblk_setcred(tpi_mp, cr);
		DB_CPID(tpi_mp) = DB_CPID(idmp);
	}
	tcp->tcp_conn.tcp_eager_conn_ind = tpi_mp;

	/* Inherit the listener's SSL protection state */
	if ((tcp->tcp_kssl_ent = ltcp->tcp_kssl_ent) != NULL) {
		kssl_hold_ent(tcp->tcp_kssl_ent);
		tcp->tcp_kssl_pending = B_TRUE;
	}

	return (0);
}
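
/*
 * Illustrative sketch (example only; the structs below are hypothetical
 * stand-ins, not the driver's ipha_t/tcph_t): tcp_conn_create_v4() builds
 * the eager's reply template by mirroring the SYN - the peer's source
 * becomes our destination, and the port pair is swapped the same way.
 */
struct sketch_v4addrs { uint32_t src, dst; };
struct sketch_ports { uint16_t lport, fport; };

static void
reverse_template_sketch(const struct sketch_v4addrs *syn,
    const struct sketch_ports *synp, struct sketch_v4addrs *tmpl,
    struct sketch_ports *tmplp)
{
	tmpl->dst = syn->src;		/* reply goes back to the sender */
	tmpl->src = syn->dst;		/* from the address the SYN hit */
	tmplp->fport = synp->lport;	/* peer's local port is our foreign */
	tmplp->lport = synp->fport;	/* and vice versa */
}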

/*
 * Sets up the conn_t for IPsec.
 * If the first mblk is an M_CTL, it is consumed and *mpp is updated.
 * In case of error *mpp is freed.
 */
conn_t *
tcp_get_ipsec_conn(tcp_t *tcp, squeue_t *sqp, mblk_t **mpp)
{
	conn_t	*connp = tcp->tcp_connp;
	conn_t	*econnp;
	squeue_t *new_sqp;
	mblk_t	*first_mp = *mpp;
	mblk_t	*mp = *mpp;
	boolean_t mctl_present = B_FALSE;
	uint_t	ipvers;

	econnp = tcp_get_conn(sqp, tcp->tcp_tcps);
	if (econnp == NULL) {
		freemsg(first_mp);
		return (NULL);
	}
	if (DB_TYPE(mp) == M_CTL) {
		if (mp->b_cont == NULL ||
		    mp->b_cont->b_datap->db_type != M_DATA) {
			freemsg(first_mp);
			return (NULL);
		}
		mp = mp->b_cont;
		if ((mp->b_datap->db_struioflag & STRUIO_EAGER) == 0) {
			freemsg(first_mp);
			return (NULL);
		}

		mp->b_datap->db_struioflag &= ~STRUIO_EAGER;
		first_mp->b_datap->db_struioflag &= ~STRUIO_POLICY;
		mctl_present = B_TRUE;
	} else {
		ASSERT(mp->b_datap->db_struioflag & STRUIO_POLICY);
		mp->b_datap->db_struioflag &= ~STRUIO_POLICY;
	}

	new_sqp = (squeue_t *)DB_CKSUMSTART(mp);
	DB_CKSUMSTART(mp) = 0;

	ASSERT(OK_32PTR(mp->b_rptr));
	ipvers = IPH_HDR_VERSION(mp->b_rptr);
	if (ipvers == IPV4_VERSION) {
		uint16_t *up;
		uint32_t ports;
		ipha_t	*ipha;

		ipha = (ipha_t *)mp->b_rptr;
		up = (uint16_t *)((uchar_t *)ipha +
		    IPH_HDR_LENGTH(ipha) + TCP_PORTS_OFFSET);
		ports = *(uint32_t *)up;
		IPCL_TCP_EAGER_INIT(econnp, IPPROTO_TCP,
		    ipha->ipha_dst, ipha->ipha_src, ports);
	} else {
		uint16_t *up;
		uint32_t ports;
		uint16_t ip_hdr_len;
		uint8_t	*nexthdrp;
		ip6_t	*ip6h;
		tcph_t	*tcph;

		ip6h = (ip6_t *)mp->b_rptr;
		if (ip6h->ip6_nxt == IPPROTO_TCP) {
			ip_hdr_len = IPV6_HDR_LEN;
		} else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip_hdr_len,
		    &nexthdrp) || *nexthdrp != IPPROTO_TCP) {
			CONN_DEC_REF(econnp);
			freemsg(first_mp);
			return (NULL);
		}
		tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len];
		up = (uint16_t *)tcph->th_lport;
		ports = *(uint32_t *)up;
		IPCL_TCP_EAGER_INIT_V6(econnp, IPPROTO_TCP,
		    ip6h->ip6_dst, ip6h->ip6_src, ports);
	}

	/*
	 * The caller already ensured that there is a sqp present.
	 */
	econnp->conn_sqp = new_sqp;

	if (connp->conn_policy != NULL) {
		ipsec_in_t *ii;
		ii = (ipsec_in_t *)(first_mp->b_rptr);
		ASSERT(ii->ipsec_in_policy == NULL);
		IPPH_REFHOLD(connp->conn_policy);
		ii->ipsec_in_policy = connp->conn_policy;

		first_mp->b_datap->db_type = IPSEC_POLICY_SET;
		if (!ip_bind_ipsec_policy_set(econnp, first_mp)) {
			CONN_DEC_REF(econnp);
			freemsg(first_mp);
			return (NULL);
		}
	}

	if (ipsec_conn_cache_policy(econnp, ipvers == IPV4_VERSION) != 0) {
		CONN_DEC_REF(econnp);
		freemsg(first_mp);
		return (NULL);
	}

	/*
	 * If we know we have some policy, record the IPsec options size so
	 * that TCP can use it to adjust the MSS.
	 */
	econnp->conn_tcp->tcp_ipsec_overhead = conn_ipsec_length(econnp);
	if (mctl_present) {
		freeb(first_mp);
		*mpp = mp;
	}

	return (econnp);
}
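
/*
 * Illustrative sketch (example only): tcp_get_ipsec_conn() above reads
 * th_lport and th_fport as a single aligned 32-bit word (alignment is
 * asserted via OK_32PTR). When alignment cannot be asserted, the
 * equivalent safe extraction is a byte copy:
 */
static uint32_t
port_pair_sketch(const uchar_t *tcph_start)
{
	uint32_t ports;

	/* th_lport and th_fport are the first four bytes of the header */
	bcopy(tcph_start, &ports, sizeof (ports));
	return (ports);
}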

/*
 * tcp_get_conn/tcp_free_conn
 *
 * tcp_get_conn is used to get a clean tcp connection structure.
 * It first tries to reuse a connection put on the freelist by the
 * time_wait_collector, and only goes to kmem_cache if that fails.
 * This approach has two benefits compared to just allocating from and
 * freeing to kmem_cache:
 *
 * 1) The time_wait_collector can free (which includes the cleanup)
 * outside the squeue. So when the interrupt comes, we have a clean
 * connection sitting in the freelist. Obviously, this buys us
 * performance.
 *
 * 2) Defense against DoS attacks. Allocating a tcp/conn in
 * tcp_conn_request has multiple disadvantages - tying up the squeue
 * during the allocation, and the fact that IPsec policy initialization
 * has to happen here, which requires us to send an M_CTL and check for
 * it, i.e. real ugliness. But allocating the conn/tcp in IP land is
 * also not the best, since we can't check 'q' and 'q0', which are
 * protected by the squeue, and would have to blindly allocate memory
 * that might have to be freed here if we are not allowed to accept the
 * connection. By using the freelist and putting the conn/tcp back on
 * the freelist, we don't pay a penalty for allocating memory without
 * checking 'q/q0' and freeing it if we can't accept the connection.
 *
 * Care should be taken to put the conn back on the same squeue's
 * freelist from which it was allocated. Best results are obtained if
 * the conn is allocated from the listener's squeue and freed to the
 * same. The time wait collector will free up the freelist if the
 * connection ends up sitting there for too long.
 */
void *
tcp_get_conn(void *arg, tcp_stack_t *tcps)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	squeue_t	*sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait;
	netstack_t	*ns;

	tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp = tcp_time_wait->tcp_free_list;
	ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
	if (tcp != NULL) {
		tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
		tcp_time_wait->tcp_free_list_cnt--;
		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
		tcp->tcp_time_wait_next = NULL;
		connp = tcp->tcp_connp;
		connp->conn_flags |= IPCL_REUSED;

		ASSERT(tcp->tcp_tcps == NULL);
		ASSERT(connp->conn_netstack == NULL);
		ns = tcps->tcps_netstack;
		netstack_hold(ns);
		connp->conn_netstack = ns;
		tcp->tcp_tcps = tcps;
		TCPS_REFHOLD(tcps);
		ipcl_globalhash_insert(connp);
		return ((void *)connp);
	}
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
	if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
	    tcps->tcps_netstack)) == NULL)
		return (NULL);
	tcp = connp->conn_tcp;
	tcp->tcp_tcps = tcps;
	TCPS_REFHOLD(tcps);
	return ((void *)connp);
}
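
/*
 * Illustrative sketch (example only; sketch_conn_t and the list head are
 * hypothetical): the freelist-first policy above reduces to "pop a
 * pre-cleaned entry if one exists, otherwise fall back to the allocator".
 * The real code does the pop under tcp_time_wait_lock, one list per squeue.
 */
typedef struct sketch_conn {
	struct sketch_conn *sc_next;
} sketch_conn_t;

static sketch_conn_t *sketch_free_list;	/* one per squeue in the driver */

static sketch_conn_t *
get_conn_sketch(void)
{
	sketch_conn_t *sc = sketch_free_list;

	if (sc != NULL) {			/* reuse: already cleaned up */
		sketch_free_list = sc->sc_next;
		sc->sc_next = NULL;
		return (sc);
	}
	return (NULL);		/* caller falls back to kmem allocation */
}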

/*
 * Update the cached label for the given tcp_t. This should be called once per
 * connection, and before any packets are sent or tcp_process_options is
 * invoked. Returns B_FALSE if the correct label could not be constructed.
 */
static boolean_t
tcp_update_label(tcp_t *tcp, const cred_t *cr)
{
	conn_t	*connp = tcp->tcp_connp;

	if (tcp->tcp_ipversion == IPV4_VERSION) {
		uchar_t	optbuf[IP_MAX_OPT_LENGTH];
		int	added;

		if (tsol_compute_label(cr, tcp->tcp_remote, optbuf,
		    connp->conn_mac_exempt,
		    tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0)
			return (B_FALSE);

		added = tsol_remove_secopt(tcp->tcp_ipha, tcp->tcp_hdr_len);
		if (added == -1)
			return (B_FALSE);
		tcp->tcp_hdr_len += added;
		tcp->tcp_tcph = (tcph_t *)((uchar_t *)tcp->tcp_tcph + added);
		tcp->tcp_ip_hdr_len += added;
		if ((tcp->tcp_label_len = optbuf[IPOPT_OLEN]) != 0) {
			tcp->tcp_label_len = (tcp->tcp_label_len + 3) & ~3;
			added = tsol_prepend_option(optbuf, tcp->tcp_ipha,
			    tcp->tcp_hdr_len);
			if (added == -1)
				return (B_FALSE);
			tcp->tcp_hdr_len += added;
			tcp->tcp_tcph = (tcph_t *)
			    ((uchar_t *)tcp->tcp_tcph + added);
			tcp->tcp_ip_hdr_len += added;
		}
	} else {
		uchar_t	optbuf[TSOL_MAX_IPV6_OPTION];

		if (tsol_compute_label_v6(cr, &tcp->tcp_remote_v6, optbuf,
		    connp->conn_mac_exempt,
		    tcp->tcp_tcps->tcps_netstack->netstack_ip) != 0)
			return (B_FALSE);
		if (tsol_update_sticky(&tcp->tcp_sticky_ipp,
		    &tcp->tcp_label_len, optbuf) != 0)
			return (B_FALSE);
		if (tcp_build_hdrs(tcp->tcp_rq, tcp) != 0)
			return (B_FALSE);
	}

	connp->conn_ulp_labeled = 1;

	return (B_TRUE);
}
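
/*
 * Illustrative sketch (example only): the (len + 3) & ~3 expression used
 * in tcp_update_label() rounds an IPv4 option length up to the 4-byte
 * multiple that IP requires; e.g. a 9-byte security option pads to 12.
 */
static uint_t
optlen_roundup_sketch(uint_t len)
{
	return ((len + 3) & ~3);
}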

/* BEGIN CSTYLED */
/*
 *
 * The sockfs ACCEPT path:
 * =======================
 *
 * The eager is now established in its own perimeter as soon as the SYN is
 * received in tcp_conn_request(). When sockfs receives the conn_ind, it
 * completes the accept processing on the acceptor STREAM. The sending
 * of the conn_ind part is common for both the sockfs listener and a
 * TLI/XTI listener, but a TLI/XTI listener completes the accept processing
 * on the listener perimeter.
 *
 * Common control flow for the 3-way handshake:
 * --------------------------------------------
 *
 * incoming SYN (listener perimeter)	-> tcp_rput_data()
 *					-> tcp_conn_request()
 *
 * incoming SYN-ACK-ACK (eager perim)	-> tcp_rput_data()
 * send T_CONN_IND (listener perim)	-> tcp_send_conn_ind()
 *
 * Sockfs ACCEPT Path:
 * -------------------
 *
 * open acceptor stream (tcp_open allocates tcp_wput_accept()
 * as the STREAM entry point).
 *
 * soaccept() sends T_CONN_RES on the acceptor STREAM to tcp_wput_accept().
 *
 * tcp_wput_accept() extracts the eager and makes the q->q_ptr <-> eager
 * association (we are not behind the eager's squeue, but sockfs is
 * protecting us and no one knows about this stream yet). The STREAMS entry
 * point q->q_info is changed to point at tcp_wput().
 *
 * tcp_wput_accept() sends any deferred eagers via tcp_send_pending() to
 * the listener (done on the listener's perimeter).
 *
 * tcp_wput_accept() calls tcp_accept_finish() on the eager's perimeter to
 * finish the accept.
 *
 * TLI/XTI client ACCEPT path:
 * ---------------------------
 *
 * soaccept() sends T_CONN_RES on the listener STREAM.
 *
 * tcp_accept() -> tcp_accept_swap() complete the processing and send
 * the bind_mp to the eager perimeter to finish the accept
 * (tcp_rput_other()).
 *
 * Locks:
 * ======
 *
 * listener->tcp_eager_lock protects the listener's tcp_eager_next_q0 and
 * tcp_eager_next_q.
 *
 * Referencing:
 * ============
 *
 * 1) We start out in tcp_conn_request with the eager placing a ref on the
 * listener and the listener adding the eager to listener->tcp_eager_next_q0.
 *
 * 2) When a SYN-ACK-ACK arrives, we send the conn_ind to the listener.
 * Before doing so we place a ref on the eager. This ref is finally dropped
 * at the end of tcp_accept_finish() while unwinding from the squeue, i.e.
 * the reference is dropped by the squeue framework.
 *
 * 3) The ref on the listener placed in 1 above is dropped in
 * tcp_accept_finish.
 *
 * The reference must be released by the same entity that added it.
 * In the above scheme, the eager is the entity that adds and releases the
 * references. Note that tcp_accept_finish executes in the squeue of the eager
 * (albeit after it is attached to the acceptor stream). Though 1 executes
 * in the listener's squeue, the eager is nascent at this point and the
 * reference can be considered to have been added on behalf of the eager.
 *
 * Eager getting a Reset or listener closing:
 * ==========================================
 *
 * Once the listener and eager are linked, the listener never does the unlink.
 * If the listener needs to close, tcp_eager_cleanup() is called, which queues
 * a message on every eager's perimeter. The eager then does the unlink,
 * clears any pointers to the listener's queue and drops the reference to the
 * listener. The listener waits in tcp_close outside the squeue until its
 * refcount has dropped to 1. This ensures that the listener has waited for
 * all eagers to clear their association with the listener.
 *
 * Similarly, if the eager decides to go away, it can unlink itself and close.
 * When the T_CONN_RES comes down, we check whether the eager has closed.
 * Note that the reference to the eager is still valid because of the extra
 * ref we put in tcp_send_conn_ind.
 *
 * The listener can always locate the eager under the protection
 * of listener->tcp_eager_lock, and then do a refhold
 * on the eager during the accept processing.
 *
 * The acceptor stream accesses the eager in the accept processing
 * based on the ref placed on the eager before sending T_CONN_IND.
 * The only entity that can negate this refhold is a listener close,
 * which is mutually exclusive with an active acceptor stream.
 *
 * Eager's reference on the listener
 * ===================================
 *
 * If the accept happens (even on a closed eager) the eager drops its
 * reference on the listener at the start of tcp_accept_finish. If the
 * eager is killed due to an incoming RST before the T_CONN_IND is sent up,
 * the reference is dropped in tcp_closei_local. If the listener closes,
 * the reference is dropped in tcp_eager_kill. In all cases the reference
 * is dropped while executing in the eager's context (squeue).
 */
/* END CSTYLED */

/* Process the SYN packet, mp, directed at the listener 'tcp' */

/*
 * THIS FUNCTION IS DIRECTLY CALLED BY IP VIA SQUEUE FOR SYN.
 * tcp_rput_data will not see any SYN packets.
 */
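
/*
 * Illustrative sketch (example only; sketch_ref_t is hypothetical): the
 * referencing rules above boil down to strictly paired hold/release on
 * behalf of the same entity - the eager holds the listener in
 * tcp_conn_request() and releases it in tcp_accept_finish().
 */
typedef struct sketch_ref {
	uint_t sr_cnt;
} sketch_ref_t;

static void
ref_hold_sketch(sketch_ref_t *r)
{
	r->sr_cnt++;			/* CONN_INC_REF analogue */
}

static boolean_t
ref_rele_sketch(sketch_ref_t *r)
{
	return (--r->sr_cnt == 0);	/* last release frees the conn */
}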
/* ARGSUSED */
void
tcp_conn_request(void *arg, mblk_t *mp, void *arg2)
{
	tcph_t		*tcph;
	uint32_t	seg_seq;
	tcp_t		*eager;
	uint_t		ipvers;
	ipha_t		*ipha;
	ip6_t		*ip6h;
	int		err;
	conn_t		*econnp = NULL;
	squeue_t	*new_sqp;
	mblk_t		*mp1;
	uint_t		ip_hdr_len;
	conn_t		*connp = (conn_t *)arg;
	tcp_t		*tcp = connp->conn_tcp;
	cred_t		*credp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	ip_stack_t	*ipst;

	if (tcp->tcp_state != TCPS_LISTEN)
		goto error2;

	ASSERT((tcp->tcp_connp->conn_flags & IPCL_BOUND) != 0);

	mutex_enter(&tcp->tcp_eager_lock);
	if (tcp->tcp_conn_req_cnt_q >= tcp->tcp_conn_req_max) {
		mutex_exit(&tcp->tcp_eager_lock);
		TCP_STAT(tcps, tcp_listendrop);
		BUMP_MIB(&tcps->tcps_mib, tcpListenDrop);
		if (tcp->tcp_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_conn_request: listen backlog (max=%d) "
			    "overflow (%d pending) on %s",
			    tcp->tcp_conn_req_max, tcp->tcp_conn_req_cnt_q,
			    tcp_display(tcp, NULL, DISP_PORT_ONLY));
		}
		goto error2;
	}

	if (tcp->tcp_conn_req_cnt_q0 >=
	    tcp->tcp_conn_req_max + tcps->tcps_conn_req_max_q0) {
		/*
		 * Q0 is full. Drop a pending half-open req from the queue
		 * to make room for the new SYN req. Also mark the time we
		 * drop a SYN.
		 *
		 * A more aggressive defense against SYN attacks would
		 * be to set the "tcp_syn_defense" flag now.
		 */
		TCP_STAT(tcps, tcp_listendropq0);
		tcp->tcp_last_rcv_lbolt = lbolt64;
		if (!tcp_drop_q0(tcp)) {
			mutex_exit(&tcp->tcp_eager_lock);
			BUMP_MIB(&tcps->tcps_mib, tcpListenDropQ0);
			if (tcp->tcp_debug) {
				(void) strlog(TCP_MOD_ID, 0, 3, SL_TRACE,
				    "tcp_conn_request: listen half-open "
				    "queue (max=%d) full (%d pending) on %s",
				    tcps->tcps_conn_req_max_q0,
				    tcp->tcp_conn_req_cnt_q0,
				    tcp_display(tcp, NULL,
				    DISP_PORT_ONLY));
			}
			goto error2;
		}
	}
	mutex_exit(&tcp->tcp_eager_lock);

	/*
	 * IP adds STRUIO_EAGER and ensures that the received packet is
	 * M_DATA even if conn_ipv6_recvpktinfo is enabled or for an IPv6
	 * link-local address. If IPsec is enabled, db_struioflag has
	 * STRUIO_POLICY set (mutually exclusive with STRUIO_EAGER); it is
	 * an error if neither of them is set.
	 */
	if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) {
		new_sqp = (squeue_t *)DB_CKSUMSTART(mp);
		DB_CKSUMSTART(mp) = 0;
		mp->b_datap->db_struioflag &= ~STRUIO_EAGER;
		econnp = (conn_t *)tcp_get_conn(arg2, tcps);
		if (econnp == NULL)
			goto error2;
		ASSERT(econnp->conn_netstack == connp->conn_netstack);
		econnp->conn_sqp = new_sqp;
	} else if ((mp->b_datap->db_struioflag & STRUIO_POLICY) != 0) {
		/*
		 * mp is updated in tcp_get_ipsec_conn().
		 */
		econnp = tcp_get_ipsec_conn(tcp, arg2, &mp);
		if (econnp == NULL) {
			/*
			 * mp freed by tcp_get_ipsec_conn.
			 */
			return;
		}
		ASSERT(econnp->conn_netstack == connp->conn_netstack);
	} else {
		goto error2;
	}

	ASSERT(DB_TYPE(mp) == M_DATA);

	ipvers = IPH_HDR_VERSION(mp->b_rptr);
	ASSERT(ipvers == IPV6_VERSION || ipvers == IPV4_VERSION);
	ASSERT(OK_32PTR(mp->b_rptr));
	if (ipvers == IPV4_VERSION) {
		ipha = (ipha_t *)mp->b_rptr;
		ip_hdr_len = IPH_HDR_LENGTH(ipha);
		tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len];
	} else {
		ip6h = (ip6_t *)mp->b_rptr;
		ip_hdr_len = ip_hdr_length_v6(mp, ip6h);
		tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len];
	}

	if (tcp->tcp_family == AF_INET) {
		ASSERT(ipvers == IPV4_VERSION);
		err = tcp_conn_create_v4(connp, econnp, ipha, tcph, mp);
	} else {
		err = tcp_conn_create_v6(connp, econnp, mp, tcph, ipvers, mp);
	}

	if (err)
		goto error3;

	eager = econnp->conn_tcp;

	/* Inherit various TCP parameters from the listener */
	eager->tcp_naglim = tcp->tcp_naglim;
	eager->tcp_first_timer_threshold =
	    tcp->tcp_first_timer_threshold;
	eager->tcp_second_timer_threshold =
	    tcp->tcp_second_timer_threshold;

	eager->tcp_first_ctimer_threshold =
	    tcp->tcp_first_ctimer_threshold;
	eager->tcp_second_ctimer_threshold =
	    tcp->tcp_second_ctimer_threshold;

	/*
	 * tcp_adapt_ire() may change tcp_rwnd according to the ire metrics.
	 * If it does not, the eager's receive window will be set to the
	 * listener's receive window later in this function.
	 */
	eager->tcp_rwnd = 0;

	/*
	 * Inherit the listener's tcp_init_cwnd. We need to do this before
	 * calling tcp_process_options(), where tcp_mss_set() is called
	 * to set the initial cwnd.
	 */
	eager->tcp_init_cwnd = tcp->tcp_init_cwnd;

	/*
	 * Zones: tcp_adapt_ire() and tcp_send_data() both need the
	 * zone id before the accept is completed in tcp_wput_accept().
	 */
	econnp->conn_zoneid = connp->conn_zoneid;
	econnp->conn_allzones = connp->conn_allzones;

	/* Copy nexthop information from listener to eager */
	if (connp->conn_nexthop_set) {
		econnp->conn_nexthop_set = connp->conn_nexthop_set;
		econnp->conn_nexthop_v4 = connp->conn_nexthop_v4;
	}

	/*
	 * TSOL: tsol_input_proc() needs the eager's cred before the
	 * eager is accepted.
	 */
	econnp->conn_cred = eager->tcp_cred = credp = connp->conn_cred;
	crhold(credp);

	/*
	 * If the caller has the process-wide flag set, then default to MAC
	 * exempt mode. This allows read-down to unlabeled hosts.
	 */
	if (getpflags(NET_MAC_AWARE, credp) != 0)
		econnp->conn_mac_exempt = B_TRUE;

	if (is_system_labeled()) {
		cred_t *cr;

		if (connp->conn_mlp_type != mlptSingle) {
			cr = econnp->conn_peercred = DB_CRED(mp);
			if (cr != NULL)
				crhold(cr);
			else
				cr = econnp->conn_cred;
			DTRACE_PROBE2(mlp_syn_accept, conn_t *,
			    econnp, cred_t *, cr)
		} else {
			cr = econnp->conn_cred;
			DTRACE_PROBE2(syn_accept, conn_t *,
			    econnp, cred_t *, cr)
		}

		if (!tcp_update_label(eager, cr)) {
			DTRACE_PROBE3(
			    tx__ip__log__error__connrequest__tcp,
			    char *, "eager connp(1) label on SYN mp(2) failed",
			    conn_t *, econnp, mblk_t *, mp);
			goto error3;
		}
	}

	eager->tcp_hard_binding = B_TRUE;

	tcp_bind_hash_insert(&tcps->tcps_bind_fanout[
	    TCP_BIND_HASH(eager->tcp_lport)], eager, 0);

	CL_INET_CONNECT(eager);

	/*
	 * No need to check for multicast destination since ip will only pass
	 * up multicasts to those that have expressed interest.
	 * TODO: what about rejecting broadcasts?
	 * Also check that source is not a multicast or broadcast address.
	 */
	eager->tcp_state = TCPS_SYN_RCVD;

	/*
	 * There should be no ire in the mp as we are being called after
	 * receiving the SYN.
	 */
	ASSERT(tcp_ire_mp(mp) == NULL);

	/*
	 * Adapt our mss, ttl, ... according to the information provided
	 * in the IRE.
	 */
	if (tcp_adapt_ire(eager, NULL) == 0) {
		/* Undo the bind_hash_insert */
		tcp_bind_hash_remove(eager);
		goto error3;
	}

	/* Process all TCP options. */
	tcp_process_options(eager, tcph);

	/* Is the other end ECN capable? */
	if (tcps->tcps_ecn_permitted >= 1 &&
	    (tcph->th_flags[0] & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) {
		eager->tcp_ecn_ok = B_TRUE;
	}

	/*
	 * The listener's tcp_rq->q_hiwat should be the default window size
	 * or a window size changed via the SO_RCVBUF option. First round up
	 * the eager's tcp_rwnd to the nearest MSS. Then find out the window
	 * scale option value if needed. Call tcp_rwnd_set() to finish the
	 * setting.
	 *
	 * Note that if there is an rpipe metric associated with the remote
	 * host, we should not inherit the receive window size from the
	 * listener.
	 */
	eager->tcp_rwnd = MSS_ROUNDUP(
	    (eager->tcp_rwnd == 0 ? tcp->tcp_rq->q_hiwat :
	    eager->tcp_rwnd), eager->tcp_mss);
	if (eager->tcp_snd_ws_ok)
		tcp_set_ws_value(eager);
	/*
	 * Note that this is the only place tcp_rwnd_set() is called for
	 * accepting a connection. We need to call it here instead of
	 * after the 3-way handshake because we need to tell the other
	 * side our rwnd in the SYN-ACK segment.
	 */
	(void) tcp_rwnd_set(eager, eager->tcp_rwnd);
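
	/*
	 * Illustrative sketch (example only; assumes MSS_ROUNDUP rounds up
	 * to the next multiple of the MSS): a 65535-byte q_hiwat with a
	 * 1460-byte MSS becomes 65700 bytes, i.e. exactly 45 full segments.
	 *
	 *	static uint32_t
	 *	mss_roundup_sketch(uint32_t bytes, uint32_t mss)
	 *	{
	 *		return (((bytes - 1) / mss + 1) * mss);
	 *	}
	 */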

	/*
	 * We eliminate the need for sockfs to send down a T_SVR4_OPTMGMT_REQ
	 * via soaccept()->soinheritoptions(), which essentially applies
	 * all the listener options to the new STREAM. The options that we
	 * need to take care of are:
	 * SO_DEBUG, SO_REUSEADDR, SO_KEEPALIVE, SO_DONTROUTE, SO_BROADCAST,
	 * SO_USELOOPBACK, SO_OOBINLINE, SO_DGRAM_ERRIND, SO_LINGER,
	 * SO_SNDBUF, SO_RCVBUF.
	 *
	 * SO_RCVBUF:	tcp_rwnd_set() above takes care of it.
	 * SO_SNDBUF:	Set the tcp_xmit_hiwater for the eager. When
	 *		tcp_maxpsz_set() gets called later from
	 *		tcp_accept_finish(), the option takes effect.
	 */
	/* Set the TCP options */
	eager->tcp_xmit_hiwater = tcp->tcp_xmit_hiwater;
	eager->tcp_dgram_errind = tcp->tcp_dgram_errind;
	eager->tcp_oobinline = tcp->tcp_oobinline;
	eager->tcp_reuseaddr = tcp->tcp_reuseaddr;
	eager->tcp_broadcast = tcp->tcp_broadcast;
	eager->tcp_useloopback = tcp->tcp_useloopback;
	eager->tcp_dontroute = tcp->tcp_dontroute;
	eager->tcp_linger = tcp->tcp_linger;
	eager->tcp_lingertime = tcp->tcp_lingertime;
	if (tcp->tcp_ka_enabled)
		eager->tcp_ka_enabled = 1;

	/* Set the IP options */
	econnp->conn_broadcast = connp->conn_broadcast;
	econnp->conn_loopback = connp->conn_loopback;
	econnp->conn_dontroute = connp->conn_dontroute;
	econnp->conn_reuseaddr = connp->conn_reuseaddr;

	/* Put a ref on the listener for the eager. */
	CONN_INC_REF(connp);
	mutex_enter(&tcp->tcp_eager_lock);
	tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = eager;
	eager->tcp_eager_next_q0 = tcp->tcp_eager_next_q0;
	tcp->tcp_eager_next_q0 = eager;
	eager->tcp_eager_prev_q0 = tcp;

	/* Set tcp_listener before adding it to tcp_conn_fanout */
	eager->tcp_listener = tcp;
	eager->tcp_saved_listener = tcp;

	/*
	 * Tag this detached tcp vector for later retrieval
	 * by our listener client in tcp_accept().
	 */
	eager->tcp_conn_req_seqnum = tcp->tcp_conn_req_seqnum;
	tcp->tcp_conn_req_cnt_q0++;
	if (++tcp->tcp_conn_req_seqnum == -1) {
		/*
		 * -1 is "special" and defined in TPI as something
		 * that should never be used in T_CONN_IND
		 */
		++tcp->tcp_conn_req_seqnum;
	}
	mutex_exit(&tcp->tcp_eager_lock);

	if (tcp->tcp_syn_defense) {
		/* Don't drop the SYN that comes from a good IP source */
		ipaddr_t *addr_cache = (ipaddr_t *)(tcp->tcp_ip_addr_cache);
		if (addr_cache != NULL && eager->tcp_remote ==
		    addr_cache[IP_ADDR_CACHE_HASH(eager->tcp_remote)]) {
			eager->tcp_dontdrop = B_TRUE;
		}
	}

	/*
	 * We need to insert the eager in its own perimeter but as soon
	 * as we do that, we expose the eager to the classifier and
	 * should not touch any field outside the eager's perimeter.
	 * So do all the work necessary before inserting the eager
	 * in its own perimeter. Be optimistic that ipcl_conn_insert()
	 * will succeed but undo everything if it fails.
	 */
	seg_seq = ABE32_TO_U32(tcph->th_seq);
	eager->tcp_irs = seg_seq;
	eager->tcp_rack = seg_seq;
	eager->tcp_rnxt = seg_seq + 1;
	U32_TO_ABE32(eager->tcp_rnxt, eager->tcp_tcph->th_ack);
	BUMP_MIB(&tcps->tcps_mib, tcpPassiveOpens);
	eager->tcp_state = TCPS_SYN_RCVD;
	mp1 = tcp_xmit_mp(eager, eager->tcp_xmit_head, eager->tcp_mss,
	    NULL, NULL, eager->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (mp1 == NULL) {
		/*
		 * Increment the ref count as we are going to
		 * enqueue an mp on the squeue.
		 */
		CONN_INC_REF(econnp);
		goto error;
	}
	DB_CPID(mp1) = tcp->tcp_cpid;
	eager->tcp_cpid = tcp->tcp_cpid;
	eager->tcp_open_time = lbolt64;

	/*
	 * We need to start the rto timer. In the normal case, we start
	 * the timer after sending the packet on the wire (or at
	 * least believing that the packet was sent by waiting for
	 * CALL_IP_WPUT() to return).
	 * Since this is the first packet being sent on the wire for the
	 * eager, our initial tcp_rto is at least tcp_rexmit_interval_min,
	 * which is a fairly large value to allow the algorithm to adjust
	 * slowly to large fluctuations of RTT during the first few
	 * transmissions.
	 *
	 * Starting the timer first and then sending the packet in this
	 * case shouldn't make much difference since tcp_rexmit_interval_min
	 * is of the order of several 100ms and starting the timer
	 * first and then sending the packet will result in a difference
	 * of a few microseconds.
	 *
	 * Without this optimization, we would be forced to hold the fanout
	 * lock across ipcl_bind_insert() and sending the packet, so that
	 * we don't race against an incoming packet (maybe a RST) for this
	 * eager.
	 *
	 * It is necessary to acquire an extra reference on the eager
	 * at this point and hold it until after tcp_send_data() to
	 * ensure against an eager close race.
	 */

	CONN_INC_REF(eager->tcp_connp);

	TCP_RECORD_TRACE(eager, mp1, TCP_TRACE_SEND_PKT);
	TCP_TIMER_RESTART(eager, eager->tcp_rto);

	/*
	 * Insert the eager in its own perimeter now. We are ready to deal
	 * with any packets on the eager.
	 */
	if (eager->tcp_ipversion == IPV4_VERSION) {
		if (ipcl_conn_insert(econnp, IPPROTO_TCP, 0, 0, 0) != 0) {
			goto error;
		}
	} else {
		if (ipcl_conn_insert_v6(econnp, IPPROTO_TCP, 0, 0, 0, 0) != 0) {
			goto error;
		}
	}

	/* mark conn as fully-bound */
	econnp->conn_fully_bound = B_TRUE;

	/* Send the SYN-ACK */
	tcp_send_data(eager, eager->tcp_wq, mp1);
	CONN_DEC_REF(eager->tcp_connp);
	freemsg(mp);

	return;
error:
	freemsg(mp1);
	eager->tcp_closemp_used = B_TRUE;
	TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15);
	squeue_fill(econnp->conn_sqp, &eager->tcp_closemp, tcp_eager_kill,
	    econnp, SQTAG_TCP_CONN_REQ_2);

	/*
	 * If a connection already exists, send the mp to that connection so
	 * that it can be appropriately dealt with.
	 */
	ipst = tcps->tcps_netstack->netstack_ip;

	if ((econnp = ipcl_classify(mp, connp->conn_zoneid, ipst)) != NULL) {
		if (!IPCL_IS_CONNECTED(econnp)) {
			/*
			 * Something bad happened. ipcl_conn_insert()
			 * failed because a connection already existed
			 * in the connected hash but we can't find it
			 * anymore (someone blew it away). Just
			 * free this message and hopefully the remote
			 * will retransmit, at which time the SYN can be
			 * treated as a new connection or dealt with
			 * with a TH_RST if a connection already exists.
			 */
			CONN_DEC_REF(econnp);
			freemsg(mp);
		} else {
			squeue_fill(econnp->conn_sqp, mp, tcp_input,
			    econnp, SQTAG_TCP_CONN_REQ_1);
		}
	} else {
		/* Nobody wants this packet */
		freemsg(mp);
	}
	return;
error3:
	CONN_DEC_REF(econnp);
error2:
	freemsg(mp);
}
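
/*
 * Illustrative sketch (example only): the listener's conn_req_seqnum
 * increments for every eager but must never hand out -1, which TPI
 * reserves for T_CONN_IND; the wraparound check above simply steps
 * over it.
 */
static t_scalar_t
next_seqnum_sketch(t_scalar_t seqnum)
{
	if (++seqnum == -1)
		++seqnum;	/* skip the reserved value */
	return (seqnum);
}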

/*
 * In an ideal case of vertical partitioning in a NUMA architecture, it is
 * beneficial to have the listener and all the incoming connections
 * tied to the same squeue. The other constraint is that incoming
 * connections should be tied to the squeue attached to the interrupted
 * CPU for obvious locality reasons, so this leaves the listener to
 * be tied to the same squeue. Our only problem is that when the listener
 * is binding, the CPU that will get interrupted by the NIC whose
 * IP address the listener is binding to is not even known. So
 * the code below allows us to change that binding at the time the
 * CPU is interrupted by virtue of the incoming connection's squeue.
 *
 * This is useful only in the case of a listener bound to a specific IP
 * address. Other kinds of listeners get bound the very first time and
 * there is no attempt to rebind them.
 */
void
tcp_conn_request_unbound(void *arg, mblk_t *mp, void *arg2)
{
	conn_t		*connp = (conn_t *)arg;
	squeue_t	*sqp = (squeue_t *)arg2;
	squeue_t	*new_sqp;
	uint32_t	conn_flags;

	if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) {
		new_sqp = (squeue_t *)DB_CKSUMSTART(mp);
	} else {
		goto done;
	}

	if (connp->conn_fanout == NULL)
		goto done;

	if (!(connp->conn_flags & IPCL_FULLY_BOUND)) {
		mutex_enter(&connp->conn_fanout->connf_lock);
		mutex_enter(&connp->conn_lock);
		/*
		 * No one from read or write side can access us now
		 * except for already queued packets on this squeue.
		 * But since we haven't changed the squeue yet, they
		 * can't execute. If they are processed after we have
		 * changed the squeue, they are sent back to the
		 * correct squeue down below.
		 * But a listener close can race with the processing of
		 * an incoming SYN. If the incoming SYN processing changes
		 * the squeue, then the listener close, which is waiting
		 * to enter the squeue, would operate on the wrong
		 * squeue. Hence we don't change the squeue here unless
		 * the refcount is exactly the minimum refcount. The
		 * minimum refcount of 4 is counted as - 1 each for
		 * TCP and IP, 1 for being in the classifier hash, and
		 * 1 for the mblk being processed.
		 */

		if (connp->conn_ref != 4 ||
		    connp->conn_tcp->tcp_state != TCPS_LISTEN) {
			mutex_exit(&connp->conn_lock);
			mutex_exit(&connp->conn_fanout->connf_lock);
			goto done;
		}
		if (connp->conn_sqp != new_sqp) {
			while (connp->conn_sqp != new_sqp)
				(void) casptr(&connp->conn_sqp, sqp, new_sqp);
		}

		do {
			conn_flags = connp->conn_flags;
			conn_flags |= IPCL_FULLY_BOUND;
			(void) cas32(&connp->conn_flags, connp->conn_flags,
			    conn_flags);
		} while (!(connp->conn_flags & IPCL_FULLY_BOUND));

		mutex_exit(&connp->conn_fanout->connf_lock);
		mutex_exit(&connp->conn_lock);
	}

done:
	if (connp->conn_sqp != sqp) {
		CONN_INC_REF(connp);
		squeue_fill(connp->conn_sqp, mp,
		    connp->conn_recv, connp, SQTAG_TCP_CONN_REQ_UNBOUND);
	} else {
		tcp_conn_request(connp, mp, sqp);
	}
}
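
/*
 * Illustrative sketch (example only): the cas32() retry idiom above sets
 * IPCL_FULLY_BOUND atomically without holding a lock across the update.
 * Modeled here with the GCC __sync builtin as a stand-in for the kernel's
 * cas32(); the stand-in is an assumption for illustration, not the
 * driver's API.
 */
static uint32_t
set_flag_cas_sketch(volatile uint32_t *flagsp, uint32_t bit)
{
	uint32_t oldval, newval;

	do {
		oldval = *flagsp;
		newval = oldval | bit;
	} while (__sync_val_compare_and_swap(flagsp, oldval, newval) !=
	    oldval);
	return (newval);
}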

/*
 * Successful connect request processing begins when our client passes
 * a T_CONN_REQ message into tcp_wput() and ends when tcp_rput() passes
 * our T_OK_ACK reply message upstream. The control flow looks like this:
 *	upstream -> tcp_wput() -> tcp_wput_proto() -> tcp_connect() -> IP
 *	upstream <- tcp_rput() <- IP
 * After various error checks are completed, tcp_connect() lays
 * the target address and port into the composite header template,
 * preallocates the T_OK_ACK reply message, constructs a full 12 byte bind
 * request followed by an IRE request, and passes the three mblk message
 * down to IP looking like this:
 *	O_T_BIND_REQ for IP --> IRE req --> T_OK_ACK for our client
 * Processing continues in tcp_rput() when we receive the following message:
 *	T_BIND_ACK from IP --> IRE ack --> T_OK_ACK for our client
 * After consuming the first two mblks, tcp_rput() calls tcp_timer()
 * to fire off the connection request, and then passes the T_OK_ACK mblk
 * upstream that we filled in below. There are, of course, numerous
 * error conditions along the way which truncate the processing described
 * above.
 */
static void
tcp_connect(tcp_t *tcp, mblk_t *mp)
{
	sin_t		*sin;
	sin6_t		*sin6;
	queue_t		*q = tcp->tcp_wq;
	struct T_conn_req	*tcr;
	ipaddr_t	*dstaddrp;
	in_port_t	dstport;
	uint_t		srcid;

	tcr = (struct T_conn_req *)mp->b_rptr;

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tcr)) {
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}

	/*
	 * Determine the packet type based on the type of address passed in;
	 * the request should contain an IPv4 or IPv6 address.
	 * Make sure that the address family matches the family of the
	 * address passed down.
	 */
	switch (tcr->DEST_length) {
	default:
		tcp_err_ack(tcp, mp, TBADADDR, 0);
		return;

	case (sizeof (sin_t) - sizeof (sin->sin_zero)): {
		/*
		 * XXX: The check for a valid DEST_length was not there
		 * in earlier releases and some buggy
		 * TLI apps (e.g. Sybase) got away with not feeding
		 * in the sin_zero part of the address.
		 * We allow that bug to keep those buggy apps humming.
		 * Test suites require the check on DEST_length.
		 * We construct a new mblk with a valid DEST_length and
		 * free the original so the rest of the code does
		 * not have to keep track of this special shorter
		 * length address case.
		 */
		mblk_t	*nmp;
		struct T_conn_req *ntcr;
		sin_t	*nsin;

		nmp = allocb(sizeof (struct T_conn_req) + sizeof (sin_t) +
		    tcr->OPT_length, BPRI_HI);
		if (nmp == NULL) {
			tcp_err_ack(tcp, mp, TSYSERR, ENOMEM);
			return;
		}
		ntcr = (struct T_conn_req *)nmp->b_rptr;
		bzero(ntcr, sizeof (struct T_conn_req));	/* zero fill */
		ntcr->PRIM_type = T_CONN_REQ;
		ntcr->DEST_length = sizeof (sin_t);
		ntcr->DEST_offset = sizeof (struct T_conn_req);

		nsin = (sin_t *)((uchar_t *)ntcr + ntcr->DEST_offset);
		*nsin = sin_null;
		/* Get pointer to shorter address to copy from original mp */
		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
		    tcr->DEST_length);	/* extract DEST_length worth of sin_t */
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			freemsg(nmp);
			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
			return;
		}
		nsin->sin_family = sin->sin_family;
		nsin->sin_port = sin->sin_port;
		nsin->sin_addr = sin->sin_addr;
		/* Note: nsin->sin_zero zero-filled by sin_null assign above */
		nmp->b_wptr = (uchar_t *)&nsin[1];
		if (tcr->OPT_length != 0) {
			ntcr->OPT_length = tcr->OPT_length;
			ntcr->OPT_offset = nmp->b_wptr - nmp->b_rptr;
			bcopy((uchar_t *)tcr + tcr->OPT_offset,
			    (uchar_t *)ntcr + ntcr->OPT_offset,
			    tcr->OPT_length);
			nmp->b_wptr += tcr->OPT_length;
		}
		freemsg(mp);	/* original mp freed */
		mp = nmp;	/* re-initialize original variables */
		tcr = ntcr;
	}
	/* FALLTHRU */

	case sizeof (sin_t):
		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin_t));
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
			return;
		}
		if (tcp->tcp_family != AF_INET ||
		    sin->sin_family != AF_INET) {
			tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		if (sin->sin_port == 0) {
			tcp_err_ack(tcp, mp, TBADADDR, 0);
			return;
		}
		if (tcp->tcp_connp && tcp->tcp_connp->conn_ipv6_v6only) {
			tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}

		break;

	case sizeof (sin6_t):
		sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin6_t));
		if (sin6 == NULL || !OK_32PTR((char *)sin6)) {
			tcp_err_ack(tcp, mp, TSYSERR, EINVAL);
			return;
		}
		if (tcp->tcp_family != AF_INET6 ||
		    sin6->sin6_family != AF_INET6) {
			tcp_err_ack(tcp, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		if (sin6->sin6_port == 0) {
			tcp_err_ack(tcp, mp, TBADADDR, 0);
			return;
		}
		break;
	}
	/*
	 * TODO: If someone in TCPS_TIME_WAIT has this dst/port we
	 * should key on their sequence number and cut them loose.
	 */

	/*
	 * If options were passed in, feed them in for verification and
	 * handling.
	 */
	if (tcr->OPT_length != 0) {
		mblk_t	*ok_mp;
		mblk_t	*discon_mp;
		mblk_t	*conn_opts_mp;
		int	t_error, sys_error, do_disconnect;

		conn_opts_mp = NULL;

		if (tcp_conprim_opt_process(tcp, mp,
		    &do_disconnect, &t_error, &sys_error) < 0) {
			if (do_disconnect) {
				ASSERT(t_error == 0 && sys_error == 0);
				discon_mp = mi_tpi_discon_ind(NULL,
				    ECONNREFUSED, 0);
				if (!discon_mp) {
					tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
					    TSYSERR, ENOMEM);
					return;
				}
				ok_mp = mi_tpi_ok_ack_alloc(mp);
				if (!ok_mp) {
					tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
					    TSYSERR, ENOMEM);
					return;
				}
				qreply(q, ok_mp);
				qreply(q, discon_mp); /* no flush! */
			} else {
				ASSERT(t_error != 0);
				tcp_err_ack_prim(tcp, mp, T_CONN_REQ, t_error,
				    sys_error);
			}
			return;
		}
		/*
		 * Options were successfully set; the mp option buffer
		 * represented by OPT_length/offset has potentially been
		 * modified and contains the results of option processing.
		 * We copy it into another mp to save it, since it may
		 * influence what is returned in the T_CONN_CON.
		 */
		if (tcr->OPT_length != 0) { /* there are resulting options */
			conn_opts_mp = copyb(mp);
			if (!conn_opts_mp) {
				tcp_err_ack_prim(tcp, mp, T_CONN_REQ,
				    TSYSERR, ENOMEM);
				return;
			}
			ASSERT(tcp->tcp_conn.tcp_opts_conn_req == NULL);
			tcp->tcp_conn.tcp_opts_conn_req = conn_opts_mp;
			/*
			 * Note:
			 * The resulting option negotiation could include any
			 * end-to-end negotiation options, but there is no
			 * such thing (yet?) in our TCP/IP.
			 */
		}
	}

	/*
	 * If we're connecting to an IPv4-mapped IPv6 address, we need to
	 * make sure that the template IP header in the tcp structure is an
	 * IPv4 header, and that the tcp_ipversion is IPV4_VERSION. We
	 * need to do this before we call tcp_bindi() so that the port lookup
	 * code will look for ports in the correct port space (IPv4 and
	 * IPv6 have separate port spaces).
	 */
	if (tcp->tcp_family == AF_INET6 && tcp->tcp_ipversion == IPV6_VERSION &&
	    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
		int err = 0;

		err = tcp_header_init_ipv4(tcp);
		if (err != 0) {
			mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM);
			goto connect_failed;
		}
		if (tcp->tcp_lport != 0)
			*(uint16_t *)tcp->tcp_tcph->th_lport = tcp->tcp_lport;
	}

	switch (tcp->tcp_state) {
	case TCPS_IDLE:
		/*
		 * We support quick connect; refer to the comments in
		 * tcp_connect_*().
		 */
		/* FALLTHRU */
	case TCPS_BOUND:
	case TCPS_LISTEN:
		if (tcp->tcp_family == AF_INET6) {
			if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				tcp_connect_ipv6(tcp, mp,
				    &sin6->sin6_addr,
				    sin6->sin6_port, sin6->sin6_flowinfo,
				    sin6->__sin6_src_id, sin6->sin6_scope_id);
				return;
			}
			/*
			 * The destination address is an IPv4-mapped IPv6
			 * address. The source bound address should be
			 * unspecified or an IPv4-mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &tcp->tcp_bound_source_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(&tcp->tcp_bound_source_v6)) {
				mp = mi_tpi_err_ack_alloc(mp, TSYSERR,
				    EADDRNOTAVAIL);
				break;
			}
			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
			dstport = sin6->sin6_port;
			srcid = sin6->__sin6_src_id;
		} else {
			dstaddrp = &sin->sin_addr.s_addr;
			dstport = sin->sin_port;
			srcid = 0;
		}

		tcp_connect_ipv4(tcp, mp, dstaddrp, dstport, srcid);
		return;
	default:
		mp = mi_tpi_err_ack_alloc(mp, TOUTSTATE, 0);
		break;
	}
	/*
	 * Note: the code below is the "failure" case.
	 */
	/* return error ack and blow away saved option results if any */
connect_failed:
	if (mp != NULL)
		putnext(tcp->tcp_rq, mp);
	else {
		tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
		    TSYSERR, ENOMEM);
	}
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
}

/*
 * Handle connect to IPv4 destinations, including connections for AF_INET6
 * sockets connecting to IPv4-mapped IPv6 destinations.
 */
static void
tcp_connect_ipv4(tcp_t *tcp, mblk_t *mp, ipaddr_t *dstaddrp, in_port_t dstport,
    uint_t srcid)
{
	tcph_t	*tcph;
	mblk_t	*mp1;
	ipaddr_t dstaddr = *dstaddrp;
	int32_t	oldstate;
	uint16_t lport;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_ipversion == IPV4_VERSION);

	/* Check for attempt to connect to INADDR_ANY */
	if (dstaddr == INADDR_ANY) {
		/*
		 * SunOS 4.x and 4.3 BSD allow an application
		 * to connect a TCP socket to INADDR_ANY.
		 * When they do this, the kernel picks the
		 * address of one interface and uses it
		 * instead. The kernel usually ends up
		 * picking the address of the loopback
		 * interface. This is an undocumented feature.
		 * However, we provide the same thing here
		 * in order to have source and binary
		 * compatibility with SunOS 4.x.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		dstaddr = htonl(INADDR_LOOPBACK);
		*dstaddrp = dstaddr;
	}

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && tcp->tcp_ipha->ipha_src == INADDR_ANY) {
		ip_srcid_find_id(srcid, &tcp->tcp_ip_src_v6,
		    tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack);
		IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_ip_src_v6,
		    tcp->tcp_ipha->ipha_src);
	}

	/*
	 * Don't let an endpoint connect to itself. Note that
	 * the test here does not catch the case where the
	 * source IP addr was left unspecified by the user. In
	 * this case, the source addr is set in tcp_adapt_ire()
	 * using the reply to the T_BIND message that we send
	 * down to IP here and the check is repeated in tcp_rput_other.
	 */
	if (dstaddr == tcp->tcp_ipha->ipha_src &&
	    dstport == tcp->tcp_lport) {
		mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0);
		goto failed;
	}

	tcp->tcp_ipha->ipha_dst = dstaddr;
	IN6_IPADDR_TO_V4MAPPED(dstaddr, &tcp->tcp_remote_v6);
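
	/*
	 * Illustrative sketch (example only): the checksum massaging just
	 * below repeatedly folds a 32-bit partial one's-complement sum back
	 * into 16 bits; two folds suffice because the first fold can carry
	 * at most one bit back into the high half.
	 *
	 *	static uint32_t
	 *	cksum_fold_sketch(uint32_t sum)
	 *	{
	 *		sum = (sum & 0xFFFF) + (sum >> 16);
	 *		sum = (sum & 0xFFFF) + (sum >> 16);
	 *		return (sum);
	 *	}
	 */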
	/*
	 * Massage a source route, if any, putting the first hop
	 * in ipha_dst. Compute a starting value for the checksum which
	 * takes into account that the original ipha_dst should be
	 * included in the checksum but that IP will include the
	 * first hop in the source route in the TCP checksum.
	 */
	tcp->tcp_sum = ip_massage_options(tcp->tcp_ipha, tcps->tcps_netstack);
	tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16);
	tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) +
	    (tcp->tcp_ipha->ipha_dst & 0xffff));
	if ((int)tcp->tcp_sum < 0)
		tcp->tcp_sum--;
	tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16);
	tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) +
	    (tcp->tcp_sum >> 16));
	tcph = tcp->tcp_tcph;
	*(uint16_t *)tcph->th_fport = dstport;
	tcp->tcp_fport = dstport;

	oldstate = tcp->tcp_state;
	/*
	 * At this point the remote destination address and remote port
	 * fields in the tcp four-tuple have been filled in the tcp
	 * structure. Now we have to see which state tcp was in so we can
	 * take the appropriate action.
	 */
	if (oldstate == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT.
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0) {
			mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0);
			goto failed;
		}
	}
	tcp->tcp_state = TCPS_SYN_SENT;

	/*
	 * TODO: allow data with connect requests
	 * by unlinking M_DATA trailers here and
	 * linking them in behind the T_OK_ACK mblk.
	 * The tcp_rput() bind ack handler would then
	 * feed them to tcp_wput_data() rather than call
	 * tcp_timer().
	 */
	mp = mi_tpi_ok_ack_alloc(mp);
	if (!mp) {
		tcp->tcp_state = oldstate;
		goto failed;
	}
	if (tcp->tcp_family == AF_INET) {
		mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ,
		    sizeof (ipa_conn_t));
	} else {
		mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ,
		    sizeof (ipa6_conn_t));
	}
	if (mp1) {
		/* Hang onto the T_OK_ACK for later. */
		linkb(mp1, mp);
		mblk_setcred(mp1, tcp->tcp_cred);
		if (tcp->tcp_family == AF_INET)
			mp1 = ip_bind_v4(tcp->tcp_wq, mp1, tcp->tcp_connp);
		else {
			mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp,
			    &tcp->tcp_sticky_ipp);
		}
		BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens);
		tcp->tcp_active_open = 1;
		/*
		 * If the bind cannot complete immediately,
		 * IP will arrange to call tcp_rput_other
		 * when the bind completes.
		 */
		if (mp1 != NULL)
			tcp_rput_other(tcp, mp1);
		return;
	}
	/* Error case */
	tcp->tcp_state = oldstate;
	mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM);

failed:
	/* return error ack and blow away saved option results if any */
	if (mp != NULL)
		putnext(tcp->tcp_rq, mp);
	else {
		tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
		    TSYSERR, ENOMEM);
	}
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
}
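
/*
 * Illustrative sketch (example only): IN6_IS_ADDR_V4MAPPED(), used by
 * tcp_connect() to route between the two handlers above and below,
 * recognizes the ::ffff:a.b.c.d layout - ten zero bytes, two 0xff bytes,
 * then the embedded IPv4 address.
 */
static boolean_t
is_v4mapped_sketch(const uchar_t addr[16])
{
	int i;

	for (i = 0; i < 10; i++) {
		if (addr[i] != 0)
			return (B_FALSE);
	}
	return (addr[10] == 0xff && addr[11] == 0xff);
}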

/*
 * Handle connect to IPv6 destinations.
 */
static void
tcp_connect_ipv6(tcp_t *tcp, mblk_t *mp, in6_addr_t *dstaddrp,
    in_port_t dstport, uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
{
	tcph_t	*tcph;
	mblk_t	*mp1;
	ip6_rthdr_t *rth;
	int32_t	oldstate;
	uint16_t lport;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_family == AF_INET6);

	/*
	 * If we're here, it means that the destination address is a native
	 * IPv6 address. Return an error if tcp_ipversion is not IPv6. A
	 * reason why it might not be IPv6 is if the socket was bound to an
	 * IPv4-mapped IPv6 address.
	 */
	if (tcp->tcp_ipversion != IPV6_VERSION) {
		mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0);
		goto failed;
	}

	/*
	 * Interpret a zero destination to mean loopback.
	 * Update the T_CONN_REQ (sin/sin6) since it is used to
	 * generate the T_CONN_CON.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp)) {
		*dstaddrp = ipv6_loopback;
	}

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip6h->ip6_src)) {
		ip_srcid_find_id(srcid, &tcp->tcp_ip6h->ip6_src,
		    tcp->tcp_connp->conn_zoneid, tcps->tcps_netstack);
		tcp->tcp_ip_src_v6 = tcp->tcp_ip6h->ip6_src;
	}

	/*
	 * Take care of the scope_id now and add an ip6i_t
	 * if one is not already allocated through TCP
	 * sticky options. At this point tcp_ip6h does not
	 * have dst info, thus use dstaddrp.
	 */
	if (scope_id != 0 &&
	    IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
		ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp;
		ip6i_t	*ip6i;

		ipp->ipp_ifindex = scope_id;
		ip6i = (ip6i_t *)tcp->tcp_iphc;

		if ((ipp->ipp_fields & IPPF_HAS_IP6I) &&
		    ip6i != NULL && (ip6i->ip6i_nxt == IPPROTO_RAW)) {
			/* Already allocated */
			ip6i->ip6i_flags |= IP6I_IFINDEX;
			ip6i->ip6i_ifindex = ipp->ipp_ifindex;
			ipp->ipp_fields |= IPPF_SCOPE_ID;
		} else {
			int reterr;

			ipp->ipp_fields |= IPPF_SCOPE_ID;
			if (ipp->ipp_fields & IPPF_HAS_IP6I)
				ip2dbg(("tcp_connect_v6: SCOPE_ID set\n"));
			reterr = tcp_build_hdrs(tcp->tcp_rq, tcp);
			if (reterr != 0)
				goto failed;
			ip1dbg(("tcp_connect_ipv6: tcp_bld_hdrs returned\n"));
		}
	}

	/*
	 * Don't let an endpoint connect to itself. Note that
	 * the test here does not catch the case where the
	 * source IP addr was left unspecified by the user. In
	 * this case, the source addr is set in tcp_adapt_ire()
	 * using the reply to the T_BIND message that we send
	 * down to IP here and the check is repeated in tcp_rput_other.
	 */
	if (IN6_ARE_ADDR_EQUAL(dstaddrp, &tcp->tcp_ip6h->ip6_src) &&
	    (dstport == tcp->tcp_lport)) {
		mp = mi_tpi_err_ack_alloc(mp, TBADADDR, 0);
		goto failed;
	}

	tcp->tcp_ip6h->ip6_dst = *dstaddrp;
	tcp->tcp_remote_v6 = *dstaddrp;
	tcp->tcp_ip6h->ip6_vcf =
	    (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) |
	    (flowinfo & ~IPV6_VERS_AND_FLOW_MASK);

	/*
	 * Massage a routing header (if present) putting the first hop
	 * in ip6_dst. Compute a starting value for the checksum which
	 * takes into account that the original ip6_dst should be
	 * included in the checksum but that IP will include the
	 * first hop in the source route in the TCP checksum.
	 */
	rth = ip_find_rthdr_v6(tcp->tcp_ip6h, (uint8_t *)tcp->tcp_tcph);
	if (rth != NULL) {
		tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h, rth,
		    tcps->tcps_netstack);
		tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) +
		    (tcp->tcp_sum >> 16));
	} else {
		tcp->tcp_sum = 0;
	}

	tcph = tcp->tcp_tcph;
	*(uint16_t *)tcph->th_fport = dstport;
	tcp->tcp_fport = dstport;

	oldstate = tcp->tcp_state;
	/*
	 * At this point the remote destination address and remote port
	 * fields in the tcp four-tuple have been filled in the tcp
	 * structure. Now we have to see which state tcp was in so we can
	 * take the appropriate action.
	 */
	if (oldstate == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT.
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &tcp->tcp_ip_src_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0) {
			mp = mi_tpi_err_ack_alloc(mp, TNOADDR, 0);
			goto failed;
		}
	}
	tcp->tcp_state = TCPS_SYN_SENT;
	/*
	 * TODO: allow data with connect requests
	 * by unlinking M_DATA trailers here and
	 * linking them in behind the T_OK_ACK mblk.
	 * The tcp_rput() bind ack handler would then
	 * feed them to tcp_wput_data() rather than call
	 * tcp_timer().
	 */
	mp = mi_tpi_ok_ack_alloc(mp);
	if (!mp) {
		tcp->tcp_state = oldstate;
		goto failed;
	}
	mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, sizeof (ipa6_conn_t));
	if (mp1) {
		/* Hang onto the T_OK_ACK for later. */
		linkb(mp1, mp);
		mblk_setcred(mp1, tcp->tcp_cred);
		mp1 = ip_bind_v6(tcp->tcp_wq, mp1, tcp->tcp_connp,
		    &tcp->tcp_sticky_ipp);
		BUMP_MIB(&tcps->tcps_mib, tcpActiveOpens);
		tcp->tcp_active_open = 1;
		/* ip_bind_v6() may return ACK or ERROR */
		if (mp1 != NULL)
			tcp_rput_other(tcp, mp1);
		return;
	}
	/* Error case */
	tcp->tcp_state = oldstate;
	mp = mi_tpi_err_ack_alloc(mp, TSYSERR, ENOMEM);

failed:
	/* return error ack and blow away saved option results if any */
	if (mp != NULL)
		putnext(tcp->tcp_rq, mp);
	else {
		tcp_err_ack_prim(tcp, NULL, T_CONN_REQ,
		    TSYSERR, ENOMEM);
	}
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
}
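
/*
 * Illustrative sketch (example only): the ip6_vcf assignment in
 * tcp_connect_ipv6() keeps the version bits from the template constant
 * and takes the traffic class and flow label from the caller's flowinfo;
 * the mask argument here stands in for IPV6_VERS_AND_FLOW_MASK.
 */
static uint32_t
merge_vcf_sketch(uint32_t flowinfo, uint32_t vers_default, uint32_t vers_mask)
{
	return ((vers_default & vers_mask) | (flowinfo & ~vers_mask));
}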
6770 */ 6771 static void 6772 tcp_def_q_set(tcp_t *tcp, mblk_t *mp) 6773 { 6774 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 6775 queue_t *q = tcp->tcp_wq; 6776 tcp_stack_t *tcps = tcp->tcp_tcps; 6777 6778 #ifdef NS_DEBUG 6779 (void) printf("TCP_IOC_DEFAULT_Q for stack %d\n", 6780 tcps->tcps_netstack->netstack_stackid); 6781 #endif 6782 mp->b_datap->db_type = M_IOCACK; 6783 iocp->ioc_count = 0; 6784 mutex_enter(&tcps->tcps_g_q_lock); 6785 if (tcps->tcps_g_q != NULL) { 6786 mutex_exit(&tcps->tcps_g_q_lock); 6787 iocp->ioc_error = EALREADY; 6788 } else { 6789 mblk_t *mp1; 6790 6791 mp1 = tcp_ip_bind_mp(tcp, O_T_BIND_REQ, 0); 6792 if (mp1 == NULL) { 6793 mutex_exit(&tcps->tcps_g_q_lock); 6794 iocp->ioc_error = ENOMEM; 6795 } else { 6796 tcps->tcps_g_q = tcp->tcp_rq; 6797 mutex_exit(&tcps->tcps_g_q_lock); 6798 iocp->ioc_error = 0; 6799 iocp->ioc_rval = 0; 6800 /* 6801 * We are passing tcp_sticky_ipp as NULL 6802 * as it is not useful for tcp_default queue 6803 */ 6804 mp1 = ip_bind_v6(q, mp1, tcp->tcp_connp, NULL); 6805 if (mp1 != NULL) 6806 tcp_rput_other(tcp, mp1); 6807 } 6808 } 6809 qreply(q, mp); 6810 } 6811 6812 /* 6813 * Our client hereby directs us to reject the connection request 6814 * that tcp_conn_request() marked with 'seqnum'. Rejection consists 6815 * of sending the appropriate RST, not an ICMP error. 6816 */ 6817 static void 6818 tcp_disconnect(tcp_t *tcp, mblk_t *mp) 6819 { 6820 tcp_t *ltcp = NULL; 6821 t_scalar_t seqnum; 6822 conn_t *connp; 6823 tcp_stack_t *tcps = tcp->tcp_tcps; 6824 6825 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 6826 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) { 6827 tcp_err_ack(tcp, mp, TPROTO, 0); 6828 return; 6829 } 6830 6831 /* 6832 * Right now, upper modules pass down a T_DISCON_REQ to TCP, 6833 * when the stream is in BOUND state. Do not send a reset, 6834 * since the destination IP address is not valid, and it can 6835 * be the initialized value of all zeros (broadcast address). 6836 * 6837 * If TCP has sent down a bind request to IP and has not 6838 * received the reply, reject the request. Otherwise, TCP 6839 * will be confused. 6840 */ 6841 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_hard_binding) { 6842 if (tcp->tcp_debug) { 6843 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 6844 "tcp_disconnect: bad state, %d", tcp->tcp_state); 6845 } 6846 tcp_err_ack(tcp, mp, TOUTSTATE, 0); 6847 return; 6848 } 6849 6850 seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number; 6851 6852 if (seqnum == -1 || tcp->tcp_conn_req_max == 0) { 6853 6854 /* 6855 * According to TPI, for non-listeners, ignore seqnum 6856 * and disconnect. 6857 * Following interpretation of -1 seqnum is historical 6858 * and implied TPI ? (TPI only states that for T_CONN_IND, 6859 * a valid seqnum should not be -1). 6860 * 6861 * -1 means disconnect everything 6862 * regardless even on a listener. 6863 */ 6864 6865 int old_state = tcp->tcp_state; 6866 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 6867 6868 /* 6869 * The connection can't be on the tcp_time_wait_head list 6870 * since it is not detached. 6871 */ 6872 ASSERT(tcp->tcp_time_wait_next == NULL); 6873 ASSERT(tcp->tcp_time_wait_prev == NULL); 6874 ASSERT(tcp->tcp_time_wait_expire == 0); 6875 ltcp = NULL; 6876 /* 6877 * If it used to be a listener, check to make sure no one else 6878 * has taken the port before switching back to LISTEN state. 
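 *
 * Finding any listener in the lookup below (ltcp != NULL) means the
 * port is now owned by someone else, so the endpoint falls back to
 * TCPS_BOUND rather than TCPS_LISTEN.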
6879 */ 6880 if (tcp->tcp_ipversion == IPV4_VERSION) { 6881 connp = ipcl_lookup_listener_v4(tcp->tcp_lport, 6882 tcp->tcp_ipha->ipha_src, 6883 tcp->tcp_connp->conn_zoneid, ipst); 6884 if (connp != NULL) 6885 ltcp = connp->conn_tcp; 6886 } else { 6887 /* Allow tcp_bound_if listeners? */ 6888 connp = ipcl_lookup_listener_v6(tcp->tcp_lport, 6889 &tcp->tcp_ip6h->ip6_src, 0, 6890 tcp->tcp_connp->conn_zoneid, ipst); 6891 if (connp != NULL) 6892 ltcp = connp->conn_tcp; 6893 } 6894 if (tcp->tcp_conn_req_max && ltcp == NULL) { 6895 tcp->tcp_state = TCPS_LISTEN; 6896 } else if (old_state > TCPS_BOUND) { 6897 tcp->tcp_conn_req_max = 0; 6898 tcp->tcp_state = TCPS_BOUND; 6899 } 6900 if (ltcp != NULL) 6901 CONN_DEC_REF(ltcp->tcp_connp); 6902 if (old_state == TCPS_SYN_SENT || old_state == TCPS_SYN_RCVD) { 6903 BUMP_MIB(&tcps->tcps_mib, tcpAttemptFails); 6904 } else if (old_state == TCPS_ESTABLISHED || 6905 old_state == TCPS_CLOSE_WAIT) { 6906 BUMP_MIB(&tcps->tcps_mib, tcpEstabResets); 6907 } 6908 6909 if (tcp->tcp_fused) 6910 tcp_unfuse(tcp); 6911 6912 mutex_enter(&tcp->tcp_eager_lock); 6913 if ((tcp->tcp_conn_req_cnt_q0 != 0) || 6914 (tcp->tcp_conn_req_cnt_q != 0)) { 6915 tcp_eager_cleanup(tcp, 0); 6916 } 6917 mutex_exit(&tcp->tcp_eager_lock); 6918 6919 tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt, 6920 tcp->tcp_rnxt, TH_RST | TH_ACK); 6921 6922 tcp_reinit(tcp); 6923 6924 if (old_state >= TCPS_ESTABLISHED) { 6925 /* Send M_FLUSH according to TPI */ 6926 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6927 } 6928 mp = mi_tpi_ok_ack_alloc(mp); 6929 if (mp) 6930 putnext(tcp->tcp_rq, mp); 6931 return; 6932 } else if (!tcp_eager_blowoff(tcp, seqnum)) { 6933 tcp_err_ack(tcp, mp, TBADSEQ, 0); 6934 return; 6935 } 6936 if (tcp->tcp_state >= TCPS_ESTABLISHED) { 6937 /* Send M_FLUSH according to TPI */ 6938 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW); 6939 } 6940 mp = mi_tpi_ok_ack_alloc(mp); 6941 if (mp) 6942 putnext(tcp->tcp_rq, mp); 6943 } 6944 6945 /* 6946 * Diagnostic routine used to return a string associated with the tcp state. 6947 * Note that if the caller does not supply a buffer, it will use an internal 6948 * static string. This means that if multiple threads call this function at 6949 * the same time, output can be corrupted... Note also that this function 6950 * does not check the size of the supplied buffer. The caller has to make 6951 * sure that it is big enough. 
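 *
 * A caller-supplied buffer should therefore be at least as large as
 * the internal one, i.e. INET6_ADDRSTRLEN * 2 + 80 bytes, which
 * covers the worst-case DISP_ADDR_AND_PORT output.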
6952 */ 6953 static char * 6954 tcp_display(tcp_t *tcp, char *sup_buf, char format) 6955 { 6956 char buf1[30]; 6957 static char priv_buf[INET6_ADDRSTRLEN * 2 + 80]; 6958 char *buf; 6959 char *cp; 6960 in6_addr_t local, remote; 6961 char local_addrbuf[INET6_ADDRSTRLEN]; 6962 char remote_addrbuf[INET6_ADDRSTRLEN]; 6963 6964 if (sup_buf != NULL) 6965 buf = sup_buf; 6966 else 6967 buf = priv_buf; 6968 6969 if (tcp == NULL) 6970 return ("NULL_TCP"); 6971 switch (tcp->tcp_state) { 6972 case TCPS_CLOSED: 6973 cp = "TCP_CLOSED"; 6974 break; 6975 case TCPS_IDLE: 6976 cp = "TCP_IDLE"; 6977 break; 6978 case TCPS_BOUND: 6979 cp = "TCP_BOUND"; 6980 break; 6981 case TCPS_LISTEN: 6982 cp = "TCP_LISTEN"; 6983 break; 6984 case TCPS_SYN_SENT: 6985 cp = "TCP_SYN_SENT"; 6986 break; 6987 case TCPS_SYN_RCVD: 6988 cp = "TCP_SYN_RCVD"; 6989 break; 6990 case TCPS_ESTABLISHED: 6991 cp = "TCP_ESTABLISHED"; 6992 break; 6993 case TCPS_CLOSE_WAIT: 6994 cp = "TCP_CLOSE_WAIT"; 6995 break; 6996 case TCPS_FIN_WAIT_1: 6997 cp = "TCP_FIN_WAIT_1"; 6998 break; 6999 case TCPS_CLOSING: 7000 cp = "TCP_CLOSING"; 7001 break; 7002 case TCPS_LAST_ACK: 7003 cp = "TCP_LAST_ACK"; 7004 break; 7005 case TCPS_FIN_WAIT_2: 7006 cp = "TCP_FIN_WAIT_2"; 7007 break; 7008 case TCPS_TIME_WAIT: 7009 cp = "TCP_TIME_WAIT"; 7010 break; 7011 default: 7012 (void) mi_sprintf(buf1, "TCPUnkState(%d)", tcp->tcp_state); 7013 cp = buf1; 7014 break; 7015 } 7016 switch (format) { 7017 case DISP_ADDR_AND_PORT: 7018 if (tcp->tcp_ipversion == IPV4_VERSION) { 7019 /* 7020 * Note that we use the remote address in the tcp_b 7021 * structure. This means that it will print out 7022 * the real destination address, not the next hop's 7023 * address if source routing is used. 7024 */ 7025 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ip_src, &local); 7026 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &remote); 7027 7028 } else { 7029 local = tcp->tcp_ip_src_v6; 7030 remote = tcp->tcp_remote_v6; 7031 } 7032 (void) inet_ntop(AF_INET6, &local, local_addrbuf, 7033 sizeof (local_addrbuf)); 7034 (void) inet_ntop(AF_INET6, &remote, remote_addrbuf, 7035 sizeof (remote_addrbuf)); 7036 (void) mi_sprintf(buf, "[%s.%u, %s.%u] %s", 7037 local_addrbuf, ntohs(tcp->tcp_lport), remote_addrbuf, 7038 ntohs(tcp->tcp_fport), cp); 7039 break; 7040 case DISP_PORT_ONLY: 7041 default: 7042 (void) mi_sprintf(buf, "[%u, %u] %s", 7043 ntohs(tcp->tcp_lport), ntohs(tcp->tcp_fport), cp); 7044 break; 7045 } 7046 7047 return (buf); 7048 } 7049 7050 /* 7051 * Called via squeue to get onto the eager's perimeter. It sends a 7052 * TH_RST if eager is in the fanout table. The listener wants the 7053 * eager to disappear either by means of tcp_eager_blowoff() or 7054 * tcp_eager_cleanup() being called. tcp_eager_kill() can also be 7055 * called (via squeue) if the eager cannot be inserted in the 7056 * fanout table in tcp_conn_request(). 7057 */ 7058 /* ARGSUSED */ 7059 void 7060 tcp_eager_kill(void *arg, mblk_t *mp, void *arg2) 7061 { 7062 conn_t *econnp = (conn_t *)arg; 7063 tcp_t *eager = econnp->conn_tcp; 7064 tcp_t *listener = eager->tcp_listener; 7065 tcp_stack_t *tcps = eager->tcp_tcps; 7066 7067 /* 7068 * We could be called because the listener is closing. Since 7069 * the eager is using the listener's queues, it's not safe. 7070 * Better use the default queue just to send the TH_RST 7071 * out.
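 *
 * Note that this function is never called directly; the listener
 * side queues it behind the eager's squeue, as tcp_eager_blowoff()
 * below does:
 *
 *	squeue_fill(eager->tcp_connp->conn_sqp, &eager->tcp_closemp,
 *	    tcp_eager_kill, eager->tcp_connp, SQTAG_TCP_EAGER_BLOWOFF);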
7072 */ 7073 ASSERT(tcps->tcps_g_q != NULL); 7074 eager->tcp_rq = tcps->tcps_g_q; 7075 eager->tcp_wq = WR(tcps->tcps_g_q); 7076 7077 /* 7078 * An eager's conn_fanout will be NULL if it's a duplicate 7079 * of an existing 4-tuple in the conn fanout table. 7080 * We don't want to send an RST out in such a case. 7081 */ 7082 if (econnp->conn_fanout != NULL && eager->tcp_state > TCPS_LISTEN) { 7083 tcp_xmit_ctl("tcp_eager_kill, can't wait", 7084 eager, eager->tcp_snxt, 0, TH_RST); 7085 } 7086 7087 /* We are here because listener wants this eager gone */ 7088 if (listener != NULL) { 7089 mutex_enter(&listener->tcp_eager_lock); 7090 tcp_eager_unlink(eager); 7091 if (eager->tcp_tconnind_started) { 7092 /* 7093 * The eager has sent a conn_ind up to the 7094 * listener but the listener decides to close 7095 * instead. We need to drop the extra ref 7096 * placed on eager in tcp_rput_data() before 7097 * sending the conn_ind to listener. 7098 */ 7099 CONN_DEC_REF(econnp); 7100 } 7101 mutex_exit(&listener->tcp_eager_lock); 7102 CONN_DEC_REF(listener->tcp_connp); 7103 } 7104 7105 if (eager->tcp_state > TCPS_BOUND) 7106 tcp_close_detached(eager); 7107 } 7108 7109 /* 7110 * Reset any eager connection hanging off this listener marked 7111 * with 'seqnum' and then reclaim its resources. 7112 */ 7113 static boolean_t 7114 tcp_eager_blowoff(tcp_t *listener, t_scalar_t seqnum) 7115 { 7116 tcp_t *eager; 7117 mblk_t *mp; 7118 tcp_stack_t *tcps = listener->tcp_tcps; 7119 7120 TCP_STAT(tcps, tcp_eager_blowoff_calls); 7121 eager = listener; 7122 mutex_enter(&listener->tcp_eager_lock); 7123 do { 7124 eager = eager->tcp_eager_next_q; 7125 if (eager == NULL) { 7126 mutex_exit(&listener->tcp_eager_lock); 7127 return (B_FALSE); 7128 } 7129 } while (eager->tcp_conn_req_seqnum != seqnum); 7130 7131 if (eager->tcp_closemp_used) { 7132 mutex_exit(&listener->tcp_eager_lock); 7133 return (B_TRUE); 7134 } 7135 eager->tcp_closemp_used = B_TRUE; 7136 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7137 CONN_INC_REF(eager->tcp_connp); 7138 mutex_exit(&listener->tcp_eager_lock); 7139 mp = &eager->tcp_closemp; 7140 squeue_fill(eager->tcp_connp->conn_sqp, mp, tcp_eager_kill, 7141 eager->tcp_connp, SQTAG_TCP_EAGER_BLOWOFF); 7142 return (B_TRUE); 7143 } 7144 7145 /* 7146 * Reset any eager connection hanging off this listener 7147 * and then reclaim its resources.
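 *
 * Both this routine and tcp_eager_blowoff() above use the eager's
 * tcp_closemp_used flag to avoid queueing the embedded tcp_closemp
 * twice; an eager that is already marked is skipped because its
 * close message is still outstanding on the squeue.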
7148 */ 7149 static void 7150 tcp_eager_cleanup(tcp_t *listener, boolean_t q0_only) 7151 { 7152 tcp_t *eager; 7153 mblk_t *mp; 7154 tcp_stack_t *tcps = listener->tcp_tcps; 7155 7156 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7157 7158 if (!q0_only) { 7159 /* First cleanup q */ 7160 TCP_STAT(tcps, tcp_eager_blowoff_q); 7161 eager = listener->tcp_eager_next_q; 7162 while (eager != NULL) { 7163 if (!eager->tcp_closemp_used) { 7164 eager->tcp_closemp_used = B_TRUE; 7165 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7166 CONN_INC_REF(eager->tcp_connp); 7167 mp = &eager->tcp_closemp; 7168 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7169 tcp_eager_kill, eager->tcp_connp, 7170 SQTAG_TCP_EAGER_CLEANUP); 7171 } 7172 eager = eager->tcp_eager_next_q; 7173 } 7174 } 7175 /* Then cleanup q0 */ 7176 TCP_STAT(tcps, tcp_eager_blowoff_q0); 7177 eager = listener->tcp_eager_next_q0; 7178 while (eager != listener) { 7179 if (!eager->tcp_closemp_used) { 7180 eager->tcp_closemp_used = B_TRUE; 7181 TCP_DEBUG_GETPCSTACK(eager->tcmp_stk, 15); 7182 CONN_INC_REF(eager->tcp_connp); 7183 mp = &eager->tcp_closemp; 7184 squeue_fill(eager->tcp_connp->conn_sqp, mp, 7185 tcp_eager_kill, eager->tcp_connp, 7186 SQTAG_TCP_EAGER_CLEANUP_Q0); 7187 } 7188 eager = eager->tcp_eager_next_q0; 7189 } 7190 } 7191 7192 /* 7193 * If we are an eager connection hanging off a listener that hasn't 7194 * formally accepted the connection yet, get off his list and blow off 7195 * any data that we have accumulated. 7196 */ 7197 static void 7198 tcp_eager_unlink(tcp_t *tcp) 7199 { 7200 tcp_t *listener = tcp->tcp_listener; 7201 7202 ASSERT(MUTEX_HELD(&listener->tcp_eager_lock)); 7203 ASSERT(listener != NULL); 7204 if (tcp->tcp_eager_next_q0 != NULL) { 7205 ASSERT(tcp->tcp_eager_prev_q0 != NULL); 7206 7207 /* Remove the eager tcp from q0 */ 7208 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 7209 tcp->tcp_eager_prev_q0; 7210 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 7211 tcp->tcp_eager_next_q0; 7212 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 7213 listener->tcp_conn_req_cnt_q0--; 7214 7215 tcp->tcp_eager_next_q0 = NULL; 7216 tcp->tcp_eager_prev_q0 = NULL; 7217 7218 /* 7219 * Take the eager out, if it is in the list of droppable 7220 * eagers. 7221 */ 7222 MAKE_UNDROPPABLE(tcp); 7223 7224 if (tcp->tcp_syn_rcvd_timeout != 0) { 7225 /* we have timed out before */ 7226 ASSERT(listener->tcp_syn_rcvd_timeout > 0); 7227 listener->tcp_syn_rcvd_timeout--; 7228 } 7229 } else { 7230 tcp_t **tcpp = &listener->tcp_eager_next_q; 7231 tcp_t *prev = NULL; 7232 7233 for (; tcpp[0]; tcpp = &tcpp[0]->tcp_eager_next_q) { 7234 if (tcpp[0] == tcp) { 7235 if (listener->tcp_eager_last_q == tcp) { 7236 /* 7237 * If we are unlinking the last 7238 * element on the list, adjust 7239 * tail pointer. Set tail pointer 7240 * to nil when list is empty. 7241 */ 7242 ASSERT(tcp->tcp_eager_next_q == NULL); 7243 if (listener->tcp_eager_last_q == 7244 listener->tcp_eager_next_q) { 7245 listener->tcp_eager_last_q = 7246 NULL; 7247 } else { 7248 /* 7249 * We won't get here if there 7250 * is only one eager in the 7251 * list. 
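 *
 * (With a single eager, tcp_eager_next_q and tcp_eager_last_q
 * point to the same tcp_t, so the branch above has already
 * cleared the tail pointer.)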
7252 */ 7253 ASSERT(prev != NULL); 7254 listener->tcp_eager_last_q = 7255 prev; 7256 } 7257 } 7258 tcpp[0] = tcp->tcp_eager_next_q; 7259 tcp->tcp_eager_next_q = NULL; 7260 tcp->tcp_eager_last_q = NULL; 7261 ASSERT(listener->tcp_conn_req_cnt_q > 0); 7262 listener->tcp_conn_req_cnt_q--; 7263 break; 7264 } 7265 prev = tcpp[0]; 7266 } 7267 } 7268 tcp->tcp_listener = NULL; 7269 } 7270 7271 /* Shorthand to generate and send TPI error acks to our client */ 7272 static void 7273 tcp_err_ack(tcp_t *tcp, mblk_t *mp, int t_error, int sys_error) 7274 { 7275 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 7276 putnext(tcp->tcp_rq, mp); 7277 } 7278 7279 /* Shorthand to generate and send TPI error acks to our client */ 7280 static void 7281 tcp_err_ack_prim(tcp_t *tcp, mblk_t *mp, int primitive, 7282 int t_error, int sys_error) 7283 { 7284 struct T_error_ack *teackp; 7285 7286 if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 7287 M_PCPROTO, T_ERROR_ACK)) != NULL) { 7288 teackp = (struct T_error_ack *)mp->b_rptr; 7289 teackp->ERROR_prim = primitive; 7290 teackp->TLI_error = t_error; 7291 teackp->UNIX_error = sys_error; 7292 putnext(tcp->tcp_rq, mp); 7293 } 7294 } 7295 7296 /* 7297 * Note: No locks are held when inspecting tcp_g_*epriv_ports 7298 * but instead the code relies on: 7299 * - the fact that the address of the array and its size never changes 7300 * - the atomic assignment of the elements of the array 7301 */ 7302 /* ARGSUSED */ 7303 static int 7304 tcp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 7305 { 7306 int i; 7307 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7308 7309 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7310 if (tcps->tcps_g_epriv_ports[i] != 0) 7311 (void) mi_mpprintf(mp, "%d ", 7312 tcps->tcps_g_epriv_ports[i]); 7313 } 7314 return (0); 7315 } 7316 7317 /* 7318 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7319 * threads from changing it at the same time. 7320 */ 7321 /* ARGSUSED */ 7322 static int 7323 tcp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7324 cred_t *cr) 7325 { 7326 long new_value; 7327 int i; 7328 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7329 7330 /* 7331 * Fail the request if the new value does not lie within the 7332 * port number limits. 7333 */ 7334 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 7335 new_value <= 0 || new_value >= 65536) { 7336 return (EINVAL); 7337 } 7338 7339 mutex_enter(&tcps->tcps_epriv_port_lock); 7340 /* Check if the value is already in the list */ 7341 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7342 if (new_value == tcps->tcps_g_epriv_ports[i]) { 7343 mutex_exit(&tcps->tcps_epriv_port_lock); 7344 return (EEXIST); 7345 } 7346 } 7347 /* Find an empty slot */ 7348 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7349 if (tcps->tcps_g_epriv_ports[i] == 0) 7350 break; 7351 } 7352 if (i == tcps->tcps_g_num_epriv_ports) { 7353 mutex_exit(&tcps->tcps_epriv_port_lock); 7354 return (EOVERFLOW); 7355 } 7356 /* Set the new value */ 7357 tcps->tcps_g_epriv_ports[i] = (uint16_t)new_value; 7358 mutex_exit(&tcps->tcps_epriv_port_lock); 7359 return (0); 7360 } 7361 7362 /* 7363 * Hold a lock while changing tcp_g_epriv_ports to prevent multiple 7364 * threads from changing it at the same time. 
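 *
 * These handlers typically sit behind the ndd(1M) tunables, e.g.
 *
 *	ndd -set /dev/tcp tcp_extra_priv_ports_del 4045
 *
 * removes a port, subject to the same range check as the add case.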
7365 */ 7366 /* ARGSUSED */ 7367 static int 7368 tcp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 7369 cred_t *cr) 7370 { 7371 long new_value; 7372 int i; 7373 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 7374 7375 /* 7376 * Fail the request if the new value does not lie within the 7377 * port number limits. 7378 */ 7379 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || new_value <= 0 || 7380 new_value >= 65536) { 7381 return (EINVAL); 7382 } 7383 7384 mutex_enter(&tcps->tcps_epriv_port_lock); 7385 /* Check that the value is already in the list */ 7386 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) { 7387 if (tcps->tcps_g_epriv_ports[i] == new_value) 7388 break; 7389 } 7390 if (i == tcps->tcps_g_num_epriv_ports) { 7391 mutex_exit(&tcps->tcps_epriv_port_lock); 7392 return (ESRCH); 7393 } 7394 /* Clear the value */ 7395 tcps->tcps_g_epriv_ports[i] = 0; 7396 mutex_exit(&tcps->tcps_epriv_port_lock); 7397 return (0); 7398 } 7399 7400 /* Return the TPI/TLI equivalent of our current tcp_state */ 7401 static int 7402 tcp_tpistate(tcp_t *tcp) 7403 { 7404 switch (tcp->tcp_state) { 7405 case TCPS_IDLE: 7406 return (TS_UNBND); 7407 case TCPS_LISTEN: 7408 /* 7409 * Return whether there are outstanding T_CONN_IND waiting 7410 * for the matching T_CONN_RES. Therefore don't count q0. 7411 */ 7412 if (tcp->tcp_conn_req_cnt_q > 0) 7413 return (TS_WRES_CIND); 7414 else 7415 return (TS_IDLE); 7416 case TCPS_BOUND: 7417 return (TS_IDLE); 7418 case TCPS_SYN_SENT: 7419 return (TS_WCON_CREQ); 7420 case TCPS_SYN_RCVD: 7421 /* 7422 * Note: assumption: this has to be the active open SYN_RCVD. 7423 * The passive instance is detached in the SYN_RCVD stage of 7424 * incoming connection processing, so we cannot get a request 7425 * for T_info_ack on it. 7426 */ 7427 return (TS_WACK_CRES); 7428 case TCPS_ESTABLISHED: 7429 return (TS_DATA_XFER); 7430 case TCPS_CLOSE_WAIT: 7431 return (TS_WREQ_ORDREL); 7432 case TCPS_FIN_WAIT_1: 7433 return (TS_WIND_ORDREL); 7434 case TCPS_FIN_WAIT_2: 7435 return (TS_WIND_ORDREL); 7436 7437 case TCPS_CLOSING: 7438 case TCPS_LAST_ACK: 7439 case TCPS_TIME_WAIT: 7440 case TCPS_CLOSED: 7441 /* 7442 * The following TS_WACK_DREQ7 is a rendition of the "not 7443 * yet TS_IDLE" TPI state. There is no best match to any 7444 * TPI state for TCPS_{CLOSING, LAST_ACK, TIME_WAIT} but we 7445 * choose a value that will map to the TLI/XTI level 7446 * state of TSTATECHNG (state is in the process of changing), 7447 * which captures what this dummy state represents. 7448 */ 7449 return (TS_WACK_DREQ7); 7450 default: 7451 cmn_err(CE_WARN, "tcp_tpistate: strange state (%d) %s", 7452 tcp->tcp_state, tcp_display(tcp, NULL, 7453 DISP_PORT_ONLY)); 7454 return (TS_UNBND); 7455 } 7456 } 7457 7458 static void 7459 tcp_copy_info(struct T_info_ack *tia, tcp_t *tcp) 7460 { 7461 tcp_stack_t *tcps = tcp->tcp_tcps; 7462 7463 if (tcp->tcp_family == AF_INET6) 7464 *tia = tcp_g_t_info_ack_v6; 7465 else 7466 *tia = tcp_g_t_info_ack; 7467 tia->CURRENT_state = tcp_tpistate(tcp); 7468 tia->OPT_size = tcp_max_optsize; 7469 if (tcp->tcp_mss == 0) { 7470 /* Not yet set - tcp_open does not set mss */ 7471 if (tcp->tcp_ipversion == IPV4_VERSION) 7472 tia->TIDU_size = tcps->tcps_mss_def_ipv4; 7473 else 7474 tia->TIDU_size = tcps->tcps_mss_def_ipv6; 7475 } else { 7476 tia->TIDU_size = tcp->tcp_mss; 7477 } 7478 /* TODO: Default ETSDU is 1. Is that correct for tcp? */ 7479 } 7480 7481 /* 7482 * This routine responds to T_CAPABILITY_REQ messages. It is called by 7483 * tcp_wput.
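Only the capability bits that the client requested in CAP_bits1 (TC1_INFO and/or TC1_ACCEPTOR_ID) are filled in and echoed back.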
Much of the T_CAPABILITY_ACK information is copied from 7484 * tcp_g_t_info_ack. The current state of the stream is copied from 7485 * tcp_state. 7486 */ 7487 static void 7488 tcp_capability_req(tcp_t *tcp, mblk_t *mp) 7489 { 7490 t_uscalar_t cap_bits1; 7491 struct T_capability_ack *tcap; 7492 7493 if (MBLKL(mp) < sizeof (struct T_capability_req)) { 7494 freemsg(mp); 7495 return; 7496 } 7497 7498 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 7499 7500 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 7501 mp->b_datap->db_type, T_CAPABILITY_ACK); 7502 if (mp == NULL) 7503 return; 7504 7505 tcap = (struct T_capability_ack *)mp->b_rptr; 7506 tcap->CAP_bits1 = 0; 7507 7508 if (cap_bits1 & TC1_INFO) { 7509 tcp_copy_info(&tcap->INFO_ack, tcp); 7510 tcap->CAP_bits1 |= TC1_INFO; 7511 } 7512 7513 if (cap_bits1 & TC1_ACCEPTOR_ID) { 7514 tcap->ACCEPTOR_id = tcp->tcp_acceptor_id; 7515 tcap->CAP_bits1 |= TC1_ACCEPTOR_ID; 7516 } 7517 7518 putnext(tcp->tcp_rq, mp); 7519 } 7520 7521 /* 7522 * This routine responds to T_INFO_REQ messages. It is called by tcp_wput. 7523 * Most of the T_INFO_ACK information is copied from tcp_g_t_info_ack. 7524 * The current state of the stream is copied from tcp_state. 7525 */ 7526 static void 7527 tcp_info_req(tcp_t *tcp, mblk_t *mp) 7528 { 7529 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 7530 T_INFO_ACK); 7531 if (!mp) { 7532 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7533 return; 7534 } 7535 tcp_copy_info((struct T_info_ack *)mp->b_rptr, tcp); 7536 putnext(tcp->tcp_rq, mp); 7537 } 7538 7539 /* Respond to the TPI addr request */ 7540 static void 7541 tcp_addr_req(tcp_t *tcp, mblk_t *mp) 7542 { 7543 sin_t *sin; 7544 mblk_t *ackmp; 7545 struct T_addr_ack *taa; 7546 7547 /* Make it large enough for worst case */ 7548 ackmp = reallocb(mp, sizeof (struct T_addr_ack) + 7549 2 * sizeof (sin6_t), 1); 7550 if (ackmp == NULL) { 7551 tcp_err_ack(tcp, mp, TSYSERR, ENOMEM); 7552 return; 7553 } 7554 7555 if (tcp->tcp_ipversion == IPV6_VERSION) { 7556 tcp_addr_req_ipv6(tcp, ackmp); 7557 return; 7558 } 7559 taa = (struct T_addr_ack *)ackmp->b_rptr; 7560 7561 bzero(taa, sizeof (struct T_addr_ack)); 7562 ackmp->b_wptr = (uchar_t *)&taa[1]; 7563 7564 taa->PRIM_type = T_ADDR_ACK; 7565 ackmp->b_datap->db_type = M_PCPROTO; 7566 7567 /* 7568 * Note: Following code assumes 32 bit alignment of basic 7569 * data structures like sin_t and struct T_addr_ack. 
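 *
 * The ack is laid out as the T_addr_ack structure followed
 * immediately by the local sin_t (once bound) and, for endpoints
 * that have reached TCPS_SYN_RCVD, the remote sin_t at a ROUNDUP32
 * offset:
 *
 *	| T_addr_ack | local sin_t | remote sin_t |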
7570 */ 7571 if (tcp->tcp_state >= TCPS_BOUND) { 7572 /* 7573 * Fill in local address 7574 */ 7575 taa->LOCADDR_length = sizeof (sin_t); 7576 taa->LOCADDR_offset = sizeof (*taa); 7577 7578 sin = (sin_t *)&taa[1]; 7579 7580 /* Fill with zeroes and then initialize non-zero fields */ 7581 *sin = sin_null; 7582 7583 sin->sin_family = AF_INET; 7584 7585 sin->sin_addr.s_addr = tcp->tcp_ipha->ipha_src; 7586 sin->sin_port = *(uint16_t *)tcp->tcp_tcph->th_lport; 7587 7588 ackmp->b_wptr = (uchar_t *)&sin[1]; 7589 7590 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7591 /* 7592 * Fill in Remote address 7593 */ 7594 taa->REMADDR_length = sizeof (sin_t); 7595 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7596 taa->LOCADDR_length); 7597 7598 sin = (sin_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7599 *sin = sin_null; 7600 sin->sin_family = AF_INET; 7601 sin->sin_addr.s_addr = tcp->tcp_remote; 7602 sin->sin_port = tcp->tcp_fport; 7603 7604 ackmp->b_wptr = (uchar_t *)&sin[1]; 7605 } 7606 } 7607 putnext(tcp->tcp_rq, ackmp); 7608 } 7609 7610 /* Assumes that tcp_addr_req gets enough space and alignment */ 7611 static void 7612 tcp_addr_req_ipv6(tcp_t *tcp, mblk_t *ackmp) 7613 { 7614 sin6_t *sin6; 7615 struct T_addr_ack *taa; 7616 7617 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 7618 ASSERT(OK_32PTR(ackmp->b_rptr)); 7619 ASSERT(ackmp->b_wptr - ackmp->b_rptr >= sizeof (struct T_addr_ack) + 7620 2 * sizeof (sin6_t)); 7621 7622 taa = (struct T_addr_ack *)ackmp->b_rptr; 7623 7624 bzero(taa, sizeof (struct T_addr_ack)); 7625 ackmp->b_wptr = (uchar_t *)&taa[1]; 7626 7627 taa->PRIM_type = T_ADDR_ACK; 7628 ackmp->b_datap->db_type = M_PCPROTO; 7629 7630 /* 7631 * Note: Following code assumes 32 bit alignment of basic 7632 * data structures like sin6_t and struct T_addr_ack. 7633 */ 7634 if (tcp->tcp_state >= TCPS_BOUND) { 7635 /* 7636 * Fill in local address 7637 */ 7638 taa->LOCADDR_length = sizeof (sin6_t); 7639 taa->LOCADDR_offset = sizeof (*taa); 7640 7641 sin6 = (sin6_t *)&taa[1]; 7642 *sin6 = sin6_null; 7643 7644 sin6->sin6_family = AF_INET6; 7645 sin6->sin6_addr = tcp->tcp_ip6h->ip6_src; 7646 sin6->sin6_port = tcp->tcp_lport; 7647 7648 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7649 7650 if (tcp->tcp_state >= TCPS_SYN_RCVD) { 7651 /* 7652 * Fill in Remote address 7653 */ 7654 taa->REMADDR_length = sizeof (sin6_t); 7655 taa->REMADDR_offset = ROUNDUP32(taa->LOCADDR_offset + 7656 taa->LOCADDR_length); 7657 7658 sin6 = (sin6_t *)(ackmp->b_rptr + taa->REMADDR_offset); 7659 *sin6 = sin6_null; 7660 sin6->sin6_family = AF_INET6; 7661 sin6->sin6_flowinfo = 7662 tcp->tcp_ip6h->ip6_vcf & 7663 ~IPV6_VERS_AND_FLOW_MASK; 7664 sin6->sin6_addr = tcp->tcp_remote_v6; 7665 sin6->sin6_port = tcp->tcp_fport; 7666 7667 ackmp->b_wptr = (uchar_t *)&sin6[1]; 7668 } 7669 } 7670 putnext(tcp->tcp_rq, ackmp); 7671 } 7672 7673 /* 7674 * Handle reinitialization of a tcp structure. 7675 * Maintain "binding state", resetting the state to BOUND, LISTEN, or IDLE.
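 *
 * Reinitialization cancels outstanding timers, flushes the transmit,
 * reassembly and receive lists, folds the instance counters back into
 * the per-stack MIB and, for a former listener, re-inserts the
 * endpoint in the bind hash so that it ends up back in TCPS_LISTEN;
 * everything else returns to TCPS_BOUND.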
7676 */ 7677 static void 7678 tcp_reinit(tcp_t *tcp) 7679 { 7680 mblk_t *mp; 7681 int err; 7682 tcp_stack_t *tcps = tcp->tcp_tcps; 7683 7684 TCP_STAT(tcps, tcp_reinit_calls); 7685 7686 /* tcp_reinit should never be called for detached tcp_t's */ 7687 ASSERT(tcp->tcp_listener == NULL); 7688 ASSERT((tcp->tcp_family == AF_INET && 7689 tcp->tcp_ipversion == IPV4_VERSION) || 7690 (tcp->tcp_family == AF_INET6 && 7691 (tcp->tcp_ipversion == IPV4_VERSION || 7692 tcp->tcp_ipversion == IPV6_VERSION))); 7693 7694 /* Cancel outstanding timers */ 7695 tcp_timers_stop(tcp); 7696 7697 /* 7698 * Reset everything in the state vector, after updating global 7699 * MIB data from instance counters. 7700 */ 7701 UPDATE_MIB(&tcps->tcps_mib, tcpHCInSegs, tcp->tcp_ibsegs); 7702 tcp->tcp_ibsegs = 0; 7703 UPDATE_MIB(&tcps->tcps_mib, tcpHCOutSegs, tcp->tcp_obsegs); 7704 tcp->tcp_obsegs = 0; 7705 7706 tcp_close_mpp(&tcp->tcp_xmit_head); 7707 if (tcp->tcp_snd_zcopy_aware) 7708 tcp_zcopy_notify(tcp); 7709 tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL; 7710 tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0; 7711 mutex_enter(&tcp->tcp_non_sq_lock); 7712 if (tcp->tcp_flow_stopped && 7713 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 7714 tcp_clrqfull(tcp); 7715 } 7716 mutex_exit(&tcp->tcp_non_sq_lock); 7717 tcp_close_mpp(&tcp->tcp_reass_head); 7718 tcp->tcp_reass_tail = NULL; 7719 if (tcp->tcp_rcv_list != NULL) { 7720 /* Free b_next chain */ 7721 tcp_close_mpp(&tcp->tcp_rcv_list); 7722 tcp->tcp_rcv_last_head = NULL; 7723 tcp->tcp_rcv_last_tail = NULL; 7724 tcp->tcp_rcv_cnt = 0; 7725 } 7726 tcp->tcp_rcv_last_tail = NULL; 7727 7728 if ((mp = tcp->tcp_urp_mp) != NULL) { 7729 freemsg(mp); 7730 tcp->tcp_urp_mp = NULL; 7731 } 7732 if ((mp = tcp->tcp_urp_mark_mp) != NULL) { 7733 freemsg(mp); 7734 tcp->tcp_urp_mark_mp = NULL; 7735 } 7736 if (tcp->tcp_fused_sigurg_mp != NULL) { 7737 freeb(tcp->tcp_fused_sigurg_mp); 7738 tcp->tcp_fused_sigurg_mp = NULL; 7739 } 7740 7741 /* 7742 * The following is a union with two members of 7743 * identical type and size, so the following cleanup 7744 * is enough. 7745 */ 7746 tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind); 7747 7748 CL_INET_DISCONNECT(tcp); 7749 7750 /* 7751 * The connection can't be on the tcp_time_wait_head list 7752 * since it is not detached. 7753 */ 7754 ASSERT(tcp->tcp_time_wait_next == NULL); 7755 ASSERT(tcp->tcp_time_wait_prev == NULL); 7756 ASSERT(tcp->tcp_time_wait_expire == 0); 7757 7758 if (tcp->tcp_kssl_pending) { 7759 tcp->tcp_kssl_pending = B_FALSE; 7760 7761 /* Don't reset if it was initialized by bind. */ 7762 if (tcp->tcp_kssl_ent != NULL) { 7763 kssl_release_ent(tcp->tcp_kssl_ent, NULL, 7764 KSSL_NO_PROXY); 7765 } 7766 } 7767 if (tcp->tcp_kssl_ctx != NULL) { 7768 kssl_release_ctx(tcp->tcp_kssl_ctx); 7769 tcp->tcp_kssl_ctx = NULL; 7770 } 7771 7772 /* 7773 * Reset/preserve other values 7774 */ 7775 tcp_reinit_values(tcp); 7776 ipcl_hash_remove(tcp->tcp_connp); 7777 conn_delete_ire(tcp->tcp_connp, NULL); 7778 tcp_ipsec_cleanup(tcp); 7779 7780 if (tcp->tcp_conn_req_max != 0) { 7781 /* 7782 * This is the case when a TLI program uses the same 7783 * transport end point to accept a connection. This 7784 * makes the TCP both a listener and an acceptor. When 7785 * this connection is closed, we need to set the state 7786 * back to TCPS_LISTEN. Make sure that the eager list 7787 * is reinitialized. 7788 * 7789 * Note that this stream is still bound to the four 7790 * tuples of the previous connection in IP.
If a new 7791 * SYN with a different foreign address comes in, IP will 7792 * not find it and will send it to the global queue. In 7793 * the global queue, TCP will do a tcp_lookup_listener() 7794 * to find this stream. This works because this stream 7795 * is only removed from the connected hash. 7796 * 7797 */ 7798 tcp->tcp_state = TCPS_LISTEN; 7799 tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp; 7800 tcp->tcp_eager_next_drop_q0 = tcp; 7801 tcp->tcp_eager_prev_drop_q0 = tcp; 7802 tcp->tcp_connp->conn_recv = tcp_conn_request; 7803 if (tcp->tcp_family == AF_INET6) { 7804 ASSERT(tcp->tcp_connp->conn_af_isv6); 7805 (void) ipcl_bind_insert_v6(tcp->tcp_connp, IPPROTO_TCP, 7806 &tcp->tcp_ip6h->ip6_src, tcp->tcp_lport); 7807 } else { 7808 ASSERT(!tcp->tcp_connp->conn_af_isv6); 7809 (void) ipcl_bind_insert(tcp->tcp_connp, IPPROTO_TCP, 7810 tcp->tcp_ipha->ipha_src, tcp->tcp_lport); 7811 } 7812 } else { 7813 tcp->tcp_state = TCPS_BOUND; 7814 } 7815 7816 /* 7817 * Initialize to default values 7818 * Can't fail since enough header template space already allocated 7819 * at open(). 7820 */ 7821 err = tcp_init_values(tcp); 7822 ASSERT(err == 0); 7823 /* Restore state in tcp_tcph */ 7824 bcopy(&tcp->tcp_lport, tcp->tcp_tcph->th_lport, TCP_PORT_LEN); 7825 if (tcp->tcp_ipversion == IPV4_VERSION) 7826 tcp->tcp_ipha->ipha_src = tcp->tcp_bound_source; 7827 else 7828 tcp->tcp_ip6h->ip6_src = tcp->tcp_bound_source_v6; 7829 /* 7830 * A copy of the src addr is needed in tcp_t 7831 * since the lookup funcs can only do lookups on tcp_t 7832 */ 7833 tcp->tcp_ip_src_v6 = tcp->tcp_bound_source_v6; 7834 7835 ASSERT(tcp->tcp_ptpbhn != NULL); 7836 tcp->tcp_rq->q_hiwat = tcps->tcps_recv_hiwat; 7837 tcp->tcp_rwnd = tcps->tcps_recv_hiwat; 7838 tcp->tcp_mss = tcp->tcp_ipversion != IPV4_VERSION ? 7839 tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4; 7840 } 7841 7842 /* 7843 * Force values to zero that need to be zero. 7844 * Do not touch values associated with the BOUND or LISTEN state 7845 * since the connection will end up in that state after the reinit. 7846 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t 7847 * structure! 7848 */ 7849 static void 7850 tcp_reinit_values(tcp) 7851 tcp_t *tcp; 7852 { 7853 tcp_stack_t *tcps = tcp->tcp_tcps; 7854 7855 #ifndef lint 7856 #define DONTCARE(x) 7857 #define PRESERVE(x) 7858 #else 7859 #define DONTCARE(x) ((x) = (x)) 7860 #define PRESERVE(x) ((x) = (x)) 7861 #endif /* lint */ 7862 7863 PRESERVE(tcp->tcp_bind_hash); 7864 PRESERVE(tcp->tcp_ptpbhn); 7865 PRESERVE(tcp->tcp_acceptor_hash); 7866 PRESERVE(tcp->tcp_ptpahn); 7867 7868 /* Should be ASSERT NULL on these with new code!
*/ 7869 ASSERT(tcp->tcp_time_wait_next == NULL); 7870 ASSERT(tcp->tcp_time_wait_prev == NULL); 7871 ASSERT(tcp->tcp_time_wait_expire == 0); 7872 PRESERVE(tcp->tcp_state); 7873 PRESERVE(tcp->tcp_rq); 7874 PRESERVE(tcp->tcp_wq); 7875 7876 ASSERT(tcp->tcp_xmit_head == NULL); 7877 ASSERT(tcp->tcp_xmit_last == NULL); 7878 ASSERT(tcp->tcp_unsent == 0); 7879 ASSERT(tcp->tcp_xmit_tail == NULL); 7880 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 7881 7882 tcp->tcp_snxt = 0; /* Displayed in mib */ 7883 tcp->tcp_suna = 0; /* Displayed in mib */ 7884 tcp->tcp_swnd = 0; 7885 DONTCARE(tcp->tcp_cwnd); /* Init in tcp_mss_set */ 7886 7887 ASSERT(tcp->tcp_ibsegs == 0); 7888 ASSERT(tcp->tcp_obsegs == 0); 7889 7890 if (tcp->tcp_iphc != NULL) { 7891 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 7892 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 7893 } 7894 7895 DONTCARE(tcp->tcp_naglim); /* Init in tcp_init_values */ 7896 DONTCARE(tcp->tcp_hdr_len); /* Init in tcp_init_values */ 7897 DONTCARE(tcp->tcp_ipha); 7898 DONTCARE(tcp->tcp_ip6h); 7899 DONTCARE(tcp->tcp_ip_hdr_len); 7900 DONTCARE(tcp->tcp_tcph); 7901 DONTCARE(tcp->tcp_tcp_hdr_len); /* Init in tcp_init_values */ 7902 tcp->tcp_valid_bits = 0; 7903 7904 DONTCARE(tcp->tcp_xmit_hiwater); /* Init in tcp_init_values */ 7905 DONTCARE(tcp->tcp_timer_backoff); /* Init in tcp_init_values */ 7906 DONTCARE(tcp->tcp_last_recv_time); /* Init in tcp_init_values */ 7907 tcp->tcp_last_rcv_lbolt = 0; 7908 7909 tcp->tcp_init_cwnd = 0; 7910 7911 tcp->tcp_urp_last_valid = 0; 7912 tcp->tcp_hard_binding = 0; 7913 tcp->tcp_hard_bound = 0; 7914 PRESERVE(tcp->tcp_cred); 7915 PRESERVE(tcp->tcp_cpid); 7916 PRESERVE(tcp->tcp_open_time); 7917 PRESERVE(tcp->tcp_exclbind); 7918 7919 tcp->tcp_fin_acked = 0; 7920 tcp->tcp_fin_rcvd = 0; 7921 tcp->tcp_fin_sent = 0; 7922 tcp->tcp_ordrel_done = 0; 7923 7924 tcp->tcp_debug = 0; 7925 tcp->tcp_dontroute = 0; 7926 tcp->tcp_broadcast = 0; 7927 7928 tcp->tcp_useloopback = 0; 7929 tcp->tcp_reuseaddr = 0; 7930 tcp->tcp_oobinline = 0; 7931 tcp->tcp_dgram_errind = 0; 7932 7933 tcp->tcp_detached = 0; 7934 tcp->tcp_bind_pending = 0; 7935 tcp->tcp_unbind_pending = 0; 7936 tcp->tcp_deferred_clean_death = 0; 7937 7938 tcp->tcp_snd_ws_ok = B_FALSE; 7939 tcp->tcp_snd_ts_ok = B_FALSE; 7940 tcp->tcp_linger = 0; 7941 tcp->tcp_ka_enabled = 0; 7942 tcp->tcp_zero_win_probe = 0; 7943 7944 tcp->tcp_loopback = 0; 7945 tcp->tcp_localnet = 0; 7946 tcp->tcp_syn_defense = 0; 7947 tcp->tcp_set_timer = 0; 7948 7949 tcp->tcp_active_open = 0; 7950 ASSERT(tcp->tcp_timeout == B_FALSE); 7951 tcp->tcp_rexmit = B_FALSE; 7952 tcp->tcp_xmit_zc_clean = B_FALSE; 7953 7954 tcp->tcp_snd_sack_ok = B_FALSE; 7955 PRESERVE(tcp->tcp_recvdstaddr); 7956 tcp->tcp_hwcksum = B_FALSE; 7957 7958 tcp->tcp_ire_ill_check_done = B_FALSE; 7959 DONTCARE(tcp->tcp_maxpsz); /* Init in tcp_init_values */ 7960 7961 tcp->tcp_mdt = B_FALSE; 7962 tcp->tcp_mdt_hdr_head = 0; 7963 tcp->tcp_mdt_hdr_tail = 0; 7964 7965 tcp->tcp_conn_def_q0 = 0; 7966 tcp->tcp_ip_forward_progress = B_FALSE; 7967 tcp->tcp_anon_priv_bind = 0; 7968 tcp->tcp_ecn_ok = B_FALSE; 7969 7970 tcp->tcp_cwr = B_FALSE; 7971 tcp->tcp_ecn_echo_on = B_FALSE; 7972 7973 if (tcp->tcp_sack_info != NULL) { 7974 if (tcp->tcp_notsack_list != NULL) { 7975 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 7976 } 7977 kmem_cache_free(tcp_sack_info_cache, tcp->tcp_sack_info); 7978 tcp->tcp_sack_info = NULL; 7979 } 7980 7981 tcp->tcp_rcv_ws = 0; 7982 tcp->tcp_snd_ws = 0; 7983 tcp->tcp_ts_recent = 0; 7984 tcp->tcp_rnxt = 0; /* Displayed in mib */ 7985 
DONTCARE(tcp->tcp_rwnd); /* Set in tcp_reinit() */ 7986 tcp->tcp_if_mtu = 0; 7987 7988 ASSERT(tcp->tcp_reass_head == NULL); 7989 ASSERT(tcp->tcp_reass_tail == NULL); 7990 7991 tcp->tcp_cwnd_cnt = 0; 7992 7993 ASSERT(tcp->tcp_rcv_list == NULL); 7994 ASSERT(tcp->tcp_rcv_last_head == NULL); 7995 ASSERT(tcp->tcp_rcv_last_tail == NULL); 7996 ASSERT(tcp->tcp_rcv_cnt == 0); 7997 7998 DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_adapt_ire */ 7999 DONTCARE(tcp->tcp_cwnd_max); /* Init in tcp_init_values */ 8000 tcp->tcp_csuna = 0; 8001 8002 tcp->tcp_rto = 0; /* Displayed in MIB */ 8003 DONTCARE(tcp->tcp_rtt_sa); /* Init in tcp_init_values */ 8004 DONTCARE(tcp->tcp_rtt_sd); /* Init in tcp_init_values */ 8005 tcp->tcp_rtt_update = 0; 8006 8007 DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8008 DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */ 8009 8010 tcp->tcp_rack = 0; /* Displayed in mib */ 8011 tcp->tcp_rack_cnt = 0; 8012 tcp->tcp_rack_cur_max = 0; 8013 tcp->tcp_rack_abs_max = 0; 8014 8015 tcp->tcp_max_swnd = 0; 8016 8017 ASSERT(tcp->tcp_listener == NULL); 8018 8019 DONTCARE(tcp->tcp_xmit_lowater); /* Init in tcp_init_values */ 8020 8021 DONTCARE(tcp->tcp_irs); /* tcp_valid_bits cleared */ 8022 DONTCARE(tcp->tcp_iss); /* tcp_valid_bits cleared */ 8023 DONTCARE(tcp->tcp_fss); /* tcp_valid_bits cleared */ 8024 DONTCARE(tcp->tcp_urg); /* tcp_valid_bits cleared */ 8025 8026 ASSERT(tcp->tcp_conn_req_cnt_q == 0); 8027 ASSERT(tcp->tcp_conn_req_cnt_q0 == 0); 8028 PRESERVE(tcp->tcp_conn_req_max); 8029 PRESERVE(tcp->tcp_conn_req_seqnum); 8030 8031 DONTCARE(tcp->tcp_ip_hdr_len); /* Init in tcp_init_values */ 8032 DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */ 8033 DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */ 8034 DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */ 8035 DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */ 8036 8037 tcp->tcp_lingertime = 0; 8038 8039 DONTCARE(tcp->tcp_urp_last); /* tcp_urp_last_valid is cleared */ 8040 ASSERT(tcp->tcp_urp_mp == NULL); 8041 ASSERT(tcp->tcp_urp_mark_mp == NULL); 8042 ASSERT(tcp->tcp_fused_sigurg_mp == NULL); 8043 8044 ASSERT(tcp->tcp_eager_next_q == NULL); 8045 ASSERT(tcp->tcp_eager_last_q == NULL); 8046 ASSERT((tcp->tcp_eager_next_q0 == NULL && 8047 tcp->tcp_eager_prev_q0 == NULL) || 8048 tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0); 8049 ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL); 8050 8051 ASSERT((tcp->tcp_eager_next_drop_q0 == NULL && 8052 tcp->tcp_eager_prev_drop_q0 == NULL) || 8053 tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0); 8054 8055 tcp->tcp_client_errno = 0; 8056 8057 DONTCARE(tcp->tcp_sum); /* Init in tcp_init_values */ 8058 8059 tcp->tcp_remote_v6 = ipv6_all_zeros; /* Displayed in MIB */ 8060 8061 PRESERVE(tcp->tcp_bound_source_v6); 8062 tcp->tcp_last_sent_len = 0; 8063 tcp->tcp_dupack_cnt = 0; 8064 8065 tcp->tcp_fport = 0; /* Displayed in MIB */ 8066 PRESERVE(tcp->tcp_lport); 8067 8068 PRESERVE(tcp->tcp_acceptor_lockp); 8069 8070 ASSERT(tcp->tcp_ordrelid == 0); 8071 PRESERVE(tcp->tcp_acceptor_id); 8072 DONTCARE(tcp->tcp_ipsec_overhead); 8073 8074 /* 8075 * If tcp_tracing flag is ON (i.e. We have a trace buffer 8076 * in tcp structure and now tracing), Re-initialize all 8077 * members of tcp_traceinfo. 
8078 */ 8079 if (tcp->tcp_tracebuf != NULL) { 8080 bzero(tcp->tcp_tracebuf, sizeof (tcptrch_t)); 8081 } 8082 8083 PRESERVE(tcp->tcp_family); 8084 if (tcp->tcp_family == AF_INET6) { 8085 tcp->tcp_ipversion = IPV6_VERSION; 8086 tcp->tcp_mss = tcps->tcps_mss_def_ipv6; 8087 } else { 8088 tcp->tcp_ipversion = IPV4_VERSION; 8089 tcp->tcp_mss = tcps->tcps_mss_def_ipv4; 8090 } 8091 8092 tcp->tcp_bound_if = 0; 8093 tcp->tcp_ipv6_recvancillary = 0; 8094 tcp->tcp_recvifindex = 0; 8095 tcp->tcp_recvhops = 0; 8096 tcp->tcp_closed = 0; 8097 tcp->tcp_cleandeathtag = 0; 8098 if (tcp->tcp_hopopts != NULL) { 8099 mi_free(tcp->tcp_hopopts); 8100 tcp->tcp_hopopts = NULL; 8101 tcp->tcp_hopoptslen = 0; 8102 } 8103 ASSERT(tcp->tcp_hopoptslen == 0); 8104 if (tcp->tcp_dstopts != NULL) { 8105 mi_free(tcp->tcp_dstopts); 8106 tcp->tcp_dstopts = NULL; 8107 tcp->tcp_dstoptslen = 0; 8108 } 8109 ASSERT(tcp->tcp_dstoptslen == 0); 8110 if (tcp->tcp_rtdstopts != NULL) { 8111 mi_free(tcp->tcp_rtdstopts); 8112 tcp->tcp_rtdstopts = NULL; 8113 tcp->tcp_rtdstoptslen = 0; 8114 } 8115 ASSERT(tcp->tcp_rtdstoptslen == 0); 8116 if (tcp->tcp_rthdr != NULL) { 8117 mi_free(tcp->tcp_rthdr); 8118 tcp->tcp_rthdr = NULL; 8119 tcp->tcp_rthdrlen = 0; 8120 } 8121 ASSERT(tcp->tcp_rthdrlen == 0); 8122 PRESERVE(tcp->tcp_drop_opt_ack_cnt); 8123 8124 /* Reset fusion-related fields */ 8125 tcp->tcp_fused = B_FALSE; 8126 tcp->tcp_unfusable = B_FALSE; 8127 tcp->tcp_fused_sigurg = B_FALSE; 8128 tcp->tcp_direct_sockfs = B_FALSE; 8129 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8130 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8131 tcp->tcp_loopback_peer = NULL; 8132 tcp->tcp_fuse_rcv_hiwater = 0; 8133 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8134 tcp->tcp_fuse_rcv_unread_cnt = 0; 8135 8136 tcp->tcp_lso = B_FALSE; 8137 8138 tcp->tcp_in_ack_unsent = 0; 8139 tcp->tcp_cork = B_FALSE; 8140 tcp->tcp_tconnind_started = B_FALSE; 8141 8142 PRESERVE(tcp->tcp_squeue_bytes); 8143 8144 ASSERT(tcp->tcp_kssl_ctx == NULL); 8145 ASSERT(!tcp->tcp_kssl_pending); 8146 PRESERVE(tcp->tcp_kssl_ent); 8147 8148 tcp->tcp_closemp_used = B_FALSE; 8149 8150 #ifdef DEBUG 8151 DONTCARE(tcp->tcmp_stk[0]); 8152 #endif 8153 8154 8155 #undef DONTCARE 8156 #undef PRESERVE 8157 } 8158 8159 /* 8160 * Allocate necessary resources and initialize state vector. 8161 * Guaranteed not to fail so that when an error is returned, 8162 * the caller doesn't need to do any additional cleanup. 8163 */ 8164 int 8165 tcp_init(tcp_t *tcp, queue_t *q) 8166 { 8167 int err; 8168 8169 tcp->tcp_rq = q; 8170 tcp->tcp_wq = WR(q); 8171 tcp->tcp_state = TCPS_IDLE; 8172 if ((err = tcp_init_values(tcp)) != 0) 8173 tcp_timers_stop(tcp); 8174 return (err); 8175 } 8176 8177 static int 8178 tcp_init_values(tcp_t *tcp) 8179 { 8180 int err; 8181 tcp_stack_t *tcps = tcp->tcp_tcps; 8182 8183 ASSERT((tcp->tcp_family == AF_INET && 8184 tcp->tcp_ipversion == IPV4_VERSION) || 8185 (tcp->tcp_family == AF_INET6 && 8186 (tcp->tcp_ipversion == IPV4_VERSION || 8187 tcp->tcp_ipversion == IPV6_VERSION))); 8188 8189 /* 8190 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO 8191 * will be close to tcp_rexmit_interval_initial. By doing this, we 8192 * allow the algorithm to adjust slowly to large fluctuations of RTT 8193 * during first few transmissions of a connection as seen in slow 8194 * links. 
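 *
 * With tcp_rtt_sa initialized to 4 * I and tcp_rtt_sd to I / 2
 * (where I is tcps_rexmit_interval_initial), the RTO computed
 * below works out to
 *
 *	RTO = sa/8 + sd + sa/32 + extra + grace
 *	    = I/2 + I/2 + I/8 + extra + grace
 *	    = 1.125 * I plus the extra and grace terms,
 *
 * raised to tcps_rexmit_interval_min if it falls below that floor.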
8195 */ 8196 tcp->tcp_rtt_sa = tcps->tcps_rexmit_interval_initial << 2; 8197 tcp->tcp_rtt_sd = tcps->tcps_rexmit_interval_initial >> 1; 8198 tcp->tcp_rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 8199 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) + 8200 tcps->tcps_conn_grace_period; 8201 if (tcp->tcp_rto < tcps->tcps_rexmit_interval_min) 8202 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 8203 tcp->tcp_timer_backoff = 0; 8204 tcp->tcp_ms_we_have_waited = 0; 8205 tcp->tcp_last_recv_time = lbolt; 8206 tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_; 8207 tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN; 8208 tcp->tcp_snd_burst = TCP_CWND_INFINITE; 8209 8210 tcp->tcp_maxpsz = tcps->tcps_maxpsz_multiplier; 8211 8212 tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval; 8213 tcp->tcp_first_ctimer_threshold = tcps->tcps_ip_notify_cinterval; 8214 tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval; 8215 /* 8216 * Fix it to tcp_ip_abort_linterval later if it turns out to be a 8217 * passive open. 8218 */ 8219 tcp->tcp_second_ctimer_threshold = tcps->tcps_ip_abort_cinterval; 8220 8221 tcp->tcp_naglim = tcps->tcps_naglim_def; 8222 8223 /* NOTE: ISS is now set in tcp_adapt_ire(). */ 8224 8225 tcp->tcp_mdt_hdr_head = 0; 8226 tcp->tcp_mdt_hdr_tail = 0; 8227 8228 /* Reset fusion-related fields */ 8229 tcp->tcp_fused = B_FALSE; 8230 tcp->tcp_unfusable = B_FALSE; 8231 tcp->tcp_fused_sigurg = B_FALSE; 8232 tcp->tcp_direct_sockfs = B_FALSE; 8233 tcp->tcp_fuse_syncstr_stopped = B_FALSE; 8234 tcp->tcp_fuse_syncstr_plugged = B_FALSE; 8235 tcp->tcp_loopback_peer = NULL; 8236 tcp->tcp_fuse_rcv_hiwater = 0; 8237 tcp->tcp_fuse_rcv_unread_hiwater = 0; 8238 tcp->tcp_fuse_rcv_unread_cnt = 0; 8239 8240 /* Initialize the header template */ 8241 if (tcp->tcp_ipversion == IPV4_VERSION) { 8242 err = tcp_header_init_ipv4(tcp); 8243 } else { 8244 err = tcp_header_init_ipv6(tcp); 8245 } 8246 if (err) 8247 return (err); 8248 8249 /* 8250 * Init the window scale to the max so tcp_rwnd_set() won't pare 8251 * down tcp_rwnd. tcp_adapt_ire() will set the right value later. 8252 */ 8253 tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT; 8254 tcp->tcp_xmit_lowater = tcps->tcps_xmit_lowat; 8255 tcp->tcp_xmit_hiwater = tcps->tcps_xmit_hiwat; 8256 8257 tcp->tcp_cork = B_FALSE; 8258 /* 8259 * Init the tcp_debug option. This value determines whether TCP 8260 * calls strlog() to print out debug messages. Doing this 8261 * initialization here means that this value is not inherited thru 8262 * tcp_reinit(). 8263 */ 8264 tcp->tcp_debug = tcps->tcps_dbg; 8265 8266 tcp->tcp_ka_interval = tcps->tcps_keepalive_interval; 8267 tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval; 8268 8269 return (0); 8270 } 8271 8272 /* 8273 * Initialize the IPv4 header. Loses any record of any IP options. 8274 */ 8275 static int 8276 tcp_header_init_ipv4(tcp_t *tcp) 8277 { 8278 tcph_t *tcph; 8279 uint32_t sum; 8280 conn_t *connp; 8281 tcp_stack_t *tcps = tcp->tcp_tcps; 8282 8283 /* 8284 * This is a simple initialization. If there's 8285 * already a template, it should never be too small, 8286 * so reuse it. Otherwise, allocate space for the new one. 
8287 */ 8288 if (tcp->tcp_iphc == NULL) { 8289 ASSERT(tcp->tcp_iphc_len == 0); 8290 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8291 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8292 if (tcp->tcp_iphc == NULL) { 8293 tcp->tcp_iphc_len = 0; 8294 return (ENOMEM); 8295 } 8296 } 8297 8298 /* options are gone; may need a new label */ 8299 connp = tcp->tcp_connp; 8300 connp->conn_mlp_type = mlptSingle; 8301 connp->conn_ulp_labeled = !is_system_labeled(); 8302 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8303 tcp->tcp_ipha = (ipha_t *)tcp->tcp_iphc; 8304 tcp->tcp_ip6h = NULL; 8305 tcp->tcp_ipversion = IPV4_VERSION; 8306 tcp->tcp_hdr_len = sizeof (ipha_t) + sizeof (tcph_t); 8307 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8308 tcp->tcp_ip_hdr_len = sizeof (ipha_t); 8309 tcp->tcp_ipha->ipha_length = htons(sizeof (ipha_t) + sizeof (tcph_t)); 8310 tcp->tcp_ipha->ipha_version_and_hdr_length 8311 = (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS; 8312 tcp->tcp_ipha->ipha_ident = 0; 8313 8314 tcp->tcp_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8315 tcp->tcp_tos = 0; 8316 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0; 8317 tcp->tcp_ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 8318 tcp->tcp_ipha->ipha_protocol = IPPROTO_TCP; 8319 8320 tcph = (tcph_t *)(tcp->tcp_iphc + sizeof (ipha_t)); 8321 tcp->tcp_tcph = tcph; 8322 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8323 /* 8324 * IP wants our header length in the checksum field to 8325 * allow it to perform a single pseudo-header+checksum 8326 * calculation on behalf of TCP. 8327 * Include the adjustment for a source route once IP_OPTIONS is set. 8328 */ 8329 sum = sizeof (tcph_t) + tcp->tcp_sum; 8330 sum = (sum >> 16) + (sum & 0xFFFF); 8331 U16_TO_ABE16(sum, tcph->th_sum); 8332 return (0); 8333 } 8334 8335 /* 8336 * Initialize the IPv6 header. Loses any record of any IPv6 extension headers. 8337 */ 8338 static int 8339 tcp_header_init_ipv6(tcp_t *tcp) 8340 { 8341 tcph_t *tcph; 8342 uint32_t sum; 8343 conn_t *connp; 8344 tcp_stack_t *tcps = tcp->tcp_tcps; 8345 8346 /* 8347 * This is a simple initialization. If there's 8348 * already a template, it should never be too small, 8349 * so reuse it. Otherwise, allocate space for the new one. 8350 * Ensure that there is enough space to "downgrade" the tcp_t 8351 * to an IPv4 tcp_t. This requires having space for a full load 8352 * of IPv4 options, as well as a full load of TCP options 8353 * (TCP_MAX_COMBINED_HEADER_LENGTH, 120 bytes); this is more space 8354 * than a v6 header and a TCP header with a full load of TCP options 8355 * (IPV6_HDR_LEN is 40 bytes; TCP_MAX_HDR_LENGTH is 60 bytes). 8356 * We want to avoid reallocation in the "downgraded" case when 8357 * processing outbound IPv4 options. 
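 *
 * That is, 120 >= 40 + 60 for the v6 case, and 120 = 60 + 60 for a
 * maximal IPv4 header plus a maximal TCP header, so the single
 * 120-byte template covers both versions.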
8358 */ 8359 if (tcp->tcp_iphc == NULL) { 8360 ASSERT(tcp->tcp_iphc_len == 0); 8361 tcp->tcp_iphc_len = TCP_MAX_COMBINED_HEADER_LENGTH; 8362 tcp->tcp_iphc = kmem_cache_alloc(tcp_iphc_cache, KM_NOSLEEP); 8363 if (tcp->tcp_iphc == NULL) { 8364 tcp->tcp_iphc_len = 0; 8365 return (ENOMEM); 8366 } 8367 } 8368 8369 /* options are gone; may need a new label */ 8370 connp = tcp->tcp_connp; 8371 connp->conn_mlp_type = mlptSingle; 8372 connp->conn_ulp_labeled = !is_system_labeled(); 8373 8374 ASSERT(tcp->tcp_iphc_len >= TCP_MAX_COMBINED_HEADER_LENGTH); 8375 tcp->tcp_ipversion = IPV6_VERSION; 8376 tcp->tcp_hdr_len = IPV6_HDR_LEN + sizeof (tcph_t); 8377 tcp->tcp_tcp_hdr_len = sizeof (tcph_t); 8378 tcp->tcp_ip_hdr_len = IPV6_HDR_LEN; 8379 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 8380 tcp->tcp_ipha = NULL; 8381 8382 /* Initialize the header template */ 8383 8384 tcp->tcp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW; 8385 tcp->tcp_ip6h->ip6_plen = ntohs(sizeof (tcph_t)); 8386 tcp->tcp_ip6h->ip6_nxt = IPPROTO_TCP; 8387 tcp->tcp_ip6h->ip6_hops = (uint8_t)tcps->tcps_ipv6_hoplimit; 8388 8389 tcph = (tcph_t *)(tcp->tcp_iphc + IPV6_HDR_LEN); 8390 tcp->tcp_tcph = tcph; 8391 tcph->th_offset_and_rsrvd[0] = (5 << 4); 8392 /* 8393 * IP wants our header length in the checksum field to 8394 * allow it to perform a single pseudo-header+checksum 8395 * calculation on behalf of TCP. 8396 * Include the adjustment for a source route when IPV6_RTHDR is set. 8397 */ 8398 sum = sizeof (tcph_t) + tcp->tcp_sum; 8399 sum = (sum >> 16) + (sum & 0xFFFF); 8400 U16_TO_ABE16(sum, tcph->th_sum); 8401 return (0); 8402 } 8403 8404 /* At minimum we need 8 bytes in the TCP header for the lookup */ 8405 #define ICMP_MIN_TCP_HDR 8 8406 8407 /* 8408 * tcp_icmp_error is called by tcp_rput_other to process ICMP error messages 8409 * passed up by IP. The message is always received on the correct tcp_t. 8410 * Assumes that IP has pulled up everything up to and including the ICMP header. 8411 */ 8412 void 8413 tcp_icmp_error(tcp_t *tcp, mblk_t *mp) 8414 { 8415 icmph_t *icmph; 8416 ipha_t *ipha; 8417 int iph_hdr_length; 8418 tcph_t *tcph; 8419 boolean_t ipsec_mctl = B_FALSE; 8420 boolean_t secure; 8421 mblk_t *first_mp = mp; 8422 uint32_t new_mss; 8423 uint32_t ratio; 8424 size_t mp_size = MBLKL(mp); 8425 uint32_t seg_seq; 8426 tcp_stack_t *tcps = tcp->tcp_tcps; 8427 8428 /* Assume IP provides aligned packets - otherwise toss */ 8429 if (!OK_32PTR(mp->b_rptr)) { 8430 freemsg(mp); 8431 return; 8432 } 8433 8434 /* 8435 * Since ICMP errors are normal data marked with M_CTL when sent 8436 * to TCP or UDP, we have to look for an IPSEC_IN value to identify 8437 * packets starting with an ipsec_info_t, see ipsec_info.h. 8438 */ 8439 if ((mp_size == sizeof (ipsec_info_t)) && 8440 (((ipsec_info_t *)mp->b_rptr)->ipsec_info_type == IPSEC_IN)) { 8441 ASSERT(mp->b_cont != NULL); 8442 mp = mp->b_cont; 8443 /* IP should have done this */ 8444 ASSERT(OK_32PTR(mp->b_rptr)); 8445 mp_size = MBLKL(mp); 8446 ipsec_mctl = B_TRUE; 8447 } 8448 8449 /* 8450 * Verify that we have a complete outer IP header. If not, drop it. 8451 */ 8452 if (mp_size < sizeof (ipha_t)) { 8453 noticmpv4: 8454 freemsg(first_mp); 8455 return; 8456 } 8457 8458 ipha = (ipha_t *)mp->b_rptr; 8459 /* 8460 * Verify IP version. Anything other than an IPv4 or IPv6 packet is 8461 * dropped. ICMPv6 is handled in tcp_icmp_error_ipv6.
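 *
 * The switch below dispatches on the version of the outer IP header;
 * the remainder of this function assumes IPv4 outer and inner
 * headers.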
8462 */ 8463 switch (IPH_HDR_VERSION(ipha)) { 8464 case IPV6_VERSION: 8465 tcp_icmp_error_ipv6(tcp, first_mp, ipsec_mctl); 8466 return; 8467 case IPV4_VERSION: 8468 break; 8469 default: 8470 goto noticmpv4; 8471 } 8472 8473 /* Skip past the outer IP and ICMP headers */ 8474 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8475 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 8476 /* 8477 * If we don't have the correct outer IP header length or if the ULP 8478 * is not IPPROTO_ICMP or if we don't have a complete inner IP header, 8479 * drop it. 8480 */ 8481 if (iph_hdr_length < sizeof (ipha_t) || 8482 ipha->ipha_protocol != IPPROTO_ICMP || 8483 (ipha_t *)&icmph[1] + 1 > (ipha_t *)mp->b_wptr) { 8484 goto noticmpv4; 8485 } 8486 ipha = (ipha_t *)&icmph[1]; 8487 8488 /* Skip past the inner IP and find the ULP header */ 8489 iph_hdr_length = IPH_HDR_LENGTH(ipha); 8490 tcph = (tcph_t *)((char *)ipha + iph_hdr_length); 8491 /* 8492 * If we don't have the correct inner IP header length or if the ULP 8493 * is not IPPROTO_TCP or if we don't have at least ICMP_MIN_TCP_HDR 8494 * bytes of TCP header, drop it. 8495 */ 8496 if (iph_hdr_length < sizeof (ipha_t) || 8497 ipha->ipha_protocol != IPPROTO_TCP || 8498 (uchar_t *)tcph + ICMP_MIN_TCP_HDR > mp->b_wptr) { 8499 goto noticmpv4; 8500 } 8501 8502 if (TCP_IS_DETACHED_NONEAGER(tcp)) { 8503 if (ipsec_mctl) { 8504 secure = ipsec_in_is_secure(first_mp); 8505 } else { 8506 secure = B_FALSE; 8507 } 8508 if (secure) { 8509 /* 8510 * If we are willing to accept this in clear 8511 * we don't have to verify policy. 8512 */ 8513 if (!ipsec_inbound_accept_clear(mp, ipha, NULL)) { 8514 if (!tcp_check_policy(tcp, first_mp, 8515 ipha, NULL, secure, ipsec_mctl)) { 8516 /* 8517 * tcp_check_policy called 8518 * ip_drop_packet() on failure. 8519 */ 8520 return; 8521 } 8522 } 8523 } 8524 } else if (ipsec_mctl) { 8525 /* 8526 * This is a hard_bound connection. IP has already 8527 * verified policy. We don't have to do it again. 8528 */ 8529 freeb(first_mp); 8530 first_mp = mp; 8531 ipsec_mctl = B_FALSE; 8532 } 8533 8534 seg_seq = ABE32_TO_U32(tcph->th_seq); 8535 /* 8536 * TCP SHOULD check that the TCP sequence number contained in the 8537 * payload of the ICMP error message is within the range 8538 * SND.UNA <= SEG.SEQ < SND.NXT. 8539 */ 8540 if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) { 8541 /* 8542 * If the ICMP message is bogus, should we kill the 8543 * connection, or should we just drop the bogus ICMP 8544 * message? It would probably make more sense to just 8545 * drop the message so that if this one managed to get 8546 * in, the real connection should not suffer. 8547 */ 8548 goto noticmpv4; 8549 } 8550 8551 switch (icmph->icmph_type) { 8552 case ICMP_DEST_UNREACHABLE: 8553 switch (icmph->icmph_code) { 8554 case ICMP_FRAGMENTATION_NEEDED: 8555 /* 8556 * Reduce the MSS based on the new MTU. This will 8557 * eliminate any fragmentation locally. 8558 * N.B. There may well be some funny side-effects on 8559 * the local send policy and the remote receive policy. 8560 * Pending further research, we provide 8561 * tcp_ignore_path_mtu just in case this proves 8562 * disastrous somewhere. 8563 * 8564 * After updating the MSS, retransmit part of the 8565 * dropped segment using the new mss by calling 8566 * tcp_wput_data(). Need to adjust all those 8567 * params to make sure tcp_wput_data() works properly. 8568 */ 8569 if (tcps->tcps_ignore_path_mtu) 8570 break; 8571 8572 /* 8573 * Decrease the MSS by time stamp options, 8574 * IP options and IPSEC options.
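For example, a reported MTU of 1500 with a 52-byte combined header (20 bytes of IP, 20 of TCP and 12 of time stamp option) and no IPSEC overhead yields a new_mss of 1448.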
tcp_hdr_len 8575 * includes time stamp option and IP option 8576 * length. 8577 */ 8578 8579 new_mss = ntohs(icmph->icmph_du_mtu) - 8580 tcp->tcp_hdr_len - tcp->tcp_ipsec_overhead; 8581 8582 /* 8583 * Only update the MSS if the new one is 8584 * smaller than the previous one. This is 8585 * to avoid problems when getting multiple 8586 * ICMP errors for the same MTU. 8587 */ 8588 if (new_mss >= tcp->tcp_mss) 8589 break; 8590 8591 /* 8592 * Stop doing PMTU if new_mss is less than 68 8593 * or less than tcp_mss_min. 8594 * The value 68 comes from rfc 1191. 8595 */ 8596 if (new_mss < MAX(68, tcps->tcps_mss_min)) 8597 tcp->tcp_ipha->ipha_fragment_offset_and_flags = 8598 0; 8599 8600 ratio = tcp->tcp_cwnd / tcp->tcp_mss; 8601 ASSERT(ratio >= 1); 8602 tcp_mss_set(tcp, new_mss, B_TRUE); 8603 8604 /* 8605 * Make sure we have something to 8606 * send. 8607 */ 8608 if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) && 8609 (tcp->tcp_xmit_head != NULL)) { 8610 /* 8611 * Shrink tcp_cwnd in 8612 * proportion to the old MSS/new MSS. 8613 */ 8614 tcp->tcp_cwnd = ratio * tcp->tcp_mss; 8615 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 8616 (tcp->tcp_unsent == 0)) { 8617 tcp->tcp_rexmit_max = tcp->tcp_fss; 8618 } else { 8619 tcp->tcp_rexmit_max = tcp->tcp_snxt; 8620 } 8621 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 8622 tcp->tcp_rexmit = B_TRUE; 8623 tcp->tcp_dupack_cnt = 0; 8624 tcp->tcp_snd_burst = TCP_CWND_SS; 8625 tcp_ss_rexmit(tcp); 8626 } 8627 break; 8628 case ICMP_PORT_UNREACHABLE: 8629 case ICMP_PROTOCOL_UNREACHABLE: 8630 switch (tcp->tcp_state) { 8631 case TCPS_SYN_SENT: 8632 case TCPS_SYN_RCVD: 8633 /* 8634 * ICMP can snipe away incipient 8635 * TCP connections as long as 8636 * seq number is same as initial 8637 * send seq number. 8638 */ 8639 if (seg_seq == tcp->tcp_iss) { 8640 (void) tcp_clean_death(tcp, 8641 ECONNREFUSED, 6); 8642 } 8643 break; 8644 } 8645 break; 8646 case ICMP_HOST_UNREACHABLE: 8647 case ICMP_NET_UNREACHABLE: 8648 /* Record the error in case we finally time out. */ 8649 if (icmph->icmph_code == ICMP_HOST_UNREACHABLE) 8650 tcp->tcp_client_errno = EHOSTUNREACH; 8651 else 8652 tcp->tcp_client_errno = ENETUNREACH; 8653 if (tcp->tcp_state == TCPS_SYN_RCVD) { 8654 if (tcp->tcp_listener != NULL && 8655 tcp->tcp_listener->tcp_syn_defense) { 8656 /* 8657 * Ditch the half-open connection if we 8658 * suspect a SYN attack is under way. 8659 */ 8660 tcp_ip_ire_mark_advice(tcp); 8661 (void) tcp_clean_death(tcp, 8662 tcp->tcp_client_errno, 7); 8663 } 8664 } 8665 break; 8666 default: 8667 break; 8668 } 8669 break; 8670 case ICMP_SOURCE_QUENCH: { 8671 /* 8672 * use a global boolean to control 8673 * whether TCP should respond to ICMP_SOURCE_QUENCH. 8674 * The default is false. 8675 */ 8676 if (tcp_icmp_source_quench) { 8677 /* 8678 * Reduce the sending rate as if we got a 8679 * retransmit timeout 8680 */ 8681 uint32_t npkt; 8682 8683 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / 8684 tcp->tcp_mss; 8685 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * tcp->tcp_mss; 8686 tcp->tcp_cwnd = tcp->tcp_mss; 8687 tcp->tcp_cwnd_cnt = 0; 8688 } 8689 break; 8690 } 8691 } 8692 freemsg(first_mp); 8693 } 8694 8695 /* 8696 * tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6 8697 * error messages passed up by IP. 8698 * Assumes that IP has pulled up all the extension headers as well 8699 * as the ICMPv6 header. 
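 *
 * As in the IPv4 case, the TCP sequence number embedded in the
 * offending packet is validated against SND.UNA <= SEG.SEQ < SND.NXT
 * before the error is acted upon, so off-window ICMPv6 errors are
 * quietly dropped.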

/*
 * tcp_icmp_error_ipv6 is called by tcp_rput_other to process ICMPv6
 * error messages passed up by IP.
 * Assumes that IP has pulled up all the extension headers as well
 * as the ICMPv6 header.
 */
static void
tcp_icmp_error_ipv6(tcp_t *tcp, mblk_t *mp, boolean_t ipsec_mctl)
{
	icmp6_t	*icmp6;
	ip6_t	*ip6h;
	uint16_t iph_hdr_length;
	tcpha_t	*tcpha;
	uint8_t	*nexthdrp;
	uint32_t new_mss;
	uint32_t ratio;
	boolean_t secure;
	mblk_t	*first_mp = mp;
	size_t	mp_size;
	uint32_t seg_seq;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	/*
	 * The caller has determined if this is an IPSEC_IN packet and
	 * set ipsec_mctl appropriately (see tcp_icmp_error).
	 */
	if (ipsec_mctl)
		mp = mp->b_cont;

	mp_size = MBLKL(mp);

	/*
	 * Verify that we have a complete IP header. If not, send it upstream.
	 */
	if (mp_size < sizeof (ip6_t)) {
noticmpv6:
		freemsg(first_mp);
		return;
	}

	/*
	 * Verify this is an ICMPV6 packet, else send it upstream.
	 */
	ip6h = (ip6_t *)mp->b_rptr;
	if (ip6h->ip6_nxt == IPPROTO_ICMPV6) {
		iph_hdr_length = IPV6_HDR_LEN;
	} else if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
	    &nexthdrp) ||
	    *nexthdrp != IPPROTO_ICMPV6) {
		goto noticmpv6;
	}
	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
	ip6h = (ip6_t *)&icmp6[1];
	/*
	 * Verify that we have a complete ICMP and inner IP header.
	 */
	if ((uchar_t *)&ip6h[1] > mp->b_wptr)
		goto noticmpv6;

	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp))
		goto noticmpv6;
	tcpha = (tcpha_t *)((char *)ip6h + iph_hdr_length);
	/*
	 * Validate the inner header. If the ULP is not IPPROTO_TCP or if we
	 * don't have at least ICMP_MIN_TCP_HDR bytes of TCP header, drop
	 * the packet.
	 */
	if ((*nexthdrp != IPPROTO_TCP) ||
	    ((uchar_t *)tcpha + ICMP_MIN_TCP_HDR) > mp->b_wptr) {
		goto noticmpv6;
	}

	/*
	 * ICMP errors come on the right queue or come on
	 * listener/global queue for detached connections and
	 * get switched to the right queue. If it comes on the
	 * right queue, policy check has already been done by IP
	 * and thus free the first_mp without verifying the policy.
	 * If it has come for a non-hard bound connection, we need
	 * to verify policy as IP may not have done it.
	 */
	if (!tcp->tcp_hard_bound) {
		if (ipsec_mctl) {
			secure = ipsec_in_is_secure(first_mp);
		} else {
			secure = B_FALSE;
		}
		if (secure) {
			/*
			 * If we are willing to accept this in clear
			 * we don't have to verify policy.
			 */
			if (!ipsec_inbound_accept_clear(mp, NULL, ip6h)) {
				if (!tcp_check_policy(tcp, first_mp,
				    NULL, ip6h, secure, ipsec_mctl)) {
					/*
					 * tcp_check_policy called
					 * ip_drop_packet() on failure.
					 */
					return;
				}
			}
		}
	} else if (ipsec_mctl) {
		/*
		 * This is a hard_bound connection. IP has already
		 * verified policy. We don't have to do it again.
		 */
		freeb(first_mp);
		first_mp = mp;
		ipsec_mctl = B_FALSE;
	}

	seg_seq = ntohl(tcpha->tha_seq);
	/*
	 * TCP SHOULD check that the TCP sequence number contained in
	 * the payload of the ICMP error message is within the range
	 * SND.UNA <= SEG.SEQ < SND.NXT.
	 */
	if (SEQ_LT(seg_seq, tcp->tcp_suna) || SEQ_GEQ(seg_seq, tcp->tcp_snxt)) {
		/*
		 * If the ICMP message is bogus, should we kill the
		 * connection, or should we just drop the bogus ICMP
		 * message? It would probably make more sense to just
		 * drop the message so that if this one managed to get
		 * in, the real connection should not suffer.
		 */
		goto noticmpv6;
	}

	switch (icmp6->icmp6_type) {
	case ICMP6_PACKET_TOO_BIG:
		/*
		 * Reduce the MSS based on the new MTU. This will
		 * eliminate any fragmentation locally.
		 * N.B. There may well be some funny side-effects on
		 * the local send policy and the remote receive policy.
		 * Pending further research, we provide
		 * tcp_ignore_path_mtu just in case this proves
		 * disastrous somewhere.
		 *
		 * After updating the MSS, retransmit part of the
		 * dropped segment using the new mss by calling
		 * tcp_wput_data(). Need to adjust all those
		 * params to make sure tcp_wput_data() works properly.
		 */
		if (tcps->tcps_ignore_path_mtu)
			break;

		/*
		 * Decrease the MSS by time stamp options,
		 * IP options and IPSEC options. tcp_hdr_len
		 * includes time stamp option and IP option
		 * length.
		 */
		new_mss = ntohs(icmp6->icmp6_mtu) - tcp->tcp_hdr_len -
		    tcp->tcp_ipsec_overhead;

		/*
		 * Only update the MSS if the new one is
		 * smaller than the previous one. This is
		 * to avoid problems when getting multiple
		 * ICMP errors for the same MTU.
		 */
		if (new_mss >= tcp->tcp_mss)
			break;

		ratio = tcp->tcp_cwnd / tcp->tcp_mss;
		ASSERT(ratio >= 1);
		tcp_mss_set(tcp, new_mss, B_TRUE);

		/*
		 * Make sure we have something to
		 * send.
		 */
		if (SEQ_LT(tcp->tcp_suna, tcp->tcp_snxt) &&
		    (tcp->tcp_xmit_head != NULL)) {
			/*
			 * Shrink tcp_cwnd in
			 * proportion to the old MSS/new MSS.
			 */
			tcp->tcp_cwnd = ratio * tcp->tcp_mss;
			if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
			    (tcp->tcp_unsent == 0)) {
				tcp->tcp_rexmit_max = tcp->tcp_fss;
			} else {
				tcp->tcp_rexmit_max = tcp->tcp_snxt;
			}
			tcp->tcp_rexmit_nxt = tcp->tcp_suna;
			tcp->tcp_rexmit = B_TRUE;
			tcp->tcp_dupack_cnt = 0;
			tcp->tcp_snd_burst = TCP_CWND_SS;
			tcp_ss_rexmit(tcp);
		}
		break;

	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				(void) tcp_clean_death(tcp,
				    ECONNREFUSED, 8);
			}
			break;

		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Record the error in case we finally time out. */
			tcp->tcp_client_errno = EHOSTUNREACH;
			if (((tcp->tcp_state == TCPS_SYN_SENT) ||
			    (tcp->tcp_state == TCPS_SYN_RCVD)) &&
			    (seg_seq == tcp->tcp_iss)) {
				if (tcp->tcp_listener != NULL &&
				    tcp->tcp_listener->tcp_syn_defense) {
					/*
					 * Ditch the half-open connection if we
					 * suspect a SYN attack is under way.
					 */
					tcp_ip_ire_mark_advice(tcp);
					(void) tcp_clean_death(tcp,
					    tcp->tcp_client_errno, 9);
				}
			}
			break;
		default:
			break;
		}
		break;

	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			if (tcp->tcp_state == TCPS_SYN_SENT ||
			    tcp->tcp_state == TCPS_SYN_RCVD) {
				(void) tcp_clean_death(tcp,
				    ECONNREFUSED, 10);
			}
			break;
		}
		break;

	case ICMP6_TIME_EXCEEDED:
	default:
		break;
	}
	freemsg(first_mp);
}
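
/*
 * Illustrative sketch (not part of the build): the SND.UNA <= SEG.SEQ <
 * SND.NXT window test used by both ICMP handlers above, with the serial
 * (mod 2^32) comparisons written out. seq_lt()/seq_geq() are local
 * stand-ins for the SEQ_LT/SEQ_GEQ macros.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
seq_lt(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) < 0);	/* serial arithmetic comparison */
}

static bool
seq_geq(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) >= 0);
}

/* Accept the ICMP error only if its embedded sequence number is in flight. */
static bool
icmp_seq_in_window(uint32_t seg_seq, uint32_t suna, uint32_t snxt)
{
	return (!seq_lt(seg_seq, suna) && !seq_geq(seg_seq, snxt));
}
#endif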

/*
 * IP recognizes seven kinds of bind requests:
 *
 * - A zero-length address binds only to the protocol number.
 *
 * - A 4-byte address is treated as a request to
 *   validate that the address is a valid local IPv4
 *   address, appropriate for an application to bind to.
 *   IP does the verification, but does not make any note
 *   of the address at this time.
 *
 * - A 16-byte address is treated as a request
 *   to validate a local IPv6 address, as the 4-byte
 *   address case above.
 *
 * - A 16-byte sockaddr_in to validate the local IPv4 address and also
 *   use it for the inbound fanout of packets.
 *
 * - A 24-byte sockaddr_in6 to validate the local IPv6 address and also
 *   use it for the inbound fanout of packets.
 *
 * - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
 *   information consisting of local and remote addresses
 *   and ports. In this case, the addresses are both
 *   validated as appropriate for this operation, and, if
 *   so, the information is retained for use in the
 *   inbound fanout.
 *
 * - A 36-byte address (ipa6_conn_t) containing complete IPv6
 *   fanout information, like the 12-byte case above.
 *
 * IP will also fill in the IRE request mblk with information
 * regarding our peer. In all cases, we notify IP of our protocol
 * type by appending a single protocol byte to the bind request.
 */
static mblk_t *
tcp_ip_bind_mp(tcp_t *tcp, t_scalar_t bind_prim, t_scalar_t addr_length)
{
	char	*cp;
	mblk_t	*mp;
	struct T_bind_req *tbr;
	ipa_conn_t	*ac;
	ipa6_conn_t	*ac6;
	sin_t	*sin;
	sin6_t	*sin6;

	ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ);
	ASSERT((tcp->tcp_family == AF_INET &&
	    tcp->tcp_ipversion == IPV4_VERSION) ||
	    (tcp->tcp_family == AF_INET6 &&
	    (tcp->tcp_ipversion == IPV4_VERSION ||
	    tcp->tcp_ipversion == IPV6_VERSION)));

	mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI);
	if (!mp)
		return (mp);
	mp->b_datap->db_type = M_PROTO;
	tbr = (struct T_bind_req *)mp->b_rptr;
	tbr->PRIM_type = bind_prim;
	tbr->ADDR_offset = sizeof (*tbr);
	tbr->CONIND_number = 0;
	tbr->ADDR_length = addr_length;
	cp = (char *)&tbr[1];
	switch (addr_length) {
	case sizeof (ipa_conn_t):
		ASSERT(tcp->tcp_family == AF_INET);
		ASSERT(tcp->tcp_ipversion == IPV4_VERSION);

		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
		if (mp->b_cont == NULL) {
			freemsg(mp);
			return (NULL);
		}
		mp->b_cont->b_wptr += sizeof (ire_t);
		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;

		/* cp known to be 32 bit aligned */
		ac = (ipa_conn_t *)cp;
		ac->ac_laddr = tcp->tcp_ipha->ipha_src;
		ac->ac_faddr = tcp->tcp_remote;
		ac->ac_fport = tcp->tcp_fport;
		ac->ac_lport = tcp->tcp_lport;
		tcp->tcp_hard_binding = 1;
		break;

	case sizeof (ipa6_conn_t):
		ASSERT(tcp->tcp_family == AF_INET6);

		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
		if (mp->b_cont == NULL) {
			freemsg(mp);
			return (NULL);
		}
		mp->b_cont->b_wptr += sizeof (ire_t);
		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;

		/* cp known to be 32 bit aligned */
		ac6 = (ipa6_conn_t *)cp;
		if (tcp->tcp_ipversion == IPV4_VERSION) {
			IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src,
			    &ac6->ac6_laddr);
		} else {
			ac6->ac6_laddr = tcp->tcp_ip6h->ip6_src;
		}
		ac6->ac6_faddr = tcp->tcp_remote_v6;
		ac6->ac6_fport = tcp->tcp_fport;
		ac6->ac6_lport = tcp->tcp_lport;
		tcp->tcp_hard_binding = 1;
		break;

	case sizeof (sin_t):
		/*
		 * NOTE: IPV6_ADDR_LEN also has the same size.
		 * Use family to discriminate.
		 */
		if (tcp->tcp_family == AF_INET) {
			sin = (sin_t *)cp;

			*sin = sin_null;
			sin->sin_family = AF_INET;
			sin->sin_addr.s_addr = tcp->tcp_bound_source;
			sin->sin_port = tcp->tcp_lport;
			break;
		} else {
			*(in6_addr_t *)cp = tcp->tcp_bound_source_v6;
		}
		break;

	case sizeof (sin6_t):
		ASSERT(tcp->tcp_family == AF_INET6);
		sin6 = (sin6_t *)cp;

		*sin6 = sin6_null;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = tcp->tcp_bound_source_v6;
		sin6->sin6_port = tcp->tcp_lport;
		break;

	case IP_ADDR_LEN:
		ASSERT(tcp->tcp_ipversion == IPV4_VERSION);
		*(uint32_t *)cp = tcp->tcp_ipha->ipha_src;
		break;

	}
	/* Add protocol number to end */
	cp[addr_length] = (char)IPPROTO_TCP;
	mp->b_wptr = (uchar_t *)&cp[addr_length + 1];
	return (mp);
}
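
/*
 * Illustrative sketch (not part of the build): why the ipa_conn_t case is
 * the "12-byte address" described above, and how the trailing protocol
 * byte makes the allocation sizeof (T_bind_req) + addr_length + 1. The
 * mirror struct and names here are hypothetical, not kernel definitions.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ipa_conn_mirror {		/* hypothetical mirror of ipa_conn_t */
	uint32_t ac_laddr;		/* local IPv4 address */
	uint32_t ac_faddr;		/* foreign IPv4 address */
	uint16_t ac_fport;		/* foreign port */
	uint16_t ac_lport;		/* local port */
};

int
main(void)
{
	size_t addr_length = sizeof (struct ipa_conn_mirror);

	assert(addr_length == 12);	/* the "12-byte" full-fanout request */
	/* One extra byte carries IPPROTO_TCP at offset addr_length. */
	printf("payload after T_bind_req: %zu bytes\n", addr_length + 1);
	return (0);
}
#endif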

/*
 * Notify IP that we are having trouble with this connection. IP should
 * blow the IRE away and start over.
 */
static void
tcp_ip_notify(tcp_t *tcp)
{
	struct iocblk	*iocp;
	ipid_t	*ipid;
	mblk_t	*mp;

	/* IPv6 has NUD thus notification to delete the IRE is not needed */
	if (tcp->tcp_ipversion == IPV6_VERSION)
		return;

	mp = mkiocb(IP_IOCTL);
	if (mp == NULL)
		return;

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_count = sizeof (ipid_t) + sizeof (tcp->tcp_ipha->ipha_dst);

	mp->b_cont = allocb(iocp->ioc_count, BPRI_HI);
	if (!mp->b_cont) {
		freeb(mp);
		return;
	}

	ipid = (ipid_t *)mp->b_cont->b_rptr;
	mp->b_cont->b_wptr += iocp->ioc_count;
	bzero(ipid, sizeof (*ipid));
	ipid->ipid_cmd = IP_IOC_IRE_DELETE_NO_REPLY;
	ipid->ipid_ire_type = IRE_CACHE;
	ipid->ipid_addr_offset = sizeof (ipid_t);
	ipid->ipid_addr_length = sizeof (tcp->tcp_ipha->ipha_dst);
	/*
	 * Note: in the case of source routing we want to blow away the
	 * route to the first source route hop.
	 */
	bcopy(&tcp->tcp_ipha->ipha_dst, &ipid[1],
	    sizeof (tcp->tcp_ipha->ipha_dst));

	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
}

/* Unlink and return any mblk that looks like it contains an ire */
static mblk_t *
tcp_ire_mp(mblk_t *mp)
{
	mblk_t	*prev_mp;

	for (;;) {
		prev_mp = mp;
		mp = mp->b_cont;
		if (mp == NULL)
			break;
		switch (DB_TYPE(mp)) {
		case IRE_DB_TYPE:
		case IRE_DB_REQ_TYPE:
			if (prev_mp != NULL)
				prev_mp->b_cont = mp->b_cont;
			mp->b_cont = NULL;
			return (mp);
		default:
			break;
		}
	}
	return (mp);
}

/*
 * Timer callback routine for keepalive probe. We do a fake resend of
 * last ACKed byte. Then set a timer using RTO. When the timer expires,
 * check to see if we have heard anything from the other end for the last
 * RTO period. If we have, set the timer to expire for another
 * tcp_keepalive_intrvl and check again. If we have not, set a timer using
 * RTO << 1 and check again when it expires. Keep exponentially increasing
 * the timeout if we have not heard from the other side. If for more than
 * (tcp_ka_interval + tcp_ka_abort_thres) we have not heard anything,
 * kill the connection unless the keepalive abort threshold is 0. In
 * that case, we will probe "forever."
 */
static void
tcp_keepalive_killer(void *arg)
{
	mblk_t	*mp;
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;
	int32_t	firetime;
	int32_t	idletime;
	int32_t	ka_intrvl;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tcp->tcp_ka_tid = 0;

	if (tcp->tcp_fused)
		return;

	BUMP_MIB(&tcps->tcps_mib, tcpTimKeepalive);
	ka_intrvl = tcp->tcp_ka_interval;

	/*
	 * Keepalive probe should only be sent if the application has not
	 * done a close on the connection.
	 */
	if (tcp->tcp_state > TCPS_CLOSE_WAIT) {
		return;
	}
	/* Timer fired too early, restart it. */
	if (tcp->tcp_state < TCPS_ESTABLISHED) {
		tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
		    MSEC_TO_TICK(ka_intrvl));
		return;
	}

	idletime = TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time);
	/*
	 * If we have not heard from the other side for a long
	 * time, kill the connection unless the keepalive abort
	 * threshold is 0. In that case, we will probe "forever."
	 */
	if (tcp->tcp_ka_abort_thres != 0 &&
	    idletime > (ka_intrvl + tcp->tcp_ka_abort_thres)) {
		BUMP_MIB(&tcps->tcps_mib, tcpTimKeepaliveDrop);
		(void) tcp_clean_death(tcp, tcp->tcp_client_errno ?
		    tcp->tcp_client_errno : ETIMEDOUT, 11);
		return;
	}

	if (tcp->tcp_snxt == tcp->tcp_suna &&
	    idletime >= ka_intrvl) {
		/* Fake resend of last ACKed byte. */
		mblk_t	*mp1 = allocb(1, BPRI_LO);

		if (mp1 != NULL) {
			*mp1->b_wptr++ = '\0';
			mp = tcp_xmit_mp(tcp, mp1, 1, NULL, NULL,
			    tcp->tcp_suna - 1, B_FALSE, NULL, B_TRUE);
			freeb(mp1);
			/*
			 * If the allocation failed, fall through to
			 * restart the timer.
			 */
			if (mp != NULL) {
				TCP_RECORD_TRACE(tcp, mp,
				    TCP_TRACE_SEND_PKT);
				tcp_send_data(tcp, tcp->tcp_wq, mp);
				BUMP_MIB(&tcps->tcps_mib,
				    tcpTimKeepaliveProbe);
				if (tcp->tcp_ka_last_intrvl != 0) {
					int max;
					/*
					 * We should probe again at least
					 * in ka_intrvl, but not more than
					 * tcp_rexmit_interval_max.
					 */
					max = tcps->tcps_rexmit_interval_max;
					firetime = MIN(ka_intrvl - 1,
					    tcp->tcp_ka_last_intrvl << 1);
					if (firetime > max)
						firetime = max;
				} else {
					firetime = tcp->tcp_rto;
				}
				tcp->tcp_ka_tid = TCP_TIMER(tcp,
				    tcp_keepalive_killer,
				    MSEC_TO_TICK(firetime));
				tcp->tcp_ka_last_intrvl = firetime;
				return;
			}
		}
	} else {
		tcp->tcp_ka_last_intrvl = 0;
	}

	/* firetime can be negative if (mp1 == NULL || mp == NULL) */
	if ((firetime = ka_intrvl - idletime) < 0) {
		firetime = ka_intrvl;
	}
	tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
	    MSEC_TO_TICK(firetime));
}
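
/*
 * Illustrative sketch (not part of the build): how an application would
 * arm the keepalive machinery above from user space. SO_KEEPALIVE is
 * standard; TCP_KEEPALIVE_THRESHOLD and TCP_KEEPALIVE_ABORT_THRESHOLD are
 * the Solaris knobs (in milliseconds) behind tcp_ka_interval and
 * tcp_ka_abort_thres. fd is assumed connected; error handling is elided.
 */
#if 0
#include <sys/socket.h>
#include <netinet/tcp.h>

static void
enable_keepalive(int fd)
{
	int on = 1;
	unsigned int probe_ms = 30 * 1000;	/* first probe after 30s idle */
	unsigned int abort_ms = 120 * 1000;	/* give up 120s after that */

	(void) setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof (on));
	(void) setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD,
	    &probe_ms, sizeof (probe_ms));
	(void) setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD,
	    &abort_ms, sizeof (abort_ms));
}
#endif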

int
tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
{
	queue_t	*q = tcp->tcp_rq;
	int32_t	mss = tcp->tcp_mss;
	int	maxpsz;

	if (TCP_IS_DETACHED(tcp))
		return (mss);

	if (tcp->tcp_fused) {
		maxpsz = tcp_fuse_maxpsz_set(tcp);
		mss = INFPSZ;
	} else if (tcp->tcp_mdt || tcp->tcp_lso || tcp->tcp_maxpsz == 0) {
		/*
		 * Set the sd_qn_maxpsz according to the socket send buffer
		 * size, and sd_maxblk to INFPSZ (-1). This will essentially
		 * instruct the stream head to copyin user data into contiguous
		 * kernel-allocated buffers without breaking it up into smaller
		 * chunks. We round up the buffer size to the nearest SMSS.
		 */
		maxpsz = MSS_ROUNDUP(tcp->tcp_xmit_hiwater, mss);
		if (tcp->tcp_kssl_ctx == NULL)
			mss = INFPSZ;
		else
			mss = SSL3_MAX_RECORD_LEN;
	} else {
		/*
		 * Set sd_qn_maxpsz to approx half the (receiver's) buffer
		 * (and a multiple of the mss). This instructs the stream
		 * head to break down larger than SMSS writes into SMSS-
		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
		 */
		maxpsz = tcp->tcp_maxpsz * mss;
		if (maxpsz > tcp->tcp_xmit_hiwater/2) {
			maxpsz = tcp->tcp_xmit_hiwater/2;
			/* Round up to nearest mss */
			maxpsz = MSS_ROUNDUP(maxpsz, mss);
		}
	}
	(void) setmaxps(q, maxpsz);
	tcp->tcp_wq->q_maxpsz = maxpsz;

	if (set_maxblk)
		(void) mi_set_sth_maxblk(q, mss);

	return (mss);
}

/*
 * Extract option values from a tcp header. We put any found values into the
 * tcpopt struct and return a bitmask saying which options were found.
 */
static int
tcp_parse_options(tcph_t *tcph, tcp_opt_t *tcpopt)
{
	uchar_t	*endp;
	int	len;
	uint32_t	mss;
	uchar_t	*up = (uchar_t *)tcph;
	int	found = 0;
	int32_t	sack_len;
	tcp_seq	sack_begin, sack_end;
	tcp_t	*tcp;

	endp = up + TCP_HDR_LENGTH(tcph);
	up += TCP_MIN_HEADER_LENGTH;
	while (up < endp) {
		len = endp - up;
		switch (*up) {
		case TCPOPT_EOL:
			break;

		case TCPOPT_NOP:
			up++;
			continue;

		case TCPOPT_MAXSEG:
			if (len < TCPOPT_MAXSEG_LEN ||
			    up[1] != TCPOPT_MAXSEG_LEN)
				break;

			mss = BE16_TO_U16(up+2);
			/* Caller must handle tcp_mss_min and tcp_mss_max_* */
			tcpopt->tcp_opt_mss = mss;
			found |= TCP_OPT_MSS_PRESENT;

			up += TCPOPT_MAXSEG_LEN;
			continue;

		case TCPOPT_WSCALE:
			if (len < TCPOPT_WS_LEN || up[1] != TCPOPT_WS_LEN)
				break;

			if (up[2] > TCP_MAX_WINSHIFT)
				tcpopt->tcp_opt_wscale = TCP_MAX_WINSHIFT;
			else
				tcpopt->tcp_opt_wscale = up[2];
			found |= TCP_OPT_WSCALE_PRESENT;

			up += TCPOPT_WS_LEN;
			continue;

		case TCPOPT_SACK_PERMITTED:
			if (len < TCPOPT_SACK_OK_LEN ||
			    up[1] != TCPOPT_SACK_OK_LEN)
				break;
			found |= TCP_OPT_SACK_OK_PRESENT;
			up += TCPOPT_SACK_OK_LEN;
			continue;

		case TCPOPT_SACK:
			if (len <= 2 || up[1] <= 2 || len < up[1])
				break;

			/* If TCP is not interested in SACK blks... */
			if ((tcp = tcpopt->tcp) == NULL) {
				up += up[1];
				continue;
			}
			sack_len = up[1] - TCPOPT_HEADER_LEN;
			up += TCPOPT_HEADER_LEN;

			/*
			 * If the list is empty, allocate one and assume
			 * nothing is sack'ed.
			 */
			ASSERT(tcp->tcp_sack_info != NULL);
			if (tcp->tcp_notsack_list == NULL) {
				tcp_notsack_update(&(tcp->tcp_notsack_list),
				    tcp->tcp_suna, tcp->tcp_snxt,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));

				/*
				 * Make sure tcp_notsack_list is not NULL.
				 * This happens when kmem_alloc(KM_NOSLEEP)
				 * returns NULL.
				 */
				if (tcp->tcp_notsack_list == NULL) {
					up += sack_len;
					continue;
				}
				tcp->tcp_fack = tcp->tcp_suna;
			}

			while (sack_len > 0) {
				if (up + 8 > endp) {
					up = endp;
					break;
				}
				sack_begin = BE32_TO_U32(up);
				up += 4;
				sack_end = BE32_TO_U32(up);
				up += 4;
				sack_len -= 8;
				/*
				 * Bounds checking. Make sure the SACK
				 * info is within tcp_suna and tcp_snxt.
				 * If this SACK blk is out of bound, ignore
				 * it but continue to parse the following
				 * blks.
				 */
				if (SEQ_LEQ(sack_end, sack_begin) ||
				    SEQ_LT(sack_begin, tcp->tcp_suna) ||
				    SEQ_GT(sack_end, tcp->tcp_snxt)) {
					continue;
				}
				tcp_notsack_insert(&(tcp->tcp_notsack_list),
				    sack_begin, sack_end,
				    &(tcp->tcp_num_notsack_blk),
				    &(tcp->tcp_cnt_notsack_list));
				if (SEQ_GT(sack_end, tcp->tcp_fack)) {
					tcp->tcp_fack = sack_end;
				}
			}
			found |= TCP_OPT_SACK_PRESENT;
			continue;

		case TCPOPT_TSTAMP:
			if (len < TCPOPT_TSTAMP_LEN ||
			    up[1] != TCPOPT_TSTAMP_LEN)
				break;

			tcpopt->tcp_opt_ts_val = BE32_TO_U32(up+2);
			tcpopt->tcp_opt_ts_ecr = BE32_TO_U32(up+6);

			found |= TCP_OPT_TSTAMP_PRESENT;

			up += TCPOPT_TSTAMP_LEN;
			continue;

		default:
			if (len <= 1 || len < (int)up[1] || up[1] == 0)
				break;
			up += up[1];
			continue;
		}
		break;
	}
	return (found);
}
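
/*
 * Illustrative sketch (not part of the build): the kind/length walk that
 * tcp_parse_options() performs, reduced to a stand-alone parser that pulls
 * just the MSS option out of a raw options area. opts/opts_len describe
 * the bytes after the fixed 20-byte TCP header; constants are the RFC 793
 * option kinds, redefined locally.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define	OPT_EOL		0
#define	OPT_NOP		1
#define	OPT_MAXSEG	2
#define	OPT_MAXSEG_LEN	4

/* Return the MSS option value, or 0 if absent or malformed. */
static uint16_t
find_mss_option(const uint8_t *opts, size_t opts_len)
{
	const uint8_t *up = opts, *endp = opts + opts_len;

	while (up < endp) {
		if (*up == OPT_EOL)
			break;
		if (*up == OPT_NOP) {		/* single-byte padding */
			up++;
			continue;
		}
		if (endp - up < 2 || up[1] < 2 || up[1] > endp - up)
			break;			/* malformed length octet */
		if (*up == OPT_MAXSEG && up[1] == OPT_MAXSEG_LEN)
			return ((uint16_t)(up[2] << 8 | up[3]));
		up += up[1];			/* skip any other option */
	}
	return (0);
}
#endif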

/*
 * Set the mss associated with a particular tcp based on its current value,
 * and a new one passed in. Observe minimums and maximums, and reset
 * other state variables that we want to view as multiples of mss.
 *
 * This function is called in various places, mainly because:
 * 1) Various things such as tcp_mss, tcp_cwnd, ... need to be adjusted when
 *    the other side's SYN/SYN-ACK packet arrives.
 * 2) PMTUd may get us a new MSS.
 * 3) If the other side stops sending us timestamp option, we need to
 *    increase the MSS size to use the extra bytes available.
 *
 * do_ss is used to control whether we will be doing slow start or
 * not if there is a change in the mss. Note that for some events like
 * tcp_paws_check() we allow the tcp_cwnd to adjust to the new mss but
 * do not perform a slow start specifically.
 */
static void
tcp_mss_set(tcp_t *tcp, uint32_t mss, boolean_t do_ss)
{
	uint32_t	mss_max;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;
	/*
	 * Unless naglim has been set by our client to
	 * a non-mss value, force naglim to track mss.
	 * This can help to aggregate small writes.
	 */
	if (mss < tcp->tcp_naglim || tcp->tcp_mss == tcp->tcp_naglim)
		tcp->tcp_naglim = mss;
	/*
	 * TCP should be able to buffer at least 4 MSS data for obvious
	 * performance reasons.
	 */
	if ((mss << 2) > tcp->tcp_xmit_hiwater)
		tcp->tcp_xmit_hiwater = mss << 2;

	/*
	 * Check if we need to apply the tcp_init_cwnd here. If
	 * it is set and the MSS gets bigger (should not happen
	 * normally), we need to adjust the resulting tcp_cwnd properly.
	 * The new tcp_cwnd should not get bigger.
	 *
	 * We need to avoid setting tcp_cwnd to its slow start value
	 * unnecessarily. However we have to let the tcp_cwnd adjust
	 * to the modified mss.
	 */
	if (tcp->tcp_init_cwnd == 0 && do_ss) {
		tcp->tcp_cwnd = MIN(tcps->tcps_slow_start_initial *
		    mss, MIN(4 * mss, MAX(2 * mss, 4380 / mss * mss)));
	} else {
		if (tcp->tcp_mss < mss) {
			tcp->tcp_cwnd = MAX(1,
			    (tcp->tcp_init_cwnd * tcp->tcp_mss /
			    mss)) * mss;
		} else {
			tcp->tcp_cwnd = tcp->tcp_init_cwnd * mss;
		}
	}
	tcp->tcp_mss = mss;
	tcp->tcp_cwnd_cnt = 0;
	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
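
/*
 * Illustrative sketch (not part of the build): the slow-start initial
 * window chosen above when tcp_init_cwnd is unset. The inner expression is
 * the RFC 3390 form min(4*MSS, max(2*MSS, 4380 bytes)) with 4380 rounded
 * down to a multiple of the MSS; slow_start_initial stands in for the
 * stack-tunable cap tcps_slow_start_initial.
 */
#if 0
#include <stdint.h>

static uint32_t
initial_cwnd(uint32_t mss, uint32_t slow_start_initial)
{
	uint32_t rfc3390 = 4380 / mss * mss;	/* truncate to MSS multiple */

	if (rfc3390 < 2 * mss)
		rfc3390 = 2 * mss;
	if (rfc3390 > 4 * mss)
		rfc3390 = 4 * mss;
	if (slow_start_initial * mss < rfc3390)
		return (slow_start_initial * mss);
	return (rfc3390);
}
#endif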

static int
tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp;
	int		err;
	dev_t		conn_dev;
	zoneid_t	zoneid;
	tcp_stack_t	*tcps = NULL;

	if (q->q_ptr != NULL)
		return (0);

	if (!(flag & SO_ACCEPTOR)) {
		/*
		 * Special case for install: miniroot needs to be able to
		 * access files via NFS as though it were always in the
		 * global zone.
		 */
		if (credp == kcred && nfs_global_client_only != 0) {
			zoneid = GLOBAL_ZONEID;
			tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
			    netstack_tcp;
			ASSERT(tcps != NULL);
		} else {
			netstack_t *ns;

			ns = netstack_find_by_cred(credp);
			ASSERT(ns != NULL);
			tcps = ns->netstack_tcp;
			ASSERT(tcps != NULL);

			/*
			 * For exclusive stacks we set the zoneid to zero
			 * to make TCP operate as if in the global zone.
			 */
			if (tcps->tcps_netstack->netstack_stackid !=
			    GLOBAL_NETSTACKID)
				zoneid = GLOBAL_ZONEID;
			else
				zoneid = crgetzoneid(credp);
		}
		/*
		 * For stackid zero this is done from strplumb.c, but
		 * non-zero stackids are handled here.
		 */
		if (tcps->tcps_g_q == NULL &&
		    tcps->tcps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID) {
			tcp_g_q_setup(tcps);
		}
	}
	if (sflag == MODOPEN) {
		/*
		 * This is a special case. The purpose of a modopen
		 * is to allow just the T_SVR4_OPTMGMT_REQ to pass
		 * through for MIB browsers. Everything else is failed.
		 */
		connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt), tcps);
		/* tcp_get_conn incremented refcnt */
		netstack_rele(tcps->tcps_netstack);

		if (connp == NULL)
			return (ENOMEM);

		connp->conn_flags |= IPCL_TCPMOD;
		connp->conn_cred = credp;
		connp->conn_zoneid = zoneid;
		ASSERT(connp->conn_netstack == tcps->tcps_netstack);
		ASSERT(connp->conn_netstack->netstack_tcp == tcps);
		q->q_ptr = WR(q)->q_ptr = connp;
		crhold(credp);
		q->q_qinfo = &tcp_mod_rinit;
		WR(q)->q_qinfo = &tcp_mod_winit;
		qprocson(q);
		return (0);
	}
	if ((conn_dev = inet_minor_alloc(ip_minor_arena)) == 0) {
		if (tcps != NULL)
			netstack_rele(tcps->tcps_netstack);
		return (EBUSY);
	}

	*devp = makedevice(getemajor(*devp), (minor_t)conn_dev);

	if (flag & SO_ACCEPTOR) {
		/* No netstack_find_by_cred, hence no netstack_rele needed */
		ASSERT(tcps == NULL);
		q->q_qinfo = &tcp_acceptor_rinit;
		q->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_acceptor_winit;
		WR(q)->q_ptr = (void *)conn_dev;
		qprocson(q);
		return (0);
	}

	connp = (conn_t *)tcp_get_conn(IP_SQUEUE_GET(lbolt), tcps);
	/*
	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
	 * so we drop it by one.
	 */
	netstack_rele(tcps->tcps_netstack);
	if (connp == NULL) {
		inet_minor_free(ip_minor_arena, conn_dev);
		q->q_ptr = NULL;
		return (ENOSR);
	}
	connp->conn_sqp = IP_SQUEUE_GET(lbolt);
	tcp = connp->conn_tcp;

	q->q_ptr = WR(q)->q_ptr = connp;
	if (getmajor(*devp) == TCP6_MAJ) {
		connp->conn_flags |= (IPCL_TCP6|IPCL_ISV6);
		connp->conn_send = ip_output_v6;
		connp->conn_af_isv6 = B_TRUE;
		connp->conn_pkt_isv6 = B_TRUE;
		connp->conn_src_preferences = IPV6_PREFER_SRC_DEFAULT;
		tcp->tcp_ipversion = IPV6_VERSION;
		tcp->tcp_family = AF_INET6;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	} else {
		connp->conn_flags |= IPCL_TCP4;
		connp->conn_send = ip_output;
		connp->conn_af_isv6 = B_FALSE;
		connp->conn_pkt_isv6 = B_FALSE;
		tcp->tcp_ipversion = IPV4_VERSION;
		tcp->tcp_family = AF_INET;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	}

	/*
	 * TCP keeps a copy of cred for cache locality reasons but
	 * we put a reference only once. If connp->conn_cred
	 * becomes invalid, tcp_cred should also be set to NULL.
	 */
	tcp->tcp_cred = connp->conn_cred = credp;
	crhold(connp->conn_cred);
	tcp->tcp_cpid = curproc->p_pid;
	tcp->tcp_open_time = lbolt64;
	connp->conn_zoneid = zoneid;
	connp->conn_mlp_type = mlptSingle;
	connp->conn_ulp_labeled = !is_system_labeled();
	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
	ASSERT(tcp->tcp_tcps == tcps);

	/*
	 * If the caller has the process-wide flag set, then default to MAC
	 * exempt mode. This allows read-down to unlabeled hosts.
	 */
	if (getpflags(NET_MAC_AWARE, credp) != 0)
		connp->conn_mac_exempt = B_TRUE;

	connp->conn_dev = conn_dev;

	ASSERT(q->q_qinfo == &tcp_rinit);
	ASSERT(WR(q)->q_qinfo == &tcp_winit);

	if (flag & SO_SOCKSTR) {
		/*
		 * No need to insert a socket in tcp acceptor hash.
		 * If it was a socket acceptor stream, we dealt with
		 * it above. A socket listener can never accept a
		 * connection and doesn't need acceptor_id.
		 */
		connp->conn_flags |= IPCL_SOCKET;
		tcp->tcp_issocket = 1;
		WR(q)->q_qinfo = &tcp_sock_winit;
	} else {
#ifdef	_ILP32
		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
#else
		tcp->tcp_acceptor_id = conn_dev;
#endif	/* _ILP32 */
		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
	}

	if (tcps->tcps_trace)
		tcp->tcp_tracebuf = kmem_zalloc(sizeof (tcptrch_t), KM_SLEEP);

	err = tcp_init(tcp, q);
	if (err != 0) {
		inet_minor_free(ip_minor_arena, connp->conn_dev);
		tcp_acceptor_hash_remove(tcp);
		CONN_DEC_REF(connp);
		q->q_ptr = WR(q)->q_ptr = NULL;
		return (err);
	}

	RD(q)->q_hiwat = tcps->tcps_recv_hiwat;
	tcp->tcp_rwnd = tcps->tcps_recv_hiwat;

	/* Non-zero default values */
	connp->conn_multicast_loop = IP_DEFAULT_MULTICAST_LOOP;
	/*
	 * Put the ref for TCP. Ref for IP was already put
	 * by ipcl_conn_create. Also make the conn_t globally
	 * visible to walkers.
	 */
	mutex_enter(&connp->conn_lock);
	CONN_INC_REF_LOCKED(connp);
	ASSERT(connp->conn_ref == 2);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);

	qprocson(q);
	return (0);
}

/*
 * Some TCP options can be "set" by requesting them in the option
 * buffer. This is needed for XTI feature tests though we do not
 * allow it in general. We interpret that this mechanism is more
 * applicable to OSI protocols and need not be allowed in general.
 * This routine filters out options for which it is not allowed (most)
 * and lets through those (few) for which it is. [ The XTI interface
 * test suite specifics will imply that any XTI_GENERIC level XTI_* if
 * ever implemented will have to be allowed here ].
 */
static boolean_t
tcp_allow_connopt_set(int level, int name)
{

	switch (level) {
	case IPPROTO_TCP:
		switch (name) {
		case TCP_NODELAY:
			return (B_TRUE);
		default:
			return (B_FALSE);
		}
		/*NOTREACHED*/
	default:
		return (B_FALSE);
	}
	/*NOTREACHED*/
}
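
/*
 * Illustrative sketch (not part of the build): TCP_NODELAY, the one option
 * the filter above lets through, as set from user space. In this module it
 * maps onto tcp_naglim (1 disables Nagle, the MSS re-enables it); see the
 * TCP_NODELAY arm of tcp_opt_set() below. fd is assumed to be a TCP socket.
 */
#if 0
#include <sys/socket.h>
#include <netinet/tcp.h>

static int
set_nodelay(int fd, int enable)
{
	/* Nonzero disables the Nagle algorithm; zero restores it. */
	return (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
	    &enable, sizeof (enable)));
}
#endif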

/*
 * This routine gets default values of certain options whose default
 * values are maintained by protocol-specific code.
 */
/* ARGSUSED */
int
tcp_opt_default(queue_t *q, int level, int name, uchar_t *ptr)
{
	int32_t	*i1 = (int32_t *)ptr;
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	switch (level) {
	case IPPROTO_TCP:
		switch (name) {
		case TCP_NOTIFY_THRESHOLD:
			*i1 = tcps->tcps_ip_notify_interval;
			break;
		case TCP_ABORT_THRESHOLD:
			*i1 = tcps->tcps_ip_abort_interval;
			break;
		case TCP_CONN_NOTIFY_THRESHOLD:
			*i1 = tcps->tcps_ip_notify_cinterval;
			break;
		case TCP_CONN_ABORT_THRESHOLD:
			*i1 = tcps->tcps_ip_abort_cinterval;
			break;
		default:
			return (-1);
		}
		break;
	case IPPROTO_IP:
		switch (name) {
		case IP_TTL:
			*i1 = tcps->tcps_ipv4_ttl;
			break;
		default:
			return (-1);
		}
		break;
	case IPPROTO_IPV6:
		switch (name) {
		case IPV6_UNICAST_HOPS:
			*i1 = tcps->tcps_ipv6_hoplimit;
			break;
		default:
			return (-1);
		}
		break;
	default:
		return (-1);
	}
	return (sizeof (int));
}


/*
 * TCP routine to get the values of options.
 */
int
tcp_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
{
	int		*i1 = (int *)ptr;
	conn_t		*connp = Q_TO_CONN(q);
	tcp_t		*tcp = connp->conn_tcp;
	ip6_pkt_t	*ipp = &tcp->tcp_sticky_ipp;

	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_LINGER:	{
			struct linger *lgr = (struct linger *)ptr;

			lgr->l_onoff = tcp->tcp_linger ? SO_LINGER : 0;
			lgr->l_linger = tcp->tcp_lingertime;
		}
			return (sizeof (struct linger));
		case SO_DEBUG:
			*i1 = tcp->tcp_debug ? SO_DEBUG : 0;
			break;
		case SO_KEEPALIVE:
			*i1 = tcp->tcp_ka_enabled ? SO_KEEPALIVE : 0;
			break;
		case SO_DONTROUTE:
			*i1 = tcp->tcp_dontroute ? SO_DONTROUTE : 0;
			break;
		case SO_USELOOPBACK:
			*i1 = tcp->tcp_useloopback ? SO_USELOOPBACK : 0;
			break;
		case SO_BROADCAST:
			*i1 = tcp->tcp_broadcast ? SO_BROADCAST : 0;
			break;
		case SO_REUSEADDR:
			*i1 = tcp->tcp_reuseaddr ? SO_REUSEADDR : 0;
			break;
		case SO_OOBINLINE:
			*i1 = tcp->tcp_oobinline ? SO_OOBINLINE : 0;
			break;
		case SO_DGRAM_ERRIND:
			*i1 = tcp->tcp_dgram_errind ? SO_DGRAM_ERRIND : 0;
			break;
		case SO_TYPE:
			*i1 = SOCK_STREAM;
			break;
		case SO_SNDBUF:
			*i1 = tcp->tcp_xmit_hiwater;
			break;
		case SO_RCVBUF:
			*i1 = RD(q)->q_hiwat;
			break;
		case SO_SND_COPYAVOID:
			*i1 = tcp->tcp_snd_zcopy_on ?
			    SO_SND_COPYAVOID : 0;
			break;
		case SO_ALLZONES:
			*i1 = connp->conn_allzones ? 1 : 0;
			break;
		case SO_ANON_MLP:
			*i1 = connp->conn_anon_mlp;
			break;
		case SO_MAC_EXEMPT:
			*i1 = connp->conn_mac_exempt;
			break;
		case SO_EXCLBIND:
			*i1 = tcp->tcp_exclbind ? SO_EXCLBIND : 0;
			break;
		case SO_PROTOTYPE:
			*i1 = IPPROTO_TCP;
			break;
		case SO_DOMAIN:
			*i1 = tcp->tcp_family;
			break;
		default:
			return (-1);
		}
		break;
	case IPPROTO_TCP:
		switch (name) {
		case TCP_NODELAY:
			*i1 = (tcp->tcp_naglim == 1) ? TCP_NODELAY : 0;
			break;
		case TCP_MAXSEG:
			*i1 = tcp->tcp_mss;
			break;
		case TCP_NOTIFY_THRESHOLD:
			*i1 = (int)tcp->tcp_first_timer_threshold;
			break;
		case TCP_ABORT_THRESHOLD:
			*i1 = tcp->tcp_second_timer_threshold;
			break;
		case TCP_CONN_NOTIFY_THRESHOLD:
			*i1 = tcp->tcp_first_ctimer_threshold;
			break;
		case TCP_CONN_ABORT_THRESHOLD:
			*i1 = tcp->tcp_second_ctimer_threshold;
			break;
		case TCP_RECVDSTADDR:
			*i1 = tcp->tcp_recvdstaddr;
			break;
		case TCP_ANONPRIVBIND:
			*i1 = tcp->tcp_anon_priv_bind;
			break;
		case TCP_EXCLBIND:
			*i1 = tcp->tcp_exclbind ? TCP_EXCLBIND : 0;
			break;
		case TCP_INIT_CWND:
			*i1 = tcp->tcp_init_cwnd;
			break;
		case TCP_KEEPALIVE_THRESHOLD:
			*i1 = tcp->tcp_ka_interval;
			break;
		case TCP_KEEPALIVE_ABORT_THRESHOLD:
			*i1 = tcp->tcp_ka_abort_thres;
			break;
		case TCP_CORK:
			*i1 = tcp->tcp_cork;
			break;
		default:
			return (-1);
		}
		break;
	case IPPROTO_IP:
		if (tcp->tcp_family != AF_INET)
			return (-1);
		switch (name) {
		case IP_OPTIONS:
		case T_IP_OPTIONS: {
			/*
			 * This is compatible with BSD in that it only
			 * returns the reverse source route with the final
			 * destination as the last entry. The first 4 bytes
			 * of the option will contain the final destination.
			 */
			int	opt_len;

			opt_len = (char *)tcp->tcp_tcph - (char *)tcp->tcp_ipha;
			opt_len -= tcp->tcp_label_len + IP_SIMPLE_HDR_LENGTH;
			ASSERT(opt_len >= 0);
			/* Caller ensures enough space */
			if (opt_len > 0) {
				/*
				 * TODO: Do we have to handle getsockopt on an
				 * initiator as well?
				 */
				return (ip_opt_get_user(tcp->tcp_ipha, ptr));
			}
			return (0);
		}
		case IP_TOS:
		case T_IP_TOS:
			*i1 = (int)tcp->tcp_ipha->ipha_type_of_service;
			break;
		case IP_TTL:
			*i1 = (int)tcp->tcp_ipha->ipha_ttl;
			break;
		case IP_NEXTHOP:
			/* Handled at IP level */
			return (-EINVAL);
		default:
			return (-1);
		}
		break;
	case IPPROTO_IPV6:
		/*
		 * IPPROTO_IPV6 options are only supported for sockets
		 * that are using IPv6 on the wire.
		 */
		if (tcp->tcp_ipversion != IPV6_VERSION) {
			return (-1);
		}
		switch (name) {
		case IPV6_UNICAST_HOPS:
			*i1 = (unsigned int) tcp->tcp_ip6h->ip6_hops;
			break;	/* goto sizeof (int) option return */
		case IPV6_BOUND_IF:
			/* Zero if not set */
			*i1 = tcp->tcp_bound_if;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVPKTINFO:
			if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVTCLASS:
			if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVHOPLIMIT:
			if (tcp->tcp_ipv6_recvancillary &
			    TCP_IPV6_RECVHOPLIMIT)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVHOPOPTS:
			if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVDSTOPTS:
			if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVDSTOPTS)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case _OLD_IPV6_RECVDSTOPTS:
			if (tcp->tcp_ipv6_recvancillary &
			    TCP_OLD_IPV6_RECVDSTOPTS)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVRTHDR:
			if (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_RECVRTHDRDSTOPTS:
			if (tcp->tcp_ipv6_recvancillary &
			    TCP_IPV6_RECVRTDSTOPTS)
				*i1 = 1;
			else
				*i1 = 0;
			break;	/* goto sizeof (int) option return */
		case IPV6_PKTINFO: {
			/* XXX assumes that caller has room for max size! */
			struct in6_pktinfo *pkti;

			pkti = (struct in6_pktinfo *)ptr;
			if (ipp->ipp_fields & IPPF_IFINDEX)
				pkti->ipi6_ifindex = ipp->ipp_ifindex;
			else
				pkti->ipi6_ifindex = 0;
			if (ipp->ipp_fields & IPPF_ADDR)
				pkti->ipi6_addr = ipp->ipp_addr;
			else
				pkti->ipi6_addr = ipv6_all_zeros;
			return (sizeof (struct in6_pktinfo));
		}
		case IPV6_TCLASS:
			if (ipp->ipp_fields & IPPF_TCLASS)
				*i1 = ipp->ipp_tclass;
			else
				*i1 = IPV6_FLOW_TCLASS(
				    IPV6_DEFAULT_VERS_AND_FLOW);
			break;	/* goto sizeof (int) option return */
		case IPV6_NEXTHOP: {
			sin6_t *sin6 = (sin6_t *)ptr;

			if (!(ipp->ipp_fields & IPPF_NEXTHOP))
				return (0);
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
			sin6->sin6_addr = ipp->ipp_nexthop;
			return (sizeof (sin6_t));
		}
		case IPV6_HOPOPTS:
			if (!(ipp->ipp_fields & IPPF_HOPOPTS))
				return (0);
			if (ipp->ipp_hopoptslen <= tcp->tcp_label_len)
				return (0);
			bcopy((char *)ipp->ipp_hopopts + tcp->tcp_label_len,
			    ptr, ipp->ipp_hopoptslen - tcp->tcp_label_len);
			if (tcp->tcp_label_len > 0) {
				ptr[0] = ((char *)ipp->ipp_hopopts)[0];
				ptr[1] = (ipp->ipp_hopoptslen -
				    tcp->tcp_label_len + 7) / 8 - 1;
			}
			return (ipp->ipp_hopoptslen - tcp->tcp_label_len);
		case IPV6_RTHDRDSTOPTS:
			if (!(ipp->ipp_fields & IPPF_RTDSTOPTS))
				return (0);
			bcopy(ipp->ipp_rtdstopts, ptr, ipp->ipp_rtdstoptslen);
			return (ipp->ipp_rtdstoptslen);
		case IPV6_RTHDR:
			if (!(ipp->ipp_fields & IPPF_RTHDR))
				return (0);
			bcopy(ipp->ipp_rthdr, ptr, ipp->ipp_rthdrlen);
			return (ipp->ipp_rthdrlen);
		case IPV6_DSTOPTS:
			if (!(ipp->ipp_fields & IPPF_DSTOPTS))
				return (0);
			bcopy(ipp->ipp_dstopts, ptr, ipp->ipp_dstoptslen);
			return (ipp->ipp_dstoptslen);
		case IPV6_SRC_PREFERENCES:
			return (ip6_get_src_preferences(connp,
			    (uint32_t *)ptr));
		case IPV6_PATHMTU: {
			struct ip6_mtuinfo *mtuinfo = (struct ip6_mtuinfo *)ptr;

			if (tcp->tcp_state < TCPS_ESTABLISHED)
				return (-1);

			return (ip_fill_mtuinfo(&connp->conn_remv6,
			    connp->conn_fport, mtuinfo,
			    connp->conn_netstack));
		}
		default:
			return (-1);
		}
		break;
	default:
		return (-1);
	}
	return (sizeof (int));
}
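
/*
 * Illustrative sketch (not part of the build): the user-space view of the
 * option plumbing above. Reading SO_SNDBUF and TCP_MAXSEG lands in the
 * corresponding arms of tcp_opt_get(). fd is assumed to be a connected
 * TCP socket; error handling is elided.
 */
#if 0
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void
show_tcp_opts(int fd)
{
	int sndbuf = 0, mss = 0;
	socklen_t len = sizeof (int);

	(void) getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
	len = sizeof (int);
	(void) getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len);
	printf("sndbuf=%d mss=%d\n", sndbuf, mss);
}
#endif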

/*
 * We declare as 'int' rather than 'void' to satisfy pfi_t arg requirements.
 * Parameters are assumed to be verified by the caller.
 */
/* ARGSUSED */
int
tcp_opt_set(queue_t *q, uint_t optset_context, int level, int name,
    uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
    void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
{
	conn_t	*connp = Q_TO_CONN(q);
	tcp_t	*tcp = connp->conn_tcp;
	int	*i1 = (int *)invalp;
	boolean_t onoff = (*i1 == 0) ? 0 : 1;
	boolean_t checkonly;
	int	reterr;
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	switch (optset_context) {
	case SETFN_OPTCOM_CHECKONLY:
		checkonly = B_TRUE;
		/*
		 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ.
		 * inlen != 0 implies a value was supplied and we have
		 * to "pretend" to set it. inlen == 0 implies that there
		 * is no value part in the T_CHECK request; the validation
		 * done elsewhere should be enough, so we just return here.
		 */
		if (inlen == 0) {
			*outlenp = 0;
			return (0);
		}
		break;
	case SETFN_OPTCOM_NEGOTIATE:
		checkonly = B_FALSE;
		break;
	case SETFN_UD_NEGOTIATE: /* error on conn-oriented transports ? */
	case SETFN_CONN_NEGOTIATE:
		checkonly = B_FALSE;
		/*
		 * Negotiating local and "association-related" options
		 * from other (T_CONN_REQ, T_CONN_RES, T_UNITDATA_REQ)
		 * primitives is allowed by XTI, but we choose
		 * to not implement this style negotiation for Internet
		 * protocols (We interpret it as a must for OSI world but
		 * optional for Internet protocols) for all options.
		 * [ Will do only for the few options that enable test
		 * suites that our XTI implementation of this feature
		 * works for transports that do allow it ]
		 */
		if (!tcp_allow_connopt_set(level, name)) {
			*outlenp = 0;
			return (EINVAL);
		}
		break;
	default:
		/*
		 * We should never get here
		 */
		*outlenp = 0;
		return (EINVAL);
	}

	ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) ||
	    (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0));

	/*
	 * For TCP, we should have no ancillary data sent down
	 * (sendmsg isn't supported for SOCK_STREAM), so thisdg_attrs
	 * has to be zero.
	 */
	ASSERT(thisdg_attrs == NULL);

	/*
	 * For fixed length options, no sanity check
	 * of passed in length is done. It is assumed *_optcom_req()
	 * routines do the right thing.
	 */

	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_LINGER: {
			struct linger *lgr = (struct linger *)invalp;

			if (!checkonly) {
				if (lgr->l_onoff) {
					tcp->tcp_linger = 1;
					tcp->tcp_lingertime = lgr->l_linger;
				} else {
					tcp->tcp_linger = 0;
					tcp->tcp_lingertime = 0;
				}
				/* struct copy */
				*(struct linger *)outvalp = *lgr;
			} else {
				if (!lgr->l_onoff) {
					((struct linger *)outvalp)->l_onoff = 0;
					((struct linger *)outvalp)->l_linger = 0;
				} else {
					/* struct copy */
					*(struct linger *)outvalp = *lgr;
				}
			}
			*outlenp = sizeof (struct linger);
			return (0);
		}
		case SO_DEBUG:
			if (!checkonly)
				tcp->tcp_debug = onoff;
			break;
		case SO_KEEPALIVE:
			if (checkonly) {
				/* T_CHECK case */
				break;
			}

			if (!onoff) {
				if (tcp->tcp_ka_enabled) {
					if (tcp->tcp_ka_tid != 0) {
						(void) TCP_TIMER_CANCEL(tcp,
						    tcp->tcp_ka_tid);
						tcp->tcp_ka_tid = 0;
					}
					tcp->tcp_ka_enabled = 0;
				}
				break;
			}
			if (!tcp->tcp_ka_enabled) {
				/* Crank up the keepalive timer */
				tcp->tcp_ka_last_intrvl = 0;
				tcp->tcp_ka_tid = TCP_TIMER(tcp,
				    tcp_keepalive_killer,
				    MSEC_TO_TICK(tcp->tcp_ka_interval));
				tcp->tcp_ka_enabled = 1;
			}
			break;
		case SO_DONTROUTE:
			/*
			 * SO_DONTROUTE, SO_USELOOPBACK, and SO_BROADCAST are
			 * only of interest to IP. We track them here only so
			 * that we can report their current value.
			 */
10312 */ 10313 if (!checkonly) { 10314 tcp->tcp_dontroute = onoff; 10315 tcp->tcp_connp->conn_dontroute = onoff; 10316 } 10317 break; 10318 case SO_USELOOPBACK: 10319 if (!checkonly) { 10320 tcp->tcp_useloopback = onoff; 10321 tcp->tcp_connp->conn_loopback = onoff; 10322 } 10323 break; 10324 case SO_BROADCAST: 10325 if (!checkonly) { 10326 tcp->tcp_broadcast = onoff; 10327 tcp->tcp_connp->conn_broadcast = onoff; 10328 } 10329 break; 10330 case SO_REUSEADDR: 10331 if (!checkonly) { 10332 tcp->tcp_reuseaddr = onoff; 10333 tcp->tcp_connp->conn_reuseaddr = onoff; 10334 } 10335 break; 10336 case SO_OOBINLINE: 10337 if (!checkonly) 10338 tcp->tcp_oobinline = onoff; 10339 break; 10340 case SO_DGRAM_ERRIND: 10341 if (!checkonly) 10342 tcp->tcp_dgram_errind = onoff; 10343 break; 10344 case SO_SNDBUF: { 10345 if (*i1 > tcps->tcps_max_buf) { 10346 *outlenp = 0; 10347 return (ENOBUFS); 10348 } 10349 if (checkonly) 10350 break; 10351 10352 tcp->tcp_xmit_hiwater = *i1; 10353 if (tcps->tcps_snd_lowat_fraction != 0) 10354 tcp->tcp_xmit_lowater = 10355 tcp->tcp_xmit_hiwater / 10356 tcps->tcps_snd_lowat_fraction; 10357 (void) tcp_maxpsz_set(tcp, B_TRUE); 10358 /* 10359 * If we are flow-controlled, recheck the condition. 10360 * There are apps that increase SO_SNDBUF size when 10361 * flow-controlled (EWOULDBLOCK), and expect the flow 10362 * control condition to be lifted right away. 10363 */ 10364 mutex_enter(&tcp->tcp_non_sq_lock); 10365 if (tcp->tcp_flow_stopped && 10366 TCP_UNSENT_BYTES(tcp) < tcp->tcp_xmit_hiwater) { 10367 tcp_clrqfull(tcp); 10368 } 10369 mutex_exit(&tcp->tcp_non_sq_lock); 10370 break; 10371 } 10372 case SO_RCVBUF: 10373 if (*i1 > tcps->tcps_max_buf) { 10374 *outlenp = 0; 10375 return (ENOBUFS); 10376 } 10377 /* Silently ignore zero */ 10378 if (!checkonly && *i1 != 0) { 10379 *i1 = MSS_ROUNDUP(*i1, tcp->tcp_mss); 10380 (void) tcp_rwnd_set(tcp, *i1); 10381 } 10382 /* 10383 * XXX should we return the rwnd here 10384 * and tcp_opt_get ? 10385 */ 10386 break; 10387 case SO_SND_COPYAVOID: 10388 if (!checkonly) { 10389 /* we only allow enable at most once for now */ 10390 if (tcp->tcp_loopback || 10391 (!tcp->tcp_snd_zcopy_aware && 10392 (onoff != 1 || !tcp_zcopy_check(tcp)))) { 10393 *outlenp = 0; 10394 return (EOPNOTSUPP); 10395 } 10396 tcp->tcp_snd_zcopy_aware = 1; 10397 } 10398 break; 10399 case SO_ALLZONES: 10400 /* Handled at the IP level */ 10401 return (-EINVAL); 10402 case SO_ANON_MLP: 10403 if (!checkonly) { 10404 mutex_enter(&connp->conn_lock); 10405 connp->conn_anon_mlp = onoff; 10406 mutex_exit(&connp->conn_lock); 10407 } 10408 break; 10409 case SO_MAC_EXEMPT: 10410 if (secpolicy_net_mac_aware(cr) != 0 || 10411 IPCL_IS_BOUND(connp)) 10412 return (EACCES); 10413 if (!checkonly) { 10414 mutex_enter(&connp->conn_lock); 10415 connp->conn_mac_exempt = onoff; 10416 mutex_exit(&connp->conn_lock); 10417 } 10418 break; 10419 case SO_EXCLBIND: 10420 if (!checkonly) 10421 tcp->tcp_exclbind = onoff; 10422 break; 10423 default: 10424 *outlenp = 0; 10425 return (EINVAL); 10426 } 10427 break; 10428 case IPPROTO_TCP: 10429 switch (name) { 10430 case TCP_NODELAY: 10431 if (!checkonly) 10432 tcp->tcp_naglim = *i1 ? 
				tcp->tcp_naglim = *i1 ? 1 : tcp->tcp_mss;
			break;
		case TCP_NOTIFY_THRESHOLD:
			if (!checkonly)
				tcp->tcp_first_timer_threshold = *i1;
			break;
		case TCP_ABORT_THRESHOLD:
			if (!checkonly)
				tcp->tcp_second_timer_threshold = *i1;
			break;
		case TCP_CONN_NOTIFY_THRESHOLD:
			if (!checkonly)
				tcp->tcp_first_ctimer_threshold = *i1;
			break;
		case TCP_CONN_ABORT_THRESHOLD:
			if (!checkonly)
				tcp->tcp_second_ctimer_threshold = *i1;
			break;
		case TCP_RECVDSTADDR:
			if (tcp->tcp_state > TCPS_LISTEN)
				return (EOPNOTSUPP);
			if (!checkonly)
				tcp->tcp_recvdstaddr = onoff;
			break;
		case TCP_ANONPRIVBIND:
			if ((reterr = secpolicy_net_privaddr(cr, 0)) != 0) {
				*outlenp = 0;
				return (reterr);
			}
			if (!checkonly) {
				tcp->tcp_anon_priv_bind = onoff;
			}
			break;
		case TCP_EXCLBIND:
			if (!checkonly)
				tcp->tcp_exclbind = onoff;
			break;	/* goto sizeof (int) option return */
		case TCP_INIT_CWND: {
			uint32_t init_cwnd = *((uint32_t *)invalp);

			if (checkonly)
				break;

			/*
			 * Only allow a socket with the network configuration
			 * privilege to set the initial cwnd to be larger
			 * than allowed by RFC 3390.
			 */
			if (init_cwnd <= MIN(4, MAX(2, 4380 / tcp->tcp_mss))) {
				tcp->tcp_init_cwnd = init_cwnd;
				break;
			}
			if ((reterr = secpolicy_ip_config(cr, B_TRUE)) != 0) {
				*outlenp = 0;
				return (reterr);
			}
			if (init_cwnd > TCP_MAX_INIT_CWND) {
				*outlenp = 0;
				return (EINVAL);
			}
			tcp->tcp_init_cwnd = init_cwnd;
			break;
		}
		case TCP_KEEPALIVE_THRESHOLD:
			if (checkonly)
				break;

			if (*i1 < tcps->tcps_keepalive_interval_low ||
			    *i1 > tcps->tcps_keepalive_interval_high) {
				*outlenp = 0;
				return (EINVAL);
			}
			if (*i1 != tcp->tcp_ka_interval) {
				tcp->tcp_ka_interval = *i1;
				/*
				 * Check if we need to restart the
				 * keepalive timer.
				 */
				if (tcp->tcp_ka_tid != 0) {
					ASSERT(tcp->tcp_ka_enabled);
					(void) TCP_TIMER_CANCEL(tcp,
					    tcp->tcp_ka_tid);
					tcp->tcp_ka_last_intrvl = 0;
					tcp->tcp_ka_tid = TCP_TIMER(tcp,
					    tcp_keepalive_killer,
					    MSEC_TO_TICK(tcp->tcp_ka_interval));
				}
			}
			break;
		case TCP_KEEPALIVE_ABORT_THRESHOLD:
			if (!checkonly) {
				if (*i1 <
				    tcps->tcps_keepalive_abort_interval_low ||
				    *i1 >
				    tcps->tcps_keepalive_abort_interval_high) {
					*outlenp = 0;
					return (EINVAL);
				}
				tcp->tcp_ka_abort_thres = *i1;
			}
			break;
		case TCP_CORK:
			if (!checkonly) {
				/*
				 * If tcp->tcp_cork was set and is now
				 * being unset, we have to make sure that
				 * the remaining data gets sent out.
				 * Also unset tcp->tcp_cork so that
				 * tcp_wput_data() can send data even if it
				 * is less than mss.
				 */
				if (tcp->tcp_cork && onoff == 0 &&
				    tcp->tcp_unsent > 0) {
					tcp->tcp_cork = B_FALSE;
					tcp_wput_data(tcp, NULL, B_FALSE);
				}
				tcp->tcp_cork = onoff;
			}
			break;
		default:
			*outlenp = 0;
			return (EINVAL);
		}
		break;
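	/*
	 * Illustrative sketch (not part of the build): the usual TCP_CORK
	 * pattern from user space, which exercises the arm above. Corking
	 * holds back sub-MSS segments; clearing the cork flushes whatever
	 * is still queued (the tcp_wput_data() call above). fd is assumed
	 * to be a connected TCP socket and error handling is elided:
	 *
	 *	int on = 1, off = 0;
	 *
	 *	(void) setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof (on));
	 *	(void) write(fd, hdr, hdr_len);	  // coalesced...
	 *	(void) write(fd, body, body_len); // ...with this
	 *	(void) setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off,
	 *	    sizeof (off));
	 */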
	case IPPROTO_IP:
		if (tcp->tcp_family != AF_INET) {
			*outlenp = 0;
			return (ENOPROTOOPT);
		}
		switch (name) {
		case IP_OPTIONS:
		case T_IP_OPTIONS:
			reterr = tcp_opt_set_header(tcp, checkonly,
			    invalp, inlen);
			if (reterr) {
				*outlenp = 0;
				return (reterr);
			}
			/* OK return - copy input buffer into output buffer */
			if (invalp != outvalp) {
				/* don't trust bcopy for identical src/dst */
				bcopy(invalp, outvalp, inlen);
			}
			*outlenp = inlen;
			return (0);
		case IP_TOS:
		case T_IP_TOS:
			if (!checkonly) {
				tcp->tcp_ipha->ipha_type_of_service =
				    (uchar_t)*i1;
				tcp->tcp_tos = (uchar_t)*i1;
			}
			break;
		case IP_TTL:
			if (!checkonly) {
				tcp->tcp_ipha->ipha_ttl = (uchar_t)*i1;
				tcp->tcp_ttl = (uchar_t)*i1;
			}
			break;
		case IP_BOUND_IF:
		case IP_NEXTHOP:
			/* Handled at the IP level */
			return (-EINVAL);
		case IP_SEC_OPT:
			/*
			 * We should not allow policy setting after
			 * we start listening for connections.
			 */
			if (tcp->tcp_state == TCPS_LISTEN) {
				return (EINVAL);
			} else {
				/* Handled at the IP level */
				return (-EINVAL);
			}
		default:
			*outlenp = 0;
			return (EINVAL);
		}
		break;
	case IPPROTO_IPV6: {
		ip6_pkt_t *ipp;

		/*
		 * IPPROTO_IPV6 options are only supported for sockets
		 * that are using IPv6 on the wire.
		 */
		if (tcp->tcp_ipversion != IPV6_VERSION) {
			*outlenp = 0;
			return (ENOPROTOOPT);
		}
		/*
		 * Only sticky options; no ancillary data
		 */
		ASSERT(thisdg_attrs == NULL);
		ipp = &tcp->tcp_sticky_ipp;

		switch (name) {
		case IPV6_UNICAST_HOPS:
			/* -1 means use default */
			if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) {
				*outlenp = 0;
				return (EINVAL);
			}
			if (!checkonly) {
				if (*i1 == -1) {
					tcp->tcp_ip6h->ip6_hops =
					    ipp->ipp_unicast_hops =
					    (uint8_t)tcps->tcps_ipv6_hoplimit;
					ipp->ipp_fields &= ~IPPF_UNICAST_HOPS;
					/* Pass modified value to IP. */
					*i1 = tcp->tcp_ip6h->ip6_hops;
				} else {
					tcp->tcp_ip6h->ip6_hops =
					    ipp->ipp_unicast_hops =
					    (uint8_t)*i1;
					ipp->ipp_fields |= IPPF_UNICAST_HOPS;
				}
				reterr = tcp_build_hdrs(q, tcp);
				if (reterr != 0)
					return (reterr);
			}
			break;
		case IPV6_BOUND_IF:
			if (!checkonly) {
				int error = 0;

				tcp->tcp_bound_if = *i1;
				error = ip_opt_set_ill(tcp->tcp_connp, *i1,
				    B_TRUE, checkonly, level, name, mblk);
				if (error != 0) {
					*outlenp = 0;
					return (error);
				}
			}
			break;
		/*
		 * Set boolean switches for ancillary data delivery
		 */
		case IPV6_RECVPKTINFO:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVPKTINFO;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVPKTINFO;
				/* Force it to be sent up with the next msg */
				tcp->tcp_recvifindex = 0;
			}
			break;
		case IPV6_RECVTCLASS:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVTCLASS;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVTCLASS;
			}
			break;
		case IPV6_RECVHOPLIMIT:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVHOPLIMIT;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVHOPLIMIT;
				/* Force it to be sent up with the next msg */
				tcp->tcp_recvhops = 0xffffffffU;
			}
			break;
		case IPV6_RECVHOPOPTS:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVHOPOPTS;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVHOPOPTS;
			}
			break;
		case IPV6_RECVDSTOPTS:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVDSTOPTS;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVDSTOPTS;
			}
			break;
		case _OLD_IPV6_RECVDSTOPTS:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_OLD_IPV6_RECVDSTOPTS;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_OLD_IPV6_RECVDSTOPTS;
			}
			break;
		case IPV6_RECVRTHDR:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVRTHDR;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVRTHDR;
			}
			break;
		case IPV6_RECVRTHDRDSTOPTS:
			if (!checkonly) {
				if (onoff)
					tcp->tcp_ipv6_recvancillary |=
					    TCP_IPV6_RECVRTDSTOPTS;
				else
					tcp->tcp_ipv6_recvancillary &=
					    ~TCP_IPV6_RECVRTDSTOPTS;
			}
			break;
		case IPV6_PKTINFO:
			if (inlen != 0 && inlen != sizeof (struct in6_pktinfo))
				return (EINVAL);
			if (checkonly)
				break;

			if (inlen == 0) {
				ipp->ipp_fields &= ~(IPPF_IFINDEX|IPPF_ADDR);
			} else {
				struct in6_pktinfo *pkti;

				pkti = (struct in6_pktinfo *)invalp;
				/*
				 * RFC 3542 states that ipi6_addr must be
				 * the unspecified address when setting the
				 * IPV6_PKTINFO sticky socket option on a
				 * TCP socket.
				 */
				if (!IN6_IS_ADDR_UNSPECIFIED(&pkti->ipi6_addr))
					return (EINVAL);
10776 */ 10777 reterr = ip6_set_pktinfo(cr, tcp->tcp_connp, 10778 pkti, mblk); 10779 if (reterr != 0) 10780 return (reterr); 10781 ipp->ipp_ifindex = pkti->ipi6_ifindex; 10782 ipp->ipp_addr = pkti->ipi6_addr; 10783 if (ipp->ipp_ifindex != 0) 10784 ipp->ipp_fields |= IPPF_IFINDEX; 10785 else 10786 ipp->ipp_fields &= ~IPPF_IFINDEX; 10787 if (!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)) 10788 ipp->ipp_fields |= IPPF_ADDR; 10789 else 10790 ipp->ipp_fields &= ~IPPF_ADDR; 10791 } 10792 reterr = tcp_build_hdrs(q, tcp); 10793 if (reterr != 0) 10794 return (reterr); 10795 break; 10796 case IPV6_TCLASS: 10797 if (inlen != 0 && inlen != sizeof (int)) 10798 return (EINVAL); 10799 if (checkonly) 10800 break; 10801 10802 if (inlen == 0) { 10803 ipp->ipp_fields &= ~IPPF_TCLASS; 10804 } else { 10805 if (*i1 > 255 || *i1 < -1) 10806 return (EINVAL); 10807 if (*i1 == -1) { 10808 ipp->ipp_tclass = 0; 10809 *i1 = 0; 10810 } else { 10811 ipp->ipp_tclass = *i1; 10812 } 10813 ipp->ipp_fields |= IPPF_TCLASS; 10814 } 10815 reterr = tcp_build_hdrs(q, tcp); 10816 if (reterr != 0) 10817 return (reterr); 10818 break; 10819 case IPV6_NEXTHOP: 10820 /* 10821 * IP will verify that the nexthop is reachable 10822 * and fail for sticky options. 10823 */ 10824 if (inlen != 0 && inlen != sizeof (sin6_t)) 10825 return (EINVAL); 10826 if (checkonly) 10827 break; 10828 10829 if (inlen == 0) { 10830 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10831 } else { 10832 sin6_t *sin6 = (sin6_t *)invalp; 10833 10834 if (sin6->sin6_family != AF_INET6) 10835 return (EAFNOSUPPORT); 10836 if (IN6_IS_ADDR_V4MAPPED( 10837 &sin6->sin6_addr)) 10838 return (EADDRNOTAVAIL); 10839 ipp->ipp_nexthop = sin6->sin6_addr; 10840 if (!IN6_IS_ADDR_UNSPECIFIED( 10841 &ipp->ipp_nexthop)) 10842 ipp->ipp_fields |= IPPF_NEXTHOP; 10843 else 10844 ipp->ipp_fields &= ~IPPF_NEXTHOP; 10845 } 10846 reterr = tcp_build_hdrs(q, tcp); 10847 if (reterr != 0) 10848 return (reterr); 10849 break; 10850 case IPV6_HOPOPTS: { 10851 ip6_hbh_t *hopts = (ip6_hbh_t *)invalp; 10852 10853 /* 10854 * Sanity checks - minimum size, size a multiple of 10855 * eight bytes, and matching size passed in. 10856 */ 10857 if (inlen != 0 && 10858 inlen != (8 * (hopts->ip6h_len + 1))) 10859 return (EINVAL); 10860 10861 if (checkonly) 10862 break; 10863 10864 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10865 (uchar_t **)&ipp->ipp_hopopts, 10866 &ipp->ipp_hopoptslen, tcp->tcp_label_len); 10867 if (reterr != 0) 10868 return (reterr); 10869 if (ipp->ipp_hopoptslen == 0) 10870 ipp->ipp_fields &= ~IPPF_HOPOPTS; 10871 else 10872 ipp->ipp_fields |= IPPF_HOPOPTS; 10873 reterr = tcp_build_hdrs(q, tcp); 10874 if (reterr != 0) 10875 return (reterr); 10876 break; 10877 } 10878 case IPV6_RTHDRDSTOPTS: { 10879 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10880 10881 /* 10882 * Sanity checks - minimum size, size a multiple of 10883 * eight bytes, and matching size passed in. 
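 * As a worked example of the check below: ip6d_len counts 8-byte
 * units beyond the first, so an option with ip6d_len == 1 is 16 bytes
 * long, and only inlen == 16 (or 0, to clear the option) is accepted.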
10884 */ 10885 if (inlen != 0 && 10886 inlen != (8 * (dopts->ip6d_len + 1))) 10887 return (EINVAL); 10888 10889 if (checkonly) 10890 break; 10891 10892 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10893 (uchar_t **)&ipp->ipp_rtdstopts, 10894 &ipp->ipp_rtdstoptslen, 0); 10895 if (reterr != 0) 10896 return (reterr); 10897 if (ipp->ipp_rtdstoptslen == 0) 10898 ipp->ipp_fields &= ~IPPF_RTDSTOPTS; 10899 else 10900 ipp->ipp_fields |= IPPF_RTDSTOPTS; 10901 reterr = tcp_build_hdrs(q, tcp); 10902 if (reterr != 0) 10903 return (reterr); 10904 break; 10905 } 10906 case IPV6_DSTOPTS: { 10907 ip6_dest_t *dopts = (ip6_dest_t *)invalp; 10908 10909 /* 10910 * Sanity checks - minimum size, size a multiple of 10911 * eight bytes, and matching size passed in. 10912 */ 10913 if (inlen != 0 && 10914 inlen != (8 * (dopts->ip6d_len + 1))) 10915 return (EINVAL); 10916 10917 if (checkonly) 10918 break; 10919 10920 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10921 (uchar_t **)&ipp->ipp_dstopts, 10922 &ipp->ipp_dstoptslen, 0); 10923 if (reterr != 0) 10924 return (reterr); 10925 if (ipp->ipp_dstoptslen == 0) 10926 ipp->ipp_fields &= ~IPPF_DSTOPTS; 10927 else 10928 ipp->ipp_fields |= IPPF_DSTOPTS; 10929 reterr = tcp_build_hdrs(q, tcp); 10930 if (reterr != 0) 10931 return (reterr); 10932 break; 10933 } 10934 case IPV6_RTHDR: { 10935 ip6_rthdr_t *rt = (ip6_rthdr_t *)invalp; 10936 10937 /* 10938 * Sanity checks - minimum size, size a multiple of 10939 * eight bytes, and matching size passed in. 10940 */ 10941 if (inlen != 0 && 10942 inlen != (8 * (rt->ip6r_len + 1))) 10943 return (EINVAL); 10944 10945 if (checkonly) 10946 break; 10947 10948 reterr = optcom_pkt_set(invalp, inlen, B_TRUE, 10949 (uchar_t **)&ipp->ipp_rthdr, 10950 &ipp->ipp_rthdrlen, 0); 10951 if (reterr != 0) 10952 return (reterr); 10953 if (ipp->ipp_rthdrlen == 0) 10954 ipp->ipp_fields &= ~IPPF_RTHDR; 10955 else 10956 ipp->ipp_fields |= IPPF_RTHDR; 10957 reterr = tcp_build_hdrs(q, tcp); 10958 if (reterr != 0) 10959 return (reterr); 10960 break; 10961 } 10962 case IPV6_V6ONLY: 10963 if (!checkonly) 10964 tcp->tcp_connp->conn_ipv6_v6only = onoff; 10965 break; 10966 case IPV6_USE_MIN_MTU: 10967 if (inlen != sizeof (int)) 10968 return (EINVAL); 10969 10970 if (*i1 < -1 || *i1 > 1) 10971 return (EINVAL); 10972 10973 if (checkonly) 10974 break; 10975 10976 ipp->ipp_fields |= IPPF_USE_MIN_MTU; 10977 ipp->ipp_use_min_mtu = *i1; 10978 break; 10979 case IPV6_BOUND_PIF: 10980 /* Handled at the IP level */ 10981 return (-EINVAL); 10982 case IPV6_SEC_OPT: 10983 /* 10984 * We should not allow policy setting after 10985 * we start listening for connections. 
10986 */ 10987 if (tcp->tcp_state == TCPS_LISTEN) { 10988 return (EINVAL); 10989 } else { 10990 /* Handled at the IP level */ 10991 return (-EINVAL); 10992 } 10993 case IPV6_SRC_PREFERENCES: 10994 if (inlen != sizeof (uint32_t)) 10995 return (EINVAL); 10996 reterr = ip6_set_src_preferences(tcp->tcp_connp, 10997 *(uint32_t *)invalp); 10998 if (reterr != 0) { 10999 *outlenp = 0; 11000 return (reterr); 11001 } 11002 break; 11003 default: 11004 *outlenp = 0; 11005 return (EINVAL); 11006 } 11007 break; 11008 } /* end IPPROTO_IPV6 */ 11009 default: 11010 *outlenp = 0; 11011 return (EINVAL); 11012 } 11013 /* 11014 * Common case of OK return with outval same as inval 11015 */ 11016 if (invalp != outvalp) { 11017 /* don't trust bcopy for identical src/dst */ 11018 (void) bcopy(invalp, outvalp, inlen); 11019 } 11020 *outlenp = inlen; 11021 return (0); 11022 } 11023 11024 /* 11025 * Update tcp_sticky_hdrs based on tcp_sticky_ipp. 11026 * The headers include ip6i_t (if needed), ip6_t, any sticky extension 11027 * headers, and the maximum size tcp header (to avoid reallocation 11028 * on the fly for additional tcp options). 11029 * Returns failure if can't allocate memory. 11030 */ 11031 static int 11032 tcp_build_hdrs(queue_t *q, tcp_t *tcp) 11033 { 11034 char *hdrs; 11035 uint_t hdrs_len; 11036 ip6i_t *ip6i; 11037 char buf[TCP_MAX_HDR_LENGTH]; 11038 ip6_pkt_t *ipp = &tcp->tcp_sticky_ipp; 11039 in6_addr_t src, dst; 11040 tcp_stack_t *tcps = tcp->tcp_tcps; 11041 11042 /* 11043 * save the existing tcp header and source/dest IP addresses 11044 */ 11045 bcopy(tcp->tcp_tcph, buf, tcp->tcp_tcp_hdr_len); 11046 src = tcp->tcp_ip6h->ip6_src; 11047 dst = tcp->tcp_ip6h->ip6_dst; 11048 hdrs_len = ip_total_hdrs_len_v6(ipp) + TCP_MAX_HDR_LENGTH; 11049 ASSERT(hdrs_len != 0); 11050 if (hdrs_len > tcp->tcp_iphc_len) { 11051 /* Need to reallocate */ 11052 hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP); 11053 if (hdrs == NULL) 11054 return (ENOMEM); 11055 if (tcp->tcp_iphc != NULL) { 11056 if (tcp->tcp_hdr_grown) { 11057 kmem_free(tcp->tcp_iphc, tcp->tcp_iphc_len); 11058 } else { 11059 bzero(tcp->tcp_iphc, tcp->tcp_iphc_len); 11060 kmem_cache_free(tcp_iphc_cache, tcp->tcp_iphc); 11061 } 11062 tcp->tcp_iphc_len = 0; 11063 } 11064 ASSERT(tcp->tcp_iphc_len == 0); 11065 tcp->tcp_iphc = hdrs; 11066 tcp->tcp_iphc_len = hdrs_len; 11067 tcp->tcp_hdr_grown = B_TRUE; 11068 } 11069 ip_build_hdrs_v6((uchar_t *)tcp->tcp_iphc, 11070 hdrs_len - TCP_MAX_HDR_LENGTH, ipp, IPPROTO_TCP); 11071 11072 /* Set header fields not in ipp */ 11073 if (ipp->ipp_fields & IPPF_HAS_IP6I) { 11074 ip6i = (ip6i_t *)tcp->tcp_iphc; 11075 tcp->tcp_ip6h = (ip6_t *)&ip6i[1]; 11076 } else { 11077 tcp->tcp_ip6h = (ip6_t *)tcp->tcp_iphc; 11078 } 11079 /* 11080 * tcp->tcp_ip_hdr_len will include ip6i_t if there is one. 11081 * 11082 * tcp->tcp_tcp_hdr_len doesn't change here. 11083 */ 11084 tcp->tcp_ip_hdr_len = hdrs_len - TCP_MAX_HDR_LENGTH; 11085 tcp->tcp_tcph = (tcph_t *)(tcp->tcp_iphc + tcp->tcp_ip_hdr_len); 11086 tcp->tcp_hdr_len = tcp->tcp_ip_hdr_len + tcp->tcp_tcp_hdr_len; 11087 11088 bcopy(buf, tcp->tcp_tcph, tcp->tcp_tcp_hdr_len); 11089 11090 tcp->tcp_ip6h->ip6_src = src; 11091 tcp->tcp_ip6h->ip6_dst = dst; 11092 11093 /* 11094 * If the hop limit was not set by ip_build_hdrs_v6(), set it to 11095 * the default value for TCP. 
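 *
 * For reference, a sketch of the buffer rebuilt above (the ip6i_t is
 * present only when IPPF_HAS_IP6I is set):
 *
 *	tcp_iphc: [ ip6i_t ][ ip6_t ][ sticky ext hdrs ][ TCP hdr slot ]
 *	          |<--------- tcp_ip_hdr_len --------->|
 *
 * where the TCP header slot is always TCP_MAX_HDR_LENGTH bytes so that
 * later option changes never force another reallocation.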
 */
	if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS))
		tcp->tcp_ip6h->ip6_hops = tcps->tcps_ipv6_hoplimit;

	/*
	 * If we're setting extension headers after a connection
	 * has been established, and if we have a routing header
	 * among the extension headers, call ip_massage_options_v6 to
	 * manipulate the routing header/ip6_dst and set the checksum
	 * difference in the tcp header template.
	 * (This happens in tcp_connect_ipv6 if the routing header
	 * is set prior to the connect.)
	 * Set the tcp_sum to zero first in case we've cleared a
	 * routing header or don't have one at all.
	 */
	tcp->tcp_sum = 0;
	if ((tcp->tcp_state >= TCPS_SYN_SENT) &&
	    (tcp->tcp_ipp_fields & IPPF_RTHDR)) {
		ip6_rthdr_t *rth = ip_find_rthdr_v6(tcp->tcp_ip6h,
		    (uint8_t *)tcp->tcp_tcph);
		if (rth != NULL) {
			tcp->tcp_sum = ip_massage_options_v6(tcp->tcp_ip6h,
			    rth, tcps->tcps_netstack);
			tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) +
			    (tcp->tcp_sum >> 16));
		}
	}

	/* Try to get everything in a single mblk */
	(void) mi_set_sth_wroff(RD(q), hdrs_len + tcps->tcps_wroff_xtra);
	return (0);
}

/*
 * Transfer any source route option from ipha to buf/dst in reversed form.
 */
static int
tcp_opt_rev_src_route(ipha_t *ipha, char *buf, uchar_t *dst)
{
	ipoptp_t	opts;
	uchar_t		*opt;
	uint8_t		optval;
	uint8_t		optlen;
	uint32_t	len = 0;

	for (optval = ipoptp_first(&opts, ipha);
	    optval != IPOPT_EOL;
	    optval = ipoptp_next(&opts)) {
		opt = opts.ipoptp_cur;
		optlen = opts.ipoptp_len;
		switch (optval) {
			int	off1, off2;
		case IPOPT_SSRR:
		case IPOPT_LSRR:

			/* Reverse source route */
			/*
			 * First entry should be the next to last one in the
			 * current source route (the last entry is our
			 * address.)
			 * The last entry should be the final destination.
			 */
			buf[IPOPT_OPTVAL] = (uint8_t)optval;
			buf[IPOPT_OLEN] = (uint8_t)optlen;
			off1 = IPOPT_MINOFF_SR - 1;
			off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
			if (off2 < 0) {
				/* No entries in source route */
				break;
			}
			bcopy(opt + off2, dst, IP_ADDR_LEN);
			/*
			 * Note: use src since ipha has not had its src
			 * and dst reversed (it is in the state it was
			 * received).
			 */
			bcopy(&ipha->ipha_src, buf + off2,
			    IP_ADDR_LEN);
			off2 -= IP_ADDR_LEN;

			while (off2 > 0) {
				bcopy(opt + off2, buf + off1,
				    IP_ADDR_LEN);
				off1 += IP_ADDR_LEN;
				off2 -= IP_ADDR_LEN;
			}
			buf[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
			buf += optlen;
			len += optlen;
			break;
		}
	}
done:
	/* Pad the resulting options */
	while (len & 0x3) {
		*buf++ = IPOPT_EOL;
		len++;
	}
	return (len);
}


/*
 * Extract and reverse a source route from ipha (if any) and then update
 * the relevant fields in both tcp_t and the standard header.
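 *
 * A hypothetical example: a segment arrives with ipha_src S and a
 * recorded LSRR address list { A, B, C }.  tcp_opt_rev_src_route()
 * then yields dst = C and the reversed list { B, A, S }, so replies
 * retrace the recorded hops and terminate at S.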
11201 */ 11202 static void 11203 tcp_opt_reverse(tcp_t *tcp, ipha_t *ipha) 11204 { 11205 char buf[TCP_MAX_HDR_LENGTH]; 11206 uint_t tcph_len; 11207 int len; 11208 11209 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 11210 len = IPH_HDR_LENGTH(ipha); 11211 if (len == IP_SIMPLE_HDR_LENGTH) 11212 /* Nothing to do */ 11213 return; 11214 if (len > IP_SIMPLE_HDR_LENGTH + TCP_MAX_IP_OPTIONS_LENGTH || 11215 (len & 0x3)) 11216 return; 11217 11218 tcph_len = tcp->tcp_tcp_hdr_len; 11219 bcopy(tcp->tcp_tcph, buf, tcph_len); 11220 tcp->tcp_sum = (tcp->tcp_ipha->ipha_dst >> 16) + 11221 (tcp->tcp_ipha->ipha_dst & 0xffff); 11222 len = tcp_opt_rev_src_route(ipha, (char *)tcp->tcp_ipha + 11223 IP_SIMPLE_HDR_LENGTH, (uchar_t *)&tcp->tcp_ipha->ipha_dst); 11224 len += IP_SIMPLE_HDR_LENGTH; 11225 tcp->tcp_sum -= ((tcp->tcp_ipha->ipha_dst >> 16) + 11226 (tcp->tcp_ipha->ipha_dst & 0xffff)); 11227 if ((int)tcp->tcp_sum < 0) 11228 tcp->tcp_sum--; 11229 tcp->tcp_sum = (tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16); 11230 tcp->tcp_sum = ntohs((tcp->tcp_sum & 0xFFFF) + (tcp->tcp_sum >> 16)); 11231 tcp->tcp_tcph = (tcph_t *)((char *)tcp->tcp_ipha + len); 11232 bcopy(buf, tcp->tcp_tcph, tcph_len); 11233 tcp->tcp_ip_hdr_len = len; 11234 tcp->tcp_ipha->ipha_version_and_hdr_length = 11235 (IP_VERSION << 4) | (len >> 2); 11236 len += tcph_len; 11237 tcp->tcp_hdr_len = len; 11238 } 11239 11240 /* 11241 * Copy the standard header into its new location, 11242 * lay in the new options and then update the relevant 11243 * fields in both tcp_t and the standard header. 11244 */ 11245 static int 11246 tcp_opt_set_header(tcp_t *tcp, boolean_t checkonly, uchar_t *ptr, uint_t len) 11247 { 11248 uint_t tcph_len; 11249 uint8_t *ip_optp; 11250 tcph_t *new_tcph; 11251 tcp_stack_t *tcps = tcp->tcp_tcps; 11252 11253 if ((len > TCP_MAX_IP_OPTIONS_LENGTH) || (len & 0x3)) 11254 return (EINVAL); 11255 11256 if (len > IP_MAX_OPT_LENGTH - tcp->tcp_label_len) 11257 return (EINVAL); 11258 11259 if (checkonly) { 11260 /* 11261 * do not really set, just pretend to - T_CHECK 11262 */ 11263 return (0); 11264 } 11265 11266 ip_optp = (uint8_t *)tcp->tcp_ipha + IP_SIMPLE_HDR_LENGTH; 11267 if (tcp->tcp_label_len > 0) { 11268 int padlen; 11269 uint8_t opt; 11270 11271 /* convert list termination to no-ops */ 11272 padlen = tcp->tcp_label_len - ip_optp[IPOPT_OLEN]; 11273 ip_optp += ip_optp[IPOPT_OLEN]; 11274 opt = len > 0 ? IPOPT_NOP : IPOPT_EOL; 11275 while (--padlen >= 0) 11276 *ip_optp++ = opt; 11277 } 11278 tcph_len = tcp->tcp_tcp_hdr_len; 11279 new_tcph = (tcph_t *)(ip_optp + len); 11280 ovbcopy(tcp->tcp_tcph, new_tcph, tcph_len); 11281 tcp->tcp_tcph = new_tcph; 11282 bcopy(ptr, ip_optp, len); 11283 11284 len += IP_SIMPLE_HDR_LENGTH + tcp->tcp_label_len; 11285 11286 tcp->tcp_ip_hdr_len = len; 11287 tcp->tcp_ipha->ipha_version_and_hdr_length = 11288 (IP_VERSION << 4) | (len >> 2); 11289 tcp->tcp_hdr_len = len + tcph_len; 11290 if (!TCP_IS_DETACHED(tcp)) { 11291 /* Always allocate room for all options. 
*/ 11292 (void) mi_set_sth_wroff(tcp->tcp_rq, 11293 TCP_MAX_COMBINED_HEADER_LENGTH + tcps->tcps_wroff_xtra); 11294 } 11295 return (0); 11296 } 11297 11298 /* Get callback routine passed to nd_load by tcp_param_register */ 11299 /* ARGSUSED */ 11300 static int 11301 tcp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 11302 { 11303 tcpparam_t *tcppa = (tcpparam_t *)cp; 11304 11305 (void) mi_mpprintf(mp, "%u", tcppa->tcp_param_val); 11306 return (0); 11307 } 11308 11309 /* 11310 * Walk through the param array specified registering each element with the 11311 * named dispatch handler. 11312 */ 11313 static boolean_t 11314 tcp_param_register(IDP *ndp, tcpparam_t *tcppa, int cnt, tcp_stack_t *tcps) 11315 { 11316 for (; cnt-- > 0; tcppa++) { 11317 if (tcppa->tcp_param_name && tcppa->tcp_param_name[0]) { 11318 if (!nd_load(ndp, tcppa->tcp_param_name, 11319 tcp_param_get, tcp_param_set, 11320 (caddr_t)tcppa)) { 11321 nd_free(ndp); 11322 return (B_FALSE); 11323 } 11324 } 11325 } 11326 tcps->tcps_wroff_xtra_param = kmem_zalloc(sizeof (tcpparam_t), 11327 KM_SLEEP); 11328 bcopy(&lcl_tcp_wroff_xtra_param, tcps->tcps_wroff_xtra_param, 11329 sizeof (tcpparam_t)); 11330 if (!nd_load(ndp, tcps->tcps_wroff_xtra_param->tcp_param_name, 11331 tcp_param_get, tcp_param_set_aligned, 11332 (caddr_t)tcps->tcps_wroff_xtra_param)) { 11333 nd_free(ndp); 11334 return (B_FALSE); 11335 } 11336 tcps->tcps_mdt_head_param = kmem_zalloc(sizeof (tcpparam_t), 11337 KM_SLEEP); 11338 bcopy(&lcl_tcp_mdt_head_param, tcps->tcps_mdt_head_param, 11339 sizeof (tcpparam_t)); 11340 if (!nd_load(ndp, tcps->tcps_mdt_head_param->tcp_param_name, 11341 tcp_param_get, tcp_param_set_aligned, 11342 (caddr_t)tcps->tcps_mdt_head_param)) { 11343 nd_free(ndp); 11344 return (B_FALSE); 11345 } 11346 tcps->tcps_mdt_tail_param = kmem_zalloc(sizeof (tcpparam_t), 11347 KM_SLEEP); 11348 bcopy(&lcl_tcp_mdt_tail_param, tcps->tcps_mdt_tail_param, 11349 sizeof (tcpparam_t)); 11350 if (!nd_load(ndp, tcps->tcps_mdt_tail_param->tcp_param_name, 11351 tcp_param_get, tcp_param_set_aligned, 11352 (caddr_t)tcps->tcps_mdt_tail_param)) { 11353 nd_free(ndp); 11354 return (B_FALSE); 11355 } 11356 tcps->tcps_mdt_max_pbufs_param = kmem_zalloc(sizeof (tcpparam_t), 11357 KM_SLEEP); 11358 bcopy(&lcl_tcp_mdt_max_pbufs_param, tcps->tcps_mdt_max_pbufs_param, 11359 sizeof (tcpparam_t)); 11360 if (!nd_load(ndp, tcps->tcps_mdt_max_pbufs_param->tcp_param_name, 11361 tcp_param_get, tcp_param_set_aligned, 11362 (caddr_t)tcps->tcps_mdt_max_pbufs_param)) { 11363 nd_free(ndp); 11364 return (B_FALSE); 11365 } 11366 if (!nd_load(ndp, "tcp_extra_priv_ports", 11367 tcp_extra_priv_ports_get, NULL, NULL)) { 11368 nd_free(ndp); 11369 return (B_FALSE); 11370 } 11371 if (!nd_load(ndp, "tcp_extra_priv_ports_add", 11372 NULL, tcp_extra_priv_ports_add, NULL)) { 11373 nd_free(ndp); 11374 return (B_FALSE); 11375 } 11376 if (!nd_load(ndp, "tcp_extra_priv_ports_del", 11377 NULL, tcp_extra_priv_ports_del, NULL)) { 11378 nd_free(ndp); 11379 return (B_FALSE); 11380 } 11381 if (!nd_load(ndp, "tcp_status", tcp_status_report, NULL, 11382 NULL)) { 11383 nd_free(ndp); 11384 return (B_FALSE); 11385 } 11386 if (!nd_load(ndp, "tcp_bind_hash", tcp_bind_hash_report, 11387 NULL, NULL)) { 11388 nd_free(ndp); 11389 return (B_FALSE); 11390 } 11391 if (!nd_load(ndp, "tcp_listen_hash", 11392 tcp_listen_hash_report, NULL, NULL)) { 11393 nd_free(ndp); 11394 return (B_FALSE); 11395 } 11396 if (!nd_load(ndp, "tcp_conn_hash", tcp_conn_hash_report, 11397 NULL, NULL)) { 11398 nd_free(ndp); 11399 return (B_FALSE); 11400 
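		/*
		 * (Illustrative, not code: the hooks registered in this
		 * function are reached from userland with ndd(1M), e.g.
		 *
		 *	# ndd /dev/tcp tcp_status
		 *	# ndd -set /dev/tcp tcp_wroff_xtra 32
		 *
		 * where sets are bounds-checked against the tcpparam_t
		 * min/max by the tcp_param_set() callbacks registered here.)
		 */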
} 11401 if (!nd_load(ndp, "tcp_acceptor_hash", 11402 tcp_acceptor_hash_report, NULL, NULL)) { 11403 nd_free(ndp); 11404 return (B_FALSE); 11405 } 11406 if (!nd_load(ndp, "tcp_host_param", tcp_host_param_report, 11407 tcp_host_param_set, NULL)) { 11408 nd_free(ndp); 11409 return (B_FALSE); 11410 } 11411 if (!nd_load(ndp, "tcp_host_param_ipv6", 11412 tcp_host_param_report, tcp_host_param_set_ipv6, NULL)) { 11413 nd_free(ndp); 11414 return (B_FALSE); 11415 } 11416 if (!nd_load(ndp, "tcp_1948_phrase", NULL, 11417 tcp_1948_phrase_set, NULL)) { 11418 nd_free(ndp); 11419 return (B_FALSE); 11420 } 11421 if (!nd_load(ndp, "tcp_reserved_port_list", 11422 tcp_reserved_port_list, NULL, NULL)) { 11423 nd_free(ndp); 11424 return (B_FALSE); 11425 } 11426 /* 11427 * Dummy ndd variables - only to convey obsolescence information 11428 * through printing of their name (no get or set routines) 11429 * XXX Remove in future releases ? 11430 */ 11431 if (!nd_load(ndp, 11432 "tcp_close_wait_interval(obsoleted - " 11433 "use tcp_time_wait_interval)", NULL, NULL, NULL)) { 11434 nd_free(ndp); 11435 return (B_FALSE); 11436 } 11437 return (B_TRUE); 11438 } 11439 11440 /* ndd set routine for tcp_wroff_xtra, tcp_mdt_hdr_{head,tail}_min. */ 11441 /* ARGSUSED */ 11442 static int 11443 tcp_param_set_aligned(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 11444 cred_t *cr) 11445 { 11446 long new_value; 11447 tcpparam_t *tcppa = (tcpparam_t *)cp; 11448 11449 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11450 new_value < tcppa->tcp_param_min || 11451 new_value > tcppa->tcp_param_max) { 11452 return (EINVAL); 11453 } 11454 /* 11455 * Need to make sure new_value is a multiple of 4. If it is not, 11456 * round it up. For future 64 bit requirement, we actually make it 11457 * a multiple of 8. 11458 */ 11459 if (new_value & 0x7) { 11460 new_value = (new_value & ~0x7) + 0x8; 11461 } 11462 tcppa->tcp_param_val = new_value; 11463 return (0); 11464 } 11465 11466 /* Set callback routine passed to nd_load by tcp_param_register */ 11467 /* ARGSUSED */ 11468 static int 11469 tcp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 11470 { 11471 long new_value; 11472 tcpparam_t *tcppa = (tcpparam_t *)cp; 11473 11474 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 11475 new_value < tcppa->tcp_param_min || 11476 new_value > tcppa->tcp_param_max) { 11477 return (EINVAL); 11478 } 11479 tcppa->tcp_param_val = new_value; 11480 return (0); 11481 } 11482 11483 /* 11484 * Add a new piece to the tcp reassembly queue. If the gap at the beginning 11485 * is filled, return as much as we can. The message passed in may be 11486 * multi-part, chained using b_cont. "start" is the starting sequence 11487 * number for this piece. 11488 */ 11489 static mblk_t * 11490 tcp_reass(tcp_t *tcp, mblk_t *mp, uint32_t start) 11491 { 11492 uint32_t end; 11493 mblk_t *mp1; 11494 mblk_t *mp2; 11495 mblk_t *next_mp; 11496 uint32_t u1; 11497 tcp_stack_t *tcps = tcp->tcp_tcps; 11498 11499 /* Walk through all the new pieces. */ 11500 do { 11501 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 11502 (uintptr_t)INT_MAX); 11503 end = start + (int)(mp->b_wptr - mp->b_rptr); 11504 next_mp = mp->b_cont; 11505 if (start == end) { 11506 /* Empty. Blast it. 
*/ 11507 freeb(mp); 11508 continue; 11509 } 11510 mp->b_cont = NULL; 11511 TCP_REASS_SET_SEQ(mp, start); 11512 TCP_REASS_SET_END(mp, end); 11513 mp1 = tcp->tcp_reass_tail; 11514 if (!mp1) { 11515 tcp->tcp_reass_tail = mp; 11516 tcp->tcp_reass_head = mp; 11517 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11518 UPDATE_MIB(&tcps->tcps_mib, 11519 tcpInDataUnorderBytes, end - start); 11520 continue; 11521 } 11522 /* New stuff completely beyond tail? */ 11523 if (SEQ_GEQ(start, TCP_REASS_END(mp1))) { 11524 /* Link it on end. */ 11525 mp1->b_cont = mp; 11526 tcp->tcp_reass_tail = mp; 11527 BUMP_MIB(&tcps->tcps_mib, tcpInDataUnorderSegs); 11528 UPDATE_MIB(&tcps->tcps_mib, 11529 tcpInDataUnorderBytes, end - start); 11530 continue; 11531 } 11532 mp1 = tcp->tcp_reass_head; 11533 u1 = TCP_REASS_SEQ(mp1); 11534 /* New stuff at the front? */ 11535 if (SEQ_LT(start, u1)) { 11536 /* Yes... Check for overlap. */ 11537 mp->b_cont = mp1; 11538 tcp->tcp_reass_head = mp; 11539 tcp_reass_elim_overlap(tcp, mp); 11540 continue; 11541 } 11542 /* 11543 * The new piece fits somewhere between the head and tail. 11544 * We find our slot, where mp1 precedes us and mp2 trails. 11545 */ 11546 for (; (mp2 = mp1->b_cont) != NULL; mp1 = mp2) { 11547 u1 = TCP_REASS_SEQ(mp2); 11548 if (SEQ_LEQ(start, u1)) 11549 break; 11550 } 11551 /* Link ourselves in */ 11552 mp->b_cont = mp2; 11553 mp1->b_cont = mp; 11554 11555 /* Trim overlap with following mblk(s) first */ 11556 tcp_reass_elim_overlap(tcp, mp); 11557 11558 /* Trim overlap with preceding mblk */ 11559 tcp_reass_elim_overlap(tcp, mp1); 11560 11561 } while (start = end, mp = next_mp); 11562 mp1 = tcp->tcp_reass_head; 11563 /* Anything ready to go? */ 11564 if (TCP_REASS_SEQ(mp1) != tcp->tcp_rnxt) 11565 return (NULL); 11566 /* Eat what we can off the queue */ 11567 for (;;) { 11568 mp = mp1->b_cont; 11569 end = TCP_REASS_END(mp1); 11570 TCP_REASS_SET_SEQ(mp1, 0); 11571 TCP_REASS_SET_END(mp1, 0); 11572 if (!mp) { 11573 tcp->tcp_reass_tail = NULL; 11574 break; 11575 } 11576 if (end != TCP_REASS_SEQ(mp)) { 11577 mp1->b_cont = NULL; 11578 break; 11579 } 11580 mp1 = mp; 11581 } 11582 mp1 = tcp->tcp_reass_head; 11583 tcp->tcp_reass_head = mp; 11584 return (mp1); 11585 } 11586 11587 /* Eliminate any overlap that mp may have over later mblks */ 11588 static void 11589 tcp_reass_elim_overlap(tcp_t *tcp, mblk_t *mp) 11590 { 11591 uint32_t end; 11592 mblk_t *mp1; 11593 uint32_t u1; 11594 tcp_stack_t *tcps = tcp->tcp_tcps; 11595 11596 end = TCP_REASS_END(mp); 11597 while ((mp1 = mp->b_cont) != NULL) { 11598 u1 = TCP_REASS_SEQ(mp1); 11599 if (!SEQ_GT(end, u1)) 11600 break; 11601 if (!SEQ_GEQ(end, TCP_REASS_END(mp1))) { 11602 mp->b_wptr -= end - u1; 11603 TCP_REASS_SET_END(mp, u1); 11604 BUMP_MIB(&tcps->tcps_mib, tcpInDataPartDupSegs); 11605 UPDATE_MIB(&tcps->tcps_mib, 11606 tcpInDataPartDupBytes, end - u1); 11607 break; 11608 } 11609 mp->b_cont = mp1->b_cont; 11610 TCP_REASS_SET_SEQ(mp1, 0); 11611 TCP_REASS_SET_END(mp1, 0); 11612 freeb(mp1); 11613 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs); 11614 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, end - u1); 11615 } 11616 if (!mp1) 11617 tcp->tcp_reass_tail = mp; 11618 } 11619 11620 /* 11621 * Send up all messages queued on tcp_rcv_list. 
 */
static uint_t
tcp_rcv_drain(queue_t *q, tcp_t *tcp)
{
	mblk_t *mp;
	uint_t ret = 0;
	uint_t thwin;
#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	/* Can't drain on an eager connection */
	if (tcp->tcp_listener != NULL)
		return (ret);

	/*
	 * Handle two cases here: we are currently fused or we were
	 * previously fused and have some urgent data to be delivered
	 * upstream.  The latter happens because we either ran out of
	 * memory or were detached and therefore sending the SIGURG was
	 * deferred until this point.  In either case we pass control
	 * over to tcp_fuse_rcv_drain() since it may need to complete
	 * some work.
	 */
	if ((tcp->tcp_fused || tcp->tcp_fused_sigurg)) {
		ASSERT(tcp->tcp_fused_sigurg_mp != NULL);
		if (tcp_fuse_rcv_drain(q, tcp, tcp->tcp_fused ? NULL :
		    &tcp->tcp_fused_sigurg_mp))
			return (ret);
	}

	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		/* Does this need SSL processing first? */
		if ((tcp->tcp_kssl_ctx != NULL) && (DB_TYPE(mp) == M_DATA)) {
			tcp_kssl_input(tcp, mp);
			continue;
		}
		putnext(q, mp);
	}
	ASSERT(cnt == tcp->tcp_rcv_cnt);
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;

	/* Learn the latest rwnd information that we sent to the other side. */
	thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win))
	    << tcp->tcp_rcv_ws;
	/* This is peer's calculated send window (our receive window). */
	thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
	/*
	 * Increase the receive window to max.  But we need to do receiver
	 * SWS avoidance.  This means that we need to check that the
	 * increase of the receive window is at least 1 MSS.
	 */
	if (canputnext(q) && (q->q_hiwat - thwin >= tcp->tcp_mss)) {
		/*
		 * If the window that the other side knows is less than max
		 * deferred-ACK segments, send an update immediately.
		 */
		if (thwin < tcp->tcp_rack_cur_max * tcp->tcp_mss) {
			BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
			ret = TH_ACK_NEEDED;
		}
		tcp->tcp_rwnd = q->q_hiwat;
	}
	/* No need for the push timer now. */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	return (ret);
}

/*
 * Queue data on tcp_rcv_list which is a b_next chain.
 * tcp_rcv_last_head/tail is the last element of this chain.
 * Each element of the chain is a b_cont chain.
 *
 * M_DATA messages are added to the current element.
 * Other messages are added as new (b_next) elements.
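 *
 * Schematically (a sketch of the shape only):
 *
 *	tcp_rcv_list --> M_DATA -> M_DATA -> M_DATA	(b_cont chain)
 *	                   |
 *	                 b_next
 *	                   v
 *	                 M_PROTO -> ...			(next element)
 *
 * tcp_rcv_last_head points at the last b_next element and
 * tcp_rcv_last_tail at the final b_cont mblk within it.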
11708 */ 11709 void 11710 tcp_rcv_enqueue(tcp_t *tcp, mblk_t *mp, uint_t seg_len) 11711 { 11712 ASSERT(seg_len == msgdsize(mp)); 11713 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_rcv_last_head != NULL); 11714 11715 if (tcp->tcp_rcv_list == NULL) { 11716 ASSERT(tcp->tcp_rcv_last_head == NULL); 11717 tcp->tcp_rcv_list = mp; 11718 tcp->tcp_rcv_last_head = mp; 11719 } else if (DB_TYPE(mp) == DB_TYPE(tcp->tcp_rcv_last_head)) { 11720 tcp->tcp_rcv_last_tail->b_cont = mp; 11721 } else { 11722 tcp->tcp_rcv_last_head->b_next = mp; 11723 tcp->tcp_rcv_last_head = mp; 11724 } 11725 11726 while (mp->b_cont) 11727 mp = mp->b_cont; 11728 11729 tcp->tcp_rcv_last_tail = mp; 11730 tcp->tcp_rcv_cnt += seg_len; 11731 tcp->tcp_rwnd -= seg_len; 11732 } 11733 11734 /* 11735 * DEFAULT TCP ENTRY POINT via squeue on READ side. 11736 * 11737 * This is the default entry function into TCP on the read side. TCP is 11738 * always entered via squeue i.e. using squeue's for mutual exclusion. 11739 * When classifier does a lookup to find the tcp, it also puts a reference 11740 * on the conn structure associated so the tcp is guaranteed to exist 11741 * when we come here. We still need to check the state because it might 11742 * as well has been closed. The squeue processing function i.e. squeue_enter, 11743 * squeue_enter_nodrain, or squeue_drain is responsible for doing the 11744 * CONN_DEC_REF. 11745 * 11746 * Apart from the default entry point, IP also sends packets directly to 11747 * tcp_rput_data for AF_INET fast path and tcp_conn_request for incoming 11748 * connections. 11749 */ 11750 void 11751 tcp_input(void *arg, mblk_t *mp, void *arg2) 11752 { 11753 conn_t *connp = (conn_t *)arg; 11754 tcp_t *tcp = (tcp_t *)connp->conn_tcp; 11755 11756 /* arg2 is the sqp */ 11757 ASSERT(arg2 != NULL); 11758 ASSERT(mp != NULL); 11759 11760 /* 11761 * Don't accept any input on a closed tcp as this TCP logically does 11762 * not exist on the system. Don't proceed further with this TCP. 11763 * For eg. this packet could trigger another close of this tcp 11764 * which would be disastrous for tcp_refcnt. tcp_close_detached / 11765 * tcp_clean_death / tcp_closei_local must be called at most once 11766 * on a TCP. In this case we need to refeed the packet into the 11767 * classifier and figure out where the packet should go. Need to 11768 * preserve the recv_ill somehow. Until we figure that out, for 11769 * now just drop the packet if we can't classify the packet. 11770 */ 11771 if (tcp->tcp_state == TCPS_CLOSED || 11772 tcp->tcp_state == TCPS_BOUND) { 11773 conn_t *new_connp; 11774 ip_stack_t *ipst = tcp->tcp_tcps->tcps_netstack->netstack_ip; 11775 11776 new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst); 11777 if (new_connp != NULL) { 11778 tcp_reinput(new_connp, mp, arg2); 11779 return; 11780 } 11781 /* We failed to classify. For now just drop the packet */ 11782 freemsg(mp); 11783 return; 11784 } 11785 11786 if (DB_TYPE(mp) == M_DATA) 11787 tcp_rput_data(connp, mp, arg2); 11788 else 11789 tcp_rput_common(tcp, mp); 11790 } 11791 11792 /* 11793 * The read side put procedure. 11794 * The packets passed up by ip are assume to be aligned according to 11795 * OK_32PTR and the IP+TCP headers fitting in the first mblk. 11796 */ 11797 static void 11798 tcp_rput_common(tcp_t *tcp, mblk_t *mp) 11799 { 11800 /* 11801 * tcp_rput_data() does not expect M_CTL except for the case 11802 * where tcp_ipv6_recvancillary is set and we get a IN_PKTINFO 11803 * type. 
	 * Need to make sure that any other M_CTLs don't make
	 * it to tcp_rput_data since it is not expecting any and doesn't
	 * check for it.
	 */
	if (DB_TYPE(mp) == M_CTL) {
		switch (*(uint32_t *)(mp->b_rptr)) {
		case TCP_IOC_ABORT_CONN:
			/*
			 * Handle connection abort request.
			 */
			tcp_ioctl_abort_handler(tcp, mp);
			return;
		case IPSEC_IN:
			/*
			 * Only secure ICMP messages arrive in TCP and they
			 * don't go through the data path.
			 */
			tcp_icmp_error(tcp, mp);
			return;
		case IN_PKTINFO:
			/*
			 * Handle IPV6_RECVPKTINFO socket option on AF_INET6
			 * sockets that are receiving IPv4 traffic.
			 */
			ASSERT(tcp->tcp_family == AF_INET6);
			ASSERT(tcp->tcp_ipv6_recvancillary &
			    TCP_IPV6_RECVPKTINFO);
			tcp_rput_data(tcp->tcp_connp, mp,
			    tcp->tcp_connp->conn_sqp);
			return;
		case MDT_IOC_INFO_UPDATE:
			/*
			 * Handle Multidata information update; the
			 * following routine will free the message.
			 */
			if (tcp->tcp_connp->conn_mdt_ok) {
				tcp_mdt_update(tcp,
				    &((ip_mdt_info_t *)mp->b_rptr)->mdt_capab,
				    B_FALSE);
			}
			freemsg(mp);
			return;
		case LSO_IOC_INFO_UPDATE:
			/*
			 * Handle LSO information update; the following
			 * routine will free the message.
			 */
			if (tcp->tcp_connp->conn_lso_ok) {
				tcp_lso_update(tcp,
				    &((ip_lso_info_t *)mp->b_rptr)->lso_capab);
			}
			freemsg(mp);
			return;
		default:
			/*
			 * tcp_icmp_error() will process the M_CTL packets.
			 * Non-ICMP packets, if any, will be discarded in
			 * tcp_icmp_error().  We will process the ICMP packet
			 * even if we are TCP_IS_DETACHED_NONEAGER as the
			 * incoming ICMP packet may result in changing
			 * the tcp_mss, which we would need if we have
			 * packets to retransmit.
			 */
			tcp_icmp_error(tcp, mp);
			return;
		}
	}

	/* No point processing the message if tcp is already closed */
	if (TCP_IS_DETACHED_NONEAGER(tcp)) {
		freemsg(mp);
		return;
	}

	tcp_rput_other(tcp, mp);
}


/* The minimum of smoothed mean deviation in RTO calculation. */
#define	TCP_SD_MIN	400

/*
 * Set RTO for this connection.  The formula is from Jacobson and Karels'
 * "Congestion Avoidance and Control" in SIGCOMM '88.  The variable names
 * are the same as those in Appendix A.2 of that paper.
 *
 * m = new measurement
 * sa = smoothed RTT average (8 * average estimates).
 * sv = smoothed mean deviation (mdev) of RTT (4 * deviation estimates).
 */
static void
tcp_set_rto(tcp_t *tcp, clock_t rtt)
{
	long m = TICK_TO_MSEC(rtt);
	clock_t sa = tcp->tcp_rtt_sa;
	clock_t sv = tcp->tcp_rtt_sd;
	clock_t rto;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	BUMP_MIB(&tcps->tcps_mib, tcpRttUpdate);
	tcp->tcp_rtt_update++;

	/* A non-zero tcp_rtt_sa means we already have samples to update. */
	if (sa != 0) {
		/*
		 * Update average estimator:
		 *	new rtt = 7/8 old rtt + 1/8 Error
		 */

		/* m is now Error in estimate. */
		m -= sa >> 3;
		if ((sa += m) <= 0) {
			/*
			 * Don't allow the smoothed average to be negative.
			 * We use 0 to denote reinitialization of the
			 * variables.
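			 *
			 * A worked example of the fixed point update: with
			 * sa == 800 (a smoothed RTT of 100 ms, since sa
			 * holds 8 * average) and a new measurement
			 * m == 120 ms, the error becomes
			 * 120 - (800 >> 3) == 20, so sa moves to 820; the
			 * average thus shifts 1/8 of the way toward the
			 * sample (100 ms -> 102.5 ms).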
11919 */ 11920 sa = 1; 11921 } 11922 11923 /* 11924 * Update deviation estimator: 11925 * new mdev = 3/4 old mdev + 1/4 (abs(Error) - old mdev) 11926 */ 11927 if (m < 0) 11928 m = -m; 11929 m -= sv >> 2; 11930 sv += m; 11931 } else { 11932 /* 11933 * This follows BSD's implementation. So the reinitialized 11934 * RTO is 3 * m. We cannot go less than 2 because if the 11935 * link is bandwidth dominated, doubling the window size 11936 * during slow start means doubling the RTT. We want to be 11937 * more conservative when we reinitialize our estimates. 3 11938 * is just a convenient number. 11939 */ 11940 sa = m << 3; 11941 sv = m << 1; 11942 } 11943 if (sv < TCP_SD_MIN) { 11944 /* 11945 * We do not know that if sa captures the delay ACK 11946 * effect as in a long train of segments, a receiver 11947 * does not delay its ACKs. So set the minimum of sv 11948 * to be TCP_SD_MIN, which is default to 400 ms, twice 11949 * of BSD DATO. That means the minimum of mean 11950 * deviation is 100 ms. 11951 * 11952 */ 11953 sv = TCP_SD_MIN; 11954 } 11955 tcp->tcp_rtt_sa = sa; 11956 tcp->tcp_rtt_sd = sv; 11957 /* 11958 * RTO = average estimates (sa / 8) + 4 * deviation estimates (sv) 11959 * 11960 * Add tcp_rexmit_interval extra in case of extreme environment 11961 * where the algorithm fails to work. The default value of 11962 * tcp_rexmit_interval_extra should be 0. 11963 * 11964 * As we use a finer grained clock than BSD and update 11965 * RTO for every ACKs, add in another .25 of RTT to the 11966 * deviation of RTO to accomodate burstiness of 1/4 of 11967 * window size. 11968 */ 11969 rto = (sa >> 3) + sv + tcps->tcps_rexmit_interval_extra + (sa >> 5); 11970 11971 if (rto > tcps->tcps_rexmit_interval_max) { 11972 tcp->tcp_rto = tcps->tcps_rexmit_interval_max; 11973 } else if (rto < tcps->tcps_rexmit_interval_min) { 11974 tcp->tcp_rto = tcps->tcps_rexmit_interval_min; 11975 } else { 11976 tcp->tcp_rto = rto; 11977 } 11978 11979 /* Now, we can reset tcp_timer_backoff to use the new RTO... */ 11980 tcp->tcp_timer_backoff = 0; 11981 } 11982 11983 /* 11984 * tcp_get_seg_mp() is called to get the pointer to a segment in the 11985 * send queue which starts at the given seq. no. 11986 * 11987 * Parameters: 11988 * tcp_t *tcp: the tcp instance pointer. 11989 * uint32_t seq: the starting seq. no of the requested segment. 11990 * int32_t *off: after the execution, *off will be the offset to 11991 * the returned mblk which points to the requested seq no. 11992 * It is the caller's responsibility to send in a non-null off. 11993 * 11994 * Return: 11995 * A mblk_t pointer pointing to the requested segment in send queue. 11996 */ 11997 static mblk_t * 11998 tcp_get_seg_mp(tcp_t *tcp, uint32_t seq, int32_t *off) 11999 { 12000 int32_t cnt; 12001 mblk_t *mp; 12002 12003 /* Defensive coding. Make sure we don't send incorrect data. */ 12004 if (SEQ_LT(seq, tcp->tcp_suna) || SEQ_GEQ(seq, tcp->tcp_snxt)) 12005 return (NULL); 12006 12007 cnt = seq - tcp->tcp_suna; 12008 mp = tcp->tcp_xmit_head; 12009 while (cnt > 0 && mp != NULL) { 12010 cnt -= mp->b_wptr - mp->b_rptr; 12011 if (cnt < 0) { 12012 cnt += mp->b_wptr - mp->b_rptr; 12013 break; 12014 } 12015 mp = mp->b_cont; 12016 } 12017 ASSERT(mp != NULL); 12018 *off = cnt; 12019 return (mp); 12020 } 12021 12022 /* 12023 * This function handles all retransmissions if SACK is enabled for this 12024 * connection. First it calculates how many segments can be retransmitted 12025 * based on tcp_pipe. Then it goes thru the notsack list to find eligible 12026 * segments. 
A segment is eligible if sack_cnt for that segment is greater 12027 * than or equal tcp_dupack_fast_retransmit. After it has retransmitted 12028 * all eligible segments, it checks to see if TCP can send some new segments 12029 * (fast recovery). If it can, set the appropriate flag for tcp_rput_data(). 12030 * 12031 * Parameters: 12032 * tcp_t *tcp: the tcp structure of the connection. 12033 * uint_t *flags: in return, appropriate value will be set for 12034 * tcp_rput_data(). 12035 */ 12036 static void 12037 tcp_sack_rxmit(tcp_t *tcp, uint_t *flags) 12038 { 12039 notsack_blk_t *notsack_blk; 12040 int32_t usable_swnd; 12041 int32_t mss; 12042 uint32_t seg_len; 12043 mblk_t *xmit_mp; 12044 tcp_stack_t *tcps = tcp->tcp_tcps; 12045 12046 ASSERT(tcp->tcp_sack_info != NULL); 12047 ASSERT(tcp->tcp_notsack_list != NULL); 12048 ASSERT(tcp->tcp_rexmit == B_FALSE); 12049 12050 /* Defensive coding in case there is a bug... */ 12051 if (tcp->tcp_notsack_list == NULL) { 12052 return; 12053 } 12054 notsack_blk = tcp->tcp_notsack_list; 12055 mss = tcp->tcp_mss; 12056 12057 /* 12058 * Limit the num of outstanding data in the network to be 12059 * tcp_cwnd_ssthresh, which is half of the original congestion wnd. 12060 */ 12061 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 12062 12063 /* At least retransmit 1 MSS of data. */ 12064 if (usable_swnd <= 0) { 12065 usable_swnd = mss; 12066 } 12067 12068 /* Make sure no new RTT samples will be taken. */ 12069 tcp->tcp_csuna = tcp->tcp_snxt; 12070 12071 notsack_blk = tcp->tcp_notsack_list; 12072 while (usable_swnd > 0) { 12073 mblk_t *snxt_mp, *tmp_mp; 12074 tcp_seq begin = tcp->tcp_sack_snxt; 12075 tcp_seq end; 12076 int32_t off; 12077 12078 for (; notsack_blk != NULL; notsack_blk = notsack_blk->next) { 12079 if (SEQ_GT(notsack_blk->end, begin) && 12080 (notsack_blk->sack_cnt >= 12081 tcps->tcps_dupack_fast_retransmit)) { 12082 end = notsack_blk->end; 12083 if (SEQ_LT(begin, notsack_blk->begin)) { 12084 begin = notsack_blk->begin; 12085 } 12086 break; 12087 } 12088 } 12089 /* 12090 * All holes are filled. Manipulate tcp_cwnd to send more 12091 * if we can. Note that after the SACK recovery, tcp_cwnd is 12092 * set to tcp_cwnd_ssthresh. 12093 */ 12094 if (notsack_blk == NULL) { 12095 usable_swnd = tcp->tcp_cwnd_ssthresh - tcp->tcp_pipe; 12096 if (usable_swnd <= 0 || tcp->tcp_unsent == 0) { 12097 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna; 12098 ASSERT(tcp->tcp_cwnd > 0); 12099 return; 12100 } else { 12101 usable_swnd = usable_swnd / mss; 12102 tcp->tcp_cwnd = tcp->tcp_snxt - tcp->tcp_suna + 12103 MAX(usable_swnd * mss, mss); 12104 *flags |= TH_XMIT_NEEDED; 12105 return; 12106 } 12107 } 12108 12109 /* 12110 * Note that we may send more than usable_swnd allows here 12111 * because of round off, but no more than 1 MSS of data. 12112 */ 12113 seg_len = end - begin; 12114 if (seg_len > mss) 12115 seg_len = mss; 12116 snxt_mp = tcp_get_seg_mp(tcp, begin, &off); 12117 ASSERT(snxt_mp != NULL); 12118 /* This should not happen. Defensive coding again... */ 12119 if (snxt_mp == NULL) { 12120 return; 12121 } 12122 12123 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, seg_len, &off, 12124 &tmp_mp, begin, B_TRUE, &seg_len, B_TRUE); 12125 if (xmit_mp == NULL) 12126 return; 12127 12128 usable_swnd -= seg_len; 12129 tcp->tcp_pipe += seg_len; 12130 tcp->tcp_sack_snxt = begin + seg_len; 12131 TCP_RECORD_TRACE(tcp, xmit_mp, TCP_TRACE_SEND_PKT); 12132 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 12133 12134 /* 12135 * Update the send timestamp to avoid false retransmission. 
12136 */ 12137 snxt_mp->b_prev = (mblk_t *)lbolt; 12138 12139 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 12140 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, seg_len); 12141 BUMP_MIB(&tcps->tcps_mib, tcpOutSackRetransSegs); 12142 /* 12143 * Update tcp_rexmit_max to extend this SACK recovery phase. 12144 * This happens when new data sent during fast recovery is 12145 * also lost. If TCP retransmits those new data, it needs 12146 * to extend SACK recover phase to avoid starting another 12147 * fast retransmit/recovery unnecessarily. 12148 */ 12149 if (SEQ_GT(tcp->tcp_sack_snxt, tcp->tcp_rexmit_max)) { 12150 tcp->tcp_rexmit_max = tcp->tcp_sack_snxt; 12151 } 12152 } 12153 } 12154 12155 /* 12156 * This function handles policy checking at TCP level for non-hard_bound/ 12157 * detached connections. 12158 */ 12159 static boolean_t 12160 tcp_check_policy(tcp_t *tcp, mblk_t *first_mp, ipha_t *ipha, ip6_t *ip6h, 12161 boolean_t secure, boolean_t mctl_present) 12162 { 12163 ipsec_latch_t *ipl = NULL; 12164 ipsec_action_t *act = NULL; 12165 mblk_t *data_mp; 12166 ipsec_in_t *ii; 12167 const char *reason; 12168 kstat_named_t *counter; 12169 tcp_stack_t *tcps = tcp->tcp_tcps; 12170 ipsec_stack_t *ipss; 12171 ip_stack_t *ipst; 12172 12173 ASSERT(mctl_present || !secure); 12174 12175 ASSERT((ipha == NULL && ip6h != NULL) || 12176 (ip6h == NULL && ipha != NULL)); 12177 12178 /* 12179 * We don't necessarily have an ipsec_in_act action to verify 12180 * policy because of assymetrical policy where we have only 12181 * outbound policy and no inbound policy (possible with global 12182 * policy). 12183 */ 12184 if (!secure) { 12185 if (act == NULL || act->ipa_act.ipa_type == IPSEC_ACT_BYPASS || 12186 act->ipa_act.ipa_type == IPSEC_ACT_CLEAR) 12187 return (B_TRUE); 12188 ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH, 12189 "tcp_check_policy", ipha, ip6h, secure, 12190 tcps->tcps_netstack); 12191 ipss = tcps->tcps_netstack->netstack_ipsec; 12192 12193 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 12194 DROPPER(ipss, ipds_tcp_clear), 12195 &tcps->tcps_dropper); 12196 return (B_FALSE); 12197 } 12198 12199 /* 12200 * We have a secure packet. 12201 */ 12202 if (act == NULL) { 12203 ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED, 12204 "tcp_check_policy", ipha, ip6h, secure, 12205 tcps->tcps_netstack); 12206 ipss = tcps->tcps_netstack->netstack_ipsec; 12207 12208 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, 12209 DROPPER(ipss, ipds_tcp_secure), 12210 &tcps->tcps_dropper); 12211 return (B_FALSE); 12212 } 12213 12214 /* 12215 * XXX This whole routine is currently incorrect. ipl should 12216 * be set to the latch pointer, but is currently not set, so 12217 * we initialize it to NULL to avoid picking up random garbage. 
12218 */ 12219 if (ipl == NULL) 12220 return (B_TRUE); 12221 12222 data_mp = first_mp->b_cont; 12223 12224 ii = (ipsec_in_t *)first_mp->b_rptr; 12225 12226 ipst = tcps->tcps_netstack->netstack_ip; 12227 12228 if (ipsec_check_ipsecin_latch(ii, data_mp, ipl, ipha, ip6h, &reason, 12229 &counter, tcp->tcp_connp)) { 12230 BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded); 12231 return (B_TRUE); 12232 } 12233 (void) strlog(TCP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE, 12234 "tcp inbound policy mismatch: %s, packet dropped\n", 12235 reason); 12236 BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed); 12237 12238 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter, 12239 &tcps->tcps_dropper); 12240 return (B_FALSE); 12241 } 12242 12243 /* 12244 * tcp_ss_rexmit() is called in tcp_rput_data() to do slow start 12245 * retransmission after a timeout. 12246 * 12247 * To limit the number of duplicate segments, we limit the number of segment 12248 * to be sent in one time to tcp_snd_burst, the burst variable. 12249 */ 12250 static void 12251 tcp_ss_rexmit(tcp_t *tcp) 12252 { 12253 uint32_t snxt; 12254 uint32_t smax; 12255 int32_t win; 12256 int32_t mss; 12257 int32_t off; 12258 int32_t burst = tcp->tcp_snd_burst; 12259 mblk_t *snxt_mp; 12260 tcp_stack_t *tcps = tcp->tcp_tcps; 12261 12262 /* 12263 * Note that tcp_rexmit can be set even though TCP has retransmitted 12264 * all unack'ed segments. 12265 */ 12266 if (SEQ_LT(tcp->tcp_rexmit_nxt, tcp->tcp_rexmit_max)) { 12267 smax = tcp->tcp_rexmit_max; 12268 snxt = tcp->tcp_rexmit_nxt; 12269 if (SEQ_LT(snxt, tcp->tcp_suna)) { 12270 snxt = tcp->tcp_suna; 12271 } 12272 win = MIN(tcp->tcp_cwnd, tcp->tcp_swnd); 12273 win -= snxt - tcp->tcp_suna; 12274 mss = tcp->tcp_mss; 12275 snxt_mp = tcp_get_seg_mp(tcp, snxt, &off); 12276 12277 while (SEQ_LT(snxt, smax) && (win > 0) && 12278 (burst > 0) && (snxt_mp != NULL)) { 12279 mblk_t *xmit_mp; 12280 mblk_t *old_snxt_mp = snxt_mp; 12281 uint32_t cnt = mss; 12282 12283 if (win < cnt) { 12284 cnt = win; 12285 } 12286 if (SEQ_GT(snxt + cnt, smax)) { 12287 cnt = smax - snxt; 12288 } 12289 xmit_mp = tcp_xmit_mp(tcp, snxt_mp, cnt, &off, 12290 &snxt_mp, snxt, B_TRUE, &cnt, B_TRUE); 12291 if (xmit_mp == NULL) 12292 return; 12293 12294 tcp_send_data(tcp, tcp->tcp_wq, xmit_mp); 12295 12296 snxt += cnt; 12297 win -= cnt; 12298 /* 12299 * Update the send timestamp to avoid false 12300 * retransmission. 12301 */ 12302 old_snxt_mp->b_prev = (mblk_t *)lbolt; 12303 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 12304 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, cnt); 12305 12306 tcp->tcp_rexmit_nxt = snxt; 12307 burst--; 12308 } 12309 /* 12310 * If we have transmitted all we have at the time 12311 * we started the retranmission, we can leave 12312 * the rest of the job to tcp_wput_data(). But we 12313 * need to check the send window first. If the 12314 * win is not 0, go on with tcp_wput_data(). 12315 */ 12316 if (SEQ_LT(snxt, smax) || win == 0) { 12317 return; 12318 } 12319 } 12320 /* Only call tcp_wput_data() if there is data to be sent. */ 12321 if (tcp->tcp_unsent) { 12322 tcp_wput_data(tcp, NULL, B_FALSE); 12323 } 12324 } 12325 12326 /* 12327 * Process all TCP option in SYN segment. Note that this function should 12328 * be called after tcp_adapt_ire() is called so that the necessary info 12329 * from IRE is already set in the tcp structure. 12330 * 12331 * This function sets up the correct tcp_mss value according to the 12332 * MSS option value and our header size. 
 * It also sets up the window scale and timestamp values, and initializes
 * the SACK info blocks.  But it does not change the receive window size
 * after setting the tcp_mss value.  The caller should do the appropriate
 * change.
 */
void
tcp_process_options(tcp_t *tcp, tcph_t *tcph)
{
	int options;
	tcp_opt_t tcpopt;
	uint32_t mss_max;
	char *tmp_tcph;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tcpopt.tcp = NULL;
	options = tcp_parse_options(tcph, &tcpopt);

	/*
	 * Process MSS option.  Note that MSS option value does not account
	 * for IP or TCP options.  This means that it is equal to MTU - minimum
	 * IP+TCP header size, which is 40 bytes for IPv4 and 60 bytes for
	 * IPv6.
	 */
	if (!(options & TCP_OPT_MSS_PRESENT)) {
		if (tcp->tcp_ipversion == IPV4_VERSION)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv4;
		else
			tcpopt.tcp_opt_mss = tcps->tcps_mss_def_ipv6;
	} else {
		if (tcp->tcp_ipversion == IPV4_VERSION)
			mss_max = tcps->tcps_mss_max_ipv4;
		else
			mss_max = tcps->tcps_mss_max_ipv6;
		if (tcpopt.tcp_opt_mss < tcps->tcps_mss_min)
			tcpopt.tcp_opt_mss = tcps->tcps_mss_min;
		else if (tcpopt.tcp_opt_mss > mss_max)
			tcpopt.tcp_opt_mss = mss_max;
	}

	/* Process Window Scale option. */
	if (options & TCP_OPT_WSCALE_PRESENT) {
		tcp->tcp_snd_ws = tcpopt.tcp_opt_wscale;
		tcp->tcp_snd_ws_ok = B_TRUE;
	} else {
		tcp->tcp_snd_ws = B_FALSE;
		tcp->tcp_snd_ws_ok = B_FALSE;
		tcp->tcp_rcv_ws = B_FALSE;
	}

	/* Process Timestamp option. */
	if ((options & TCP_OPT_TSTAMP_PRESENT) &&
	    (tcp->tcp_snd_ts_ok || TCP_IS_DETACHED(tcp))) {
		tmp_tcph = (char *)tcp->tcp_tcph;

		tcp->tcp_snd_ts_ok = B_TRUE;
		tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val;
		tcp->tcp_last_rcv_lbolt = lbolt64;
		ASSERT(OK_32PTR(tmp_tcph));
		ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH);

		/* Fill in our template header with basic timestamp option. */
		tmp_tcph += tcp->tcp_tcp_hdr_len;
		tmp_tcph[0] = TCPOPT_NOP;
		tmp_tcph[1] = TCPOPT_NOP;
		tmp_tcph[2] = TCPOPT_TSTAMP;
		tmp_tcph[3] = TCPOPT_TSTAMP_LEN;
		tcp->tcp_hdr_len += TCPOPT_REAL_TS_LEN;
		tcp->tcp_tcp_hdr_len += TCPOPT_REAL_TS_LEN;
		tcp->tcp_tcph->th_offset_and_rsrvd[0] += (3 << 4);
	} else {
		tcp->tcp_snd_ts_ok = B_FALSE;
	}

	/*
	 * Process SACK options.  If SACK is enabled for this connection,
	 * then allocate the SACK info structure.  Note the following ways
	 * in which tcp_snd_sack_ok can be set to true:
	 *
	 * For active connection: in tcp_adapt_ire() called in
	 * tcp_rput_other(), or in tcp_rput_other() when tcp_sack_permitted
	 * is checked.
	 *
	 * For passive connection: in tcp_adapt_ire() called in
	 * tcp_accept_comm().
	 *
	 * That's the reason why the extra TCP_IS_DETACHED() check is there.
	 * That check makes sure that if we did not send a SACK OK option,
	 * we will not enable SACK for this connection even though the other
	 * side sends us the SACK OK option.  For an active connection, the
	 * SACK info structure has already been allocated.  So we need to
	 * free it if SACK is disabled.
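 *
 * The 3 versus 4 split for tcp_max_sack_blk below falls out of the 40
 * bytes of TCP option space: a SACK option occupies 2 + 8 * n bytes,
 * and the timestamp option with its two leading NOPs consumes 12,
 * leaving room for 3 SACK blocks with timestamps and 4 without.
 *
 * Similarly, a worked example of the MSS computation at the end of
 * this function (Ethernet, IPv4, timestamps on, no IPsec): the peer
 * advertises an MSS option of 1460 (1500 - 40); with the timestamp
 * option our tcp_hdr_len is 20 + 32 == 52, so the effective MSS
 * becomes 1460 - (52 - 40) == 1448 bytes of data per segment.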
12423 */ 12424 if ((options & TCP_OPT_SACK_OK_PRESENT) && 12425 (tcp->tcp_snd_sack_ok || 12426 (tcps->tcps_sack_permitted != 0 && TCP_IS_DETACHED(tcp)))) { 12427 /* This should be true only in the passive case. */ 12428 if (tcp->tcp_sack_info == NULL) { 12429 ASSERT(TCP_IS_DETACHED(tcp)); 12430 tcp->tcp_sack_info = 12431 kmem_cache_alloc(tcp_sack_info_cache, KM_NOSLEEP); 12432 } 12433 if (tcp->tcp_sack_info == NULL) { 12434 tcp->tcp_snd_sack_ok = B_FALSE; 12435 } else { 12436 tcp->tcp_snd_sack_ok = B_TRUE; 12437 if (tcp->tcp_snd_ts_ok) { 12438 tcp->tcp_max_sack_blk = 3; 12439 } else { 12440 tcp->tcp_max_sack_blk = 4; 12441 } 12442 } 12443 } else { 12444 /* 12445 * Resetting tcp_snd_sack_ok to B_FALSE so that 12446 * no SACK info will be used for this 12447 * connection. This assumes that SACK usage 12448 * permission is negotiated. This may need 12449 * to be changed once this is clarified. 12450 */ 12451 if (tcp->tcp_sack_info != NULL) { 12452 ASSERT(tcp->tcp_notsack_list == NULL); 12453 kmem_cache_free(tcp_sack_info_cache, 12454 tcp->tcp_sack_info); 12455 tcp->tcp_sack_info = NULL; 12456 } 12457 tcp->tcp_snd_sack_ok = B_FALSE; 12458 } 12459 12460 /* 12461 * Now we know the exact TCP/IP header length, subtract 12462 * that from tcp_mss to get our side's MSS. 12463 */ 12464 tcp->tcp_mss -= tcp->tcp_hdr_len; 12465 /* 12466 * Here we assume that the other side's header size will be equal to 12467 * our header size. We calculate the real MSS accordingly. Need to 12468 * take into additional stuffs IPsec puts in. 12469 * 12470 * Real MSS = Opt.MSS - (our TCP/IP header - min TCP/IP header) 12471 */ 12472 tcpopt.tcp_opt_mss -= tcp->tcp_hdr_len + tcp->tcp_ipsec_overhead - 12473 ((tcp->tcp_ipversion == IPV4_VERSION ? 12474 IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) + TCP_MIN_HEADER_LENGTH); 12475 12476 /* 12477 * Set MSS to the smaller one of both ends of the connection. 12478 * We should not have called tcp_mss_set() before, but our 12479 * side of the MSS should have been set to a proper value 12480 * by tcp_adapt_ire(). tcp_mss_set() will also set up the 12481 * STREAM head parameters properly. 12482 * 12483 * If we have a larger-than-16-bit window but the other side 12484 * didn't want to do window scale, tcp_rwnd_set() will take 12485 * care of that. 12486 */ 12487 tcp_mss_set(tcp, MIN(tcpopt.tcp_opt_mss, tcp->tcp_mss), B_TRUE); 12488 } 12489 12490 /* 12491 * Sends the T_CONN_IND to the listener. The caller calls this 12492 * functions via squeue to get inside the listener's perimeter 12493 * once the 3 way hand shake is done a T_CONN_IND needs to be 12494 * sent. As an optimization, the caller can call this directly 12495 * if listener's perimeter is same as eager's. 12496 */ 12497 /* ARGSUSED */ 12498 void 12499 tcp_send_conn_ind(void *arg, mblk_t *mp, void *arg2) 12500 { 12501 conn_t *lconnp = (conn_t *)arg; 12502 tcp_t *listener = lconnp->conn_tcp; 12503 tcp_t *tcp; 12504 struct T_conn_ind *conn_ind; 12505 ipaddr_t *addr_cache; 12506 boolean_t need_send_conn_ind = B_FALSE; 12507 tcp_stack_t *tcps = listener->tcp_tcps; 12508 12509 /* retrieve the eager */ 12510 conn_ind = (struct T_conn_ind *)mp->b_rptr; 12511 ASSERT(conn_ind->OPT_offset != 0 && 12512 conn_ind->OPT_length == sizeof (intptr_t)); 12513 bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp, 12514 conn_ind->OPT_length); 12515 12516 /* 12517 * TLI/XTI applications will get confused by 12518 * sending eager as an option since it violates 12519 * the option semantics. 
	 * So remove the eager as an
	 * option since TLI/XTI apps don't need it anyway.
	 */
	if (!TCP_IS_SOCKET(listener)) {
		conn_ind->OPT_length = 0;
		conn_ind->OPT_offset = 0;
	}
	if (listener->tcp_state == TCPS_CLOSED ||
	    TCP_IS_DETACHED(listener)) {
		/*
		 * If listener has closed, it would have caused a
		 * cleanup/blowoff to happen for the eager.  We
		 * just need to return.
		 */
		freemsg(mp);
		return;
	}


	/*
	 * if the conn_req_q is full defer passing up the
	 * T_CONN_IND until space is available after t_accept()
	 * processing
	 */
	mutex_enter(&listener->tcp_eager_lock);

	/*
	 * Take the eager out, if it is in the list of droppable eagers
	 * as we are here because the 3W handshake is over.
	 */
	MAKE_UNDROPPABLE(tcp);

	if (listener->tcp_conn_req_cnt_q < listener->tcp_conn_req_max) {
		tcp_t *tail;

		/*
		 * The eager already has an extra ref put in tcp_rput_data
		 * so that it stays till accept comes back even though it
		 * might get into TCPS_CLOSED as a result of a TH_RST etc.
		 */
		ASSERT(listener->tcp_conn_req_cnt_q0 > 0);
		listener->tcp_conn_req_cnt_q0--;
		listener->tcp_conn_req_cnt_q++;

		/* Move from SYN_RCVD to ESTABLISHED list  */
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		tcp->tcp_eager_prev_q0 = NULL;
		tcp->tcp_eager_next_q0 = NULL;

		/*
		 * Insert at end of the queue because sockfs
		 * sends down T_CONN_RES in chronological
		 * order.  Leaving the older conn indications
		 * at front of the queue helps reduce search
		 * time.
		 */
		tail = listener->tcp_eager_last_q;
		if (tail != NULL)
			tail->tcp_eager_next_q = tcp;
		else
			listener->tcp_eager_next_q = tcp;
		listener->tcp_eager_last_q = tcp;
		tcp->tcp_eager_next_q = NULL;
		/*
		 * Delay sending up the T_conn_ind until we are
		 * done with the eager.  Once we have sent up
		 * the T_conn_ind, the accept can potentially complete
		 * any time and release the refhold we have on the eager.
		 */
		need_send_conn_ind = B_TRUE;
	} else {
		/*
		 * Defer connection on q0 and set deferred
		 * connection bit true
		 */
		tcp->tcp_conn_def_q0 = B_TRUE;

		/* take tcp out of q0 ... */
		tcp->tcp_eager_prev_q0->tcp_eager_next_q0 =
		    tcp->tcp_eager_next_q0;
		tcp->tcp_eager_next_q0->tcp_eager_prev_q0 =
		    tcp->tcp_eager_prev_q0;

		/* ... and place it at the end of q0 */
		tcp->tcp_eager_prev_q0 = listener->tcp_eager_prev_q0;
		tcp->tcp_eager_next_q0 = listener;
		listener->tcp_eager_prev_q0->tcp_eager_next_q0 = tcp;
		listener->tcp_eager_prev_q0 = tcp;
		tcp->tcp_conn.tcp_eager_conn_ind = mp;
	}

	/* we have timed out before */
	if (tcp->tcp_syn_rcvd_timeout != 0) {
		tcp->tcp_syn_rcvd_timeout = 0;
		listener->tcp_syn_rcvd_timeout--;
		if (listener->tcp_syn_defense &&
		    listener->tcp_syn_rcvd_timeout <=
		    (tcps->tcps_conn_req_max_q0 >> 5) &&
		    10*MINUTES < TICK_TO_MSEC(lbolt64 -
		    listener->tcp_last_rcv_lbolt)) {
			/*
			 * Turn off the defense mode if we
			 * believe the SYN attack is over.
12625 */ 12626 listener->tcp_syn_defense = B_FALSE; 12627 if (listener->tcp_ip_addr_cache) { 12628 kmem_free((void *)listener->tcp_ip_addr_cache, 12629 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t)); 12630 listener->tcp_ip_addr_cache = NULL; 12631 } 12632 } 12633 } 12634 addr_cache = (ipaddr_t *)(listener->tcp_ip_addr_cache); 12635 if (addr_cache != NULL) { 12636 /* 12637 * We have finished a 3-way handshake with this 12638 * remote host. This proves the IP addr is good. 12639 * Cache it! 12640 */ 12641 addr_cache[IP_ADDR_CACHE_HASH( 12642 tcp->tcp_remote)] = tcp->tcp_remote; 12643 } 12644 mutex_exit(&listener->tcp_eager_lock); 12645 if (need_send_conn_ind) 12646 putnext(listener->tcp_rq, mp); 12647 } 12648 12649 mblk_t * 12650 tcp_find_pktinfo(tcp_t *tcp, mblk_t *mp, uint_t *ipversp, uint_t *ip_hdr_lenp, 12651 uint_t *ifindexp, ip6_pkt_t *ippp) 12652 { 12653 ip_pktinfo_t *pinfo; 12654 ip6_t *ip6h; 12655 uchar_t *rptr; 12656 mblk_t *first_mp = mp; 12657 boolean_t mctl_present = B_FALSE; 12658 uint_t ifindex = 0; 12659 ip6_pkt_t ipp; 12660 uint_t ipvers; 12661 uint_t ip_hdr_len; 12662 tcp_stack_t *tcps = tcp->tcp_tcps; 12663 12664 rptr = mp->b_rptr; 12665 ASSERT(OK_32PTR(rptr)); 12666 ASSERT(tcp != NULL); 12667 ipp.ipp_fields = 0; 12668 12669 switch (DB_TYPE(mp)) { 12670 case M_CTL: 12671 mp = mp->b_cont; 12672 if (mp == NULL) { 12673 freemsg(first_mp); 12674 return (NULL); 12675 } 12676 if (DB_TYPE(mp) != M_DATA) { 12677 freemsg(first_mp); 12678 return (NULL); 12679 } 12680 mctl_present = B_TRUE; 12681 break; 12682 case M_DATA: 12683 break; 12684 default: 12685 cmn_err(CE_NOTE, "tcp_find_pktinfo: unknown db_type"); 12686 freemsg(mp); 12687 return (NULL); 12688 } 12689 ipvers = IPH_HDR_VERSION(rptr); 12690 if (ipvers == IPV4_VERSION) { 12691 if (tcp == NULL) { 12692 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12693 goto done; 12694 } 12695 12696 ipp.ipp_fields |= IPPF_HOPLIMIT; 12697 ipp.ipp_hoplimit = ((ipha_t *)rptr)->ipha_ttl; 12698 12699 /* 12700 * If we have IN_PKTINFO in an M_CTL and tcp_ipv6_recvancillary 12701 * has TCP_IPV6_RECVPKTINFO set, pass I/F index along in ipp.
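 *
 * For reference, the shape of the message expected here
 * (illustrative layout, not a declaration):
 *
 *	first_mp (M_CTL):	ip_pktinfo_t with
 *				ip_pkt_ulp_type == IN_PKTINFO,
 *				IPF_RECVIF set in ip_pkt_flags, and
 *				the receiving interface in
 *				ip_pkt_ifindex
 *	first_mp->b_cont (M_DATA): IP header, TCP header, payload
 *
 * Only the ifindex is harvested before the M_CTL block is freed.
 *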
12702 */ 12703 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) && 12704 mctl_present) { 12705 pinfo = (ip_pktinfo_t *)first_mp->b_rptr; 12706 if ((MBLKL(first_mp) == sizeof (ip_pktinfo_t)) && 12707 (pinfo->ip_pkt_ulp_type == IN_PKTINFO) && 12708 (pinfo->ip_pkt_flags & IPF_RECVIF)) { 12709 ipp.ipp_fields |= IPPF_IFINDEX; 12710 ipp.ipp_ifindex = pinfo->ip_pkt_ifindex; 12711 ifindex = pinfo->ip_pkt_ifindex; 12712 } 12713 freeb(first_mp); 12714 mctl_present = B_FALSE; 12715 } 12716 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12717 } else { 12718 ip6h = (ip6_t *)rptr; 12719 12720 ASSERT(ipvers == IPV6_VERSION); 12721 ipp.ipp_fields = IPPF_HOPLIMIT | IPPF_TCLASS; 12722 ipp.ipp_tclass = (ip6h->ip6_flow & 0x0FF00000) >> 20; 12723 ipp.ipp_hoplimit = ip6h->ip6_hops; 12724 12725 if (ip6h->ip6_nxt != IPPROTO_TCP) { 12726 uint8_t nexthdrp; 12727 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 12728 12729 /* Look for ifindex information */ 12730 if (ip6h->ip6_nxt == IPPROTO_RAW) { 12731 ip6i_t *ip6i = (ip6i_t *)ip6h; 12732 if ((uchar_t *)&ip6i[1] > mp->b_wptr) { 12733 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs); 12734 freemsg(first_mp); 12735 return (NULL); 12736 } 12737 12738 if (ip6i->ip6i_flags & IP6I_IFINDEX) { 12739 ASSERT(ip6i->ip6i_ifindex != 0); 12740 ipp.ipp_fields |= IPPF_IFINDEX; 12741 ipp.ipp_ifindex = ip6i->ip6i_ifindex; 12742 ifindex = ip6i->ip6i_ifindex; 12743 } 12744 rptr = (uchar_t *)&ip6i[1]; 12745 mp->b_rptr = rptr; 12746 if (rptr == mp->b_wptr) { 12747 mblk_t *mp1; 12748 mp1 = mp->b_cont; 12749 freeb(mp); 12750 mp = mp1; 12751 rptr = mp->b_rptr; 12752 } 12753 if (MBLKL(mp) < IPV6_HDR_LEN + 12754 sizeof (tcph_t)) { 12755 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs); 12756 freemsg(first_mp); 12757 return (NULL); 12758 } 12759 ip6h = (ip6_t *)rptr; 12760 } 12761 12762 /* 12763 * Find any potentially interesting extension headers 12764 * as well as the length of the IPv6 + extension 12765 * headers. 12766 */ 12767 ip_hdr_len = ip_find_hdr_v6(mp, ip6h, &ipp, &nexthdrp); 12768 /* Verify if this is a TCP packet */ 12769 if (nexthdrp != IPPROTO_TCP) { 12770 BUMP_MIB(&ipst->ips_ip_mib, tcpInErrs); 12771 freemsg(first_mp); 12772 return (NULL); 12773 } 12774 } else { 12775 ip_hdr_len = IPV6_HDR_LEN; 12776 } 12777 } 12778 12779 done: 12780 if (ipversp != NULL) 12781 *ipversp = ipvers; 12782 if (ip_hdr_lenp != NULL) 12783 *ip_hdr_lenp = ip_hdr_len; 12784 if (ippp != NULL) 12785 *ippp = ipp; 12786 if (ifindexp != NULL) 12787 *ifindexp = ifindex; 12788 if (mctl_present) { 12789 freeb(first_mp); 12790 } 12791 return (mp); 12792 } 12793 12794 /* 12795 * Handle M_DATA messages from IP. Its called directly from IP via 12796 * squeue for AF_INET type sockets fast path. No M_CTL are expected 12797 * in this path. 12798 * 12799 * For everything else (including AF_INET6 sockets with 'tcp_ipversion' 12800 * v4 and v6), we are called through tcp_input() and a M_CTL can 12801 * be present for options but tcp_find_pktinfo() deals with it. We 12802 * only expect M_DATA packets after tcp_find_pktinfo() is done. 12803 * 12804 * The first argument is always the connp/tcp to which the mp belongs. 12805 * There are no exceptions to this rule. The caller has already put 12806 * a reference on this connp/tcp and once tcp_rput_data() returns, 12807 * the squeue will do the refrele. 12808 * 12809 * The TH_SYN for the listener directly go to tcp_conn_request via 12810 * squeue. 
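 *
 * A minimal sketch of a call site (illustrative only; the tag is
 * made up, and the real callers live in IP and in TCP itself):
 *
 *	CONN_INC_REF(connp);
 *	squeue_enter(connp->conn_sqp, mp, tcp_rput_data, connp,
 *	    SQTAG_TCP_INPUT);
 *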
12811 * 12812 * sqp: NULL = recursive, sqp != NULL means called from squeue 12813 */ 12814 void 12815 tcp_rput_data(void *arg, mblk_t *mp, void *arg2) 12816 { 12817 int32_t bytes_acked; 12818 int32_t gap; 12819 mblk_t *mp1; 12820 uint_t flags; 12821 uint32_t new_swnd = 0; 12822 uchar_t *iphdr; 12823 uchar_t *rptr; 12824 int32_t rgap; 12825 uint32_t seg_ack; 12826 int seg_len; 12827 uint_t ip_hdr_len; 12828 uint32_t seg_seq; 12829 tcph_t *tcph; 12830 int urp; 12831 tcp_opt_t tcpopt; 12832 uint_t ipvers; 12833 ip6_pkt_t ipp; 12834 boolean_t ofo_seg = B_FALSE; /* Out of order segment */ 12835 uint32_t cwnd; 12836 uint32_t add; 12837 int npkt; 12838 int mss; 12839 conn_t *connp = (conn_t *)arg; 12840 squeue_t *sqp = (squeue_t *)arg2; 12841 tcp_t *tcp = connp->conn_tcp; 12842 tcp_stack_t *tcps = tcp->tcp_tcps; 12843 12844 /* 12845 * RST from fused tcp loopback peer should trigger an unfuse. 12846 */ 12847 if (tcp->tcp_fused) { 12848 TCP_STAT(tcps, tcp_fusion_aborted); 12849 tcp_unfuse(tcp); 12850 } 12851 12852 iphdr = mp->b_rptr; 12853 rptr = mp->b_rptr; 12854 ASSERT(OK_32PTR(rptr)); 12855 12856 /* 12857 * An AF_INET socket is not capable of receiving any pktinfo. Do inline 12858 * processing here. For rest call tcp_find_pktinfo to fill up the 12859 * necessary information. 12860 */ 12861 if (IPCL_IS_TCP4(connp)) { 12862 ipvers = IPV4_VERSION; 12863 ip_hdr_len = IPH_HDR_LENGTH(rptr); 12864 } else { 12865 mp = tcp_find_pktinfo(tcp, mp, &ipvers, &ip_hdr_len, 12866 NULL, &ipp); 12867 if (mp == NULL) { 12868 TCP_STAT(tcps, tcp_rput_v6_error); 12869 return; 12870 } 12871 iphdr = mp->b_rptr; 12872 rptr = mp->b_rptr; 12873 } 12874 ASSERT(DB_TYPE(mp) == M_DATA); 12875 12876 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12877 seg_seq = ABE32_TO_U32(tcph->th_seq); 12878 seg_ack = ABE32_TO_U32(tcph->th_ack); 12879 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 12880 seg_len = (int)(mp->b_wptr - rptr) - 12881 (ip_hdr_len + TCP_HDR_LENGTH(tcph)); 12882 if ((mp1 = mp->b_cont) != NULL && mp1->b_datap->db_type == M_DATA) { 12883 do { 12884 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 12885 (uintptr_t)INT_MAX); 12886 seg_len += (int)(mp1->b_wptr - mp1->b_rptr); 12887 } while ((mp1 = mp1->b_cont) != NULL && 12888 mp1->b_datap->db_type == M_DATA); 12889 } 12890 12891 if (tcp->tcp_state == TCPS_TIME_WAIT) { 12892 tcp_time_wait_processing(tcp, mp, seg_seq, seg_ack, 12893 seg_len, tcph); 12894 return; 12895 } 12896 12897 if (sqp != NULL) { 12898 /* 12899 * This is the correct place to update tcp_last_recv_time. Note 12900 * that it is also updated for tcp structure that belongs to 12901 * global and listener queues which do not really need updating. 12902 * But that should not cause any harm. And it is updated for 12903 * all kinds of incoming segments, not only for data segments. 12904 */ 12905 tcp->tcp_last_recv_time = lbolt; 12906 } 12907 12908 flags = (unsigned int)tcph->th_flags[0] & 0xFF; 12909 12910 BUMP_LOCAL(tcp->tcp_ibsegs); 12911 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT); 12912 12913 if ((flags & TH_URG) && sqp != NULL) { 12914 /* 12915 * TCP can't handle urgent pointers that arrive before 12916 * the connection has been accept()ed since it can't 12917 * buffer OOB data. Discard segment if this happens. 12918 * 12919 * We can't just rely on a non-null tcp_listener to indicate 12920 * that the accept() has completed since unlinking of the 12921 * eager and completion of the accept are not atomic. 
12922 * tcp_detached, when it is not set (B_FALSE) indicates 12923 * that the accept() has completed. 12924 * 12925 * Nor can it reassemble urgent pointers, so discard 12926 * if it's not the next segment expected. 12927 * 12928 * Otherwise, collapse chain into one mblk (discard if 12929 * that fails). This makes sure the headers, retransmitted 12930 * data, and new data all are in the same mblk. 12931 */ 12932 ASSERT(mp != NULL); 12933 if (tcp->tcp_detached || !pullupmsg(mp, -1)) { 12934 freemsg(mp); 12935 return; 12936 } 12937 /* Update pointers into message */ 12938 iphdr = rptr = mp->b_rptr; 12939 tcph = (tcph_t *)&rptr[ip_hdr_len]; 12940 if (SEQ_GT(seg_seq, tcp->tcp_rnxt)) { 12941 /* 12942 * Since we can't handle any data with this urgent 12943 * pointer that is out of sequence, we expunge 12944 * the data. This allows us to still register 12945 * the urgent mark and generate the M_PCSIG, 12946 * which we can do. 12947 */ 12948 mp->b_wptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 12949 seg_len = 0; 12950 } 12951 } 12952 12953 switch (tcp->tcp_state) { 12954 case TCPS_SYN_SENT: 12955 if (flags & TH_ACK) { 12956 /* 12957 * Note that our stack cannot send data before a 12958 * connection is established, therefore the 12959 * following check is valid. Otherwise, it has 12960 * to be changed. 12961 */ 12962 if (SEQ_LEQ(seg_ack, tcp->tcp_iss) || 12963 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 12964 freemsg(mp); 12965 if (flags & TH_RST) 12966 return; 12967 tcp_xmit_ctl("TCPS_SYN_SENT-Bad_seq", 12968 tcp, seg_ack, 0, TH_RST); 12969 return; 12970 } 12971 ASSERT(tcp->tcp_suna + 1 == seg_ack); 12972 } 12973 if (flags & TH_RST) { 12974 freemsg(mp); 12975 if (flags & TH_ACK) 12976 (void) tcp_clean_death(tcp, 12977 ECONNREFUSED, 13); 12978 return; 12979 } 12980 if (!(flags & TH_SYN)) { 12981 freemsg(mp); 12982 return; 12983 } 12984 12985 /* Process all TCP options. */ 12986 tcp_process_options(tcp, tcph); 12987 /* 12988 * The following changes our rwnd to be a multiple of the 12989 * MIN(peer MSS, our MSS) for performance reason. 12990 */ 12991 (void) tcp_rwnd_set(tcp, MSS_ROUNDUP(tcp->tcp_rq->q_hiwat, 12992 tcp->tcp_mss)); 12993 12994 /* Is the other end ECN capable? */ 12995 if (tcp->tcp_ecn_ok) { 12996 if ((flags & (TH_ECE|TH_CWR)) != TH_ECE) { 12997 tcp->tcp_ecn_ok = B_FALSE; 12998 } 12999 } 13000 /* 13001 * Clear ECN flags because it may interfere with later 13002 * processing. 13003 */ 13004 flags &= ~(TH_ECE|TH_CWR); 13005 13006 tcp->tcp_irs = seg_seq; 13007 tcp->tcp_rack = seg_seq; 13008 tcp->tcp_rnxt = seg_seq + 1; 13009 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 13010 if (!TCP_IS_DETACHED(tcp)) { 13011 /* Allocate room for SACK options if needed. */ 13012 if (tcp->tcp_snd_sack_ok) { 13013 (void) mi_set_sth_wroff(tcp->tcp_rq, 13014 tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN + 13015 (tcp->tcp_loopback ? 0 : 13016 tcps->tcps_wroff_xtra)); 13017 } else { 13018 (void) mi_set_sth_wroff(tcp->tcp_rq, 13019 tcp->tcp_hdr_len + 13020 (tcp->tcp_loopback ? 0 : 13021 tcps->tcps_wroff_xtra)); 13022 } 13023 } 13024 if (flags & TH_ACK) { 13025 /* 13026 * If we can't get the confirmation upstream, pretend 13027 * we didn't even see this one. 13028 * 13029 * XXX: how can we pretend we didn't see it if we 13030 * have updated rnxt et. al. 13031 * 13032 * For loopback we defer sending up the T_CONN_CON 13033 * until after some checks below. 13034 */ 13035 mp1 = NULL; 13036 if (!tcp_conn_con(tcp, iphdr, tcph, mp, 13037 tcp->tcp_loopback ? 
&mp1 : NULL)) { 13038 freemsg(mp); 13039 return; 13040 } 13041 /* SYN was acked - making progress */ 13042 if (tcp->tcp_ipversion == IPV6_VERSION) 13043 tcp->tcp_ip_forward_progress = B_TRUE; 13044 13045 /* One for the SYN */ 13046 tcp->tcp_suna = tcp->tcp_iss + 1; 13047 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 13048 tcp->tcp_state = TCPS_ESTABLISHED; 13049 13050 /* 13051 * If SYN was retransmitted, need to reset all 13052 * retransmission info. This is because this 13053 * segment will be treated as a dup ACK. 13054 */ 13055 if (tcp->tcp_rexmit) { 13056 tcp->tcp_rexmit = B_FALSE; 13057 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 13058 tcp->tcp_rexmit_max = tcp->tcp_snxt; 13059 tcp->tcp_snd_burst = tcp->tcp_localnet ? 13060 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 13061 tcp->tcp_ms_we_have_waited = 0; 13062 13063 /* 13064 * Set tcp_cwnd back to 1 MSS, per 13065 * recommendation from 13066 * draft-floyd-incr-init-win-01.txt, 13067 * Increasing TCP's Initial Window. 13068 */ 13069 tcp->tcp_cwnd = tcp->tcp_mss; 13070 } 13071 13072 tcp->tcp_swl1 = seg_seq; 13073 tcp->tcp_swl2 = seg_ack; 13074 13075 new_swnd = BE16_TO_U16(tcph->th_win); 13076 tcp->tcp_swnd = new_swnd; 13077 if (new_swnd > tcp->tcp_max_swnd) 13078 tcp->tcp_max_swnd = new_swnd; 13079 13080 /* 13081 * Always send the three-way handshake ack immediately 13082 * in order to make the connection complete as soon as 13083 * possible on the accepting host. 13084 */ 13085 flags |= TH_ACK_NEEDED; 13086 13087 /* 13088 * Special case for loopback. At this point we have 13089 * received SYN-ACK from the remote endpoint. In 13090 * order to ensure that both endpoints reach the 13091 * fused state prior to any data exchange, the final 13092 * ACK needs to be sent before we indicate T_CONN_CON 13093 * to the module upstream. 13094 */ 13095 if (tcp->tcp_loopback) { 13096 mblk_t *ack_mp; 13097 13098 ASSERT(!tcp->tcp_unfusable); 13099 ASSERT(mp1 != NULL); 13100 /* 13101 * For loopback, we always get a pure SYN-ACK 13102 * and only need to send back the final ACK 13103 * with no data (this is because the other 13104 * tcp is ours and we don't do T/TCP). This 13105 * final ACK triggers the passive side to 13106 * perform fusion in ESTABLISHED state. 13107 */ 13108 if ((ack_mp = tcp_ack_mp(tcp)) != NULL) { 13109 if (tcp->tcp_ack_tid != 0) { 13110 (void) TCP_TIMER_CANCEL(tcp, 13111 tcp->tcp_ack_tid); 13112 tcp->tcp_ack_tid = 0; 13113 } 13114 TCP_RECORD_TRACE(tcp, ack_mp, 13115 TCP_TRACE_SEND_PKT); 13116 tcp_send_data(tcp, tcp->tcp_wq, ack_mp); 13117 BUMP_LOCAL(tcp->tcp_obsegs); 13118 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 13119 13120 /* Send up T_CONN_CON */ 13121 putnext(tcp->tcp_rq, mp1); 13122 13123 freemsg(mp); 13124 return; 13125 } 13126 /* 13127 * Forget fusion; we need to handle more 13128 * complex cases below. Send the deferred 13129 * T_CONN_CON message upstream and proceed 13130 * as usual. Mark this tcp as not capable 13131 * of fusion. 13132 */ 13133 TCP_STAT(tcps, tcp_fusion_unfusable); 13134 tcp->tcp_unfusable = B_TRUE; 13135 putnext(tcp->tcp_rq, mp1); 13136 } 13137 13138 /* 13139 * Check to see if there is data to be sent. If 13140 * yes, set the transmit flag. Then check to see 13141 * if received data processing needs to be done. 13142 * If not, go straight to xmit_check. This short 13143 * cut is OK as we don't support T/TCP. 
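 *
 * For orientation, the TH_* work flags accumulated in 'flags'
 * here and below are all acted on near the end of this function,
 * roughly:
 *
 *	TH_REXMIT_NEEDED	retransmit right away (fast rexmit)
 *	TH_NEED_SACK_REXMIT	retransmit from the notsack list
 *	TH_XMIT_NEEDED		new data may be sent (tcp_wput_data())
 *	TH_LIMIT_XMIT		RFC 3042 limited transmit
 *	TH_ACK_NEEDED		send an ACK now (ack_check)
 *	TH_SEND_URP_MARK	pass up the urgent-mark message
 *
 * (see the xmit_check and ack_check labels further down).
 *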
13144 */ 13145 if (tcp->tcp_unsent) 13146 flags |= TH_XMIT_NEEDED; 13147 13148 if (seg_len == 0 && !(flags & TH_URG)) { 13149 freemsg(mp); 13150 goto xmit_check; 13151 } 13152 13153 flags &= ~TH_SYN; 13154 seg_seq++; 13155 break; 13156 } 13157 tcp->tcp_state = TCPS_SYN_RCVD; 13158 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, tcp->tcp_mss, 13159 NULL, NULL, tcp->tcp_iss, B_FALSE, NULL, B_FALSE); 13160 if (mp1) { 13161 DB_CPID(mp1) = tcp->tcp_cpid; 13162 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 13163 tcp_send_data(tcp, tcp->tcp_wq, mp1); 13164 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 13165 } 13166 freemsg(mp); 13167 return; 13168 case TCPS_SYN_RCVD: 13169 if (flags & TH_ACK) { 13170 /* 13171 * In this state, a SYN|ACK packet is either bogus 13172 * because the other side must be ACKing our SYN which 13173 * indicates it has seen the ACK for their SYN and 13174 * shouldn't retransmit it or we're crossing SYNs 13175 * on active open. 13176 */ 13177 if ((flags & TH_SYN) && !tcp->tcp_active_open) { 13178 freemsg(mp); 13179 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_syn", 13180 tcp, seg_ack, 0, TH_RST); 13181 return; 13182 } 13183 /* 13184 * NOTE: RFC 793 pg. 72 says this should be 13185 * tcp->tcp_suna <= seg_ack <= tcp->tcp_snxt 13186 * but that would mean we have an ack that ignored 13187 * our SYN. 13188 */ 13189 if (SEQ_LEQ(seg_ack, tcp->tcp_suna) || 13190 SEQ_GT(seg_ack, tcp->tcp_snxt)) { 13191 freemsg(mp); 13192 tcp_xmit_ctl("TCPS_SYN_RCVD-bad_ack", 13193 tcp, seg_ack, 0, TH_RST); 13194 return; 13195 } 13196 } 13197 break; 13198 case TCPS_LISTEN: 13199 /* 13200 * Only a TLI listener can come through this path when a 13201 * acceptor is going back to be a listener and a packet 13202 * for the acceptor hits the classifier. For a socket 13203 * listener, this can never happen because a listener 13204 * can never accept connection on itself and hence a 13205 * socket acceptor can not go back to being a listener. 13206 */ 13207 ASSERT(!TCP_IS_SOCKET(tcp)); 13208 /*FALLTHRU*/ 13209 case TCPS_CLOSED: 13210 case TCPS_BOUND: { 13211 conn_t *new_connp; 13212 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 13213 13214 new_connp = ipcl_classify(mp, connp->conn_zoneid, ipst); 13215 if (new_connp != NULL) { 13216 tcp_reinput(new_connp, mp, connp->conn_sqp); 13217 return; 13218 } 13219 /* We failed to classify. For now just drop the packet */ 13220 freemsg(mp); 13221 return; 13222 } 13223 case TCPS_IDLE: 13224 /* 13225 * Handle the case where the tcp_clean_death() has happened 13226 * on a connection (application hasn't closed yet) but a packet 13227 * was already queued on squeue before tcp_clean_death() 13228 * was processed. Calling tcp_clean_death() twice on same 13229 * connection can result in weird behaviour. 13230 */ 13231 freemsg(mp); 13232 return; 13233 default: 13234 break; 13235 } 13236 13237 /* 13238 * Already on the correct queue/perimeter. 13239 * If this is a detached connection and not an eager 13240 * connection hanging off a listener then new data 13241 * (past the FIN) will cause a reset. 13242 * We do a special check here where it 13243 * is out of the main line, rather than check 13244 * if we are detached every time we see new 13245 * data down below. 13246 */ 13247 if (TCP_IS_DETACHED_NONEAGER(tcp) && 13248 (seg_len > 0 && SEQ_GT(seg_seq + seg_len, tcp->tcp_rnxt))) { 13249 BUMP_MIB(&tcps->tcps_mib, tcpInClosed); 13250 TCP_RECORD_TRACE(tcp, 13251 mp, TCP_TRACE_RECV_PKT); 13252 13253 freemsg(mp); 13254 /* 13255 * This could be an SSL closure alert. 
We're detached so just 13256 * acknowledge it this last time. 13257 */ 13258 if (tcp->tcp_kssl_ctx != NULL) { 13259 kssl_release_ctx(tcp->tcp_kssl_ctx); 13260 tcp->tcp_kssl_ctx = NULL; 13261 13262 tcp->tcp_rnxt += seg_len; 13263 U32_TO_ABE32(tcp->tcp_rnxt, tcp->tcp_tcph->th_ack); 13264 flags |= TH_ACK_NEEDED; 13265 goto ack_check; 13266 } 13267 13268 tcp_xmit_ctl("new data when detached", tcp, 13269 tcp->tcp_snxt, 0, TH_RST); 13270 (void) tcp_clean_death(tcp, EPROTO, 12); 13271 return; 13272 } 13273 13274 mp->b_rptr = (uchar_t *)tcph + TCP_HDR_LENGTH(tcph); 13275 urp = BE16_TO_U16(tcph->th_urp) - TCP_OLD_URP_INTERPRETATION; 13276 new_swnd = BE16_TO_U16(tcph->th_win) << 13277 ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws); 13278 13279 if (tcp->tcp_snd_ts_ok) { 13280 if (!tcp_paws_check(tcp, tcph, &tcpopt)) { 13281 /* 13282 * This segment is not acceptable. 13283 * Drop it and send back an ACK. 13284 */ 13285 freemsg(mp); 13286 flags |= TH_ACK_NEEDED; 13287 goto ack_check; 13288 } 13289 } else if (tcp->tcp_snd_sack_ok) { 13290 ASSERT(tcp->tcp_sack_info != NULL); 13291 tcpopt.tcp = tcp; 13292 /* 13293 * SACK info in already updated in tcp_parse_options. Ignore 13294 * all other TCP options... 13295 */ 13296 (void) tcp_parse_options(tcph, &tcpopt); 13297 } 13298 try_again:; 13299 mss = tcp->tcp_mss; 13300 gap = seg_seq - tcp->tcp_rnxt; 13301 rgap = tcp->tcp_rwnd - (gap + seg_len); 13302 /* 13303 * gap is the amount of sequence space between what we expect to see 13304 * and what we got for seg_seq. A positive value for gap means 13305 * something got lost. A negative value means we got some old stuff. 13306 */ 13307 if (gap < 0) { 13308 /* Old stuff present. Is the SYN in there? */ 13309 if (seg_seq == tcp->tcp_irs && (flags & TH_SYN) && 13310 (seg_len != 0)) { 13311 flags &= ~TH_SYN; 13312 seg_seq++; 13313 urp--; 13314 /* Recompute the gaps after noting the SYN. */ 13315 goto try_again; 13316 } 13317 BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs); 13318 UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes, 13319 (seg_len > -gap ? -gap : seg_len)); 13320 /* Remove the old stuff from seg_len. */ 13321 seg_len += gap; 13322 /* 13323 * Anything left? 13324 * Make sure to check for unack'd FIN when rest of data 13325 * has been previously ack'd. 13326 */ 13327 if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) { 13328 /* 13329 * Resets are only valid if they lie within our offered 13330 * window. If the RST bit is set, we just ignore this 13331 * segment. 13332 */ 13333 if (flags & TH_RST) { 13334 freemsg(mp); 13335 return; 13336 } 13337 13338 /* 13339 * The arriving of dup data packets indicate that we 13340 * may have postponed an ack for too long, or the other 13341 * side's RTT estimate is out of shape. Start acking 13342 * more often. 13343 */ 13344 if (SEQ_GEQ(seg_seq + seg_len - gap, tcp->tcp_rack) && 13345 tcp->tcp_rack_cnt >= 1 && 13346 tcp->tcp_rack_abs_max > 2) { 13347 tcp->tcp_rack_abs_max--; 13348 } 13349 tcp->tcp_rack_cur_max = 1; 13350 13351 /* 13352 * This segment is "unacceptable". None of its 13353 * sequence space lies within our advertized window. 13354 * 13355 * Adjust seg_len to the original value for tracing. 
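 *
 * A worked example of the gap/rgap arithmetic (numbers made up):
 * with tcp_rnxt = 1000 and tcp_rwnd = 4000, a segment with
 * seg_seq = 900 and seg_len = 300 gives gap = -100 and
 * rgap = 4000 - (-100 + 300) = 3800; 100 duplicate bytes are
 * trimmed and 200 bytes are new. Had seg_len been 100 or less,
 * nothing would have remained and we would be right here, in the
 * unacceptable-segment path.
 *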
13356 */ 13357 seg_len -= gap; 13358 if (tcp->tcp_debug) { 13359 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13360 "tcp_rput: unacceptable, gap %d, rgap %d, " 13361 "flags 0x%x, seg_seq %u, seg_ack %u, " 13362 "seg_len %d, rnxt %u, snxt %u, %s", 13363 gap, rgap, flags, seg_seq, seg_ack, 13364 seg_len, tcp->tcp_rnxt, tcp->tcp_snxt, 13365 tcp_display(tcp, NULL, 13366 DISP_ADDR_AND_PORT)); 13367 } 13368 13369 /* 13370 * Arrange to send an ACK in response to the 13371 * unacceptable segment per RFC 793 page 69. There 13372 * is only one small difference between ours and the 13373 * acceptability test in the RFC - we accept ACK-only 13374 * packet with SEG.SEQ = RCV.NXT+RCV.WND and no ACK 13375 * will be generated. 13376 * 13377 * Note that we have to ACK an ACK-only packet at least 13378 * for stacks that send 0-length keep-alives with 13379 * SEG.SEQ = SND.NXT-1 as recommended by RFC1122, 13380 * section 4.2.3.6. As long as we don't ever generate 13381 * an unacceptable packet in response to an incoming 13382 * packet that is unacceptable, it should not cause 13383 * "ACK wars". 13384 */ 13385 flags |= TH_ACK_NEEDED; 13386 13387 /* 13388 * Continue processing this segment in order to use the 13389 * ACK information it contains, but skip all other 13390 * sequence-number processing. Processing the ACK 13391 * information is necessary in order to 13392 * re-synchronize connections that may have lost 13393 * synchronization. 13394 * 13395 * We clear seg_len and flag fields related to 13396 * sequence number processing as they are not 13397 * to be trusted for an unacceptable segment. 13398 */ 13399 seg_len = 0; 13400 flags &= ~(TH_SYN | TH_FIN | TH_URG); 13401 goto process_ack; 13402 } 13403 13404 /* Fix seg_seq, and chew the gap off the front. */ 13405 seg_seq = tcp->tcp_rnxt; 13406 urp += gap; 13407 do { 13408 mblk_t *mp2; 13409 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13410 (uintptr_t)UINT_MAX); 13411 gap += (uint_t)(mp->b_wptr - mp->b_rptr); 13412 if (gap > 0) { 13413 mp->b_rptr = mp->b_wptr - gap; 13414 break; 13415 } 13416 mp2 = mp; 13417 mp = mp->b_cont; 13418 freeb(mp2); 13419 } while (gap < 0); 13420 /* 13421 * If the urgent data has already been acknowledged, we 13422 * should ignore TH_URG below 13423 */ 13424 if (urp < 0) 13425 flags &= ~TH_URG; 13426 } 13427 /* 13428 * rgap is the amount of stuff received out of window. A negative 13429 * value is the amount out of window. 13430 */ 13431 if (rgap < 0) { 13432 mblk_t *mp2; 13433 13434 if (tcp->tcp_rwnd == 0) { 13435 BUMP_MIB(&tcps->tcps_mib, tcpInWinProbe); 13436 } else { 13437 BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs); 13438 UPDATE_MIB(&tcps->tcps_mib, 13439 tcpInDataPastWinBytes, -rgap); 13440 } 13441 13442 /* 13443 * seg_len does not include the FIN, so if more than 13444 * just the FIN is out of window, we act like we don't 13445 * see it. (If just the FIN is out of window, rgap 13446 * will be zero and we will go ahead and acknowledge 13447 * the FIN.) 13448 */ 13449 flags &= ~TH_FIN; 13450 13451 /* Fix seg_len and make sure there is something left. */ 13452 seg_len += rgap; 13453 if (seg_len <= 0) { 13454 /* 13455 * Resets are only valid if they lie within our offered 13456 * window. If the RST bit is set, we just ignore this 13457 * segment. 13458 */ 13459 if (flags & TH_RST) { 13460 freemsg(mp); 13461 return; 13462 } 13463 13464 /* Per RFC 793, we need to send back an ACK. */ 13465 flags |= TH_ACK_NEEDED; 13466 13467 /* 13468 * Send SIGURG as soon as possible i.e. 
even 13469 * if the TH_URG was delivered in a window probe 13470 * packet (which will be unacceptable). 13471 * 13472 * We generate a signal if none has been generated 13473 * for this connection or if this is a new urgent 13474 * byte. Also send a zero-length "unmarked" message 13475 * to inform SIOCATMARK that this is not the mark. 13476 * 13477 * tcp_urp_last_valid is cleared when the T_exdata_ind 13478 * is sent up. This plus the check for old data 13479 * (gap >= 0) handles the wraparound of the sequence 13480 * number space without having to always track the 13481 * correct MAX(tcp_urp_last, tcp_rnxt). (BSD tracks 13482 * this max in its rcv_up variable). 13483 * 13484 * This prevents duplicate SIGURGS due to a "late" 13485 * zero-window probe when the T_EXDATA_IND has already 13486 * been sent up. 13487 */ 13488 if ((flags & TH_URG) && 13489 (!tcp->tcp_urp_last_valid || SEQ_GT(urp + seg_seq, 13490 tcp->tcp_urp_last))) { 13491 mp1 = allocb(0, BPRI_MED); 13492 if (mp1 == NULL) { 13493 freemsg(mp); 13494 return; 13495 } 13496 if (!TCP_IS_DETACHED(tcp) && 13497 !putnextctl1(tcp->tcp_rq, M_PCSIG, 13498 SIGURG)) { 13499 /* Try again on the rexmit. */ 13500 freemsg(mp1); 13501 freemsg(mp); 13502 return; 13503 } 13504 /* 13505 * If the next byte would be the mark 13506 * then mark with MARKNEXT else mark 13507 * with NOTMARKNEXT. 13508 */ 13509 if (gap == 0 && urp == 0) 13510 mp1->b_flag |= MSGMARKNEXT; 13511 else 13512 mp1->b_flag |= MSGNOTMARKNEXT; 13513 freemsg(tcp->tcp_urp_mark_mp); 13514 tcp->tcp_urp_mark_mp = mp1; 13515 flags |= TH_SEND_URP_MARK; 13516 tcp->tcp_urp_last_valid = B_TRUE; 13517 tcp->tcp_urp_last = urp + seg_seq; 13518 } 13519 /* 13520 * If this is a zero window probe, continue to 13521 * process the ACK part. But we need to set seg_len 13522 * to 0 to avoid data processing. Otherwise just 13523 * drop the segment and send back an ACK. 13524 */ 13525 if (tcp->tcp_rwnd == 0 && seg_seq == tcp->tcp_rnxt) { 13526 flags &= ~(TH_SYN | TH_URG); 13527 seg_len = 0; 13528 goto process_ack; 13529 } else { 13530 freemsg(mp); 13531 goto ack_check; 13532 } 13533 } 13534 /* Pitch out of window stuff off the end. */ 13535 rgap = seg_len; 13536 mp2 = mp; 13537 do { 13538 ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <= 13539 (uintptr_t)INT_MAX); 13540 rgap -= (int)(mp2->b_wptr - mp2->b_rptr); 13541 if (rgap < 0) { 13542 mp2->b_wptr += rgap; 13543 if ((mp1 = mp2->b_cont) != NULL) { 13544 mp2->b_cont = NULL; 13545 freemsg(mp1); 13546 } 13547 break; 13548 } 13549 } while ((mp2 = mp2->b_cont) != NULL); 13550 } 13551 ok:; 13552 /* 13553 * TCP should check ECN info for segments inside the window only. 13554 * Therefore the check should be done here. 13555 */ 13556 if (tcp->tcp_ecn_ok) { 13557 if (flags & TH_CWR) { 13558 tcp->tcp_ecn_echo_on = B_FALSE; 13559 } 13560 /* 13561 * Note that both ECN_CE and CWR can be set in the 13562 * same segment. In this case, we once again turn 13563 * on ECN_ECHO. 13564 */ 13565 if (tcp->tcp_ipversion == IPV4_VERSION) { 13566 uchar_t tos = ((ipha_t *)rptr)->ipha_type_of_service; 13567 13568 if ((tos & IPH_ECN_CE) == IPH_ECN_CE) { 13569 tcp->tcp_ecn_echo_on = B_TRUE; 13570 } 13571 } else { 13572 uint32_t vcf = ((ip6_t *)rptr)->ip6_vcf; 13573 13574 if ((vcf & htonl(IPH_ECN_CE << 20)) == 13575 htonl(IPH_ECN_CE << 20)) { 13576 tcp->tcp_ecn_echo_on = B_TRUE; 13577 } 13578 } 13579 } 13580 13581 /* 13582 * Check whether we can update tcp_ts_recent. This test is 13583 * NOT the one in RFC 1323 3.4. 
It is from Braden, 1993, "TCP 13584 * Extensions for High Performance: An Update", Internet Draft. 13585 */ 13586 if (tcp->tcp_snd_ts_ok && 13587 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 13588 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 13589 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 13590 tcp->tcp_last_rcv_lbolt = lbolt64; 13591 } 13592 13593 if (seg_seq != tcp->tcp_rnxt || tcp->tcp_reass_head) { 13594 /* 13595 * FIN in an out of order segment. We record this in 13596 * tcp_valid_bits and the seq num of FIN in tcp_ofo_fin_seq. 13597 * Clear the FIN so that any check on FIN flag will fail. 13598 * Remember that FIN also counts in the sequence number 13599 * space. So we need to ack out of order FIN only segments. 13600 */ 13601 if (flags & TH_FIN) { 13602 tcp->tcp_valid_bits |= TCP_OFO_FIN_VALID; 13603 tcp->tcp_ofo_fin_seq = seg_seq + seg_len; 13604 flags &= ~TH_FIN; 13605 flags |= TH_ACK_NEEDED; 13606 } 13607 if (seg_len > 0) { 13608 /* Fill in the SACK blk list. */ 13609 if (tcp->tcp_snd_sack_ok) { 13610 ASSERT(tcp->tcp_sack_info != NULL); 13611 tcp_sack_insert(tcp->tcp_sack_list, 13612 seg_seq, seg_seq + seg_len, 13613 &(tcp->tcp_num_sack_blk)); 13614 } 13615 13616 /* 13617 * Attempt reassembly and see if we have something 13618 * ready to go. 13619 */ 13620 mp = tcp_reass(tcp, mp, seg_seq); 13621 /* Always ack out of order packets */ 13622 flags |= TH_ACK_NEEDED | TH_PUSH; 13623 if (mp) { 13624 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 13625 (uintptr_t)INT_MAX); 13626 seg_len = mp->b_cont ? msgdsize(mp) : 13627 (int)(mp->b_wptr - mp->b_rptr); 13628 seg_seq = tcp->tcp_rnxt; 13629 /* 13630 * A gap is filled and the seq num and len 13631 * of the gap match that of a previously 13632 * received FIN, put the FIN flag back in. 13633 */ 13634 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13635 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13636 flags |= TH_FIN; 13637 tcp->tcp_valid_bits &= 13638 ~TCP_OFO_FIN_VALID; 13639 } 13640 } else { 13641 /* 13642 * Keep going even with NULL mp. 13643 * There may be a useful ACK or something else 13644 * we don't want to miss. 13645 * 13646 * But TCP should not perform fast retransmit 13647 * because of the ack number. TCP uses 13648 * seg_len == 0 to determine if it is a pure 13649 * ACK. And this is not a pure ACK. 13650 */ 13651 seg_len = 0; 13652 ofo_seg = B_TRUE; 13653 } 13654 } 13655 } else if (seg_len > 0) { 13656 BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs); 13657 UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len); 13658 /* 13659 * If an out of order FIN was received before, and the seq 13660 * num and len of the new segment match that of the FIN, 13661 * put the FIN flag back in. 
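 *
 * Example (hypothetical numbers): an out of order segment
 * [5000, 5300) carrying TH_FIN sets tcp_ofo_fin_seq = 5300 and
 * has its FIN stripped. When in-order data later ends exactly at
 * seg_seq + seg_len == 5300, the check below (and its twin in the
 * reassembly path above) restores TH_FIN.
 *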
13662 */ 13663 if ((tcp->tcp_valid_bits & TCP_OFO_FIN_VALID) && 13664 seg_seq + seg_len == tcp->tcp_ofo_fin_seq) { 13665 flags |= TH_FIN; 13666 tcp->tcp_valid_bits &= ~TCP_OFO_FIN_VALID; 13667 } 13668 } 13669 if ((flags & (TH_RST | TH_SYN | TH_URG | TH_ACK)) != TH_ACK) { 13670 if (flags & TH_RST) { 13671 freemsg(mp); 13672 switch (tcp->tcp_state) { 13673 case TCPS_SYN_RCVD: 13674 (void) tcp_clean_death(tcp, ECONNREFUSED, 14); 13675 break; 13676 case TCPS_ESTABLISHED: 13677 case TCPS_FIN_WAIT_1: 13678 case TCPS_FIN_WAIT_2: 13679 case TCPS_CLOSE_WAIT: 13680 (void) tcp_clean_death(tcp, ECONNRESET, 15); 13681 break; 13682 case TCPS_CLOSING: 13683 case TCPS_LAST_ACK: 13684 (void) tcp_clean_death(tcp, 0, 16); 13685 break; 13686 default: 13687 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13688 (void) tcp_clean_death(tcp, ENXIO, 17); 13689 break; 13690 } 13691 return; 13692 } 13693 if (flags & TH_SYN) { 13694 /* 13695 * See RFC 793, Page 71 13696 * 13697 * The seq number must be in the window as it should 13698 * be "fixed" above. If it is outside window, it should 13699 * be already rejected. Note that we allow seg_seq to be 13700 * rnxt + rwnd because we want to accept 0 window probe. 13701 */ 13702 ASSERT(SEQ_GEQ(seg_seq, tcp->tcp_rnxt) && 13703 SEQ_LEQ(seg_seq, tcp->tcp_rnxt + tcp->tcp_rwnd)); 13704 freemsg(mp); 13705 /* 13706 * If the ACK flag is not set, just use our snxt as the 13707 * seq number of the RST segment. 13708 */ 13709 if (!(flags & TH_ACK)) { 13710 seg_ack = tcp->tcp_snxt; 13711 } 13712 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 13713 TH_RST|TH_ACK); 13714 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 13715 (void) tcp_clean_death(tcp, ECONNRESET, 18); 13716 return; 13717 } 13718 /* 13719 * urp could be -1 when the urp field in the packet is 0 13720 * and TCP_OLD_URP_INTERPRETATION is set. This implies that the urgent 13721 * byte was at seg_seq - 1, in which case we ignore the urgent flag. 13722 */ 13723 if (flags & TH_URG && urp >= 0) { 13724 if (!tcp->tcp_urp_last_valid || 13725 SEQ_GT(urp + seg_seq, tcp->tcp_urp_last)) { 13726 /* 13727 * If we haven't generated the signal yet for this 13728 * urgent pointer value, do it now. Also, send up a 13729 * zero-length M_DATA indicating whether or not this is 13730 * the mark. The latter is not needed when a 13731 * T_EXDATA_IND is sent up. However, if there are 13732 * allocation failures this code relies on the sender 13733 * retransmitting and the socket code for determining 13734 * the mark should not block waiting for the peer to 13735 * transmit. Thus, for simplicity we always send up the 13736 * mark indication. 13737 */ 13738 mp1 = allocb(0, BPRI_MED); 13739 if (mp1 == NULL) { 13740 freemsg(mp); 13741 return; 13742 } 13743 if (!TCP_IS_DETACHED(tcp) && 13744 !putnextctl1(tcp->tcp_rq, M_PCSIG, SIGURG)) { 13745 /* Try again on the rexmit. */ 13746 freemsg(mp1); 13747 freemsg(mp); 13748 return; 13749 } 13750 /* 13751 * Mark with NOTMARKNEXT for now. 13752 * The code below will change this to MARKNEXT 13753 * if we are at the mark. 13754 * 13755 * If there are allocation failures (e.g. in dupmsg 13756 * below) the next time tcp_rput_data sees the urgent 13757 * segment it will send up the MSG*MARKNEXT message. 
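 *
 * Sketch of how these zero-length messages are consumed upstream
 * (illustrative; the actual logic lives in the stream head and
 * sockfs):
 *
 *	MSGMARKNEXT set		the very next byte is the mark;
 *				SIOCATMARK reports 1 once all data
 *				before the mark has been read
 *	MSGNOTMARKNEXT set	the next byte is ordinary data;
 *				SIOCATMARK reports 0
 *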
13758 */ 13759 mp1->b_flag |= MSGNOTMARKNEXT; 13760 freemsg(tcp->tcp_urp_mark_mp); 13761 tcp->tcp_urp_mark_mp = mp1; 13762 flags |= TH_SEND_URP_MARK; 13763 #ifdef DEBUG 13764 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13765 "tcp_rput: sent M_PCSIG 2 seq %x urp %x " 13766 "last %x, %s", 13767 seg_seq, urp, tcp->tcp_urp_last, 13768 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13769 #endif /* DEBUG */ 13770 tcp->tcp_urp_last_valid = B_TRUE; 13771 tcp->tcp_urp_last = urp + seg_seq; 13772 } else if (tcp->tcp_urp_mark_mp != NULL) { 13773 /* 13774 * An allocation failure prevented the previous 13775 * tcp_rput_data from sending up the allocated 13776 * MSG*MARKNEXT message - send it up this time 13777 * around. 13778 */ 13779 flags |= TH_SEND_URP_MARK; 13780 } 13781 13782 /* 13783 * If the urgent byte is in this segment, make sure that it is 13784 * all by itself. This makes it much easier to deal with the 13785 * possibility of an allocation failure on the T_exdata_ind. 13786 * Note that seg_len is the number of bytes in the segment, and 13787 * urp is the offset into the segment of the urgent byte. 13788 * urp < seg_len means that the urgent byte is in this segment. 13789 */ 13790 if (urp < seg_len) { 13791 if (seg_len != 1) { 13792 uint32_t tmp_rnxt; 13793 /* 13794 * Break it up and feed it back in. 13795 * Re-attach the IP header. 13796 */ 13797 mp->b_rptr = iphdr; 13798 if (urp > 0) { 13799 /* 13800 * There is stuff before the urgent 13801 * byte. 13802 */ 13803 mp1 = dupmsg(mp); 13804 if (!mp1) { 13805 /* 13806 * Trim from urgent byte on. 13807 * The rest will come back. 13808 */ 13809 (void) adjmsg(mp, 13810 urp - seg_len); 13811 tcp_rput_data(connp, 13812 mp, NULL); 13813 return; 13814 } 13815 (void) adjmsg(mp1, urp - seg_len); 13816 /* Feed this piece back in. */ 13817 tmp_rnxt = tcp->tcp_rnxt; 13818 tcp_rput_data(connp, mp1, NULL); 13819 /* 13820 * If the data passed back in was not 13821 * processed (ie: bad ACK) sending 13822 * the remainder back in will cause a 13823 * loop. In this case, drop the 13824 * packet and let the sender try 13825 * sending a good packet. 13826 */ 13827 if (tmp_rnxt == tcp->tcp_rnxt) { 13828 freemsg(mp); 13829 return; 13830 } 13831 } 13832 if (urp != seg_len - 1) { 13833 uint32_t tmp_rnxt; 13834 /* 13835 * There is stuff after the urgent 13836 * byte. 13837 */ 13838 mp1 = dupmsg(mp); 13839 if (!mp1) { 13840 /* 13841 * Trim everything beyond the 13842 * urgent byte. The rest will 13843 * come back. 13844 */ 13845 (void) adjmsg(mp, 13846 urp + 1 - seg_len); 13847 tcp_rput_data(connp, 13848 mp, NULL); 13849 return; 13850 } 13851 (void) adjmsg(mp1, urp + 1 - seg_len); 13852 tmp_rnxt = tcp->tcp_rnxt; 13853 tcp_rput_data(connp, mp1, NULL); 13854 /* 13855 * If the data passed back in was not 13856 * processed (ie: bad ACK) sending 13857 * the remainder back in will cause a 13858 * loop. In this case, drop the 13859 * packet and let the sender try 13860 * sending a good packet. 13861 */ 13862 if (tmp_rnxt == tcp->tcp_rnxt) { 13863 freemsg(mp); 13864 return; 13865 } 13866 } 13867 tcp_rput_data(connp, mp, NULL); 13868 return; 13869 } 13870 /* 13871 * This segment contains only the urgent byte. We 13872 * have to allocate the T_exdata_ind, if we can. 13873 */ 13874 if (!tcp->tcp_urp_mp) { 13875 struct T_exdata_ind *tei; 13876 mp1 = allocb(sizeof (struct T_exdata_ind), 13877 BPRI_MED); 13878 if (!mp1) { 13879 /* 13880 * Sigh... It'll be back. 13881 * Generate any MSG*MARK message now. 
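 *
 * To recap the splitting above with concrete (made-up) numbers:
 * for a segment of seg_len = 5 whose urgent byte sits at urp = 2,
 * the code feeds the data back to itself in three passes:
 *
 *	1. dup trimmed by adjmsg(mp1, 2 - 5): bytes 0-1, the data
 *	   before the urgent byte;
 *	2. dup trimmed by adjmsg(mp1, 3 - 5): bytes 0-2, of which
 *	   the gap logic discards 0-1, leaving urgent byte 2 all by
 *	   itself -- the one-byte case handled right here;
 *	3. the original mp: bytes 0-2 are discarded as old, so
 *	   bytes 3-4 are processed as ordinary data.
 *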
13882 */ 13883 freemsg(mp); 13884 seg_len = 0; 13885 if (flags & TH_SEND_URP_MARK) { 13886 13887 13888 ASSERT(tcp->tcp_urp_mark_mp); 13889 tcp->tcp_urp_mark_mp->b_flag &= 13890 ~MSGNOTMARKNEXT; 13891 tcp->tcp_urp_mark_mp->b_flag |= 13892 MSGMARKNEXT; 13893 } 13894 goto ack_check; 13895 } 13896 mp1->b_datap->db_type = M_PROTO; 13897 tei = (struct T_exdata_ind *)mp1->b_rptr; 13898 tei->PRIM_type = T_EXDATA_IND; 13899 tei->MORE_flag = 0; 13900 mp1->b_wptr = (uchar_t *)&tei[1]; 13901 tcp->tcp_urp_mp = mp1; 13902 #ifdef DEBUG 13903 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13904 "tcp_rput: allocated exdata_ind %s", 13905 tcp_display(tcp, NULL, 13906 DISP_PORT_ONLY)); 13907 #endif /* DEBUG */ 13908 /* 13909 * There is no need to send a separate MSG*MARK 13910 * message since the T_EXDATA_IND will be sent 13911 * now. 13912 */ 13913 flags &= ~TH_SEND_URP_MARK; 13914 freemsg(tcp->tcp_urp_mark_mp); 13915 tcp->tcp_urp_mark_mp = NULL; 13916 } 13917 /* 13918 * Now we are all set. On the next putnext upstream, 13919 * tcp_urp_mp will be non-NULL and will get prepended 13920 * to what has to be this piece containing the urgent 13921 * byte. If for any reason we abort this segment below, 13922 * if it comes back, we will have this ready, or it 13923 * will get blown off in close. 13924 */ 13925 } else if (urp == seg_len) { 13926 /* 13927 * The urgent byte is the next byte after this sequence 13928 * number. If there is data it is marked with 13929 * MSGMARKNEXT and any tcp_urp_mark_mp is discarded 13930 * since it is not needed. Otherwise, if the code 13931 * above just allocated a zero-length tcp_urp_mark_mp 13932 * message, that message is tagged with MSGMARKNEXT. 13933 * Sending up these MSGMARKNEXT messages makes 13934 * SIOCATMARK work correctly even though 13935 * the T_EXDATA_IND will not be sent up until the 13936 * urgent byte arrives. 
13937 */ 13938 if (seg_len != 0) { 13939 flags |= TH_MARKNEXT_NEEDED; 13940 freemsg(tcp->tcp_urp_mark_mp); 13941 tcp->tcp_urp_mark_mp = NULL; 13942 flags &= ~TH_SEND_URP_MARK; 13943 } else if (tcp->tcp_urp_mark_mp != NULL) { 13944 flags |= TH_SEND_URP_MARK; 13945 tcp->tcp_urp_mark_mp->b_flag &= 13946 ~MSGNOTMARKNEXT; 13947 tcp->tcp_urp_mark_mp->b_flag |= MSGMARKNEXT; 13948 } 13949 #ifdef DEBUG 13950 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13951 "tcp_rput: AT MARK, len %d, flags 0x%x, %s", 13952 seg_len, flags, 13953 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 13954 #endif /* DEBUG */ 13955 } else { 13956 /* Data left until we hit mark */ 13957 #ifdef DEBUG 13958 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 13959 "tcp_rput: URP %d bytes left, %s", 13960 urp - seg_len, tcp_display(tcp, NULL, 13961 DISP_PORT_ONLY)); 13962 #endif /* DEBUG */ 13963 } 13964 } 13965 13966 process_ack: 13967 if (!(flags & TH_ACK)) { 13968 freemsg(mp); 13969 goto xmit_check; 13970 } 13971 } 13972 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 13973 13974 if (tcp->tcp_ipversion == IPV6_VERSION && bytes_acked > 0) 13975 tcp->tcp_ip_forward_progress = B_TRUE; 13976 if (tcp->tcp_state == TCPS_SYN_RCVD) { 13977 if ((tcp->tcp_conn.tcp_eager_conn_ind != NULL) && 13978 ((tcp->tcp_kssl_ent == NULL) || !tcp->tcp_kssl_pending)) { 13979 /* 3-way handshake complete - pass up the T_CONN_IND */ 13980 tcp_t *listener = tcp->tcp_listener; 13981 mblk_t *mp = tcp->tcp_conn.tcp_eager_conn_ind; 13982 13983 tcp->tcp_tconnind_started = B_TRUE; 13984 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 13985 /* 13986 * We are here means eager is fine but it can 13987 * get a TH_RST at any point between now and till 13988 * accept completes and disappear. We need to 13989 * ensure that reference to eager is valid after 13990 * we get out of eager's perimeter. So we do 13991 * an extra refhold. 13992 */ 13993 CONN_INC_REF(connp); 13994 13995 /* 13996 * The listener also exists because of the refhold 13997 * done in tcp_conn_request. Its possible that it 13998 * might have closed. We will check that once we 13999 * get inside listeners context. 14000 */ 14001 CONN_INC_REF(listener->tcp_connp); 14002 if (listener->tcp_connp->conn_sqp == 14003 connp->conn_sqp) { 14004 tcp_send_conn_ind(listener->tcp_connp, mp, 14005 listener->tcp_connp->conn_sqp); 14006 CONN_DEC_REF(listener->tcp_connp); 14007 } else if (!tcp->tcp_loopback) { 14008 squeue_fill(listener->tcp_connp->conn_sqp, mp, 14009 tcp_send_conn_ind, 14010 listener->tcp_connp, SQTAG_TCP_CONN_IND); 14011 } else { 14012 squeue_enter(listener->tcp_connp->conn_sqp, mp, 14013 tcp_send_conn_ind, listener->tcp_connp, 14014 SQTAG_TCP_CONN_IND); 14015 } 14016 } 14017 14018 if (tcp->tcp_active_open) { 14019 /* 14020 * We are seeing the final ack in the three way 14021 * hand shake of a active open'ed connection 14022 * so we must send up a T_CONN_CON 14023 */ 14024 if (!tcp_conn_con(tcp, iphdr, tcph, mp, NULL)) { 14025 freemsg(mp); 14026 return; 14027 } 14028 /* 14029 * Don't fuse the loopback endpoints for 14030 * simultaneous active opens. 
14031 */ 14032 if (tcp->tcp_loopback) { 14033 TCP_STAT(tcps, tcp_fusion_unfusable); 14034 tcp->tcp_unfusable = B_TRUE; 14035 } 14036 } 14037 14038 tcp->tcp_suna = tcp->tcp_iss + 1; /* One for the SYN */ 14039 bytes_acked--; 14040 /* SYN was acked - making progress */ 14041 if (tcp->tcp_ipversion == IPV6_VERSION) 14042 tcp->tcp_ip_forward_progress = B_TRUE; 14043 14044 /* 14045 * If SYN was retransmitted, need to reset all 14046 * retransmission info as this segment will be 14047 * treated as a dup ACK. 14048 */ 14049 if (tcp->tcp_rexmit) { 14050 tcp->tcp_rexmit = B_FALSE; 14051 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 14052 tcp->tcp_rexmit_max = tcp->tcp_snxt; 14053 tcp->tcp_snd_burst = tcp->tcp_localnet ? 14054 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 14055 tcp->tcp_ms_we_have_waited = 0; 14056 tcp->tcp_cwnd = mss; 14057 } 14058 14059 /* 14060 * We set the send window to zero here. 14061 * This is needed if there is data to be 14062 * processed already on the queue. 14063 * Later (at swnd_update label), the 14064 * "new_swnd > tcp_swnd" condition is satisfied 14065 * the XMIT_NEEDED flag is set in the current 14066 * (SYN_RCVD) state. This ensures tcp_wput_data() is 14067 * called if there is already data on queue in 14068 * this state. 14069 */ 14070 tcp->tcp_swnd = 0; 14071 14072 if (new_swnd > tcp->tcp_max_swnd) 14073 tcp->tcp_max_swnd = new_swnd; 14074 tcp->tcp_swl1 = seg_seq; 14075 tcp->tcp_swl2 = seg_ack; 14076 tcp->tcp_state = TCPS_ESTABLISHED; 14077 tcp->tcp_valid_bits &= ~TCP_ISS_VALID; 14078 14079 /* Fuse when both sides are in ESTABLISHED state */ 14080 if (tcp->tcp_loopback && do_tcp_fusion) 14081 tcp_fuse(tcp, iphdr, tcph); 14082 14083 } 14084 /* This code follows 4.4BSD-Lite2 mostly. */ 14085 if (bytes_acked < 0) 14086 goto est; 14087 14088 /* 14089 * If TCP is ECN capable and the congestion experience bit is 14090 * set, reduce tcp_cwnd and tcp_ssthresh. But this should only be 14091 * done once per window (or more loosely, per RTT). 14092 */ 14093 if (tcp->tcp_cwr && SEQ_GT(seg_ack, tcp->tcp_cwr_snd_max)) 14094 tcp->tcp_cwr = B_FALSE; 14095 if (tcp->tcp_ecn_ok && (flags & TH_ECE)) { 14096 if (!tcp->tcp_cwr) { 14097 npkt = ((tcp->tcp_snxt - tcp->tcp_suna) >> 1) / mss; 14098 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * mss; 14099 tcp->tcp_cwnd = npkt * mss; 14100 /* 14101 * If the cwnd is 0, use the timer to clock out 14102 * new segments. This is required by the ECN spec. 14103 */ 14104 if (npkt == 0) { 14105 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 14106 /* 14107 * This makes sure that when the ACK comes 14108 * back, we will increase tcp_cwnd by 1 MSS. 14109 */ 14110 tcp->tcp_cwnd_cnt = 0; 14111 } 14112 tcp->tcp_cwr = B_TRUE; 14113 /* 14114 * This marks the end of the current window of in 14115 * flight data. That is why we don't use 14116 * tcp_suna + tcp_swnd. Only data in flight can 14117 * provide ECN info. 14118 */ 14119 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 14120 tcp->tcp_ecn_cwr_sent = B_FALSE; 14121 } 14122 } 14123 14124 mp1 = tcp->tcp_xmit_head; 14125 if (bytes_acked == 0) { 14126 if (!ofo_seg && seg_len == 0 && new_swnd == tcp->tcp_swnd) { 14127 int dupack_cnt; 14128 14129 BUMP_MIB(&tcps->tcps_mib, tcpInDupAck); 14130 /* 14131 * Fast retransmit. When we have seen exactly three 14132 * identical ACKs while we have unacked data 14133 * outstanding we take it as a hint that our peer 14134 * dropped something. 14135 * 14136 * If TCP is retransmitting, don't do fast retransmit. 14137 */ 14138 if (mp1 && tcp->tcp_suna != tcp->tcp_snxt && 14139 ! 
tcp->tcp_rexmit) { 14140 /* Do Limited Transmit */ 14141 if ((dupack_cnt = ++tcp->tcp_dupack_cnt) < 14142 tcps->tcps_dupack_fast_retransmit) { 14143 /* 14144 * RFC 3042 14145 * 14146 * What we need to do is temporarily 14147 * increase tcp_cwnd so that new 14148 * data can be sent if it is allowed 14149 * by the receive window (tcp_rwnd). 14150 * tcp_wput_data() will take care of 14151 * the rest. 14152 * 14153 * If the connection is SACK capable, 14154 * only do limited xmit when there 14155 * is SACK info. 14156 * 14157 * Note how tcp_cwnd is incremented. 14158 * The first dup ACK will increase 14159 * it by 1 MSS. The second dup ACK 14160 * will increase it by 2 MSS. This 14161 * means that only 1 new segment will 14162 * be sent for each dup ACK. 14163 */ 14164 if (tcp->tcp_unsent > 0 && 14165 (!tcp->tcp_snd_sack_ok || 14166 (tcp->tcp_snd_sack_ok && 14167 tcp->tcp_notsack_list != NULL))) { 14168 tcp->tcp_cwnd += mss << 14169 (tcp->tcp_dupack_cnt - 1); 14170 flags |= TH_LIMIT_XMIT; 14171 } 14172 } else if (dupack_cnt == 14173 tcps->tcps_dupack_fast_retransmit) { 14174 14175 /* 14176 * If we have reduced tcp_ssthresh 14177 * because of ECN, do not reduce it again 14178 * unless it is already one window of data 14179 * away. After one window of data, tcp_cwr 14180 * should then be cleared. Note that 14181 * for non ECN capable connection, tcp_cwr 14182 * should always be false. 14183 * 14184 * Adjust cwnd since the duplicate 14185 * ack indicates that a packet was 14186 * dropped (due to congestion.) 14187 */ 14188 if (!tcp->tcp_cwr) { 14189 npkt = ((tcp->tcp_snxt - 14190 tcp->tcp_suna) >> 1) / mss; 14191 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) * 14192 mss; 14193 tcp->tcp_cwnd = (npkt + 14194 tcp->tcp_dupack_cnt) * mss; 14195 } 14196 if (tcp->tcp_ecn_ok) { 14197 tcp->tcp_cwr = B_TRUE; 14198 tcp->tcp_cwr_snd_max = tcp->tcp_snxt; 14199 tcp->tcp_ecn_cwr_sent = B_FALSE; 14200 } 14201 14202 /* 14203 * We do Hoe's algorithm. Refer to her 14204 * paper "Improving the Start-up Behavior 14205 * of a Congestion Control Scheme for TCP," 14206 * which appeared in SIGCOMM '96. 14207 * 14208 * Save the highest seq no we have sent so far. 14209 * Be careful about the invisible FIN byte. 14210 */ 14211 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 14212 (tcp->tcp_unsent == 0)) { 14213 tcp->tcp_rexmit_max = tcp->tcp_fss; 14214 } else { 14215 tcp->tcp_rexmit_max = tcp->tcp_snxt; 14216 } 14217 14218 /* 14219 * Do not allow bursty traffic during 14220 * fast recovery. Refer to Fall and Floyd's 14221 * paper "Simulation-based Comparisons of 14222 * Tahoe, Reno and SACK TCP" (in CCR). 14223 * This is a best current practice. 14224 */ 14225 tcp->tcp_snd_burst = TCP_CWND_SS; 14226 14227 /* 14228 * For SACK: 14229 * Calculate tcp_pipe, which is the 14230 * estimated number of bytes in the 14231 * network. 14232 * 14233 * tcp_fack is the highest sack'ed seq num 14234 * TCP has received. 14235 * 14236 * tcp_pipe is explained in the above quoted 14237 * Fall and Floyd's paper. tcp_fack is 14238 * explained in Mathis and Mahdavi's 14239 * "Forward Acknowledgment: Refining TCP 14240 * Congestion Control" in SIGCOMM '96. 14241 */ 14242 if (tcp->tcp_snd_sack_ok) { 14243 ASSERT(tcp->tcp_sack_info != NULL); 14244 if (tcp->tcp_notsack_list != NULL) { 14245 tcp->tcp_pipe = tcp->tcp_snxt - 14246 tcp->tcp_fack; 14247 tcp->tcp_sack_snxt = seg_ack; 14248 flags |= TH_NEED_SACK_REXMIT; 14249 } else { 14250 /* 14251 * Always initialize tcp_pipe 14252 * even though we don't have 14253 * any SACK info.
If later 14254 * we get SACK info and 14255 * tcp_pipe is not initialized, 14256 * funny things will happen. 14257 */ 14258 tcp->tcp_pipe = 14259 tcp->tcp_cwnd_ssthresh; 14260 } 14261 } else { 14262 flags |= TH_REXMIT_NEEDED; 14263 } /* tcp_snd_sack_ok */ 14264 14265 } else { 14266 /* 14267 * Here we perform congestion 14268 * avoidance, but NOT slow start. 14269 * This is known as the Fast 14270 * Recovery Algorithm. 14271 */ 14272 if (tcp->tcp_snd_sack_ok && 14273 tcp->tcp_notsack_list != NULL) { 14274 flags |= TH_NEED_SACK_REXMIT; 14275 tcp->tcp_pipe -= mss; 14276 if (tcp->tcp_pipe < 0) 14277 tcp->tcp_pipe = 0; 14278 } else { 14279 /* 14280 * We know that one more packet has 14281 * left the pipe thus we can update 14282 * cwnd. 14283 */ 14284 cwnd = tcp->tcp_cwnd + mss; 14285 if (cwnd > tcp->tcp_cwnd_max) 14286 cwnd = tcp->tcp_cwnd_max; 14287 tcp->tcp_cwnd = cwnd; 14288 if (tcp->tcp_unsent > 0) 14289 flags |= TH_XMIT_NEEDED; 14290 } 14291 } 14292 } 14293 } else if (tcp->tcp_zero_win_probe) { 14294 /* 14295 * If the window has opened, need to arrange 14296 * to send additional data. 14297 */ 14298 if (new_swnd != 0) { 14299 /* tcp_suna != tcp_snxt */ 14300 /* Packet contains a window update */ 14301 BUMP_MIB(&tcps->tcps_mib, tcpInWinUpdate); 14302 tcp->tcp_zero_win_probe = 0; 14303 tcp->tcp_timer_backoff = 0; 14304 tcp->tcp_ms_we_have_waited = 0; 14305 14306 /* 14307 * Transmit starting with tcp_suna since 14308 * the one byte probe is not ack'ed. 14309 * If TCP has sent more than one identical 14310 * probe, tcp_rexmit will be set. That means 14311 * tcp_ss_rexmit() will send out the one 14312 * byte along with new data. Otherwise, 14313 * fake the retransmission. 14314 */ 14315 flags |= TH_XMIT_NEEDED; 14316 if (!tcp->tcp_rexmit) { 14317 tcp->tcp_rexmit = B_TRUE; 14318 tcp->tcp_dupack_cnt = 0; 14319 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 14320 tcp->tcp_rexmit_max = tcp->tcp_suna + 1; 14321 } 14322 } 14323 } 14324 goto swnd_update; 14325 } 14326 14327 /* 14328 * Check for "acceptability" of ACK value per RFC 793, pages 72 - 73. 14329 * If the ACK value acks something that we have not yet sent, it might 14330 * be an old duplicate segment. Send an ACK to re-synchronize the 14331 * other side. 14332 * Note: reset in response to unacceptable ACK in SYN_RECEIVE 14333 * state is handled above, so we can always just drop the segment and 14334 * send an ACK here. 14335 * 14336 * Should we send ACKs in response to ACK only segments? 14337 */ 14338 if (SEQ_GT(seg_ack, tcp->tcp_snxt)) { 14339 BUMP_MIB(&tcps->tcps_mib, tcpInAckUnsent); 14340 /* drop the received segment */ 14341 freemsg(mp); 14342 14343 /* 14344 * Send back an ACK. If tcp_drop_ack_unsent_cnt is 14345 * greater than 0, check if the number of such 14346 * bogus ACks is greater than that count. If yes, 14347 * don't send back any ACK. This prevents TCP from 14348 * getting into an ACK storm if somehow an attacker 14349 * successfully spoofs an acceptable segment to our 14350 * peer. 
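 *
 * Concretely: with tcp_drop_ack_unsent_cnt set to some N > 0, the
 * first N such bogus ACKs each draw the re-synchronizing ACK
 * below; from the (N + 1)th on, the segment is dropped silently
 * and tcp_in_ack_unsent_drop is bumped, bounding any ACK storm at
 * N exchanges.
 *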
14351 */ 14352 if (tcp_drop_ack_unsent_cnt > 0 && 14353 ++tcp->tcp_in_ack_unsent > tcp_drop_ack_unsent_cnt) { 14354 TCP_STAT(tcps, tcp_in_ack_unsent_drop); 14355 return; 14356 } 14357 mp = tcp_ack_mp(tcp); 14358 if (mp != NULL) { 14359 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 14360 BUMP_LOCAL(tcp->tcp_obsegs); 14361 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 14362 tcp_send_data(tcp, tcp->tcp_wq, mp); 14363 } 14364 return; 14365 } 14366 14367 /* 14368 * When TCP gets a new ACK, update the notsack'ed list to delete those 14369 * blocks that are covered by this ACK. 14370 */ 14371 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 14372 tcp_notsack_remove(&(tcp->tcp_notsack_list), seg_ack, 14373 &(tcp->tcp_num_notsack_blk), &(tcp->tcp_cnt_notsack_list)); 14374 } 14375 14376 /* 14377 * If we got an ACK after fast retransmit, check to see 14378 * if it is a partial ACK. If it is not and the congestion 14379 * window was inflated to account for the other side's 14380 * cached packets, retract it. If it is, do Hoe's algorithm. 14381 */ 14382 if (tcp->tcp_dupack_cnt >= tcps->tcps_dupack_fast_retransmit) { 14383 ASSERT(tcp->tcp_rexmit == B_FALSE); 14384 if (SEQ_GEQ(seg_ack, tcp->tcp_rexmit_max)) { 14385 tcp->tcp_dupack_cnt = 0; 14386 /* 14387 * Restore the orig tcp_cwnd_ssthresh after 14388 * fast retransmit phase. 14389 */ 14390 if (tcp->tcp_cwnd > tcp->tcp_cwnd_ssthresh) { 14391 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh; 14392 } 14393 tcp->tcp_rexmit_max = seg_ack; 14394 tcp->tcp_cwnd_cnt = 0; 14395 tcp->tcp_snd_burst = tcp->tcp_localnet ? 14396 TCP_CWND_INFINITE : TCP_CWND_NORMAL; 14397 14398 /* 14399 * Remove all notsack info to avoid confusion with 14400 * the next fast retransmit/recovery phase. 14401 */ 14402 if (tcp->tcp_snd_sack_ok && 14403 tcp->tcp_notsack_list != NULL) { 14404 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 14405 } 14406 } else { 14407 if (tcp->tcp_snd_sack_ok && 14408 tcp->tcp_notsack_list != NULL) { 14409 flags |= TH_NEED_SACK_REXMIT; 14410 tcp->tcp_pipe -= mss; 14411 if (tcp->tcp_pipe < 0) 14412 tcp->tcp_pipe = 0; 14413 } else { 14414 /* 14415 * Hoe's algorithm: 14416 * 14417 * Retransmit the unack'ed segment and 14418 * restart fast recovery. Note that we 14419 * need to scale back tcp_cwnd to the 14420 * original value when we started fast 14421 * recovery. This is to prevent overly 14422 * aggressive behaviour in sending new 14423 * segments. 14424 */ 14425 tcp->tcp_cwnd = tcp->tcp_cwnd_ssthresh + 14426 tcps->tcps_dupack_fast_retransmit * mss; 14427 tcp->tcp_cwnd_cnt = tcp->tcp_cwnd; 14428 flags |= TH_REXMIT_NEEDED; 14429 } 14430 } 14431 } else { 14432 tcp->tcp_dupack_cnt = 0; 14433 if (tcp->tcp_rexmit) { 14434 /* 14435 * TCP is retransmitting. If the ACK acks all 14436 * outstanding data, update tcp_rexmit_max and 14437 * tcp_rexmit_nxt. Otherwise, update tcp_rexmit_nxt 14438 * to the correct value. 14439 * 14440 * Note that SEQ_LEQ() is used. This is to avoid 14441 * unnecessary fast retransmit caused by dup ACKs 14442 * received when TCP does slow start retransmission 14443 * after a time out. During this phase, TCP may 14444 * send out segments which are already received. 14445 * This causes dup ACKs to be sent back.
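 *
 * Stepping back, a worked example of the partial-ACK handling in
 * the fast-recovery block above (numbers made up): suppose fast
 * retransmit began with tcp_rexmit_max = 9000 and
 * tcp_cwnd_ssthresh = 8 * mss. An ACK covering 9000 ends recovery
 * and deflates tcp_cwnd back to tcp_cwnd_ssthresh. An ACK of only
 * 6000 keeps recovery going: in the non-SACK case tcp_cwnd is
 * scaled back to tcp_cwnd_ssthresh +
 * tcps_dupack_fast_retransmit * mss and TH_REXMIT_NEEDED is set,
 * per Hoe's algorithm.
 *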
14446 */
14447 if (SEQ_LEQ(seg_ack, tcp->tcp_rexmit_max)) {
14448 if (SEQ_GT(seg_ack, tcp->tcp_rexmit_nxt)) {
14449 tcp->tcp_rexmit_nxt = seg_ack;
14450 }
14451 if (seg_ack != tcp->tcp_rexmit_max) {
14452 flags |= TH_XMIT_NEEDED;
14453 }
14454 } else {
14455 tcp->tcp_rexmit = B_FALSE;
14456 tcp->tcp_xmit_zc_clean = B_FALSE;
14457 tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
14458 tcp->tcp_snd_burst = tcp->tcp_localnet ?
14459 TCP_CWND_INFINITE : TCP_CWND_NORMAL;
14460 }
14461 tcp->tcp_ms_we_have_waited = 0;
14462 }
14463 }
14464
14465 BUMP_MIB(&tcps->tcps_mib, tcpInAckSegs);
14466 UPDATE_MIB(&tcps->tcps_mib, tcpInAckBytes, bytes_acked);
14467 tcp->tcp_suna = seg_ack;
14468 if (tcp->tcp_zero_win_probe != 0) {
14469 tcp->tcp_zero_win_probe = 0;
14470 tcp->tcp_timer_backoff = 0;
14471 }
14472
14473 /*
14474 * If tcp_xmit_head is NULL, then it must be the FIN being ack'ed.
14475 * Note that it cannot be the SYN being ack'ed. The code flow
14476 * will not reach here.
14477 */
14478 if (mp1 == NULL) {
14479 goto fin_acked;
14480 }
14481
14482 /*
14483 * Update the congestion window.
14484 *
14485 * If TCP is not ECN capable or TCP is ECN capable but the
14486 * congestion experience bit is not set, increase the tcp_cwnd as
14487 * usual.
14488 */
14489 if (!tcp->tcp_ecn_ok || !(flags & TH_ECE)) {
14490 cwnd = tcp->tcp_cwnd;
14491 add = mss;
14492
14493 if (cwnd >= tcp->tcp_cwnd_ssthresh) {
14494 /*
14495 * This is to prevent an increase of less than 1 MSS of
14496 * tcp_cwnd. With partial increase, tcp_wput_data()
14497 * may send out tinygrams in order to preserve mblk
14498 * boundaries.
14499 *
14500 * By initializing tcp_cwnd_cnt to new tcp_cwnd and
14501 * decrementing it by 1 MSS for every ACK, tcp_cwnd is
14502 * increased by 1 MSS for every RTT.
14503 */
14504 if (tcp->tcp_cwnd_cnt <= 0) {
14505 tcp->tcp_cwnd_cnt = cwnd + add;
14506 } else {
14507 tcp->tcp_cwnd_cnt -= add;
14508 add = 0;
14509 }
14510 }
14511 tcp->tcp_cwnd = MIN(cwnd + add, tcp->tcp_cwnd_max);
14512 }
14513
14514 /* See if the latest urgent data has been acknowledged */
14515 if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
14516 SEQ_GT(seg_ack, tcp->tcp_urg))
14517 tcp->tcp_valid_bits &= ~TCP_URG_VALID;
14518
14519 /* Can we update the RTT estimates? */
14520 if (tcp->tcp_snd_ts_ok) {
14521 /* Ignore zero timestamp echo-reply. */
14522 if (tcpopt.tcp_opt_ts_ecr != 0) {
14523 tcp_set_rto(tcp, (int32_t)lbolt -
14524 (int32_t)tcpopt.tcp_opt_ts_ecr);
14525 }
14526
14527 /* If needed, restart the timer. */
14528 if (tcp->tcp_set_timer == 1) {
14529 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14530 tcp->tcp_set_timer = 0;
14531 }
14532 /*
14533 * Update tcp_csuna in case the other side stops sending
14534 * us timestamps.
14535 */
14536 tcp->tcp_csuna = tcp->tcp_snxt;
14537 } else if (SEQ_GT(seg_ack, tcp->tcp_csuna)) {
14538 /*
14539 * An ACK sequence we haven't seen before, so get the RTT
14540 * and update the RTO. But first check if the timestamp is
14541 * valid to use.
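 */

/*
 * Illustrative sketch (not part of the original source): the
 * counter-based congestion avoidance increment described above.
 * Below ssthresh the window grows by a full mss per ACK (slow start);
 * above it, cwnd_cnt spreads one mss of growth over roughly a
 * window's worth of ACKs, i.e. one mss per RTT.  Names are
 * hypothetical.
 */
static void
cwnd_on_ack(uint32_t *cwnd, int32_t *cwnd_cnt, uint32_t ssthresh,
    uint32_t mss, uint32_t cwnd_max)
{
	uint32_t add = mss;

	if (*cwnd >= ssthresh) {
		if (*cwnd_cnt <= 0) {
			*cwnd_cnt = *cwnd + add;	/* re-arm the counter */
		} else {
			*cwnd_cnt -= add;		/* not yet: no growth */
			add = 0;
		}
	}
	*cwnd += add;
	if (*cwnd > cwnd_max)
		*cwnd = cwnd_max;
}

/*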
14542 */
14543 if ((mp1->b_next != NULL) &&
14544 SEQ_GT(seg_ack, (uint32_t)(uintptr_t)(mp1->b_next)))
14545 tcp_set_rto(tcp, (int32_t)lbolt -
14546 (int32_t)(intptr_t)mp1->b_prev);
14547 else
14548 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14549
14550 /* Remember the last sequence to be ACKed */
14551 tcp->tcp_csuna = seg_ack;
14552 if (tcp->tcp_set_timer == 1) {
14553 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
14554 tcp->tcp_set_timer = 0;
14555 }
14556 } else {
14557 BUMP_MIB(&tcps->tcps_mib, tcpRttNoUpdate);
14558 }
14559
14560 /* Eat acknowledged bytes off the xmit queue. */
14561 for (;;) {
14562 mblk_t *mp2;
14563 uchar_t *wptr;
14564
14565 wptr = mp1->b_wptr;
14566 ASSERT((uintptr_t)(wptr - mp1->b_rptr) <= (uintptr_t)INT_MAX);
14567 bytes_acked -= (int)(wptr - mp1->b_rptr);
14568 if (bytes_acked < 0) {
14569 mp1->b_rptr = wptr + bytes_acked;
14570 /*
14571 * Set a new timestamp if all the bytes timed by the
14572 * old timestamp have been ack'ed.
14573 */
14574 if (SEQ_GT(seg_ack,
14575 (uint32_t)(uintptr_t)(mp1->b_next))) {
14576 mp1->b_prev = (mblk_t *)(uintptr_t)lbolt;
14577 mp1->b_next = NULL;
14578 }
14579 break;
14580 }
14581 mp1->b_next = NULL;
14582 mp1->b_prev = NULL;
14583 mp2 = mp1;
14584 mp1 = mp1->b_cont;
14585
14586 /*
14587 * This notification is required for some zero-copy
14588 * clients to maintain a copy semantic. After the data
14589 * is ack'ed, client is safe to modify or reuse the buffer.
14590 */
14591 if (tcp->tcp_snd_zcopy_aware &&
14592 (mp2->b_datap->db_struioflag & STRUIO_ZCNOTIFY))
14593 tcp_zcopy_notify(tcp);
14594 freeb(mp2);
14595 if (bytes_acked == 0) {
14596 if (mp1 == NULL) {
14597 /* Everything is ack'ed, clear the tail. */
14598 tcp->tcp_xmit_tail = NULL;
14599 /*
14600 * Cancel the timer unless we are still
14601 * waiting for an ACK for the FIN packet.
14602 */
14603 if (tcp->tcp_timer_tid != 0 &&
14604 tcp->tcp_snxt == tcp->tcp_suna) {
14605 (void) TCP_TIMER_CANCEL(tcp,
14606 tcp->tcp_timer_tid);
14607 tcp->tcp_timer_tid = 0;
14608 }
14609 goto pre_swnd_update;
14610 }
14611 if (mp2 != tcp->tcp_xmit_tail)
14612 break;
14613 tcp->tcp_xmit_tail = mp1;
14614 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <=
14615 (uintptr_t)INT_MAX);
14616 tcp->tcp_xmit_tail_unsent = (int)(mp1->b_wptr -
14617 mp1->b_rptr);
14618 break;
14619 }
14620 if (mp1 == NULL) {
14621 /*
14622 * More was acked but there is nothing more
14623 * outstanding. This means that the FIN was
14624 * just acked or that we're talking to a clown.
14625 */
14626 fin_acked:
14627 ASSERT(tcp->tcp_fin_sent);
14628 tcp->tcp_xmit_tail = NULL;
14629 if (tcp->tcp_fin_sent) {
14630 /* FIN was acked - making progress */
14631 if (tcp->tcp_ipversion == IPV6_VERSION &&
14632 !tcp->tcp_fin_acked)
14633 tcp->tcp_ip_forward_progress = B_TRUE;
14634 tcp->tcp_fin_acked = B_TRUE;
14635 if (tcp->tcp_linger_tid != 0 &&
14636 TCP_TIMER_CANCEL(tcp,
14637 tcp->tcp_linger_tid) >= 0) {
14638 tcp_stop_lingering(tcp);
14639 freemsg(mp);
14640 mp = NULL;
14641 }
14642 } else {
14643 /*
14644 * We should never get here because
14645 * we have already checked that the
14646 * number of bytes ack'ed should be
14647 * smaller than or equal to what we
14648 * have sent so far (it is the
14649 * acceptability check of the ACK).
14650 * We can only get here if the send
14651 * queue is corrupted.
14652 *
14653 * Terminate the connection and
14654 * panic the system. It is better
14655 * for us to panic instead of
14656 * continuing to avoid other disaster.
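 */

/*
 * Illustrative sketch (not part of the original source): the RTT
 * sampling scheme used above.  Each transmit-queue block remembers
 * the tick count at which it was sent and the sequence number that
 * times it; an ACK covering that sequence yields one RTT sample.
 * This is a plain restatement with hypothetical names - the real
 * code hides both values in the otherwise unused b_prev/b_next
 * pointers of the mblk.
 */
typedef struct {
	uint32_t timed_seq;	/* seq that must be ACKed ... */
	long sent_tick;		/* ... to time this sample */
} rtt_probe_t;

/* Returns the RTT sample in ticks, or -1 if this ACK cannot time it. */
static long
rtt_sample(const rtt_probe_t *p, uint32_t seg_ack, long now_tick)
{
	/* SEQ_GT equivalent for the wrapping 32-bit sequence space */
	if ((int32_t)(seg_ack - p->timed_seq) > 0)
		return (now_tick - p->sent_tick);
	return (-1);
}

/*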
14657 */
14658 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
14659 tcp->tcp_rnxt, TH_RST|TH_ACK);
14660 panic("Memory corruption "
14661 "detected for connection %s.",
14662 tcp_display(tcp, NULL,
14663 DISP_ADDR_AND_PORT));
14664 /*NOTREACHED*/
14665 }
14666 goto pre_swnd_update;
14667 }
14668 ASSERT(mp2 != tcp->tcp_xmit_tail);
14669 }
14670 if (tcp->tcp_unsent) {
14671 flags |= TH_XMIT_NEEDED;
14672 }
14673 pre_swnd_update:
14674 tcp->tcp_xmit_head = mp1;
14675 swnd_update:
14676 /*
14677 * The following check is different from most other implementations.
14678 * For bi-directional transfer, when segments are dropped, the
14679 * "normal" check will not accept a window update in those
14680 * retransmitted segments. Failing to do that, TCP may send out
14681 * segments which are outside receiver's window. As TCP accepts
14682 * the ack in those retransmitted segments, if the window update in
14683 * the same segment is not accepted, TCP will incorrectly calculate
14684 * that it can send more segments. This can create a deadlock
14685 * with the receiver if its window becomes zero.
14686 */
14687 if (SEQ_LT(tcp->tcp_swl2, seg_ack) ||
14688 SEQ_LT(tcp->tcp_swl1, seg_seq) ||
14689 (tcp->tcp_swl1 == seg_seq && new_swnd > tcp->tcp_swnd)) {
14690 /*
14691 * The criteria for update are:
14692 *
14693 * 1. the segment acknowledges some data. Or
14694 * 2. the segment is new, i.e. it has a higher seq num. Or
14695 * 3. the segment is not old and the advertised window is
14696 * larger than the previous advertised window.
14697 */
14698 if (tcp->tcp_unsent && new_swnd > tcp->tcp_swnd)
14699 flags |= TH_XMIT_NEEDED;
14700 tcp->tcp_swnd = new_swnd;
14701 if (new_swnd > tcp->tcp_max_swnd)
14702 tcp->tcp_max_swnd = new_swnd;
14703 tcp->tcp_swl1 = seg_seq;
14704 tcp->tcp_swl2 = seg_ack;
14705 }
14706 est:
14707 if (tcp->tcp_state > TCPS_ESTABLISHED) {
14708
14709 switch (tcp->tcp_state) {
14710 case TCPS_FIN_WAIT_1:
14711 if (tcp->tcp_fin_acked) {
14712 tcp->tcp_state = TCPS_FIN_WAIT_2;
14713 /*
14714 * We implement the non-standard BSD/SunOS
14715 * FIN_WAIT_2 flushing algorithm.
14716 * If there is no user attached to this
14717 * TCP endpoint, then this TCP struct
14718 * could hang around forever in FIN_WAIT_2
14719 * state if the peer forgets to send us
14720 * a FIN. To prevent this, we wait only
14721 * 2*MSL (a convenient time value) for
14722 * the FIN to arrive. If it doesn't show up,
14723 * we flush the TCP endpoint. This algorithm,
14724 * though a violation of RFC-793, has worked
14725 * for over 10 years in BSD systems.
14726 * Note: SunOS 4.x waits 675 seconds before
14727 * flushing the FIN_WAIT_2 connection.
14728 */
14729 TCP_TIMER_RESTART(tcp,
14730 tcps->tcps_fin_wait_2_flush_interval);
14731 }
14732 break;
14733 case TCPS_FIN_WAIT_2:
14734 break; /* Shutdown hook? */
14735 case TCPS_LAST_ACK:
14736 freemsg(mp);
14737 if (tcp->tcp_fin_acked) {
14738 (void) tcp_clean_death(tcp, 0, 19);
14739 return;
14740 }
14741 goto xmit_check;
14742 case TCPS_CLOSING:
14743 if (tcp->tcp_fin_acked) {
14744 tcp->tcp_state = TCPS_TIME_WAIT;
14745 /*
14746 * Unconditionally clear the exclusive binding
14747 * bit so this TIME-WAIT connection won't
14748 * interfere with new ones.
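 */

/*
 * Illustrative sketch (not part of the original source): the swnd
 * update test used above, with SEQ_LT expanded for the wrapping
 * 32-bit arithmetic.  swl1/swl2 record the seg_seq/seg_ack of the
 * last accepted update; names are hypothetical.
 */
#define	SEQ_LT_32(a, b)	((int32_t)((a) - (b)) < 0)

static int
swnd_update_ok(uint32_t swl1, uint32_t swl2, uint32_t seg_seq,
    uint32_t seg_ack, uint32_t old_swnd, uint32_t new_swnd)
{
	return (SEQ_LT_32(swl2, seg_ack) ||		/* acks new data */
	    SEQ_LT_32(swl1, seg_seq) ||			/* newer segment */
	    (swl1 == seg_seq && new_swnd > old_swnd));	/* bigger window */
}

/*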
14749 */ 14750 tcp->tcp_exclbind = 0; 14751 if (!TCP_IS_DETACHED(tcp)) { 14752 TCP_TIMER_RESTART(tcp, 14753 tcps->tcps_time_wait_interval); 14754 } else { 14755 tcp_time_wait_append(tcp); 14756 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14757 } 14758 } 14759 /*FALLTHRU*/ 14760 case TCPS_CLOSE_WAIT: 14761 freemsg(mp); 14762 goto xmit_check; 14763 default: 14764 ASSERT(tcp->tcp_state != TCPS_TIME_WAIT); 14765 break; 14766 } 14767 } 14768 if (flags & TH_FIN) { 14769 /* Make sure we ack the fin */ 14770 flags |= TH_ACK_NEEDED; 14771 if (!tcp->tcp_fin_rcvd) { 14772 tcp->tcp_fin_rcvd = B_TRUE; 14773 tcp->tcp_rnxt++; 14774 tcph = tcp->tcp_tcph; 14775 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack); 14776 14777 /* 14778 * Generate the ordrel_ind at the end unless we 14779 * are an eager guy. 14780 * In the eager case tcp_rsrv will do this when run 14781 * after tcp_accept is done. 14782 */ 14783 if (tcp->tcp_listener == NULL && 14784 !TCP_IS_DETACHED(tcp) && (!tcp->tcp_hard_binding)) 14785 flags |= TH_ORDREL_NEEDED; 14786 switch (tcp->tcp_state) { 14787 case TCPS_SYN_RCVD: 14788 case TCPS_ESTABLISHED: 14789 tcp->tcp_state = TCPS_CLOSE_WAIT; 14790 /* Keepalive? */ 14791 break; 14792 case TCPS_FIN_WAIT_1: 14793 if (!tcp->tcp_fin_acked) { 14794 tcp->tcp_state = TCPS_CLOSING; 14795 break; 14796 } 14797 /* FALLTHRU */ 14798 case TCPS_FIN_WAIT_2: 14799 tcp->tcp_state = TCPS_TIME_WAIT; 14800 /* 14801 * Unconditionally clear the exclusive binding 14802 * bit so this TIME-WAIT connection won't 14803 * interfere with new ones. 14804 */ 14805 tcp->tcp_exclbind = 0; 14806 if (!TCP_IS_DETACHED(tcp)) { 14807 TCP_TIMER_RESTART(tcp, 14808 tcps->tcps_time_wait_interval); 14809 } else { 14810 tcp_time_wait_append(tcp); 14811 TCP_DBGSTAT(tcps, tcp_rput_time_wait); 14812 } 14813 if (seg_len) { 14814 /* 14815 * implies data piggybacked on FIN. 14816 * break to handle data. 14817 */ 14818 break; 14819 } 14820 freemsg(mp); 14821 goto ack_check; 14822 } 14823 } 14824 } 14825 if (mp == NULL) 14826 goto xmit_check; 14827 if (seg_len == 0) { 14828 freemsg(mp); 14829 goto xmit_check; 14830 } 14831 if (mp->b_rptr == mp->b_wptr) { 14832 /* 14833 * The header has been consumed, so we remove the 14834 * zero-length mblk here. 14835 */ 14836 mp1 = mp; 14837 mp = mp->b_cont; 14838 freeb(mp1); 14839 } 14840 tcph = tcp->tcp_tcph; 14841 tcp->tcp_rack_cnt++; 14842 { 14843 uint32_t cur_max; 14844 14845 cur_max = tcp->tcp_rack_cur_max; 14846 if (tcp->tcp_rack_cnt >= cur_max) { 14847 /* 14848 * We have more unacked data than we should - send 14849 * an ACK now. 14850 */ 14851 flags |= TH_ACK_NEEDED; 14852 cur_max++; 14853 if (cur_max > tcp->tcp_rack_abs_max) 14854 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 14855 else 14856 tcp->tcp_rack_cur_max = cur_max; 14857 } else if (TCP_IS_DETACHED(tcp)) { 14858 /* We don't have an ACK timer for detached TCP. */ 14859 flags |= TH_ACK_NEEDED; 14860 } else if (seg_len < mss) { 14861 /* 14862 * If we get a segment that is less than an mss, and we 14863 * already have unacknowledged data, and the amount 14864 * unacknowledged is not a multiple of mss, then we 14865 * better generate an ACK now. Otherwise, this may be 14866 * the tail piece of a transaction, and we would rather 14867 * wait for the response. 
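 */

/*
 * Illustrative sketch (not part of the original source): the ACK
 * policy for a short segment, as described above.  If the unACKed
 * receive data is a non-zero amount that is not a multiple of the
 * mss, ACK at once; otherwise it may be the tail of a transaction,
 * so arm (or keep) the delayed ACK timer instead.  Names are
 * hypothetical.
 */
static int	/* 1 = ACK now, 0 = arm the delayed-ACK timer */
short_segment_ack_now(uint32_t rnxt, uint32_t rack, uint32_t mss)
{
	uint32_t udif = rnxt - rack;	/* bytes received but not ACKed */

	return (udif != 0 && (udif % mss) != 0);
}

/*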
14868 */
14869 uint32_t udif;
14870 ASSERT((uintptr_t)(tcp->tcp_rnxt - tcp->tcp_rack) <=
14871 (uintptr_t)INT_MAX);
14872 udif = (int)(tcp->tcp_rnxt - tcp->tcp_rack);
14873 if (udif && (udif % mss))
14874 flags |= TH_ACK_NEEDED;
14875 else
14876 flags |= TH_ACK_TIMER_NEEDED;
14877 } else {
14878 /* Start delayed ack timer */
14879 flags |= TH_ACK_TIMER_NEEDED;
14880 }
14881 }
14882 tcp->tcp_rnxt += seg_len;
14883 U32_TO_ABE32(tcp->tcp_rnxt, tcph->th_ack);
14884
14885 /* Update SACK list */
14886 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
14887 tcp_sack_remove(tcp->tcp_sack_list, tcp->tcp_rnxt,
14888 &(tcp->tcp_num_sack_blk));
14889 }
14890
14891 if (tcp->tcp_urp_mp) {
14892 tcp->tcp_urp_mp->b_cont = mp;
14893 mp = tcp->tcp_urp_mp;
14894 tcp->tcp_urp_mp = NULL;
14895 /* Ready for a new signal. */
14896 tcp->tcp_urp_last_valid = B_FALSE;
14897 #ifdef DEBUG
14898 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14899 "tcp_rput: sending exdata_ind %s",
14900 tcp_display(tcp, NULL, DISP_PORT_ONLY));
14901 #endif /* DEBUG */
14902 }
14903
14904 /*
14905 * Check for ancillary data changes compared to last segment.
14906 */
14907 if (tcp->tcp_ipv6_recvancillary != 0) {
14908 mp = tcp_rput_add_ancillary(tcp, mp, &ipp);
14909 if (mp == NULL)
14910 return;
14911 }
14912
14913 if (tcp->tcp_listener || tcp->tcp_hard_binding) {
14914 /*
14915 * Side queue inbound data until the accept happens.
14916 * tcp_accept/tcp_rput drains this when the accept happens.
14917 * M_DATA is queued on b_cont. Otherwise (T_OPTDATA_IND or
14918 * T_EXDATA_IND) it is queued on b_next.
14919 * XXX Make urgent data use this. Requires:
14920 * Removing tcp_listener check for TH_URG
14921 * Making M_PCPROTO and MARK messages skip the eager case
14922 */
14923
14924 if (tcp->tcp_kssl_pending) {
14925 tcp_kssl_input(tcp, mp);
14926 } else {
14927 tcp_rcv_enqueue(tcp, mp, seg_len);
14928 }
14929 } else {
14930 if (mp->b_datap->db_type != M_DATA ||
14931 (flags & TH_MARKNEXT_NEEDED)) {
14932 if (tcp->tcp_rcv_list != NULL) {
14933 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp);
14934 }
14935 ASSERT(tcp->tcp_rcv_list == NULL ||
14936 tcp->tcp_fused_sigurg);
14937 if (flags & TH_MARKNEXT_NEEDED) {
14938 #ifdef DEBUG
14939 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE,
14940 "tcp_rput: sending MSGMARKNEXT %s",
14941 tcp_display(tcp, NULL,
14942 DISP_PORT_ONLY));
14943 #endif /* DEBUG */
14944 mp->b_flag |= MSGMARKNEXT;
14945 flags &= ~TH_MARKNEXT_NEEDED;
14946 }
14947
14948 /* Does this need SSL processing first? */
14949 if ((tcp->tcp_kssl_ctx != NULL) &&
14950 (DB_TYPE(mp) == M_DATA)) {
14951 tcp_kssl_input(tcp, mp);
14952 } else {
14953 putnext(tcp->tcp_rq, mp);
14954 if (!canputnext(tcp->tcp_rq))
14955 tcp->tcp_rwnd -= seg_len;
14956 }
14957 } else if ((flags & (TH_PUSH|TH_FIN)) ||
14958 tcp->tcp_rcv_cnt + seg_len >= tcp->tcp_rq->q_hiwat >> 3) {
14959 if (tcp->tcp_rcv_list != NULL) {
14960 /*
14961 * Enqueue the new segment first and then
14962 * call tcp_rcv_drain() to send all data
14963 * up. The other way to do this is to
14964 * send all queued data up and then call
14965 * putnext() to send the new segment up.
14966 * This way, we could remove the else part
14967 * later on.
14968 *
14969 * We don't do this to avoid one more call to
14970 * canputnext() as tcp_rcv_drain() needs to
14971 * call canputnext().
14972 */
14973 tcp_rcv_enqueue(tcp, mp, seg_len);
14974 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp);
14975 } else {
14976 /* Does this need SSL processing first?
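 */

/*
 * Illustrative sketch (not part of the original source): the
 * enqueue-or-deliver decision used above, with the STREAMS details
 * stripped away.  Received data is held back until a PUSH or FIN
 * arrives or until an eighth of the receive high-water mark has
 * been buffered.  Names are hypothetical.
 */
static int	/* 1 = deliver queued data upstream now, 0 = keep queuing */
rcv_should_drain(int th_push_or_fin, uint32_t rcv_cnt, uint32_t seg_len,
    uint32_t q_hiwat)
{
	return (th_push_or_fin || rcv_cnt + seg_len >= (q_hiwat >> 3));
}

/*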
*/ 14977 if ((tcp->tcp_kssl_ctx != NULL) && 14978 (DB_TYPE(mp) == M_DATA)) { 14979 tcp_kssl_input(tcp, mp); 14980 } else { 14981 putnext(tcp->tcp_rq, mp); 14982 if (!canputnext(tcp->tcp_rq)) 14983 tcp->tcp_rwnd -= seg_len; 14984 } 14985 } 14986 } else { 14987 /* 14988 * Enqueue all packets when processing an mblk 14989 * from the co queue and also enqueue normal packets. 14990 */ 14991 tcp_rcv_enqueue(tcp, mp, seg_len); 14992 } 14993 /* 14994 * Make sure the timer is running if we have data waiting 14995 * for a push bit. This provides resiliency against 14996 * implementations that do not correctly generate push bits. 14997 */ 14998 if (tcp->tcp_rcv_list != NULL && tcp->tcp_push_tid == 0) { 14999 /* 15000 * The connection may be closed at this point, so don't 15001 * do anything for a detached tcp. 15002 */ 15003 if (!TCP_IS_DETACHED(tcp)) 15004 tcp->tcp_push_tid = TCP_TIMER(tcp, 15005 tcp_push_timer, 15006 MSEC_TO_TICK(tcps->tcps_push_timer_interval)); 15007 } 15008 } 15009 xmit_check: 15010 /* Is there anything left to do? */ 15011 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15012 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_ACK_NEEDED| 15013 TH_NEED_SACK_REXMIT|TH_LIMIT_XMIT|TH_ACK_TIMER_NEEDED| 15014 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15015 goto done; 15016 15017 /* Any transmit work to do and a non-zero window? */ 15018 if ((flags & (TH_REXMIT_NEEDED|TH_XMIT_NEEDED|TH_NEED_SACK_REXMIT| 15019 TH_LIMIT_XMIT)) && tcp->tcp_swnd != 0) { 15020 if (flags & TH_REXMIT_NEEDED) { 15021 uint32_t snd_size = tcp->tcp_snxt - tcp->tcp_suna; 15022 15023 BUMP_MIB(&tcps->tcps_mib, tcpOutFastRetrans); 15024 if (snd_size > mss) 15025 snd_size = mss; 15026 if (snd_size > tcp->tcp_swnd) 15027 snd_size = tcp->tcp_swnd; 15028 mp1 = tcp_xmit_mp(tcp, tcp->tcp_xmit_head, snd_size, 15029 NULL, NULL, tcp->tcp_suna, B_TRUE, &snd_size, 15030 B_TRUE); 15031 15032 if (mp1 != NULL) { 15033 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15034 tcp->tcp_csuna = tcp->tcp_snxt; 15035 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs); 15036 UPDATE_MIB(&tcps->tcps_mib, 15037 tcpRetransBytes, snd_size); 15038 TCP_RECORD_TRACE(tcp, mp1, 15039 TCP_TRACE_SEND_PKT); 15040 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15041 } 15042 } 15043 if (flags & TH_NEED_SACK_REXMIT) { 15044 tcp_sack_rxmit(tcp, &flags); 15045 } 15046 /* 15047 * For TH_LIMIT_XMIT, tcp_wput_data() is called to send 15048 * out new segment. Note that tcp_rexmit should not be 15049 * set, otherwise TH_LIMIT_XMIT should not be set. 15050 */ 15051 if (flags & (TH_XMIT_NEEDED|TH_LIMIT_XMIT)) { 15052 if (!tcp->tcp_rexmit) { 15053 tcp_wput_data(tcp, NULL, B_FALSE); 15054 } else { 15055 tcp_ss_rexmit(tcp); 15056 } 15057 } 15058 /* 15059 * Adjust tcp_cwnd back to normal value after sending 15060 * new data segments. 15061 */ 15062 if (flags & TH_LIMIT_XMIT) { 15063 tcp->tcp_cwnd -= mss << (tcp->tcp_dupack_cnt - 1); 15064 /* 15065 * This will restart the timer. Restarting the 15066 * timer is used to avoid a timeout before the 15067 * limited transmitted segment's ACK gets back. 15068 */ 15069 if (tcp->tcp_xmit_head != NULL) 15070 tcp->tcp_xmit_head->b_prev = (mblk_t *)lbolt; 15071 } 15072 15073 /* Anything more to do? 
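 */

/*
 * Illustrative sketch (not part of the original source, and the
 * matching inflation step is outside this excerpt): the limited
 * transmit bookkeeping deflated above.  The assumption here is that
 * each of the first duplicate ACKs temporarily inflated cwnd by
 * mss << (dupack_cnt - 1) so one new segment could probe the
 * network; once it has been sent, the allowance is taken back.
 * Names are hypothetical.
 */
static void
limited_xmit_deflate(uint32_t *cwnd, uint32_t mss, int dupack_cnt)
{
	/* undo the temporary inflation granted for this dup ACK */
	*cwnd -= mss << (dupack_cnt - 1);
}

/*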
*/ 15074 if ((flags & (TH_ACK_NEEDED|TH_ACK_TIMER_NEEDED| 15075 TH_ORDREL_NEEDED|TH_SEND_URP_MARK)) == 0) 15076 goto done; 15077 } 15078 ack_check: 15079 if (flags & TH_SEND_URP_MARK) { 15080 ASSERT(tcp->tcp_urp_mark_mp); 15081 /* 15082 * Send up any queued data and then send the mark message 15083 */ 15084 if (tcp->tcp_rcv_list != NULL) { 15085 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15086 } 15087 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15088 15089 mp1 = tcp->tcp_urp_mark_mp; 15090 tcp->tcp_urp_mark_mp = NULL; 15091 #ifdef DEBUG 15092 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 15093 "tcp_rput: sending zero-length %s %s", 15094 ((mp1->b_flag & MSGMARKNEXT) ? "MSGMARKNEXT" : 15095 "MSGNOTMARKNEXT"), 15096 tcp_display(tcp, NULL, DISP_PORT_ONLY)); 15097 #endif /* DEBUG */ 15098 putnext(tcp->tcp_rq, mp1); 15099 flags &= ~TH_SEND_URP_MARK; 15100 } 15101 if (flags & TH_ACK_NEEDED) { 15102 /* 15103 * Time to send an ack for some reason. 15104 */ 15105 mp1 = tcp_ack_mp(tcp); 15106 15107 if (mp1 != NULL) { 15108 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 15109 tcp_send_data(tcp, tcp->tcp_wq, mp1); 15110 BUMP_LOCAL(tcp->tcp_obsegs); 15111 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 15112 } 15113 if (tcp->tcp_ack_tid != 0) { 15114 (void) TCP_TIMER_CANCEL(tcp, tcp->tcp_ack_tid); 15115 tcp->tcp_ack_tid = 0; 15116 } 15117 } 15118 if (flags & TH_ACK_TIMER_NEEDED) { 15119 /* 15120 * Arrange for deferred ACK or push wait timeout. 15121 * Start timer if it is not already running. 15122 */ 15123 if (tcp->tcp_ack_tid == 0) { 15124 tcp->tcp_ack_tid = TCP_TIMER(tcp, tcp_ack_timer, 15125 MSEC_TO_TICK(tcp->tcp_localnet ? 15126 (clock_t)tcps->tcps_local_dack_interval : 15127 (clock_t)tcps->tcps_deferred_ack_interval)); 15128 } 15129 } 15130 if (flags & TH_ORDREL_NEEDED) { 15131 /* 15132 * Send up the ordrel_ind unless we are an eager guy. 15133 * In the eager case tcp_rsrv will do this when run 15134 * after tcp_accept is done. 15135 */ 15136 ASSERT(tcp->tcp_listener == NULL); 15137 if (tcp->tcp_rcv_list != NULL) { 15138 /* 15139 * Push any mblk(s) enqueued from co processing. 15140 */ 15141 flags |= tcp_rcv_drain(tcp->tcp_rq, tcp); 15142 } 15143 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg); 15144 if ((mp1 = mi_tpi_ordrel_ind()) != NULL) { 15145 tcp->tcp_ordrel_done = B_TRUE; 15146 putnext(tcp->tcp_rq, mp1); 15147 if (tcp->tcp_deferred_clean_death) { 15148 /* 15149 * tcp_clean_death was deferred 15150 * for T_ORDREL_IND - do it now 15151 */ 15152 (void) tcp_clean_death(tcp, 15153 tcp->tcp_client_errno, 20); 15154 tcp->tcp_deferred_clean_death = B_FALSE; 15155 } 15156 } else { 15157 /* 15158 * Run the orderly release in the 15159 * service routine. 15160 */ 15161 qenable(tcp->tcp_rq); 15162 /* 15163 * Caveat(XXX): The machine may be so 15164 * overloaded that tcp_rsrv() is not scheduled 15165 * until after the endpoint has transitioned 15166 * to TCPS_TIME_WAIT 15167 * and tcp_time_wait_interval expires. Then 15168 * tcp_timer() will blow away state in tcp_t 15169 * and T_ORDREL_IND will never be delivered 15170 * upstream. Unlikely but potentially 15171 * a problem. 15172 */ 15173 } 15174 } 15175 done: 15176 ASSERT(!(flags & TH_MARKNEXT_NEEDED)); 15177 } 15178 15179 /* 15180 * This function does PAWS protection check. Returns B_TRUE if the 15181 * segment passes the PAWS test, else returns B_FALSE. 
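 */

/*
 * Illustrative sketch (not part of the original source): the core of
 * the PAWS test that the function below implements, with the option
 * parsing removed.  The TSTMP_LT-style wrapping compares are written
 * out; names are hypothetical, and paws_idle_ticks stands in for the
 * PAWS_TIMEOUT constant used by the real code.
 */
static int	/* 1 = segment acceptable, 0 = PAWS reject */
paws_ok(uint32_t ts_val, uint32_t *ts_recent, int is_rst,
    int64_t now, int64_t last_rcv, int64_t paws_idle_ticks)
{
	/* An RST is accepted regardless of its timestamp. */
	if (!is_rst && (int32_t)(ts_val - *ts_recent) < 0) {
		if (now - last_rcv < paws_idle_ticks)
			return (0);
		/* Idle too long: the cached timestamp is stale. */
		*ts_recent = ts_val;
	}
	return (1);
}

/*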
15182 */
15183 boolean_t
15184 tcp_paws_check(tcp_t *tcp, tcph_t *tcph, tcp_opt_t *tcpoptp)
15185 {
15186 uint8_t flags;
15187 int options;
15188 uint8_t *up;
15189
15190 flags = (unsigned int)tcph->th_flags[0] & 0xFF;
15191 /*
15192 * If timestamp option is aligned nicely, get values inline,
15193 * otherwise call general routine to parse. Only do that
15194 * if timestamp is the only option.
15195 */
15196 if (TCP_HDR_LENGTH(tcph) == (uint32_t)TCP_MIN_HEADER_LENGTH +
15197 TCPOPT_REAL_TS_LEN &&
15198 OK_32PTR((up = ((uint8_t *)tcph) +
15199 TCP_MIN_HEADER_LENGTH)) &&
15200 *(uint32_t *)up == TCPOPT_NOP_NOP_TSTAMP) {
15201 tcpoptp->tcp_opt_ts_val = ABE32_TO_U32((up+4));
15202 tcpoptp->tcp_opt_ts_ecr = ABE32_TO_U32((up+8));
15203
15204 options = TCP_OPT_TSTAMP_PRESENT;
15205 } else {
15206 if (tcp->tcp_snd_sack_ok) {
15207 tcpoptp->tcp = tcp;
15208 } else {
15209 tcpoptp->tcp = NULL;
15210 }
15211 options = tcp_parse_options(tcph, tcpoptp);
15212 }
15213
15214 if (options & TCP_OPT_TSTAMP_PRESENT) {
15215 /*
15216 * Do PAWS per RFC 1323 section 4.2. Accept RST
15217 * regardless of the timestamp, page 18 RFC 1323.bis.
15218 */
15219 if ((flags & TH_RST) == 0 &&
15220 TSTMP_LT(tcpoptp->tcp_opt_ts_val,
15221 tcp->tcp_ts_recent)) {
15222 if (TSTMP_LT(lbolt64, tcp->tcp_last_rcv_lbolt +
15223 PAWS_TIMEOUT)) {
15224 /* This segment is not acceptable. */
15225 return (B_FALSE);
15226 } else {
15227 /*
15228 * Connection has been idle for
15229 * too long. Reset the timestamp
15230 * and assume the segment is valid.
15231 */
15232 tcp->tcp_ts_recent =
15233 tcpoptp->tcp_opt_ts_val;
15234 }
15235 }
15236 } else {
15237 /*
15238 * If we don't get a timestamp on every packet, we
15239 * figure we can't really trust 'em, so we stop sending
15240 * and parsing them.
15241 */
15242 tcp->tcp_snd_ts_ok = B_FALSE;
15243
15244 tcp->tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15245 tcp->tcp_tcp_hdr_len -= TCPOPT_REAL_TS_LEN;
15246 tcp->tcp_tcph->th_offset_and_rsrvd[0] -= (3 << 4);
15247 /*
15248 * Adjust the tcp_mss accordingly. We also need to
15249 * adjust tcp_cwnd here in accordance with the new mss.
15250 * But we avoid doing a slow start here so as not
15251 * to lose on the transfer rate built up so far.
15252 */
15253 tcp_mss_set(tcp, tcp->tcp_mss + TCPOPT_REAL_TS_LEN, B_FALSE);
15254 if (tcp->tcp_snd_sack_ok) {
15255 ASSERT(tcp->tcp_sack_info != NULL);
15256 tcp->tcp_max_sack_blk = 4;
15257 }
15258 }
15259 return (B_TRUE);
15260 }
15261
15262 /*
15263 * Attach ancillary data to a received TCP segment for the
15264 * ancillary pieces requested by the application that are
15265 * different than they were in the previous data segment.
15266 *
15267 * Save the "current" values once memory allocation is ok so that
15268 * when memory allocation fails we can just wait for the next data segment.
15269 */
15270 static mblk_t *
15271 tcp_rput_add_ancillary(tcp_t *tcp, mblk_t *mp, ip6_pkt_t *ipp)
15272 {
15273 struct T_optdata_ind *todi;
15274 int optlen;
15275 uchar_t *optptr;
15276 struct T_opthdr *toh;
15277 uint_t addflag; /* Which pieces to add */
15278 mblk_t *mp1;
15279
15280 optlen = 0;
15281 addflag = 0;
15282 /* If app asked for pktinfo and the index has changed ... */
15283 if ((ipp->ipp_fields & IPPF_IFINDEX) &&
15284 ipp->ipp_ifindex != tcp->tcp_recvifindex &&
15285 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO)) {
15286 optlen += sizeof (struct T_opthdr) +
15287 sizeof (struct in6_pktinfo);
15288 addflag |= TCP_IPV6_RECVPKTINFO;
15289 }
15290 /* If app asked for hoplimit and it has changed ...
*/ 15291 if ((ipp->ipp_fields & IPPF_HOPLIMIT) && 15292 ipp->ipp_hoplimit != tcp->tcp_recvhops && 15293 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPLIMIT)) { 15294 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15295 addflag |= TCP_IPV6_RECVHOPLIMIT; 15296 } 15297 /* If app asked for tclass and it has changed ... */ 15298 if ((ipp->ipp_fields & IPPF_TCLASS) && 15299 ipp->ipp_tclass != tcp->tcp_recvtclass && 15300 (tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVTCLASS)) { 15301 optlen += sizeof (struct T_opthdr) + sizeof (uint_t); 15302 addflag |= TCP_IPV6_RECVTCLASS; 15303 } 15304 /* 15305 * If app asked for hopbyhop headers and it has changed ... 15306 * For security labels, note that (1) security labels can't change on 15307 * a connected socket at all, (2) we're connected to at most one peer, 15308 * (3) if anything changes, then it must be some other extra option. 15309 */ 15310 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVHOPOPTS) && 15311 ip_cmpbuf(tcp->tcp_hopopts, tcp->tcp_hopoptslen, 15312 (ipp->ipp_fields & IPPF_HOPOPTS), 15313 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) { 15314 optlen += sizeof (struct T_opthdr) + ipp->ipp_hopoptslen - 15315 tcp->tcp_label_len; 15316 addflag |= TCP_IPV6_RECVHOPOPTS; 15317 if (!ip_allocbuf((void **)&tcp->tcp_hopopts, 15318 &tcp->tcp_hopoptslen, (ipp->ipp_fields & IPPF_HOPOPTS), 15319 ipp->ipp_hopopts, ipp->ipp_hopoptslen)) 15320 return (mp); 15321 } 15322 /* If app asked for dst headers before routing headers ... */ 15323 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTDSTOPTS) && 15324 ip_cmpbuf(tcp->tcp_rtdstopts, tcp->tcp_rtdstoptslen, 15325 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15326 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) { 15327 optlen += sizeof (struct T_opthdr) + 15328 ipp->ipp_rtdstoptslen; 15329 addflag |= TCP_IPV6_RECVRTDSTOPTS; 15330 if (!ip_allocbuf((void **)&tcp->tcp_rtdstopts, 15331 &tcp->tcp_rtdstoptslen, (ipp->ipp_fields & IPPF_RTDSTOPTS), 15332 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen)) 15333 return (mp); 15334 } 15335 /* If app asked for routing headers and it has changed ... */ 15336 if ((tcp->tcp_ipv6_recvancillary & TCP_IPV6_RECVRTHDR) && 15337 ip_cmpbuf(tcp->tcp_rthdr, tcp->tcp_rthdrlen, 15338 (ipp->ipp_fields & IPPF_RTHDR), 15339 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) { 15340 optlen += sizeof (struct T_opthdr) + ipp->ipp_rthdrlen; 15341 addflag |= TCP_IPV6_RECVRTHDR; 15342 if (!ip_allocbuf((void **)&tcp->tcp_rthdr, 15343 &tcp->tcp_rthdrlen, (ipp->ipp_fields & IPPF_RTHDR), 15344 ipp->ipp_rthdr, ipp->ipp_rthdrlen)) 15345 return (mp); 15346 } 15347 /* If app asked for dest headers and it has changed ... */ 15348 if ((tcp->tcp_ipv6_recvancillary & 15349 (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) && 15350 ip_cmpbuf(tcp->tcp_dstopts, tcp->tcp_dstoptslen, 15351 (ipp->ipp_fields & IPPF_DSTOPTS), 15352 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) { 15353 optlen += sizeof (struct T_opthdr) + ipp->ipp_dstoptslen; 15354 addflag |= TCP_IPV6_RECVDSTOPTS; 15355 if (!ip_allocbuf((void **)&tcp->tcp_dstopts, 15356 &tcp->tcp_dstoptslen, (ipp->ipp_fields & IPPF_DSTOPTS), 15357 ipp->ipp_dstopts, ipp->ipp_dstoptslen)) 15358 return (mp); 15359 } 15360 15361 if (optlen == 0) { 15362 /* Nothing to add */ 15363 return (mp); 15364 } 15365 mp1 = allocb(sizeof (struct T_optdata_ind) + optlen, BPRI_MED); 15366 if (mp1 == NULL) { 15367 /* 15368 * Defer sending ancillary data until the next TCP segment 15369 * arrives. 
15370 */ 15371 return (mp); 15372 } 15373 mp1->b_cont = mp; 15374 mp = mp1; 15375 mp->b_wptr += sizeof (*todi) + optlen; 15376 mp->b_datap->db_type = M_PROTO; 15377 todi = (struct T_optdata_ind *)mp->b_rptr; 15378 todi->PRIM_type = T_OPTDATA_IND; 15379 todi->DATA_flag = 1; /* MORE data */ 15380 todi->OPT_length = optlen; 15381 todi->OPT_offset = sizeof (*todi); 15382 optptr = (uchar_t *)&todi[1]; 15383 /* 15384 * If app asked for pktinfo and the index has changed ... 15385 * Note that the local address never changes for the connection. 15386 */ 15387 if (addflag & TCP_IPV6_RECVPKTINFO) { 15388 struct in6_pktinfo *pkti; 15389 15390 toh = (struct T_opthdr *)optptr; 15391 toh->level = IPPROTO_IPV6; 15392 toh->name = IPV6_PKTINFO; 15393 toh->len = sizeof (*toh) + sizeof (*pkti); 15394 toh->status = 0; 15395 optptr += sizeof (*toh); 15396 pkti = (struct in6_pktinfo *)optptr; 15397 if (tcp->tcp_ipversion == IPV6_VERSION) 15398 pkti->ipi6_addr = tcp->tcp_ip6h->ip6_src; 15399 else 15400 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 15401 &pkti->ipi6_addr); 15402 pkti->ipi6_ifindex = ipp->ipp_ifindex; 15403 optptr += sizeof (*pkti); 15404 ASSERT(OK_32PTR(optptr)); 15405 /* Save as "last" value */ 15406 tcp->tcp_recvifindex = ipp->ipp_ifindex; 15407 } 15408 /* If app asked for hoplimit and it has changed ... */ 15409 if (addflag & TCP_IPV6_RECVHOPLIMIT) { 15410 toh = (struct T_opthdr *)optptr; 15411 toh->level = IPPROTO_IPV6; 15412 toh->name = IPV6_HOPLIMIT; 15413 toh->len = sizeof (*toh) + sizeof (uint_t); 15414 toh->status = 0; 15415 optptr += sizeof (*toh); 15416 *(uint_t *)optptr = ipp->ipp_hoplimit; 15417 optptr += sizeof (uint_t); 15418 ASSERT(OK_32PTR(optptr)); 15419 /* Save as "last" value */ 15420 tcp->tcp_recvhops = ipp->ipp_hoplimit; 15421 } 15422 /* If app asked for tclass and it has changed ... 
*/ 15423 if (addflag & TCP_IPV6_RECVTCLASS) { 15424 toh = (struct T_opthdr *)optptr; 15425 toh->level = IPPROTO_IPV6; 15426 toh->name = IPV6_TCLASS; 15427 toh->len = sizeof (*toh) + sizeof (uint_t); 15428 toh->status = 0; 15429 optptr += sizeof (*toh); 15430 *(uint_t *)optptr = ipp->ipp_tclass; 15431 optptr += sizeof (uint_t); 15432 ASSERT(OK_32PTR(optptr)); 15433 /* Save as "last" value */ 15434 tcp->tcp_recvtclass = ipp->ipp_tclass; 15435 } 15436 if (addflag & TCP_IPV6_RECVHOPOPTS) { 15437 toh = (struct T_opthdr *)optptr; 15438 toh->level = IPPROTO_IPV6; 15439 toh->name = IPV6_HOPOPTS; 15440 toh->len = sizeof (*toh) + ipp->ipp_hopoptslen - 15441 tcp->tcp_label_len; 15442 toh->status = 0; 15443 optptr += sizeof (*toh); 15444 bcopy((uchar_t *)ipp->ipp_hopopts + tcp->tcp_label_len, optptr, 15445 ipp->ipp_hopoptslen - tcp->tcp_label_len); 15446 optptr += ipp->ipp_hopoptslen - tcp->tcp_label_len; 15447 ASSERT(OK_32PTR(optptr)); 15448 /* Save as last value */ 15449 ip_savebuf((void **)&tcp->tcp_hopopts, &tcp->tcp_hopoptslen, 15450 (ipp->ipp_fields & IPPF_HOPOPTS), 15451 ipp->ipp_hopopts, ipp->ipp_hopoptslen); 15452 } 15453 if (addflag & TCP_IPV6_RECVRTDSTOPTS) { 15454 toh = (struct T_opthdr *)optptr; 15455 toh->level = IPPROTO_IPV6; 15456 toh->name = IPV6_RTHDRDSTOPTS; 15457 toh->len = sizeof (*toh) + ipp->ipp_rtdstoptslen; 15458 toh->status = 0; 15459 optptr += sizeof (*toh); 15460 bcopy(ipp->ipp_rtdstopts, optptr, ipp->ipp_rtdstoptslen); 15461 optptr += ipp->ipp_rtdstoptslen; 15462 ASSERT(OK_32PTR(optptr)); 15463 /* Save as last value */ 15464 ip_savebuf((void **)&tcp->tcp_rtdstopts, 15465 &tcp->tcp_rtdstoptslen, 15466 (ipp->ipp_fields & IPPF_RTDSTOPTS), 15467 ipp->ipp_rtdstopts, ipp->ipp_rtdstoptslen); 15468 } 15469 if (addflag & TCP_IPV6_RECVRTHDR) { 15470 toh = (struct T_opthdr *)optptr; 15471 toh->level = IPPROTO_IPV6; 15472 toh->name = IPV6_RTHDR; 15473 toh->len = sizeof (*toh) + ipp->ipp_rthdrlen; 15474 toh->status = 0; 15475 optptr += sizeof (*toh); 15476 bcopy(ipp->ipp_rthdr, optptr, ipp->ipp_rthdrlen); 15477 optptr += ipp->ipp_rthdrlen; 15478 ASSERT(OK_32PTR(optptr)); 15479 /* Save as last value */ 15480 ip_savebuf((void **)&tcp->tcp_rthdr, &tcp->tcp_rthdrlen, 15481 (ipp->ipp_fields & IPPF_RTHDR), 15482 ipp->ipp_rthdr, ipp->ipp_rthdrlen); 15483 } 15484 if (addflag & (TCP_IPV6_RECVDSTOPTS | TCP_OLD_IPV6_RECVDSTOPTS)) { 15485 toh = (struct T_opthdr *)optptr; 15486 toh->level = IPPROTO_IPV6; 15487 toh->name = IPV6_DSTOPTS; 15488 toh->len = sizeof (*toh) + ipp->ipp_dstoptslen; 15489 toh->status = 0; 15490 optptr += sizeof (*toh); 15491 bcopy(ipp->ipp_dstopts, optptr, ipp->ipp_dstoptslen); 15492 optptr += ipp->ipp_dstoptslen; 15493 ASSERT(OK_32PTR(optptr)); 15494 /* Save as last value */ 15495 ip_savebuf((void **)&tcp->tcp_dstopts, &tcp->tcp_dstoptslen, 15496 (ipp->ipp_fields & IPPF_DSTOPTS), 15497 ipp->ipp_dstopts, ipp->ipp_dstoptslen); 15498 } 15499 ASSERT(optptr == mp->b_wptr); 15500 return (mp); 15501 } 15502 15503 15504 /* 15505 * Handle a *T_BIND_REQ that has failed either due to a T_ERROR_ACK 15506 * or a "bad" IRE detected by tcp_adapt_ire. 15507 * We can't tell if the failure was due to the laddr or the faddr 15508 * thus we clear out all addresses and ports. 
15509 */ 15510 static void 15511 tcp_bind_failed(tcp_t *tcp, mblk_t *mp, int error) 15512 { 15513 queue_t *q = tcp->tcp_rq; 15514 tcph_t *tcph; 15515 struct T_error_ack *tea; 15516 conn_t *connp = tcp->tcp_connp; 15517 15518 15519 ASSERT(mp->b_datap->db_type == M_PCPROTO); 15520 15521 if (mp->b_cont) { 15522 freemsg(mp->b_cont); 15523 mp->b_cont = NULL; 15524 } 15525 tea = (struct T_error_ack *)mp->b_rptr; 15526 switch (tea->PRIM_type) { 15527 case T_BIND_ACK: 15528 /* 15529 * Need to unbind with classifier since we were just told that 15530 * our bind succeeded. 15531 */ 15532 tcp->tcp_hard_bound = B_FALSE; 15533 tcp->tcp_hard_binding = B_FALSE; 15534 15535 ipcl_hash_remove(connp); 15536 /* Reuse the mblk if possible */ 15537 ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >= 15538 sizeof (*tea)); 15539 mp->b_rptr = mp->b_datap->db_base; 15540 mp->b_wptr = mp->b_rptr + sizeof (*tea); 15541 tea = (struct T_error_ack *)mp->b_rptr; 15542 tea->PRIM_type = T_ERROR_ACK; 15543 tea->TLI_error = TSYSERR; 15544 tea->UNIX_error = error; 15545 if (tcp->tcp_state >= TCPS_SYN_SENT) { 15546 tea->ERROR_prim = T_CONN_REQ; 15547 } else { 15548 tea->ERROR_prim = O_T_BIND_REQ; 15549 } 15550 break; 15551 15552 case T_ERROR_ACK: 15553 if (tcp->tcp_state >= TCPS_SYN_SENT) 15554 tea->ERROR_prim = T_CONN_REQ; 15555 break; 15556 default: 15557 panic("tcp_bind_failed: unexpected TPI type"); 15558 /*NOTREACHED*/ 15559 } 15560 15561 tcp->tcp_state = TCPS_IDLE; 15562 if (tcp->tcp_ipversion == IPV4_VERSION) 15563 tcp->tcp_ipha->ipha_src = 0; 15564 else 15565 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src); 15566 /* 15567 * Copy of the src addr. in tcp_t is needed since 15568 * the lookup funcs. can only look at tcp_t 15569 */ 15570 V6_SET_ZERO(tcp->tcp_ip_src_v6); 15571 15572 tcph = tcp->tcp_tcph; 15573 tcph->th_lport[0] = 0; 15574 tcph->th_lport[1] = 0; 15575 tcp_bind_hash_remove(tcp); 15576 bzero(&connp->u_port, sizeof (connp->u_port)); 15577 /* blow away saved option results if any */ 15578 if (tcp->tcp_conn.tcp_opts_conn_req != NULL) 15579 tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req); 15580 15581 conn_delete_ire(tcp->tcp_connp, NULL); 15582 putnext(q, mp); 15583 } 15584 15585 /* 15586 * tcp_rput_other is called by tcp_rput to handle everything other than M_DATA 15587 * messages. 15588 */ 15589 void 15590 tcp_rput_other(tcp_t *tcp, mblk_t *mp) 15591 { 15592 mblk_t *mp1; 15593 uchar_t *rptr = mp->b_rptr; 15594 queue_t *q = tcp->tcp_rq; 15595 struct T_error_ack *tea; 15596 uint32_t mss; 15597 mblk_t *syn_mp; 15598 mblk_t *mdti; 15599 mblk_t *lsoi; 15600 int retval; 15601 mblk_t *ire_mp; 15602 tcp_stack_t *tcps = tcp->tcp_tcps; 15603 15604 switch (mp->b_datap->db_type) { 15605 case M_PROTO: 15606 case M_PCPROTO: 15607 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 15608 if ((mp->b_wptr - rptr) < sizeof (t_scalar_t)) 15609 break; 15610 tea = (struct T_error_ack *)rptr; 15611 switch (tea->PRIM_type) { 15612 case T_BIND_ACK: 15613 /* 15614 * Adapt Multidata information, if any. The 15615 * following tcp_mdt_update routine will free 15616 * the message. 15617 */ 15618 if ((mdti = tcp_mdt_info_mp(mp)) != NULL) { 15619 tcp_mdt_update(tcp, &((ip_mdt_info_t *)mdti-> 15620 b_rptr)->mdt_capab, B_TRUE); 15621 freemsg(mdti); 15622 } 15623 15624 /* 15625 * Check to update LSO information with tcp, and 15626 * tcp_lso_update routine will free the message. 
15627 */ 15628 if ((lsoi = tcp_lso_info_mp(mp)) != NULL) { 15629 tcp_lso_update(tcp, &((ip_lso_info_t *)lsoi-> 15630 b_rptr)->lso_capab); 15631 freemsg(lsoi); 15632 } 15633 15634 /* Get the IRE, if we had requested for it */ 15635 ire_mp = tcp_ire_mp(mp); 15636 15637 if (tcp->tcp_hard_binding) { 15638 tcp->tcp_hard_binding = B_FALSE; 15639 tcp->tcp_hard_bound = B_TRUE; 15640 CL_INET_CONNECT(tcp); 15641 } else { 15642 if (ire_mp != NULL) 15643 freeb(ire_mp); 15644 goto after_syn_sent; 15645 } 15646 15647 retval = tcp_adapt_ire(tcp, ire_mp); 15648 if (ire_mp != NULL) 15649 freeb(ire_mp); 15650 if (retval == 0) { 15651 tcp_bind_failed(tcp, mp, 15652 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ? 15653 ENETUNREACH : EADDRNOTAVAIL)); 15654 return; 15655 } 15656 /* 15657 * Don't let an endpoint connect to itself. 15658 * Also checked in tcp_connect() but that 15659 * check can't handle the case when the 15660 * local IP address is INADDR_ANY. 15661 */ 15662 if (tcp->tcp_ipversion == IPV4_VERSION) { 15663 if ((tcp->tcp_ipha->ipha_dst == 15664 tcp->tcp_ipha->ipha_src) && 15665 (BE16_EQL(tcp->tcp_tcph->th_lport, 15666 tcp->tcp_tcph->th_fport))) { 15667 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15668 return; 15669 } 15670 } else { 15671 if (IN6_ARE_ADDR_EQUAL( 15672 &tcp->tcp_ip6h->ip6_dst, 15673 &tcp->tcp_ip6h->ip6_src) && 15674 (BE16_EQL(tcp->tcp_tcph->th_lport, 15675 tcp->tcp_tcph->th_fport))) { 15676 tcp_bind_failed(tcp, mp, EADDRNOTAVAIL); 15677 return; 15678 } 15679 } 15680 ASSERT(tcp->tcp_state == TCPS_SYN_SENT); 15681 /* 15682 * This should not be possible! Just for 15683 * defensive coding... 15684 */ 15685 if (tcp->tcp_state != TCPS_SYN_SENT) 15686 goto after_syn_sent; 15687 15688 if (is_system_labeled() && 15689 !tcp_update_label(tcp, CONN_CRED(tcp->tcp_connp))) { 15690 tcp_bind_failed(tcp, mp, EHOSTUNREACH); 15691 return; 15692 } 15693 15694 ASSERT(q == tcp->tcp_rq); 15695 /* 15696 * tcp_adapt_ire() does not adjust 15697 * for TCP/IP header length. 15698 */ 15699 mss = tcp->tcp_mss - tcp->tcp_hdr_len; 15700 15701 /* 15702 * Just make sure our rwnd is at 15703 * least tcp_recv_hiwat_mss * MSS 15704 * large, and round up to the nearest 15705 * MSS. 15706 * 15707 * We do the round up here because 15708 * we need to get the interface 15709 * MTU first before we can do the 15710 * round up. 15711 */ 15712 tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss), 15713 tcps->tcps_recv_hiwat_minmss * mss); 15714 q->q_hiwat = tcp->tcp_rwnd; 15715 tcp_set_ws_value(tcp); 15716 U32_TO_ABE16((tcp->tcp_rwnd >> tcp->tcp_rcv_ws), 15717 tcp->tcp_tcph->th_win); 15718 if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always) 15719 tcp->tcp_snd_ws_ok = B_TRUE; 15720 15721 /* 15722 * Set tcp_snd_ts_ok to true 15723 * so that tcp_xmit_mp will 15724 * include the timestamp 15725 * option in the SYN segment. 15726 */ 15727 if (tcps->tcps_tstamp_always || 15728 (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) { 15729 tcp->tcp_snd_ts_ok = B_TRUE; 15730 } 15731 15732 /* 15733 * tcp_snd_sack_ok can be set in 15734 * tcp_adapt_ire() if the sack metric 15735 * is set. So check it here also. 15736 */ 15737 if (tcps->tcps_sack_permitted == 2 || 15738 tcp->tcp_snd_sack_ok) { 15739 if (tcp->tcp_sack_info == NULL) { 15740 tcp->tcp_sack_info = 15741 kmem_cache_alloc(tcp_sack_info_cache, 15742 KM_SLEEP); 15743 } 15744 tcp->tcp_snd_sack_ok = B_TRUE; 15745 } 15746 15747 /* 15748 * Should we use ECN? Note that the current 15749 * default value (SunOS 5.9) of tcp_ecn_permitted 15750 * is 1. 
The reason for doing this is that there
15751 * is equipment out there that will drop
15752 * ECN-enabled IP packets. Setting it to 1 avoids
15753 * compatibility problems.
15754 */
15755 if (tcps->tcps_ecn_permitted == 2)
15756 tcp->tcp_ecn_ok = B_TRUE;
15757
15758 TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
15759 syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
15760 tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
15761 if (syn_mp) {
15762 cred_t *cr;
15763 pid_t pid;
15764
15765 /*
15766 * Obtain the credential from the
15767 * thread calling connect(); the credential
15768 * lives on in the second mblk which
15769 * originated from T_CONN_REQ and is echoed
15770 * with the T_BIND_ACK from ip. If none
15771 * can be found, default to the creator
15772 * of the socket.
15773 */
15774 if (mp->b_cont == NULL ||
15775 (cr = DB_CRED(mp->b_cont)) == NULL) {
15776 cr = tcp->tcp_cred;
15777 pid = tcp->tcp_cpid;
15778 } else {
15779 pid = DB_CPID(mp->b_cont);
15780 }
15781
15782 TCP_RECORD_TRACE(tcp, syn_mp,
15783 TCP_TRACE_SEND_PKT);
15784 mblk_setcred(syn_mp, cr);
15785 DB_CPID(syn_mp) = pid;
15786 tcp_send_data(tcp, tcp->tcp_wq, syn_mp);
15787 }
15788 after_syn_sent:
15789 /*
15790 * A trailer mblk indicates a waiting client upstream.
15791 * We complete here the processing begun in
15792 * either tcp_bind() or tcp_connect() by passing
15793 * upstream the reply message they supplied.
15794 */
15795 mp1 = mp;
15796 mp = mp->b_cont;
15797 freeb(mp1);
15798 if (mp)
15799 break;
15800 return;
15801 case T_ERROR_ACK:
15802 if (tcp->tcp_debug) {
15803 (void) strlog(TCP_MOD_ID, 0, 1,
15804 SL_TRACE|SL_ERROR,
15805 "tcp_rput_other: case T_ERROR_ACK, "
15806 "ERROR_prim == %d",
15807 tea->ERROR_prim);
15808 }
15809 switch (tea->ERROR_prim) {
15810 case O_T_BIND_REQ:
15811 case T_BIND_REQ:
15812 tcp_bind_failed(tcp, mp,
15813 (int)((tcp->tcp_state >= TCPS_SYN_SENT) ?
15814 ENETUNREACH : EADDRNOTAVAIL));
15815 return;
15816 case T_UNBIND_REQ:
15817 tcp->tcp_hard_binding = B_FALSE;
15818 tcp->tcp_hard_bound = B_FALSE;
15819 if (mp->b_cont) {
15820 freemsg(mp->b_cont);
15821 mp->b_cont = NULL;
15822 }
15823 if (tcp->tcp_unbind_pending)
15824 tcp->tcp_unbind_pending = 0;
15825 else {
15826 /* From tcp_ip_unbind() - free */
15827 freemsg(mp);
15828 return;
15829 }
15830 break;
15831 case T_SVR4_OPTMGMT_REQ:
15832 if (tcp->tcp_drop_opt_ack_cnt > 0) {
15833 /* T_OPTMGMT_REQ generated by TCP */
15834 printf("T_SVR4_OPTMGMT_REQ failed "
15835 "%d/%d - dropped (cnt %d)\n",
15836 tea->TLI_error, tea->UNIX_error,
15837 tcp->tcp_drop_opt_ack_cnt);
15838 freemsg(mp);
15839 tcp->tcp_drop_opt_ack_cnt--;
15840 return;
15841 }
15842 break;
15843 }
15844 if (tea->ERROR_prim == T_SVR4_OPTMGMT_REQ &&
15845 tcp->tcp_drop_opt_ack_cnt > 0) {
15846 printf("T_SVR4_OPTMGMT_REQ failed %d/%d "
15847 "- dropped (cnt %d)\n",
15848 tea->TLI_error, tea->UNIX_error,
15849 tcp->tcp_drop_opt_ack_cnt);
15850 freemsg(mp);
15851 tcp->tcp_drop_opt_ack_cnt--;
15852 return;
15853 }
15854 break;
15855 case T_OPTMGMT_ACK:
15856 if (tcp->tcp_drop_opt_ack_cnt > 0) {
15857 /* T_OPTMGMT_REQ generated by TCP */
15858 freemsg(mp);
15859 tcp->tcp_drop_opt_ack_cnt--;
15860 return;
15861 }
15862 break;
15863 default:
15864 break;
15865 }
15866 break;
15867 case M_FLUSH:
15868 if (*rptr & FLUSHR)
15869 flushq(q, FLUSHDATA);
15870 break;
15871 default:
15872 /* M_CTL will be directly sent to tcp_icmp_error() */
15873 ASSERT(DB_TYPE(mp) != M_CTL);
15874 break;
15875 }
15876 /*
15877 * Make sure we set this bit before sending the ACK for
15878 * bind.
Otherwise accept could possibly run and free 15879 * this tcp struct. 15880 */ 15881 putnext(q, mp); 15882 } 15883 15884 /* 15885 * Called as the result of a qbufcall or a qtimeout to remedy a failure 15886 * to allocate a T_ordrel_ind in tcp_rsrv(). qenable(q) will make 15887 * tcp_rsrv() try again. 15888 */ 15889 static void 15890 tcp_ordrel_kick(void *arg) 15891 { 15892 conn_t *connp = (conn_t *)arg; 15893 tcp_t *tcp = connp->conn_tcp; 15894 15895 tcp->tcp_ordrelid = 0; 15896 tcp->tcp_timeout = B_FALSE; 15897 if (!TCP_IS_DETACHED(tcp) && tcp->tcp_rq != NULL && 15898 tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) { 15899 qenable(tcp->tcp_rq); 15900 } 15901 } 15902 15903 /* ARGSUSED */ 15904 static void 15905 tcp_rsrv_input(void *arg, mblk_t *mp, void *arg2) 15906 { 15907 conn_t *connp = (conn_t *)arg; 15908 tcp_t *tcp = connp->conn_tcp; 15909 queue_t *q = tcp->tcp_rq; 15910 uint_t thwin; 15911 tcp_stack_t *tcps = tcp->tcp_tcps; 15912 15913 freeb(mp); 15914 15915 TCP_STAT(tcps, tcp_rsrv_calls); 15916 15917 if (TCP_IS_DETACHED(tcp) || q == NULL) { 15918 return; 15919 } 15920 15921 if (tcp->tcp_fused) { 15922 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 15923 15924 ASSERT(tcp->tcp_fused); 15925 ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused); 15926 ASSERT(peer_tcp->tcp_loopback_peer == tcp); 15927 ASSERT(!TCP_IS_DETACHED(tcp)); 15928 ASSERT(tcp->tcp_connp->conn_sqp == 15929 peer_tcp->tcp_connp->conn_sqp); 15930 15931 /* 15932 * Normally we would not get backenabled in synchronous 15933 * streams mode, but in case this happens, we need to plug 15934 * synchronous streams during our drain to prevent a race 15935 * with tcp_fuse_rrw() or tcp_fuse_rinfop(). 15936 */ 15937 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 15938 if (tcp->tcp_rcv_list != NULL) 15939 (void) tcp_rcv_drain(tcp->tcp_rq, tcp); 15940 15941 if (peer_tcp > tcp) { 15942 mutex_enter(&peer_tcp->tcp_non_sq_lock); 15943 mutex_enter(&tcp->tcp_non_sq_lock); 15944 } else { 15945 mutex_enter(&tcp->tcp_non_sq_lock); 15946 mutex_enter(&peer_tcp->tcp_non_sq_lock); 15947 } 15948 15949 if (peer_tcp->tcp_flow_stopped && 15950 (TCP_UNSENT_BYTES(peer_tcp) <= 15951 peer_tcp->tcp_xmit_lowater)) { 15952 tcp_clrqfull(peer_tcp); 15953 } 15954 mutex_exit(&peer_tcp->tcp_non_sq_lock); 15955 mutex_exit(&tcp->tcp_non_sq_lock); 15956 15957 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 15958 TCP_STAT(tcps, tcp_fusion_backenabled); 15959 return; 15960 } 15961 15962 if (canputnext(q)) { 15963 tcp->tcp_rwnd = q->q_hiwat; 15964 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win)) 15965 << tcp->tcp_rcv_ws; 15966 thwin -= tcp->tcp_rnxt - tcp->tcp_rack; 15967 /* 15968 * Send back a window update immediately if TCP is above 15969 * ESTABLISHED state and the increase of the rcv window 15970 * that the other side knows is at least 1 MSS after flow 15971 * control is lifted. 15972 */ 15973 if (tcp->tcp_state >= TCPS_ESTABLISHED && 15974 (q->q_hiwat - thwin >= tcp->tcp_mss)) { 15975 tcp_xmit_ctl(NULL, tcp, 15976 (tcp->tcp_swnd == 0) ? 
tcp->tcp_suna :
15977 tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
15978 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
15979 }
15980 }
15981 /* Handle a failure to allocate a T_ORDREL_IND here */
15982 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
15983 ASSERT(tcp->tcp_listener == NULL);
15984 if (tcp->tcp_rcv_list != NULL) {
15985 (void) tcp_rcv_drain(q, tcp);
15986 }
15987 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
15988 mp = mi_tpi_ordrel_ind();
15989 if (mp) {
15990 tcp->tcp_ordrel_done = B_TRUE;
15991 putnext(q, mp);
15992 if (tcp->tcp_deferred_clean_death) {
15993 /*
15994 * tcp_clean_death was deferred for
15995 * T_ORDREL_IND - do it now
15996 */
15997 tcp->tcp_deferred_clean_death = B_FALSE;
15998 (void) tcp_clean_death(tcp,
15999 tcp->tcp_client_errno, 22);
16000 }
16001 } else if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16002 /*
16003 * If there isn't already a timer running
16004 * start one. Use a 4 second
16005 * timer as a fallback since it can't fail.
16006 */
16007 tcp->tcp_timeout = B_TRUE;
16008 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16009 MSEC_TO_TICK(4000));
16010 }
16011 }
16012 }
16013
16014 /*
16015 * The read side service routine is called mostly when we get back-enabled as a
16016 * result of flow control relief. Since we don't actually queue anything in
16017 * TCP, we have no data to send out of here. What we do is clear the receive
16018 * window, and send out a window update.
16019 * This routine is also called to drive an orderly release message upstream
16020 * if the attempt in tcp_rput failed.
16021 */
16022 static void
16023 tcp_rsrv(queue_t *q)
16024 {
16025 conn_t *connp = Q_TO_CONN(q);
16026 tcp_t *tcp = connp->conn_tcp;
16027 mblk_t *mp;
16028 tcp_stack_t *tcps = tcp->tcp_tcps;
16029
16030 /* No code does a putq on the read side */
16031 ASSERT(q->q_first == NULL);
16032
16033 /* Nothing to do for the default queue */
16034 if (q == tcps->tcps_g_q) {
16035 return;
16036 }
16037
16038 mp = allocb(0, BPRI_HI);
16039 if (mp == NULL) {
16040 /*
16041 * We are under memory pressure. Return for now and
16042 * we will be called again later.
16043 */
16044 if (!tcp->tcp_timeout && tcp->tcp_ordrelid == 0) {
16045 /*
16046 * If there isn't already a timer running
16047 * start one. Use a 4 second
16048 * timer as a fallback since it can't fail.
16049 */
16050 tcp->tcp_timeout = B_TRUE;
16051 tcp->tcp_ordrelid = TCP_TIMER(tcp, tcp_ordrel_kick,
16052 MSEC_TO_TICK(4000));
16053 }
16054 return;
16055 }
16056 CONN_INC_REF(connp);
16057 squeue_enter(connp->conn_sqp, mp, tcp_rsrv_input, connp,
16058 SQTAG_TCP_RSRV);
16059 }
16060
16061 /*
16062 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
16063 * We do not allow the receive window to shrink. After setting rwnd,
16064 * set the flow control hiwat of the stream.
16065 *
16066 * This function is called in 2 cases:
16067 *
16068 * 1) Before data transfer begins, in tcp_accept_comm() for accepting a
16069 * connection (passive open) and in tcp_rput_data() for active connect.
16070 * This is called after tcp_mss_set() when the desired MSS value is known.
16071 * This makes sure that our window size is a multiple of the other side's
16072 * MSS.
16073 * 2) Handling SO_RCVBUF option.
16074 *
16075 * It is ASSUMED that the requested size is a multiple of the current MSS.
16076 *
16077 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
16078 * user requests so.
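 */

/*
 * Illustrative sketch (not part of the original source): the rwnd
 * arithmetic that tcp_rwnd_set() below applies - a floor of
 * min_mss_cnt segments, rounding up to a whole number of segments,
 * and a ceiling of what the window scale can advertise.  Names are
 * hypothetical; 65535 is the unscaled 16-bit window limit
 * (TCP_MAXWIN in the real code).
 */
static uint32_t
rwnd_bound(uint32_t rwnd, uint32_t mss, uint32_t min_mss_cnt, int rcv_ws)
{
	uint32_t max_rwnd = 65535U << rcv_ws;

	if (rwnd < min_mss_cnt * mss)
		rwnd = min_mss_cnt * mss;
	rwnd = ((rwnd + mss - 1) / mss) * mss;	/* MSS_ROUNDUP equivalent */
	if (rwnd > max_rwnd) {
		rwnd = max_rwnd - (max_rwnd % mss);
		if (rwnd < mss)
			rwnd = max_rwnd;
	}
	return (rwnd);
}

/*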
16079 */ 16080 static int 16081 tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd) 16082 { 16083 uint32_t mss = tcp->tcp_mss; 16084 uint32_t old_max_rwnd; 16085 uint32_t max_transmittable_rwnd; 16086 boolean_t tcp_detached = TCP_IS_DETACHED(tcp); 16087 tcp_stack_t *tcps = tcp->tcp_tcps; 16088 16089 if (tcp->tcp_fused) { 16090 size_t sth_hiwat; 16091 tcp_t *peer_tcp = tcp->tcp_loopback_peer; 16092 16093 ASSERT(peer_tcp != NULL); 16094 /* 16095 * Record the stream head's high water mark for 16096 * this endpoint; this is used for flow-control 16097 * purposes in tcp_fuse_output(). 16098 */ 16099 sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd); 16100 if (!tcp_detached) 16101 (void) mi_set_sth_hiwat(tcp->tcp_rq, sth_hiwat); 16102 16103 /* 16104 * In the fusion case, the maxpsz stream head value of 16105 * our peer is set according to its send buffer size 16106 * and our receive buffer size; since the latter may 16107 * have changed we need to update the peer's maxpsz. 16108 */ 16109 (void) tcp_maxpsz_set(peer_tcp, B_TRUE); 16110 return (rwnd); 16111 } 16112 16113 if (tcp_detached) 16114 old_max_rwnd = tcp->tcp_rwnd; 16115 else 16116 old_max_rwnd = tcp->tcp_rq->q_hiwat; 16117 16118 /* 16119 * Insist on a receive window that is at least 16120 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid 16121 * funny TCP interactions of Nagle algorithm, SWS avoidance 16122 * and delayed acknowledgement. 16123 */ 16124 rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss); 16125 16126 /* 16127 * If window size info has already been exchanged, TCP should not 16128 * shrink the window. Shrinking window is doable if done carefully. 16129 * We may add that support later. But so far there is not a real 16130 * need to do that. 16131 */ 16132 if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) { 16133 /* MSS may have changed, do a round up again. */ 16134 rwnd = MSS_ROUNDUP(old_max_rwnd, mss); 16135 } 16136 16137 /* 16138 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check 16139 * can be applied even before the window scale option is decided. 16140 */ 16141 max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws; 16142 if (rwnd > max_transmittable_rwnd) { 16143 rwnd = max_transmittable_rwnd - 16144 (max_transmittable_rwnd % mss); 16145 if (rwnd < mss) 16146 rwnd = max_transmittable_rwnd; 16147 /* 16148 * If we're over the limit we may have to back down tcp_rwnd. 16149 * The increment below won't work for us. So we set all three 16150 * here and the increment below will have no effect. 16151 */ 16152 tcp->tcp_rwnd = old_max_rwnd = rwnd; 16153 } 16154 if (tcp->tcp_localnet) { 16155 tcp->tcp_rack_abs_max = 16156 MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2); 16157 } else { 16158 /* 16159 * For a remote host on a different subnet (through a router), 16160 * we ack every other packet to be conforming to RFC1122. 16161 * tcp_deferred_acks_max is default to 2. 16162 */ 16163 tcp->tcp_rack_abs_max = 16164 MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2); 16165 } 16166 if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max) 16167 tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max; 16168 else 16169 tcp->tcp_rack_cur_max = 0; 16170 /* 16171 * Increment the current rwnd by the amount the maximum grew (we 16172 * can not overwrite it since we might be in the middle of a 16173 * connection.) 
16174 */ 16175 tcp->tcp_rwnd += rwnd - old_max_rwnd; 16176 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, tcp->tcp_tcph->th_win); 16177 if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max) 16178 tcp->tcp_cwnd_max = rwnd; 16179 16180 if (tcp_detached) 16181 return (rwnd); 16182 /* 16183 * We set the maximum receive window into rq->q_hiwat. 16184 * This is not actually used for flow control. 16185 */ 16186 tcp->tcp_rq->q_hiwat = rwnd; 16187 /* 16188 * Set the Stream head high water mark. This doesn't have to be 16189 * here, since we are simply using default values, but we would 16190 * prefer to choose these values algorithmically, with a likely 16191 * relationship to rwnd. 16192 */ 16193 (void) mi_set_sth_hiwat(tcp->tcp_rq, 16194 MAX(rwnd, tcps->tcps_sth_rcv_hiwat)); 16195 return (rwnd); 16196 } 16197 16198 /* 16199 * Return SNMP stuff in buffer in mpdata. 16200 */ 16201 int 16202 tcp_snmp_get(queue_t *q, mblk_t *mpctl) 16203 { 16204 mblk_t *mpdata; 16205 mblk_t *mp_conn_ctl = NULL; 16206 mblk_t *mp_conn_tail; 16207 mblk_t *mp_attr_ctl = NULL; 16208 mblk_t *mp_attr_tail; 16209 mblk_t *mp6_conn_ctl = NULL; 16210 mblk_t *mp6_conn_tail; 16211 mblk_t *mp6_attr_ctl = NULL; 16212 mblk_t *mp6_attr_tail; 16213 struct opthdr *optp; 16214 mib2_tcpConnEntry_t tce; 16215 mib2_tcp6ConnEntry_t tce6; 16216 mib2_transportMLPEntry_t mlp; 16217 connf_t *connfp; 16218 conn_t *connp; 16219 int i; 16220 boolean_t ispriv; 16221 zoneid_t zoneid; 16222 int v4_conn_idx; 16223 int v6_conn_idx; 16224 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 16225 ip_stack_t *ipst; 16226 16227 if (mpctl == NULL || 16228 (mpdata = mpctl->b_cont) == NULL || 16229 (mp_conn_ctl = copymsg(mpctl)) == NULL || 16230 (mp_attr_ctl = copymsg(mpctl)) == NULL || 16231 (mp6_conn_ctl = copymsg(mpctl)) == NULL || 16232 (mp6_attr_ctl = copymsg(mpctl)) == NULL) { 16233 freemsg(mp_conn_ctl); 16234 freemsg(mp_attr_ctl); 16235 freemsg(mp6_conn_ctl); 16236 freemsg(mp6_attr_ctl); 16237 return (0); 16238 } 16239 16240 /* build table of connections -- need count in fixed part */ 16241 SET_MIB(tcps->tcps_mib.tcpRtoAlgorithm, 4); /* vanj */ 16242 SET_MIB(tcps->tcps_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min); 16243 SET_MIB(tcps->tcps_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max); 16244 SET_MIB(tcps->tcps_mib.tcpMaxConn, -1); 16245 SET_MIB(tcps->tcps_mib.tcpCurrEstab, 0); 16246 16247 ispriv = 16248 secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0; 16249 zoneid = Q_TO_CONN(q)->conn_zoneid; 16250 16251 v4_conn_idx = v6_conn_idx = 0; 16252 mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL; 16253 16254 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16255 ipst = tcps->tcps_netstack->netstack_ip; 16256 16257 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16258 16259 connp = NULL; 16260 16261 while ((connp = 16262 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16263 tcp_t *tcp; 16264 boolean_t needattr; 16265 16266 if (connp->conn_zoneid != zoneid) 16267 continue; /* not in this zone */ 16268 16269 tcp = connp->conn_tcp; 16270 UPDATE_MIB(&tcps->tcps_mib, 16271 tcpHCInSegs, tcp->tcp_ibsegs); 16272 tcp->tcp_ibsegs = 0; 16273 UPDATE_MIB(&tcps->tcps_mib, 16274 tcpHCOutSegs, tcp->tcp_obsegs); 16275 tcp->tcp_obsegs = 0; 16276 16277 tce6.tcp6ConnState = tce.tcpConnState = 16278 tcp_snmp_state(tcp); 16279 if (tce.tcpConnState == MIB2_TCP_established || 16280 tce.tcpConnState == MIB2_TCP_closeWait) 16281 BUMP_MIB(&tcps->tcps_mib, tcpCurrEstab); 16282 16283 needattr = B_FALSE; 16284 bzero(&mlp, sizeof (mlp)); 16285 if 
(connp->conn_mlp_type != mlptSingle) { 16286 if (connp->conn_mlp_type == mlptShared || 16287 connp->conn_mlp_type == mlptBoth) 16288 mlp.tme_flags |= MIB2_TMEF_SHARED; 16289 if (connp->conn_mlp_type == mlptPrivate || 16290 connp->conn_mlp_type == mlptBoth) 16291 mlp.tme_flags |= MIB2_TMEF_PRIVATE; 16292 needattr = B_TRUE; 16293 } 16294 if (connp->conn_peercred != NULL) { 16295 ts_label_t *tsl; 16296 16297 tsl = crgetlabel(connp->conn_peercred); 16298 mlp.tme_doi = label2doi(tsl); 16299 mlp.tme_label = *label2bslabel(tsl); 16300 needattr = B_TRUE; 16301 } 16302 16303 /* Create a message to report on IPv6 entries */ 16304 if (tcp->tcp_ipversion == IPV6_VERSION) { 16305 tce6.tcp6ConnLocalAddress = tcp->tcp_ip_src_v6; 16306 tce6.tcp6ConnRemAddress = tcp->tcp_remote_v6; 16307 tce6.tcp6ConnLocalPort = ntohs(tcp->tcp_lport); 16308 tce6.tcp6ConnRemPort = ntohs(tcp->tcp_fport); 16309 tce6.tcp6ConnIfIndex = tcp->tcp_bound_if; 16310 /* Don't want just anybody seeing these... */ 16311 if (ispriv) { 16312 tce6.tcp6ConnEntryInfo.ce_snxt = 16313 tcp->tcp_snxt; 16314 tce6.tcp6ConnEntryInfo.ce_suna = 16315 tcp->tcp_suna; 16316 tce6.tcp6ConnEntryInfo.ce_rnxt = 16317 tcp->tcp_rnxt; 16318 tce6.tcp6ConnEntryInfo.ce_rack = 16319 tcp->tcp_rack; 16320 } else { 16321 /* 16322 * Netstat, unfortunately, uses this to 16323 * get send/receive queue sizes. How to fix? 16324 * Why not compute the difference only? 16325 */ 16326 tce6.tcp6ConnEntryInfo.ce_snxt = 16327 tcp->tcp_snxt - tcp->tcp_suna; 16328 tce6.tcp6ConnEntryInfo.ce_suna = 0; 16329 tce6.tcp6ConnEntryInfo.ce_rnxt = 16330 tcp->tcp_rnxt - tcp->tcp_rack; 16331 tce6.tcp6ConnEntryInfo.ce_rack = 0; 16332 } 16333 16334 tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16335 tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16336 tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto; 16337 tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss; 16338 tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state; 16339 16340 tce6.tcp6ConnCreationProcess = 16341 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16342 tcp->tcp_cpid; 16343 tce6.tcp6ConnCreationTime = tcp->tcp_open_time; 16344 16345 (void) snmp_append_data2(mp6_conn_ctl->b_cont, 16346 &mp6_conn_tail, (char *)&tce6, sizeof (tce6)); 16347 16348 mlp.tme_connidx = v6_conn_idx++; 16349 if (needattr) 16350 (void) snmp_append_data2(mp6_attr_ctl->b_cont, 16351 &mp6_attr_tail, (char *)&mlp, sizeof (mlp)); 16352 } 16353 /* 16354 * Create an IPv4 table entry for IPv4 entries and also 16355 * for IPv6 entries which are bound to in6addr_any 16356 * but don't have IPV6_V6ONLY set. 16357 * (i.e. anything an IPv4 peer could connect to) 16358 */ 16359 if (tcp->tcp_ipversion == IPV4_VERSION || 16360 (tcp->tcp_state <= TCPS_LISTEN && 16361 !tcp->tcp_connp->conn_ipv6_v6only && 16362 IN6_IS_ADDR_UNSPECIFIED(&tcp->tcp_ip_src_v6))) { 16363 if (tcp->tcp_ipversion == IPV6_VERSION) { 16364 tce.tcpConnRemAddress = INADDR_ANY; 16365 tce.tcpConnLocalAddress = INADDR_ANY; 16366 } else { 16367 tce.tcpConnRemAddress = 16368 tcp->tcp_remote; 16369 tce.tcpConnLocalAddress = 16370 tcp->tcp_ip_src; 16371 } 16372 tce.tcpConnLocalPort = ntohs(tcp->tcp_lport); 16373 tce.tcpConnRemPort = ntohs(tcp->tcp_fport); 16374 /* Don't want just anybody seeing these... 
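 *
 * (Exposing real snxt/rnxt values would make blind off-path
 * segment injection much easier, so unprivileged observers only
 * get the queue depths computed below; e.g. with illustrative
 * values snxt = 1000 and suna = 400, netstat sees a send queue
 * depth of 600 and ce_suna = 0.)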
*/ 16375 if (ispriv) { 16376 tce.tcpConnEntryInfo.ce_snxt = 16377 tcp->tcp_snxt; 16378 tce.tcpConnEntryInfo.ce_suna = 16379 tcp->tcp_suna; 16380 tce.tcpConnEntryInfo.ce_rnxt = 16381 tcp->tcp_rnxt; 16382 tce.tcpConnEntryInfo.ce_rack = 16383 tcp->tcp_rack; 16384 } else { 16385 /* 16386 * Netstat, unfortunately, uses this to 16387 * get send/receive queue sizes. How 16388 * to fix? 16389 * Why not compute the difference only? 16390 */ 16391 tce.tcpConnEntryInfo.ce_snxt = 16392 tcp->tcp_snxt - tcp->tcp_suna; 16393 tce.tcpConnEntryInfo.ce_suna = 0; 16394 tce.tcpConnEntryInfo.ce_rnxt = 16395 tcp->tcp_rnxt - tcp->tcp_rack; 16396 tce.tcpConnEntryInfo.ce_rack = 0; 16397 } 16398 16399 tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd; 16400 tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd; 16401 tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto; 16402 tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss; 16403 tce.tcpConnEntryInfo.ce_state = 16404 tcp->tcp_state; 16405 16406 tce.tcpConnCreationProcess = 16407 (tcp->tcp_cpid < 0) ? MIB2_UNKNOWN_PROCESS : 16408 tcp->tcp_cpid; 16409 tce.tcpConnCreationTime = tcp->tcp_open_time; 16410 16411 (void) snmp_append_data2(mp_conn_ctl->b_cont, 16412 &mp_conn_tail, (char *)&tce, sizeof (tce)); 16413 16414 mlp.tme_connidx = v4_conn_idx++; 16415 if (needattr) 16416 (void) snmp_append_data2( 16417 mp_attr_ctl->b_cont, 16418 &mp_attr_tail, (char *)&mlp, 16419 sizeof (mlp)); 16420 } 16421 } 16422 } 16423 16424 /* fixed length structure for IPv4 and IPv6 counters */ 16425 SET_MIB(tcps->tcps_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t)); 16426 SET_MIB(tcps->tcps_mib.tcp6ConnTableSize, 16427 sizeof (mib2_tcp6ConnEntry_t)); 16428 /* synchronize 32- and 64-bit counters */ 16429 SYNC32_MIB(&tcps->tcps_mib, tcpInSegs, tcpHCInSegs); 16430 SYNC32_MIB(&tcps->tcps_mib, tcpOutSegs, tcpHCOutSegs); 16431 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 16432 optp->level = MIB2_TCP; 16433 optp->name = 0; 16434 (void) snmp_append_data(mpdata, (char *)&tcps->tcps_mib, 16435 sizeof (tcps->tcps_mib)); 16436 optp->len = msgdsize(mpdata); 16437 qreply(q, mpctl); 16438 16439 /* table of connections... */ 16440 optp = (struct opthdr *)&mp_conn_ctl->b_rptr[ 16441 sizeof (struct T_optmgmt_ack)]; 16442 optp->level = MIB2_TCP; 16443 optp->name = MIB2_TCP_CONN; 16444 optp->len = msgdsize(mp_conn_ctl->b_cont); 16445 qreply(q, mp_conn_ctl); 16446 16447 /* table of MLP attributes... */ 16448 optp = (struct opthdr *)&mp_attr_ctl->b_rptr[ 16449 sizeof (struct T_optmgmt_ack)]; 16450 optp->level = MIB2_TCP; 16451 optp->name = EXPER_XPORT_MLP; 16452 optp->len = msgdsize(mp_attr_ctl->b_cont); 16453 if (optp->len == 0) 16454 freemsg(mp_attr_ctl); 16455 else 16456 qreply(q, mp_attr_ctl); 16457 16458 /* table of IPv6 connections... */ 16459 optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[ 16460 sizeof (struct T_optmgmt_ack)]; 16461 optp->level = MIB2_TCP6; 16462 optp->name = MIB2_TCP6_CONN; 16463 optp->len = msgdsize(mp6_conn_ctl->b_cont); 16464 qreply(q, mp6_conn_ctl); 16465 16466 /* table of IPv6 MLP attributes... 
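 *
 * Like the tables already sent above, this one goes up as its
 * own T_OPTMGMT_ACK reply; a rough sketch of each reply mblk:
 *
 *	[struct T_optmgmt_ack][struct opthdr: level/name/len][table data]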
*/
16467 optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
16468 sizeof (struct T_optmgmt_ack)];
16469 optp->level = MIB2_TCP6;
16470 optp->name = EXPER_XPORT_MLP;
16471 optp->len = msgdsize(mp6_attr_ctl->b_cont);
16472 if (optp->len == 0)
16473 freemsg(mp6_attr_ctl);
16474 else
16475 qreply(q, mp6_attr_ctl);
16476 return (1);
16477 }
16478
16479 /* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
16480 /* ARGSUSED */
16481 int
16482 tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
16483 {
16484 mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;
16485
16486 switch (level) {
16487 case MIB2_TCP:
16488 switch (name) {
16489 case 13:
16490 if (tce->tcpConnState != MIB2_TCP_deleteTCB)
16491 return (0);
16492 /* TODO: delete entry defined by tce */
16493 return (1);
16494 default:
16495 return (0);
16496 }
16497 default:
16498 return (1);
16499 }
16500 }
16501
16502 /* Translate TCP state to MIB2 TCP state. */
16503 static int
16504 tcp_snmp_state(tcp_t *tcp)
16505 {
16506 if (tcp == NULL)
16507 return (0);
16508
16509 switch (tcp->tcp_state) {
16510 case TCPS_CLOSED:
16511 case TCPS_IDLE: /* RFC1213 doesn't have analogue for IDLE & BOUND */
16512 case TCPS_BOUND:
16513 return (MIB2_TCP_closed);
16514 case TCPS_LISTEN:
16515 return (MIB2_TCP_listen);
16516 case TCPS_SYN_SENT:
16517 return (MIB2_TCP_synSent);
16518 case TCPS_SYN_RCVD:
16519 return (MIB2_TCP_synReceived);
16520 case TCPS_ESTABLISHED:
16521 return (MIB2_TCP_established);
16522 case TCPS_CLOSE_WAIT:
16523 return (MIB2_TCP_closeWait);
16524 case TCPS_FIN_WAIT_1:
16525 return (MIB2_TCP_finWait1);
16526 case TCPS_CLOSING:
16527 return (MIB2_TCP_closing);
16528 case TCPS_LAST_ACK:
16529 return (MIB2_TCP_lastAck);
16530 case TCPS_FIN_WAIT_2:
16531 return (MIB2_TCP_finWait2);
16532 case TCPS_TIME_WAIT:
16533 return (MIB2_TCP_timeWait);
16534 default:
16535 return (0);
16536 }
16537 }
16538
16539 static char tcp_report_header[] =
16540 "TCP " MI_COL_HDRPAD_STR
16541 "zone dest snxt suna "
16542 "swnd rnxt rack rwnd rto mss w sw rw t "
16543 "recent [lport,fport] state";
16544
16545 /*
16546 * TCP status report triggered via the Named Dispatch mechanism.
16547 */
16548 /* ARGSUSED */
16549 static void
16550 tcp_report_item(mblk_t *mp, tcp_t *tcp, int hashval, tcp_t *thisstream,
16551 cred_t *cr)
16552 {
16553 char hash[10], addrbuf[INET6_ADDRSTRLEN];
16554 boolean_t ispriv = secpolicy_ip_config(cr, B_TRUE) == 0;
16555 char cflag;
16556 in6_addr_t v6dst;
16557 char buf[80];
16558 uint_t print_len, buf_len;
16559
16560 buf_len = mp->b_datap->db_lim - mp->b_wptr;
16561 if (buf_len <= 0)
16562 return;
16563
16564 if (hashval >= 0)
16565 (void) sprintf(hash, "%03d ", hashval);
16566 else
16567 hash[0] = '\0';
16568
16569 /*
16570 * Note that we use the remote address in the tcp_b structure.
16571 * This means that it will print out the real destination address,
16572 * not the next hop's address if source routing is used. This
16573 * avoids confusion in the output because the user may not
16574 * know that source routing is used for a connection.
16575 */
16576 if (tcp->tcp_ipversion == IPV4_VERSION) {
16577 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_remote, &v6dst);
16578 } else {
16579 v6dst = tcp->tcp_remote_v6;
16580 }
16581 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf));
16582 /*
16583 * The ispriv checks are so that normal users cannot determine
16584 * sequence number information using NDD.
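 *
 * (In the snprintf below, snxt/suna/rnxt/rack print as 0 for
 * unprivileged readers; the remaining columns - windows, rto,
 * mss, scale factors - are considered harmless.)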
16585 */ 16586 16587 if (TCP_IS_DETACHED(tcp)) 16588 cflag = '*'; 16589 else 16590 cflag = ' '; 16591 print_len = snprintf((char *)mp->b_wptr, buf_len, 16592 "%s " MI_COL_PTRFMT_STR "%d %s %08x %08x %010d %08x %08x " 16593 "%010d %05ld %05d %1d %02d %02d %1d %08x %s%c\n", 16594 hash, 16595 (void *)tcp, 16596 tcp->tcp_connp->conn_zoneid, 16597 addrbuf, 16598 (ispriv) ? tcp->tcp_snxt : 0, 16599 (ispriv) ? tcp->tcp_suna : 0, 16600 tcp->tcp_swnd, 16601 (ispriv) ? tcp->tcp_rnxt : 0, 16602 (ispriv) ? tcp->tcp_rack : 0, 16603 tcp->tcp_rwnd, 16604 tcp->tcp_rto, 16605 tcp->tcp_mss, 16606 tcp->tcp_snd_ws_ok, 16607 tcp->tcp_snd_ws, 16608 tcp->tcp_rcv_ws, 16609 tcp->tcp_snd_ts_ok, 16610 tcp->tcp_ts_recent, 16611 tcp_display(tcp, buf, DISP_PORT_ONLY), cflag); 16612 if (print_len < buf_len) { 16613 ((mblk_t *)mp)->b_wptr += print_len; 16614 } else { 16615 ((mblk_t *)mp)->b_wptr += buf_len; 16616 } 16617 } 16618 16619 /* 16620 * TCP status report (for listeners only) triggered via the Named Dispatch 16621 * mechanism. 16622 */ 16623 /* ARGSUSED */ 16624 static void 16625 tcp_report_listener(mblk_t *mp, tcp_t *tcp, int hashval) 16626 { 16627 char addrbuf[INET6_ADDRSTRLEN]; 16628 in6_addr_t v6dst; 16629 uint_t print_len, buf_len; 16630 16631 buf_len = mp->b_datap->db_lim - mp->b_wptr; 16632 if (buf_len <= 0) 16633 return; 16634 16635 if (tcp->tcp_ipversion == IPV4_VERSION) { 16636 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, &v6dst); 16637 (void) inet_ntop(AF_INET6, &v6dst, addrbuf, sizeof (addrbuf)); 16638 } else { 16639 (void) inet_ntop(AF_INET6, &tcp->tcp_ip6h->ip6_src, 16640 addrbuf, sizeof (addrbuf)); 16641 } 16642 print_len = snprintf((char *)mp->b_wptr, buf_len, 16643 "%03d " 16644 MI_COL_PTRFMT_STR 16645 "%d %s %05u %08u %d/%d/%d%c\n", 16646 hashval, (void *)tcp, 16647 tcp->tcp_connp->conn_zoneid, 16648 addrbuf, 16649 (uint_t)BE16_TO_U16(tcp->tcp_tcph->th_lport), 16650 tcp->tcp_conn_req_seqnum, 16651 tcp->tcp_conn_req_cnt_q0, tcp->tcp_conn_req_cnt_q, 16652 tcp->tcp_conn_req_max, 16653 tcp->tcp_syn_defense ? '*' : ' '); 16654 if (print_len < buf_len) { 16655 ((mblk_t *)mp)->b_wptr += print_len; 16656 } else { 16657 ((mblk_t *)mp)->b_wptr += buf_len; 16658 } 16659 } 16660 16661 /* TCP status report triggered via the Named Dispatch mechanism. */ 16662 /* ARGSUSED */ 16663 static int 16664 tcp_status_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16665 { 16666 tcp_t *tcp; 16667 int i; 16668 conn_t *connp; 16669 connf_t *connfp; 16670 zoneid_t zoneid; 16671 tcp_stack_t *tcps; 16672 ip_stack_t *ipst; 16673 16674 zoneid = Q_TO_CONN(q)->conn_zoneid; 16675 tcps = Q_TO_TCP(q)->tcp_tcps; 16676 16677 /* 16678 * Because of the ndd constraint, at most we can have 64K buffer 16679 * to put in all TCP info. So to be more efficient, just 16680 * allocate a 64K buffer here, assuming we need that large buffer. 16681 * This may be a problem as any user can read tcp_status. Therefore 16682 * we limit the rate of doing this using tcp_ndd_get_info_interval. 16683 * This should be OK as normal users should not do this too often. 16684 */ 16685 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16686 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16687 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16688 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16689 return (0); 16690 } 16691 } 16692 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16693 /* The following may work even if we cannot get a large buf. 
*/ 16694 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16695 return (0); 16696 } 16697 16698 (void) mi_mpprintf(mp, "%s", tcp_report_header); 16699 16700 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 16701 16702 ipst = tcps->tcps_netstack->netstack_ip; 16703 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 16704 16705 connp = NULL; 16706 16707 while ((connp = 16708 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16709 tcp = connp->conn_tcp; 16710 if (zoneid != GLOBAL_ZONEID && 16711 zoneid != connp->conn_zoneid) 16712 continue; 16713 tcp_report_item(mp->b_cont, tcp, -1, tcp, 16714 cr); 16715 } 16716 16717 } 16718 16719 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16720 return (0); 16721 } 16722 16723 /* TCP status report triggered via the Named Dispatch mechanism. */ 16724 /* ARGSUSED */ 16725 static int 16726 tcp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16727 { 16728 tf_t *tbf; 16729 tcp_t *tcp; 16730 int i; 16731 zoneid_t zoneid; 16732 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 16733 16734 zoneid = Q_TO_CONN(q)->conn_zoneid; 16735 16736 /* Refer to comments in tcp_status_report(). */ 16737 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16738 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16739 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16740 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16741 return (0); 16742 } 16743 } 16744 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16745 /* The following may work even if we cannot get a large buf. */ 16746 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16747 return (0); 16748 } 16749 16750 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16751 16752 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 16753 tbf = &tcps->tcps_bind_fanout[i]; 16754 mutex_enter(&tbf->tf_lock); 16755 for (tcp = tbf->tf_tcp; tcp != NULL; 16756 tcp = tcp->tcp_bind_hash) { 16757 if (zoneid != GLOBAL_ZONEID && 16758 zoneid != tcp->tcp_connp->conn_zoneid) 16759 continue; 16760 CONN_INC_REF(tcp->tcp_connp); 16761 tcp_report_item(mp->b_cont, tcp, i, 16762 Q_TO_TCP(q), cr); 16763 CONN_DEC_REF(tcp->tcp_connp); 16764 } 16765 mutex_exit(&tbf->tf_lock); 16766 } 16767 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16768 return (0); 16769 } 16770 16771 /* TCP status report triggered via the Named Dispatch mechanism. */ 16772 /* ARGSUSED */ 16773 static int 16774 tcp_listen_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16775 { 16776 connf_t *connfp; 16777 conn_t *connp; 16778 tcp_t *tcp; 16779 int i; 16780 zoneid_t zoneid; 16781 tcp_stack_t *tcps; 16782 ip_stack_t *ipst; 16783 16784 zoneid = Q_TO_CONN(q)->conn_zoneid; 16785 tcps = Q_TO_TCP(q)->tcp_tcps; 16786 16787 /* Refer to comments in tcp_status_report(). */ 16788 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16789 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16790 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16791 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16792 return (0); 16793 } 16794 } 16795 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16796 /* The following may work even if we cannot get a large buf. 
*/ 16797 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16798 return (0); 16799 } 16800 16801 (void) mi_mpprintf(mp, 16802 " TCP " MI_COL_HDRPAD_STR 16803 "zone IP addr port seqnum backlog (q0/q/max)"); 16804 16805 ipst = tcps->tcps_netstack->netstack_ip; 16806 16807 for (i = 0; i < ipst->ips_ipcl_bind_fanout_size; i++) { 16808 connfp = &ipst->ips_ipcl_bind_fanout[i]; 16809 connp = NULL; 16810 while ((connp = 16811 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16812 tcp = connp->conn_tcp; 16813 if (zoneid != GLOBAL_ZONEID && 16814 zoneid != connp->conn_zoneid) 16815 continue; 16816 tcp_report_listener(mp->b_cont, tcp, i); 16817 } 16818 } 16819 16820 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16821 return (0); 16822 } 16823 16824 /* TCP status report triggered via the Named Dispatch mechanism. */ 16825 /* ARGSUSED */ 16826 static int 16827 tcp_conn_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16828 { 16829 connf_t *connfp; 16830 conn_t *connp; 16831 tcp_t *tcp; 16832 int i; 16833 zoneid_t zoneid; 16834 tcp_stack_t *tcps; 16835 ip_stack_t *ipst; 16836 16837 zoneid = Q_TO_CONN(q)->conn_zoneid; 16838 tcps = Q_TO_TCP(q)->tcp_tcps; 16839 ipst = tcps->tcps_netstack->netstack_ip; 16840 16841 /* Refer to comments in tcp_status_report(). */ 16842 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16843 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16844 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16845 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16846 return (0); 16847 } 16848 } 16849 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16850 /* The following may work even if we cannot get a large buf. */ 16851 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG); 16852 return (0); 16853 } 16854 16855 (void) mi_mpprintf(mp, "tcp_conn_hash_size = %d", 16856 ipst->ips_ipcl_conn_fanout_size); 16857 (void) mi_mpprintf(mp, " %s", tcp_report_header); 16858 16859 for (i = 0; i < ipst->ips_ipcl_conn_fanout_size; i++) { 16860 connfp = &ipst->ips_ipcl_conn_fanout[i]; 16861 connp = NULL; 16862 while ((connp = 16863 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 16864 tcp = connp->conn_tcp; 16865 if (zoneid != GLOBAL_ZONEID && 16866 zoneid != connp->conn_zoneid) 16867 continue; 16868 tcp_report_item(mp->b_cont, tcp, i, 16869 Q_TO_TCP(q), cr); 16870 } 16871 } 16872 16873 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt(); 16874 return (0); 16875 } 16876 16877 /* TCP status report triggered via the Named Dispatch mechanism. */ 16878 /* ARGSUSED */ 16879 static int 16880 tcp_acceptor_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 16881 { 16882 tf_t *tf; 16883 tcp_t *tcp; 16884 int i; 16885 zoneid_t zoneid; 16886 tcp_stack_t *tcps; 16887 16888 zoneid = Q_TO_CONN(q)->conn_zoneid; 16889 tcps = Q_TO_TCP(q)->tcp_tcps; 16890 16891 /* Refer to comments in tcp_status_report(). */ 16892 if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) { 16893 if (ddi_get_lbolt() - tcps->tcps_last_ndd_get_info_time < 16894 drv_usectohz(tcps->tcps_ndd_get_info_interval * 1000)) { 16895 (void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG); 16896 return (0); 16897 } 16898 } 16899 if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) { 16900 /* The following may work even if we cannot get a large buf. 
*/
16901 (void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG);
16902 return (0);
16903 }
16904
16905 (void) mi_mpprintf(mp, " %s", tcp_report_header);
16906
16907 for (i = 0; i < TCP_FANOUT_SIZE; i++) {
16908 tf = &tcps->tcps_acceptor_fanout[i];
16909 mutex_enter(&tf->tf_lock);
16910 for (tcp = tf->tf_tcp; tcp != NULL;
16911 tcp = tcp->tcp_acceptor_hash) {
16912 if (zoneid != GLOBAL_ZONEID &&
16913 zoneid != tcp->tcp_connp->conn_zoneid)
16914 continue;
16915 tcp_report_item(mp->b_cont, tcp, i,
16916 Q_TO_TCP(q), cr);
16917 }
16918 mutex_exit(&tf->tf_lock);
16919 }
16920 tcps->tcps_last_ndd_get_info_time = ddi_get_lbolt();
16921 return (0);
16922 }
16923
16924 /*
16925 * tcp_timer is the timer service routine. It handles the retransmission,
16926 * FIN_WAIT_2 flush, and zero window probe timeout events. It figures out
16927 * from the state of the tcp instance what kind of action needs to be done
16928 * at the time it is called.
16929 */
16930 static void
16931 tcp_timer(void *arg)
16932 {
16933 mblk_t *mp;
16934 clock_t first_threshold;
16935 clock_t second_threshold;
16936 clock_t ms;
16937 uint32_t mss;
16938 conn_t *connp = (conn_t *)arg;
16939 tcp_t *tcp = connp->conn_tcp;
16940 tcp_stack_t *tcps = tcp->tcp_tcps;
16941
16942 tcp->tcp_timer_tid = 0;
16943
16944 if (tcp->tcp_fused)
16945 return;
16946
16947 first_threshold = tcp->tcp_first_timer_threshold;
16948 second_threshold = tcp->tcp_second_timer_threshold;
16949 switch (tcp->tcp_state) {
16950 case TCPS_IDLE:
16951 case TCPS_BOUND:
16952 case TCPS_LISTEN:
16953 return;
16954 case TCPS_SYN_RCVD: {
16955 tcp_t *listener = tcp->tcp_listener;
16956
16957 if (tcp->tcp_syn_rcvd_timeout == 0 && (listener != NULL)) {
16958 ASSERT(tcp->tcp_rq == listener->tcp_rq);
16959 /* it's our first timeout */
16960 tcp->tcp_syn_rcvd_timeout = 1;
16961 mutex_enter(&listener->tcp_eager_lock);
16962 listener->tcp_syn_rcvd_timeout++;
16963 if (!tcp->tcp_dontdrop && !tcp->tcp_closemp_used) {
16964 /*
16965 * Make this eager available for drop if we
16966 * need to drop one to accommodate a new
16967 * incoming SYN request.
16968 */
16969 MAKE_DROPPABLE(listener, tcp);
16970 }
16971 if (!listener->tcp_syn_defense &&
16972 (listener->tcp_syn_rcvd_timeout >
16973 (tcps->tcps_conn_req_max_q0 >> 2)) &&
16974 (tcps->tcps_conn_req_max_q0 > 200)) {
16975 /* We may be under attack. Put on a defense. */
16976 listener->tcp_syn_defense = B_TRUE;
16977 cmn_err(CE_WARN, "High TCP connect timeout "
16978 "rate! System (port %d) may be under a "
16979 "SYN flood attack!",
16980 BE16_TO_U16(listener->tcp_tcph->th_lport));
16981
16982 listener->tcp_ip_addr_cache = kmem_zalloc(
16983 IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t),
16984 KM_NOSLEEP);
16985 }
16986 mutex_exit(&listener->tcp_eager_lock);
16987 } else if (listener != NULL) {
16988 mutex_enter(&listener->tcp_eager_lock);
16989 tcp->tcp_syn_rcvd_timeout++;
16990 if (tcp->tcp_syn_rcvd_timeout > 1 &&
16991 !tcp->tcp_closemp_used) {
16992 /*
16993 * This is our second timeout. Put the tcp in
16994 * the list of droppable eagers to allow it to
16995 * be dropped, if needed. We don't check
16996 * whether tcp_dontdrop is set or not to
16997 * protect ourselves from a SYN attack where a
16998 * remote host can spoof itself as one of the
16999 * good IP sources and continue to hold
17000 * resources too long.
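 *
 * Illustrative timeline (actual timer values vary with the
 * rexmit tunables): a spoofed SYN gets a SYN|ACK, and with no
 * ACK forthcoming the eager retransmits at roughly 3s, 6s,
 * 12s, ...; from this second timeout onwards it sits on the
 * droppable list so a burst of new SYNs can reclaim its q0
 * slot.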
17001 */
17002 MAKE_DROPPABLE(listener, tcp);
17003 }
17004 mutex_exit(&listener->tcp_eager_lock);
17005 }
17006 }
17007 /* FALLTHRU */
17008 case TCPS_SYN_SENT:
17009 first_threshold = tcp->tcp_first_ctimer_threshold;
17010 second_threshold = tcp->tcp_second_ctimer_threshold;
17011 break;
17012 case TCPS_ESTABLISHED:
17013 case TCPS_FIN_WAIT_1:
17014 case TCPS_CLOSING:
17015 case TCPS_CLOSE_WAIT:
17016 case TCPS_LAST_ACK:
17017 /* If we have data to rexmit */
17018 if (tcp->tcp_suna != tcp->tcp_snxt) {
17019 clock_t time_to_wait;
17020
17021 BUMP_MIB(&tcps->tcps_mib, tcpTimRetrans);
17022 if (!tcp->tcp_xmit_head)
17023 break;
17024 time_to_wait = lbolt -
17025 (clock_t)tcp->tcp_xmit_head->b_prev;
17026 time_to_wait = tcp->tcp_rto -
17027 TICK_TO_MSEC(time_to_wait);
17028 /*
17029 * If the timer fires too early, 1 clock tick earlier,
17030 * restart the timer.
17031 */
17032 if (time_to_wait > msec_per_tick) {
17033 TCP_STAT(tcps, tcp_timer_fire_early);
17034 TCP_TIMER_RESTART(tcp, time_to_wait);
17035 return;
17036 }
17037 /*
17038 * When we probe zero windows, we force the swnd open.
17039 * If our peer acks with a closed window swnd will be
17040 * set to zero by tcp_rput(). As long as we are
17041 * receiving acks tcp_rput will
17042 * reset 'tcp_ms_we_have_waited' so as not to trip the
17043 * first and second interval actions. NOTE: the timer
17044 * interval is allowed to continue its exponential
17045 * backoff.
17046 */
17047 if (tcp->tcp_swnd == 0 || tcp->tcp_zero_win_probe) {
17048 if (tcp->tcp_debug) {
17049 (void) strlog(TCP_MOD_ID, 0, 1,
17050 SL_TRACE, "tcp_timer: zero win");
17051 }
17052 } else {
17053 /*
17054 * After retransmission, we need to do
17055 * slow start. Set the ssthresh to one
17056 * half of current effective window and
17057 * cwnd to one MSS. Also reset
17058 * tcp_cwnd_cnt.
17059 *
17060 * Note that if tcp_ssthresh is reduced because
17061 * of ECN, do not reduce it again unless it is
17062 * already one window of data away (tcp_cwr
17063 * should then be cleared) or this is a
17064 * timeout for a retransmitted segment.
17065 */
17066 uint32_t npkt;
17067
17068 if (!tcp->tcp_cwr || tcp->tcp_rexmit) {
17069 npkt = ((tcp->tcp_timer_backoff ?
17070 tcp->tcp_cwnd_ssthresh :
17071 tcp->tcp_snxt -
17072 tcp->tcp_suna) >> 1) / tcp->tcp_mss;
17073 tcp->tcp_cwnd_ssthresh = MAX(npkt, 2) *
17074 tcp->tcp_mss;
17075 }
17076 tcp->tcp_cwnd = tcp->tcp_mss;
17077 tcp->tcp_cwnd_cnt = 0;
17078 if (tcp->tcp_ecn_ok) {
17079 tcp->tcp_cwr = B_TRUE;
17080 tcp->tcp_cwr_snd_max = tcp->tcp_snxt;
17081 tcp->tcp_ecn_cwr_sent = B_FALSE;
17082 }
17083 }
17084 break;
17085 }
17086 /*
17087 * We have something to send yet we cannot send. The
17088 * reason can be:
17089 *
17090 * 1. Zero send window: we need to do zero window probe.
17091 * 2. Zero cwnd: because of ECN, we need to "clock out
17092 * segments".
17093 * 3. SWS avoidance: receiver may have shrunk window,
17094 * reset our knowledge.
17095 *
17096 * Note that condition 2 can happen with either 1 or
17097 * 3. But 1 and 3 are exclusive.
17098 */
17099 if (tcp->tcp_unsent != 0) {
17100 if (tcp->tcp_cwnd == 0) {
17101 /*
17102 * Set tcp_cwnd to 1 MSS so that a
17103 * new segment can be sent out. We
17104 * are "clocking out" new data when
17105 * the network is really congested.
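 *
 * E.g. with an illustrative mss of 1460 bytes, cwnd becomes
 * 1460, so exactly one segment can go out; the ACK it elicits
 * then grows cwnd again through the normal slow start path.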
17106 */
17107 ASSERT(tcp->tcp_ecn_ok);
17108 tcp->tcp_cwnd = tcp->tcp_mss;
17109 }
17110 if (tcp->tcp_swnd == 0) {
17111 /* Extend window for zero window probe */
17112 tcp->tcp_swnd++;
17113 tcp->tcp_zero_win_probe = B_TRUE;
17114 BUMP_MIB(&tcps->tcps_mib, tcpOutWinProbe);
17115 } else {
17116 /*
17117 * Handle timeout from sender SWS avoidance.
17118 * Reset our knowledge of the max send window
17119 * since the receiver might have reduced its
17120 * receive buffer. Avoid setting tcp_max_swnd
17121 * to one since that will essentially disable
17122 * the SWS checks.
17123 *
17124 * Note that since we don't have a SWS
17125 * state variable, if the timeout is set
17126 * for ECN but not for SWS, this
17127 * code will also be executed. This is
17128 * fine as tcp_max_swnd is updated
17129 * constantly and it will not affect
17130 * anything.
17131 */
17132 tcp->tcp_max_swnd = MAX(tcp->tcp_swnd, 2);
17133 }
17134 tcp_wput_data(tcp, NULL, B_FALSE);
17135 return;
17136 }
17137 /* Is there a FIN that needs to be retransmitted? */
17138 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) &&
17139 !tcp->tcp_fin_acked)
17140 break;
17141 /* Nothing to do, return without restarting timer. */
17142 TCP_STAT(tcps, tcp_timer_fire_miss);
17143 return;
17144 case TCPS_FIN_WAIT_2:
17145 /*
17146 * User closed the TCP endpoint and peer ACK'ed our FIN.
17147 * We waited some time for the peer's FIN, but it hasn't
17148 * arrived. We flush the connection now to avoid
17149 * the case where the peer has rebooted.
17150 */
17151 if (TCP_IS_DETACHED(tcp)) {
17152 (void) tcp_clean_death(tcp, 0, 23);
17153 } else {
17154 TCP_TIMER_RESTART(tcp,
17155 tcps->tcps_fin_wait_2_flush_interval);
17156 }
17157 return;
17158 case TCPS_TIME_WAIT:
17159 (void) tcp_clean_death(tcp, 0, 24);
17160 return;
17161 default:
17162 if (tcp->tcp_debug) {
17163 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
17164 "tcp_timer: strange state (%d) %s",
17165 tcp->tcp_state, tcp_display(tcp, NULL,
17166 DISP_PORT_ONLY));
17167 }
17168 return;
17169 }
17170 if ((ms = tcp->tcp_ms_we_have_waited) > second_threshold) {
17171 /*
17172 * For zero window probe, we need to send indefinitely,
17173 * unless we have not heard from the other side for some
17174 * time...
17175 */
17176 if ((tcp->tcp_zero_win_probe == 0) ||
17177 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >
17178 second_threshold)) {
17179 BUMP_MIB(&tcps->tcps_mib, tcpTimRetransDrop);
17180 /*
17181 * If TCP is in SYN_RCVD state, send back a
17182 * RST|ACK as BSD does. Note that tcp_zero_win_probe
17183 * should be zero in TCPS_SYN_RCVD state.
17184 */
17185 if (tcp->tcp_state == TCPS_SYN_RCVD) {
17186 tcp_xmit_ctl("tcp_timer: RST sent on timeout "
17187 "in SYN_RCVD",
17188 tcp, tcp->tcp_snxt,
17189 tcp->tcp_rnxt, TH_RST | TH_ACK);
17190 }
17191 (void) tcp_clean_death(tcp,
17192 tcp->tcp_client_errno ?
17193 tcp->tcp_client_errno : ETIMEDOUT, 25);
17194 return;
17195 } else {
17196 /*
17197 * Set tcp_ms_we_have_waited to second_threshold
17198 * so that in the next timeout, we will do the above
17199 * check (lbolt - tcp_last_recv_time). This is
17200 * also to avoid overflow.
17201 *
17202 * We don't need to decrement tcp_timer_backoff
17203 * to avoid overflow because it will be decremented
17204 * later if the new timeout value is greater than
17205 * tcp_rexmit_interval_max. In the case when
17206 * tcp_rexmit_interval_max is greater than
17207 * second_threshold, it means that we will wait
17208 * longer than second_threshold to send the next
17209 * window probe.
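 *
 * For reference, the backoff arithmetic applied further below
 * is roughly (illustrative numbers, not the tunables' defaults):
 *
 *	ms = (rtt_sa >> 3) + rtt_sd + extra + (rtt_sa >> 5);
 *	ms <<= tcp_timer_backoff;	(doubles per timeout)
 *	ms = MIN(ms, tcps_rexmit_interval_max);
 *
 * so a 200 ms base RTO at backoff 4 waits ~3.2 s, and the cap
 * keeps ms (and hence this counter) from growing without bound.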
17210 */ 17211 tcp->tcp_ms_we_have_waited = second_threshold; 17212 } 17213 } else if (ms > first_threshold) { 17214 if (tcp->tcp_snd_zcopy_aware && (!tcp->tcp_xmit_zc_clean) && 17215 tcp->tcp_xmit_head != NULL) { 17216 tcp->tcp_xmit_head = 17217 tcp_zcopy_backoff(tcp, tcp->tcp_xmit_head, 1); 17218 } 17219 /* 17220 * We have been retransmitting for too long... The RTT 17221 * we calculated is probably incorrect. Reinitialize it. 17222 * Need to compensate for 0 tcp_rtt_sa. Reset 17223 * tcp_rtt_update so that we won't accidentally cache a 17224 * bad value. But only do this if this is not a zero 17225 * window probe. 17226 */ 17227 if (tcp->tcp_rtt_sa != 0 && tcp->tcp_zero_win_probe == 0) { 17228 tcp->tcp_rtt_sd += (tcp->tcp_rtt_sa >> 3) + 17229 (tcp->tcp_rtt_sa >> 5); 17230 tcp->tcp_rtt_sa = 0; 17231 tcp_ip_notify(tcp); 17232 tcp->tcp_rtt_update = 0; 17233 } 17234 } 17235 tcp->tcp_timer_backoff++; 17236 if ((ms = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd + 17237 tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5)) < 17238 tcps->tcps_rexmit_interval_min) { 17239 /* 17240 * This means the original RTO is tcp_rexmit_interval_min. 17241 * So we will use tcp_rexmit_interval_min as the RTO value 17242 * and do the backoff. 17243 */ 17244 ms = tcps->tcps_rexmit_interval_min << tcp->tcp_timer_backoff; 17245 } else { 17246 ms <<= tcp->tcp_timer_backoff; 17247 } 17248 if (ms > tcps->tcps_rexmit_interval_max) { 17249 ms = tcps->tcps_rexmit_interval_max; 17250 /* 17251 * ms is at max, decrement tcp_timer_backoff to avoid 17252 * overflow. 17253 */ 17254 tcp->tcp_timer_backoff--; 17255 } 17256 tcp->tcp_ms_we_have_waited += ms; 17257 if (tcp->tcp_zero_win_probe == 0) { 17258 tcp->tcp_rto = ms; 17259 } 17260 TCP_TIMER_RESTART(tcp, ms); 17261 /* 17262 * This is after a timeout and tcp_rto is backed off. Set 17263 * tcp_set_timer to 1 so that next time RTO is updated, we will 17264 * restart the timer with a correct value. 17265 */ 17266 tcp->tcp_set_timer = 1; 17267 mss = tcp->tcp_snxt - tcp->tcp_suna; 17268 if (mss > tcp->tcp_mss) 17269 mss = tcp->tcp_mss; 17270 if (mss > tcp->tcp_swnd && tcp->tcp_swnd != 0) 17271 mss = tcp->tcp_swnd; 17272 17273 if ((mp = tcp->tcp_xmit_head) != NULL) 17274 mp->b_prev = (mblk_t *)lbolt; 17275 mp = tcp_xmit_mp(tcp, mp, mss, NULL, NULL, tcp->tcp_suna, B_TRUE, &mss, 17276 B_TRUE); 17277 17278 /* 17279 * When slow start after retransmission begins, start with 17280 * this seq no. tcp_rexmit_max marks the end of special slow 17281 * start phase. tcp_snd_burst controls how many segments 17282 * can be sent because of an ack. 17283 */ 17284 tcp->tcp_rexmit_nxt = tcp->tcp_suna; 17285 tcp->tcp_snd_burst = TCP_CWND_SS; 17286 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 17287 (tcp->tcp_unsent == 0)) { 17288 tcp->tcp_rexmit_max = tcp->tcp_fss; 17289 } else { 17290 tcp->tcp_rexmit_max = tcp->tcp_snxt; 17291 } 17292 tcp->tcp_rexmit = B_TRUE; 17293 tcp->tcp_dupack_cnt = 0; 17294 17295 /* 17296 * Remove all rexmit SACK blk to start from fresh. 17297 */ 17298 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 17299 TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list); 17300 tcp->tcp_num_notsack_blk = 0; 17301 tcp->tcp_cnt_notsack_list = 0; 17302 } 17303 if (mp == NULL) { 17304 return; 17305 } 17306 /* Attach credentials to retransmitted initial SYNs. 
*/
17307 if (tcp->tcp_state == TCPS_SYN_SENT) {
17308 mblk_setcred(mp, tcp->tcp_cred);
17309 DB_CPID(mp) = tcp->tcp_cpid;
17310 }
17311
17312 tcp->tcp_csuna = tcp->tcp_snxt;
17313 BUMP_MIB(&tcps->tcps_mib, tcpRetransSegs);
17314 UPDATE_MIB(&tcps->tcps_mib, tcpRetransBytes, mss);
17315 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
17316 tcp_send_data(tcp, tcp->tcp_wq, mp);
17317
17318 }
17319
17320 /* tcp_unbind is called by tcp_wput_proto to handle T_UNBIND_REQ messages. */
17321 static void
17322 tcp_unbind(tcp_t *tcp, mblk_t *mp)
17323 {
17324 conn_t *connp;
17325
17326 switch (tcp->tcp_state) {
17327 case TCPS_BOUND:
17328 case TCPS_LISTEN:
17329 break;
17330 default:
17331 tcp_err_ack(tcp, mp, TOUTSTATE, 0);
17332 return;
17333 }
17334
17335 /*
17336 * Need to clean up all the eagers since after the unbind, segments
17337 * will no longer be delivered to this listener stream.
17338 */
17339 mutex_enter(&tcp->tcp_eager_lock);
17340 if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
17341 tcp_eager_cleanup(tcp, 0);
17342 }
17343 mutex_exit(&tcp->tcp_eager_lock);
17344
17345 if (tcp->tcp_ipversion == IPV4_VERSION) {
17346 tcp->tcp_ipha->ipha_src = 0;
17347 } else {
17348 V6_SET_ZERO(tcp->tcp_ip6h->ip6_src);
17349 }
17350 V6_SET_ZERO(tcp->tcp_ip_src_v6);
17351 bzero(tcp->tcp_tcph->th_lport, sizeof (tcp->tcp_tcph->th_lport));
17352 tcp_bind_hash_remove(tcp);
17353 tcp->tcp_state = TCPS_IDLE;
17354 tcp->tcp_mdt = B_FALSE;
17355 /* Send M_FLUSH according to TPI */
17356 (void) putnextctl1(tcp->tcp_rq, M_FLUSH, FLUSHRW);
17357 connp = tcp->tcp_connp;
17358 connp->conn_mdt_ok = B_FALSE;
17359 ipcl_hash_remove(connp);
17360 bzero(&connp->conn_ports, sizeof (connp->conn_ports));
17361 mp = mi_tpi_ok_ack_alloc(mp);
17362 putnext(tcp->tcp_rq, mp);
17363 }
17364
17365 /*
17366 * Don't let port fall into the privileged range.
17367 * Since the extra privileged ports can be arbitrary we also
17368 * ensure that we exclude those from consideration.
17369 * tcp_g_epriv_ports is not sorted thus we loop over it until
17370 * there are no changes.
17371 *
17372 * Note: No locks are held when inspecting tcp_g_*epriv_ports
17373 * but instead the code relies on:
17374 * - the fact that the address of the array and its size never change
17375 * - the atomic assignment of the elements of the array
17376 *
17377 * Returns 0 if there are no more ports available.
17378 *
17379 * TS note: skip multilevel ports.
17380 */
17381 static in_port_t
17382 tcp_update_next_port(in_port_t port, const tcp_t *tcp, boolean_t random)
17383 {
17384 int i;
17385 boolean_t restart = B_FALSE;
17386 tcp_stack_t *tcps = tcp->tcp_tcps;
17387
17388 if (random && tcp_random_anon_port != 0) {
17389 (void) random_get_pseudo_bytes((uint8_t *)&port,
17390 sizeof (in_port_t));
17391 /*
17392 * Unless changed by a sys admin, the smallest anon port
17393 * is 32768 and the largest anon port is 65535. It is
17394 * very likely (50%) for the random port to be smaller
17395 * than the smallest anon port. When that happens,
17396 * add port % (anon port range) to the smallest anon
17397 * port to get the random port. It should fall into the
17398 * valid anon port range.
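 *
 * E.g. with the default [32768, 65535] range, a random value
 * of 1234 becomes 32768 + (1234 % 32767) = 34002, which is
 * within the anon range.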
17399 */
17400 if (port < tcps->tcps_smallest_anon_port) {
17401 port = tcps->tcps_smallest_anon_port +
17402 port % (tcps->tcps_largest_anon_port -
17403 tcps->tcps_smallest_anon_port);
17404 }
17405 }
17406
17407 retry:
17408 if (port < tcps->tcps_smallest_anon_port)
17409 port = (in_port_t)tcps->tcps_smallest_anon_port;
17410
17411 if (port > tcps->tcps_largest_anon_port) {
17412 if (restart)
17413 return (0);
17414 restart = B_TRUE;
17415 port = (in_port_t)tcps->tcps_smallest_anon_port;
17416 }
17417
17418 if (port < tcps->tcps_smallest_nonpriv_port)
17419 port = (in_port_t)tcps->tcps_smallest_nonpriv_port;
17420
17421 for (i = 0; i < tcps->tcps_g_num_epriv_ports; i++) {
17422 if (port == tcps->tcps_g_epriv_ports[i]) {
17423 port++;
17424 /*
17425 * Make sure the port is still in the
17426 * valid range.
17427 */
17428 goto retry;
17429 }
17430 }
17431 if (is_system_labeled() &&
17432 (i = tsol_next_port(crgetzone(tcp->tcp_cred), port,
17433 IPPROTO_TCP, B_TRUE)) != 0) {
17434 port = i;
17435 goto retry;
17436 }
17437 return (port);
17438 }
17439
17440 /*
17441 * Return the next anonymous port in the privileged port range for
17442 * bind checking. It starts at IPPORT_RESERVED - 1 and goes
17443 * downwards. This is the same behavior as documented in the userland
17444 * library call rresvport(3N).
17445 *
17446 * TS note: skip multilevel ports.
17447 */
17448 static in_port_t
17449 tcp_get_next_priv_port(const tcp_t *tcp)
17450 {
17451 static in_port_t next_priv_port = IPPORT_RESERVED - 1;
17452 in_port_t nextport;
17453 boolean_t restart = B_FALSE;
17454 tcp_stack_t *tcps = tcp->tcp_tcps;
17455 retry:
17456 if (next_priv_port < tcps->tcps_min_anonpriv_port ||
17457 next_priv_port >= IPPORT_RESERVED) {
17458 next_priv_port = IPPORT_RESERVED - 1;
17459 if (restart)
17460 return (0);
17461 restart = B_TRUE;
17462 }
17463 if (is_system_labeled() &&
17464 (nextport = tsol_next_port(crgetzone(tcp->tcp_cred),
17465 next_priv_port, IPPROTO_TCP, B_FALSE)) != 0) {
17466 next_priv_port = nextport;
17467 goto retry;
17468 }
17469 return (next_priv_port--);
17470 }
17471
17472 /* The write side r/w procedure. */
17473
17474 #if CCS_STATS
17475 struct {
17476 struct {
17477 int64_t count, bytes;
17478 } tot, hit;
17479 } wrw_stats;
17480 #endif
17481
17482 /*
17483 * Called by tcp_wput() to handle all non-data messages other than M_PROTO
17484 * and M_PCPROTO.
17485 */
17486 /* ARGSUSED */
17487 static void
17488 tcp_wput_nondata(void *arg, mblk_t *mp, void *arg2)
17489 {
17490 conn_t *connp = (conn_t *)arg;
17491 tcp_t *tcp = connp->conn_tcp;
17492 queue_t *q = tcp->tcp_wq;
17493
17494 ASSERT(DB_TYPE(mp) != M_IOCTL);
17495 /*
17496 * TCP is D_MP and qprocsoff() is done towards the end of tcp_close().
17497 * Once the close starts, streamhead and sockfs will not let any data
17498 * packets come down (close ensures that there are no threads using the
17499 * queue and no new threads will come down) but since qprocsoff()
17500 * hasn't happened yet, an M_FLUSH or some non-data message might
17501 * get reflected back (in response to our own FLUSHRW) and get
17502 * processed after tcp_close() is done. The conn would still be valid
17503 * because a ref would have been added, but we need to check the state
17504 * before actually processing the packet.
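 *
 * Hence the TCP_IS_DETACHED/TCPS_CLOSED check below: anything
 * that arrives once the close is underway is simply freed
 * instead of processed.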
17505 */ 17506 if (TCP_IS_DETACHED(tcp) || (tcp->tcp_state == TCPS_CLOSED)) { 17507 freemsg(mp); 17508 return; 17509 } 17510 17511 switch (DB_TYPE(mp)) { 17512 case M_IOCDATA: 17513 tcp_wput_iocdata(tcp, mp); 17514 break; 17515 case M_FLUSH: 17516 tcp_wput_flush(tcp, mp); 17517 break; 17518 default: 17519 CALL_IP_WPUT(connp, q, mp); 17520 break; 17521 } 17522 } 17523 17524 /* 17525 * The TCP fast path write put procedure. 17526 * NOTE: the logic of the fast path is duplicated from tcp_wput_data() 17527 */ 17528 /* ARGSUSED */ 17529 void 17530 tcp_output(void *arg, mblk_t *mp, void *arg2) 17531 { 17532 int len; 17533 int hdrlen; 17534 int plen; 17535 mblk_t *mp1; 17536 uchar_t *rptr; 17537 uint32_t snxt; 17538 tcph_t *tcph; 17539 struct datab *db; 17540 uint32_t suna; 17541 uint32_t mss; 17542 ipaddr_t *dst; 17543 ipaddr_t *src; 17544 uint32_t sum; 17545 int usable; 17546 conn_t *connp = (conn_t *)arg; 17547 tcp_t *tcp = connp->conn_tcp; 17548 uint32_t msize; 17549 tcp_stack_t *tcps = tcp->tcp_tcps; 17550 17551 /* 17552 * Try and ASSERT the minimum possible references on the 17553 * conn early enough. Since we are executing on write side, 17554 * the connection is obviously not detached and that means 17555 * there is a ref each for TCP and IP. Since we are behind 17556 * the squeue, the minimum references needed are 3. If the 17557 * conn is in classifier hash list, there should be an 17558 * extra ref for that (we check both the possibilities). 17559 */ 17560 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 17561 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 17562 17563 ASSERT(DB_TYPE(mp) == M_DATA); 17564 msize = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp); 17565 17566 mutex_enter(&tcp->tcp_non_sq_lock); 17567 tcp->tcp_squeue_bytes -= msize; 17568 mutex_exit(&tcp->tcp_non_sq_lock); 17569 17570 /* Bypass tcp protocol for fused tcp loopback */ 17571 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 17572 return; 17573 17574 mss = tcp->tcp_mss; 17575 if (tcp->tcp_xmit_zc_clean) 17576 mp = tcp_zcopy_backoff(tcp, mp, 0); 17577 17578 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 17579 len = (int)(mp->b_wptr - mp->b_rptr); 17580 17581 /* 17582 * Criteria for fast path: 17583 * 17584 * 1. no unsent data 17585 * 2. single mblk in request 17586 * 3. connection established 17587 * 4. data in mblk 17588 * 5. len <= mss 17589 * 6. no tcp_valid bits 17590 */ 17591 if ((tcp->tcp_unsent != 0) || 17592 (tcp->tcp_cork) || 17593 (mp->b_cont != NULL) || 17594 (tcp->tcp_state != TCPS_ESTABLISHED) || 17595 (len == 0) || 17596 (len > mss) || 17597 (tcp->tcp_valid_bits != 0)) { 17598 tcp_wput_data(tcp, mp, B_FALSE); 17599 return; 17600 } 17601 17602 ASSERT(tcp->tcp_xmit_tail_unsent == 0); 17603 ASSERT(tcp->tcp_fin_sent == 0); 17604 17605 /* queue new packet onto retransmission queue */ 17606 if (tcp->tcp_xmit_head == NULL) { 17607 tcp->tcp_xmit_head = mp; 17608 } else { 17609 tcp->tcp_xmit_last->b_cont = mp; 17610 } 17611 tcp->tcp_xmit_last = mp; 17612 tcp->tcp_xmit_tail = mp; 17613 17614 /* find out how much we can send */ 17615 /* BEGIN CSTYLED */ 17616 /* 17617 * un-acked usable 17618 * |--------------|-----------------| 17619 * tcp_suna tcp_snxt tcp_suna+tcp_swnd 17620 */ 17621 /* END CSTYLED */ 17622 17623 /* start sending from tcp_snxt */ 17624 snxt = tcp->tcp_snxt; 17625 17626 /* 17627 * Check to see if this connection has been idled for some 17628 * time and no ACK is expected. 
If it is, we need to slow 17629 * start again to get back the connection's "self-clock" as 17630 * described in VJ's paper. 17631 * 17632 * Refer to the comment in tcp_mss_set() for the calculation 17633 * of tcp_cwnd after idle. 17634 */ 17635 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 17636 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 17637 SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle); 17638 } 17639 17640 usable = tcp->tcp_swnd; /* tcp window size */ 17641 if (usable > tcp->tcp_cwnd) 17642 usable = tcp->tcp_cwnd; /* congestion window smaller */ 17643 usable -= snxt; /* subtract stuff already sent */ 17644 suna = tcp->tcp_suna; 17645 usable += suna; 17646 /* usable can be < 0 if the congestion window is smaller */ 17647 if (len > usable) { 17648 /* Can't send complete M_DATA in one shot */ 17649 goto slow; 17650 } 17651 17652 mutex_enter(&tcp->tcp_non_sq_lock); 17653 if (tcp->tcp_flow_stopped && 17654 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 17655 tcp_clrqfull(tcp); 17656 } 17657 mutex_exit(&tcp->tcp_non_sq_lock); 17658 17659 /* 17660 * determine if anything to send (Nagle). 17661 * 17662 * 1. len < tcp_mss (i.e. small) 17663 * 2. unacknowledged data present 17664 * 3. len < nagle limit 17665 * 4. last packet sent < nagle limit (previous packet sent) 17666 */ 17667 if ((len < mss) && (snxt != suna) && 17668 (len < (int)tcp->tcp_naglim) && 17669 (tcp->tcp_last_sent_len < tcp->tcp_naglim)) { 17670 /* 17671 * This was the first unsent packet and normally 17672 * mss < xmit_hiwater so there is no need to worry 17673 * about flow control. The next packet will go 17674 * through the flow control check in tcp_wput_data(). 17675 */ 17676 /* leftover work from above */ 17677 tcp->tcp_unsent = len; 17678 tcp->tcp_xmit_tail_unsent = len; 17679 17680 return; 17681 } 17682 17683 /* len <= tcp->tcp_mss && len == unsent so no silly window */ 17684 17685 if (snxt == suna) { 17686 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 17687 } 17688 17689 /* we have always sent something */ 17690 tcp->tcp_rack_cnt = 0; 17691 17692 tcp->tcp_snxt = snxt + len; 17693 tcp->tcp_rack = tcp->tcp_rnxt; 17694 17695 if ((mp1 = dupb(mp)) == 0) 17696 goto no_memory; 17697 mp->b_prev = (mblk_t *)(uintptr_t)lbolt; 17698 mp->b_next = (mblk_t *)(uintptr_t)snxt; 17699 17700 /* adjust tcp header information */ 17701 tcph = tcp->tcp_tcph; 17702 tcph->th_flags[0] = (TH_ACK|TH_PUSH); 17703 17704 sum = len + tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 17705 sum = (sum >> 16) + (sum & 0xFFFF); 17706 U16_TO_ABE16(sum, tcph->th_sum); 17707 17708 U32_TO_ABE32(snxt, tcph->th_seq); 17709 17710 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 17711 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 17712 BUMP_LOCAL(tcp->tcp_obsegs); 17713 17714 /* Update the latest receive window size in TCP header. 
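 *
 * th_win carries the window right-shifted by the receive
 * window scale, e.g. (illustrative) rwnd = 262144 with
 * rcv_ws = 3 advertises 262144 >> 3 = 32768.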
*/ 17715 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 17716 tcph->th_win); 17717 17718 tcp->tcp_last_sent_len = (ushort_t)len; 17719 17720 plen = len + tcp->tcp_hdr_len; 17721 17722 if (tcp->tcp_ipversion == IPV4_VERSION) { 17723 tcp->tcp_ipha->ipha_length = htons(plen); 17724 } else { 17725 tcp->tcp_ip6h->ip6_plen = htons(plen - 17726 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 17727 } 17728 17729 /* see if we need to allocate a mblk for the headers */ 17730 hdrlen = tcp->tcp_hdr_len; 17731 rptr = mp1->b_rptr - hdrlen; 17732 db = mp1->b_datap; 17733 if ((db->db_ref != 2) || rptr < db->db_base || 17734 (!OK_32PTR(rptr))) { 17735 /* NOTE: we assume allocb returns an OK_32PTR */ 17736 mp = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 17737 tcps->tcps_wroff_xtra, BPRI_MED); 17738 if (!mp) { 17739 freemsg(mp1); 17740 goto no_memory; 17741 } 17742 mp->b_cont = mp1; 17743 mp1 = mp; 17744 /* Leave room for Link Level header */ 17745 /* hdrlen = tcp->tcp_hdr_len; */ 17746 rptr = &mp1->b_rptr[tcps->tcps_wroff_xtra]; 17747 mp1->b_wptr = &rptr[hdrlen]; 17748 } 17749 mp1->b_rptr = rptr; 17750 17751 /* Fill in the timestamp option. */ 17752 if (tcp->tcp_snd_ts_ok) { 17753 U32_TO_BE32((uint32_t)lbolt, 17754 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 17755 U32_TO_BE32(tcp->tcp_ts_recent, 17756 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 17757 } else { 17758 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 17759 } 17760 17761 /* copy header into outgoing packet */ 17762 dst = (ipaddr_t *)rptr; 17763 src = (ipaddr_t *)tcp->tcp_iphc; 17764 dst[0] = src[0]; 17765 dst[1] = src[1]; 17766 dst[2] = src[2]; 17767 dst[3] = src[3]; 17768 dst[4] = src[4]; 17769 dst[5] = src[5]; 17770 dst[6] = src[6]; 17771 dst[7] = src[7]; 17772 dst[8] = src[8]; 17773 dst[9] = src[9]; 17774 if (hdrlen -= 40) { 17775 hdrlen >>= 2; 17776 dst += 10; 17777 src += 10; 17778 do { 17779 *dst++ = *src++; 17780 } while (--hdrlen); 17781 } 17782 17783 /* 17784 * Set the ECN info in the TCP header. Note that this 17785 * is not the template header. 17786 */ 17787 if (tcp->tcp_ecn_ok) { 17788 SET_ECT(tcp, rptr); 17789 17790 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 17791 if (tcp->tcp_ecn_echo_on) 17792 tcph->th_flags[0] |= TH_ECE; 17793 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 17794 tcph->th_flags[0] |= TH_CWR; 17795 tcp->tcp_ecn_cwr_sent = B_TRUE; 17796 } 17797 } 17798 17799 if (tcp->tcp_ip_forward_progress) { 17800 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 17801 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 17802 tcp->tcp_ip_forward_progress = B_FALSE; 17803 } 17804 TCP_RECORD_TRACE(tcp, mp1, TCP_TRACE_SEND_PKT); 17805 tcp_send_data(tcp, tcp->tcp_wq, mp1); 17806 return; 17807 17808 /* 17809 * If we ran out of memory, we pretend to have sent the packet 17810 * and that it was lost on the wire. 17811 */ 17812 no_memory: 17813 return; 17814 17815 slow: 17816 /* leftover work from above */ 17817 tcp->tcp_unsent = len; 17818 tcp->tcp_xmit_tail_unsent = len; 17819 tcp_wput_data(tcp, NULL, B_FALSE); 17820 } 17821 17822 /* 17823 * The function called through squeue to get behind eager's perimeter to 17824 * finish the accept processing. 
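 *
 * In outline: drop the eager's ref on the listener, bail out
 * with a T_DISCON_IND if the eager was blown off in the
 * meantime, push the stream head options (hiwat, wroff,
 * maxblk) up the acceptor queue, then drain any data and/or
 * ordrel that was queued while the connection was detached.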
17825 */
17826 /* ARGSUSED */
17827 void
17828 tcp_accept_finish(void *arg, mblk_t *mp, void *arg2)
17829 {
17830 conn_t *connp = (conn_t *)arg;
17831 tcp_t *tcp = connp->conn_tcp;
17832 queue_t *q = tcp->tcp_rq;
17833 mblk_t *mp1;
17834 mblk_t *stropt_mp = mp;
17835 struct stroptions *stropt;
17836 uint_t thwin;
17837 tcp_stack_t *tcps = tcp->tcp_tcps;
17838
17839 /*
17840 * Drop the eager's ref on the listener, that was placed when
17841 * this eager began life in tcp_conn_request.
17842 */
17843 CONN_DEC_REF(tcp->tcp_saved_listener->tcp_connp);
17844
17845 if (tcp->tcp_state <= TCPS_BOUND || tcp->tcp_accept_error) {
17846 /*
17847 * Someone blew off the eager before we could finish
17848 * the accept.
17849 *
17850 * The only reason the eager exists is because we put in
17851 * a ref on it when the conn ind went up. We need to send
17852 * a disconnect indication up while the last reference
17853 * on the eager will be dropped by the squeue when we
17854 * return.
17855 */
17856 ASSERT(tcp->tcp_listener == NULL);
17857 if (tcp->tcp_issocket || tcp->tcp_send_discon_ind) {
17858 struct T_discon_ind *tdi;
17859
17860 (void) putnextctl1(q, M_FLUSH, FLUSHRW);
17861 /*
17862 * Let us reuse the incoming mblk to avoid memory
17863 * allocation failure problems. We know that the
17864 * size of the incoming mblk i.e. stroptions is greater
17865 * than sizeof T_discon_ind. So the reallocb below
17866 * can't fail.
17867 */
17868 freemsg(mp->b_cont);
17869 mp->b_cont = NULL;
17870 ASSERT(DB_REF(mp) == 1);
17871 mp = reallocb(mp, sizeof (struct T_discon_ind),
17872 B_FALSE);
17873 ASSERT(mp != NULL);
17874 DB_TYPE(mp) = M_PROTO;
17875 ((union T_primitives *)mp->b_rptr)->type = T_DISCON_IND;
17876 tdi = (struct T_discon_ind *)mp->b_rptr;
17877 if (tcp->tcp_issocket) {
17878 tdi->DISCON_reason = ECONNREFUSED;
17879 tdi->SEQ_number = 0;
17880 } else {
17881 tdi->DISCON_reason = ENOPROTOOPT;
17882 tdi->SEQ_number =
17883 tcp->tcp_conn_req_seqnum;
17884 }
17885 mp->b_wptr = mp->b_rptr + sizeof (struct T_discon_ind);
17886 putnext(q, mp);
17887 } else {
17888 freemsg(mp);
17889 }
17890 if (tcp->tcp_hard_binding) {
17891 tcp->tcp_hard_binding = B_FALSE;
17892 tcp->tcp_hard_bound = B_TRUE;
17893 }
17894 tcp->tcp_detached = B_FALSE;
17895 return;
17896 }
17897
17898 mp1 = stropt_mp->b_cont;
17899 stropt_mp->b_cont = NULL;
17900 ASSERT(DB_TYPE(stropt_mp) == M_SETOPTS);
17901 stropt = (struct stroptions *)stropt_mp->b_rptr;
17902
17903 while (mp1 != NULL) {
17904 mp = mp1;
17905 mp1 = mp1->b_cont;
17906 mp->b_cont = NULL;
17907 tcp->tcp_drop_opt_ack_cnt++;
17908 CALL_IP_WPUT(connp, tcp->tcp_wq, mp);
17909 }
17910 mp = NULL;
17911
17912 /*
17913 * For a loopback connection with tcp_direct_sockfs on, note that
17914 * we don't have to protect tcp_rcv_list yet because synchronous
17915 * streams has not yet been enabled and tcp_fuse_rrw() cannot
17916 * possibly race with us.
17917 */
17918
17919 /*
17920 * Set the max window size (tcp_rq->q_hiwat) of the acceptor
17921 * properly. This is the first time we know of the acceptor's
17922 * queue. So we do it here.
17923 */
17924 if (tcp->tcp_rcv_list == NULL) {
17925 /*
17926 * Recv queue is empty, tcp_rwnd should not have changed.
17927 * That means it should be equal to the listener's tcp_rwnd.
17928 */
17929 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd;
17930 } else {
17931 #ifdef DEBUG
17932 uint_t cnt = 0;
17933
17934 mp1 = tcp->tcp_rcv_list;
17935 while ((mp = mp1) != NULL) {
17936 mp1 = mp->b_next;
17937 cnt += msgdsize(mp);
17938 }
17939 ASSERT(cnt != 0 && tcp->tcp_rcv_cnt == cnt);
17940 #endif
17941 /* There is some data; add it back to get the max. */
17942 tcp->tcp_rq->q_hiwat = tcp->tcp_rwnd + tcp->tcp_rcv_cnt;
17943 }
17944
17945 stropt->so_flags = SO_HIWAT;
17946 stropt->so_hiwat = MAX(q->q_hiwat, tcps->tcps_sth_rcv_hiwat);
17947
17948 stropt->so_flags |= SO_MAXBLK;
17949 stropt->so_maxblk = tcp_maxpsz_set(tcp, B_FALSE);
17950
17951 /*
17952 * This is the first time we run on the correct
17953 * queue after tcp_accept. So fix all the q parameters
17954 * here.
17955 */
17956 /* Allocate room for SACK options if needed. */
17957 stropt->so_flags |= SO_WROFF;
17958 if (tcp->tcp_fused) {
17959 ASSERT(tcp->tcp_loopback);
17960 ASSERT(tcp->tcp_loopback_peer != NULL);
17961 /*
17962 * For fused tcp loopback, set the stream head's write
17963 * offset value to zero since we won't be needing any room
17964 * for TCP/IP headers. This would also improve performance
17965 * since it would reduce the amount of work done by kmem.
17966 * Non-fused tcp loopback case is handled separately below.
17967 */
17968 stropt->so_wroff = 0;
17969 /*
17970 * Record the stream head's high water mark for this endpoint;
17971 * this is used for flow-control purposes in tcp_fuse_output().
17972 */
17973 stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(tcp, q->q_hiwat);
17974 /*
17975 * Update the peer's transmit parameters according to
17976 * our recently calculated high water mark value.
17977 */
17978 (void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
17979 } else if (tcp->tcp_snd_sack_ok) {
17980 stropt->so_wroff = tcp->tcp_hdr_len + TCPOPT_MAX_SACK_LEN +
17981 (tcp->tcp_loopback ? 0 : tcps->tcps_wroff_xtra);
17982 } else {
17983 stropt->so_wroff = tcp->tcp_hdr_len + (tcp->tcp_loopback ? 0 :
17984 tcps->tcps_wroff_xtra);
17985 }
17986
17987 /*
17988 * If this endpoint is handling SSL, then reserve extra
17989 * offset and space at the end.
17990 * Also have the stream head allocate SSL3_MAX_RECORD_LEN packets,
17991 * overriding the previous setting. The extra cost of signing and
17992 * encrypting multiple MSS-size records (12 of them with Ethernet),
17993 * instead of a single contiguous one by the stream head
17994 * largely outweighs the statistical reduction of ACKs, when
17995 * applicable. The peer will also save on decryption and verification
17996 * costs.
17997 */
17998 if (tcp->tcp_kssl_ctx != NULL) {
17999 stropt->so_wroff += SSL3_WROFFSET;
18000
18001 stropt->so_flags |= SO_TAIL;
18002 stropt->so_tail = SSL3_MAX_TAIL_LEN;
18003
18004 stropt->so_maxblk = SSL3_MAX_RECORD_LEN;
18005 }
18006
18007 /* Send the options up */
18008 putnext(q, stropt_mp);
18009
18010 /*
18011 * Pass up any data and/or a FIN that has been received.
18012 *
18013 * Adjust receive window in case it had decreased
18014 * (because there is data <=> tcp_rcv_list != NULL)
18015 * while the connection was detached. Note that
18016 * in case the eager was flow-controlled, w/o this
18017 * code, the rwnd may never open up again!
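 *
 * The check below estimates how much window the peer still
 * believes it has (illustrative form):
 *
 *	thwin = (th_win << rcv_ws) - (rnxt - rack)
 *
 * and a window-update ACK goes out once the newly available
 * space (q_hiwat - thwin) is at least one MSS.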
18018 */
18019 if (tcp->tcp_rcv_list != NULL) {
18020 /* We drain directly in case of fused tcp loopback */
18021 if (!tcp->tcp_fused && canputnext(q)) {
18022 tcp->tcp_rwnd = q->q_hiwat;
18023 thwin = ((uint_t)BE16_TO_U16(tcp->tcp_tcph->th_win))
18024 << tcp->tcp_rcv_ws;
18025 thwin -= tcp->tcp_rnxt - tcp->tcp_rack;
18026 if (tcp->tcp_state >= TCPS_ESTABLISHED &&
18027 (q->q_hiwat - thwin >= tcp->tcp_mss)) {
18028 tcp_xmit_ctl(NULL,
18029 tcp, (tcp->tcp_swnd == 0) ?
18030 tcp->tcp_suna : tcp->tcp_snxt,
18031 tcp->tcp_rnxt, TH_ACK);
18032 BUMP_MIB(&tcps->tcps_mib, tcpOutWinUpdate);
18033 }
18034
18035 }
18036 (void) tcp_rcv_drain(q, tcp);
18037
18038 /*
18039 * For fused tcp loopback, back-enable peer endpoint
18040 * if it's currently flow-controlled.
18041 */
18042 if (tcp->tcp_fused) {
18043 tcp_t *peer_tcp = tcp->tcp_loopback_peer;
18044
18045 ASSERT(peer_tcp != NULL);
18046 ASSERT(peer_tcp->tcp_fused);
18047 /*
18048 * In order to change the peer's tcp_flow_stopped,
18049 * we need to take locks for both end points. The
18050 * highest address is taken first.
18051 */
18052 if (peer_tcp > tcp) {
18053 mutex_enter(&peer_tcp->tcp_non_sq_lock);
18054 mutex_enter(&tcp->tcp_non_sq_lock);
18055 } else {
18056 mutex_enter(&tcp->tcp_non_sq_lock);
18057 mutex_enter(&peer_tcp->tcp_non_sq_lock);
18058 }
18059 if (peer_tcp->tcp_flow_stopped) {
18060 tcp_clrqfull(peer_tcp);
18061 TCP_STAT(tcps, tcp_fusion_backenabled);
18062 }
18063 mutex_exit(&peer_tcp->tcp_non_sq_lock);
18064 mutex_exit(&tcp->tcp_non_sq_lock);
18065 }
18066 }
18067 ASSERT(tcp->tcp_rcv_list == NULL || tcp->tcp_fused_sigurg);
18068 if (tcp->tcp_fin_rcvd && !tcp->tcp_ordrel_done) {
18069 mp = mi_tpi_ordrel_ind();
18070 if (mp) {
18071 tcp->tcp_ordrel_done = B_TRUE;
18072 putnext(q, mp);
18073 if (tcp->tcp_deferred_clean_death) {
18074 /*
18075 * tcp_clean_death was deferred
18076 * for T_ORDREL_IND - do it now
18077 */
18078 (void) tcp_clean_death(tcp,
18079 tcp->tcp_client_errno, 21);
18080 tcp->tcp_deferred_clean_death = B_FALSE;
18081 }
18082 } else {
18083 /*
18084 * Run the orderly release in the
18085 * service routine.
18086 */
18087 qenable(q);
18088 }
18089 }
18090 if (tcp->tcp_hard_binding) {
18091 tcp->tcp_hard_binding = B_FALSE;
18092 tcp->tcp_hard_bound = B_TRUE;
18093 }
18094
18095 tcp->tcp_detached = B_FALSE;
18096
18097 /* We can enable synchronous streams now */
18098 if (tcp->tcp_fused) {
18099 tcp_fuse_syncstr_enable_pair(tcp);
18100 }
18101
18102 if (tcp->tcp_ka_enabled) {
18103 tcp->tcp_ka_last_intrvl = 0;
18104 tcp->tcp_ka_tid = TCP_TIMER(tcp, tcp_keepalive_killer,
18105 MSEC_TO_TICK(tcp->tcp_ka_interval));
18106 }
18107
18108 /*
18109 * At this point, the eager is fully established and will
18110 * have the following references -
18111 *
18112 * 2 references for connection to exist (1 for TCP and 1 for IP).
18113 * 1 reference for the squeue which will be dropped by the squeue as
18114 * soon as this function returns.
18115 * There will be 1 additional reference for being in classifier
18116 * hash list provided something bad hasn't happened.
18117 */
18118 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) ||
18119 (connp->conn_fanout == NULL && connp->conn_ref >= 3));
18120 }
18121
18122 /*
18123 * The function called through squeue to get behind listener's perimeter to
18124 * send a deferred conn_ind.
18125 */ 18126 /* ARGSUSED */ 18127 void 18128 tcp_send_pending(void *arg, mblk_t *mp, void *arg2) 18129 { 18130 conn_t *connp = (conn_t *)arg; 18131 tcp_t *listener = connp->conn_tcp; 18132 18133 if (listener->tcp_state == TCPS_CLOSED || 18134 TCP_IS_DETACHED(listener)) { 18135 /* 18136 * If the listener has closed, it would have caused a 18137 * cleanup/blowoff to happen for the eager. 18138 */ 18139 tcp_t *tcp; 18140 struct T_conn_ind *conn_ind; 18141 18142 conn_ind = (struct T_conn_ind *)mp->b_rptr; 18143 bcopy(mp->b_rptr + conn_ind->OPT_offset, &tcp, 18144 conn_ind->OPT_length); 18145 /* 18146 * We need to drop the ref on the eager that was put in 18147 * tcp_rput_data() before trying to send the conn_ind 18148 * to the listener. The conn_ind was deferred in tcp_send_conn_ind 18149 * and tcp_wput_accept() is sending this deferred conn_ind, but 18150 * the listener is closed, so we drop the ref. 18151 */ 18152 CONN_DEC_REF(tcp->tcp_connp); 18153 freemsg(mp); 18154 return; 18155 } 18156 putnext(listener->tcp_rq, mp); 18157 } 18158 18159 18160 /* 18161 * This is the STREAMS entry point for T_CONN_RES coming down on the 18162 * Acceptor STREAM when the sockfs listener does accept processing. 18163 * Read the block comment on top of tcp_conn_request(). 18164 */ 18165 void 18166 tcp_wput_accept(queue_t *q, mblk_t *mp) 18167 { 18168 queue_t *rq = RD(q); 18169 struct T_conn_res *conn_res; 18170 tcp_t *eager; 18171 tcp_t *listener; 18172 struct T_ok_ack *ok; 18173 t_scalar_t PRIM_type; 18174 mblk_t *opt_mp; 18175 conn_t *econnp; 18176 18177 ASSERT(DB_TYPE(mp) == M_PROTO); 18178 18179 conn_res = (struct T_conn_res *)mp->b_rptr; 18180 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX); 18181 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_res)) { 18182 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 18183 if (mp != NULL) 18184 putnext(rq, mp); 18185 return; 18186 } 18187 switch (conn_res->PRIM_type) { 18188 case O_T_CONN_RES: 18189 case T_CONN_RES: 18190 /* 18191 * We pass up an err ack if allocb fails. This will 18192 * cause sockfs to issue a T_DISCON_REQ which will cause 18193 * tcp_eager_blowoff to be called. sockfs will then call 18194 * rq->q_qinfo->qi_qclose to clean up the acceptor stream. 18195 * We need to do the allocb up here because we have to 18196 * make sure rq->q_qinfo->qi_qclose still points to the 18197 * correct function (tcpclose_accept) in case allocb 18198 * fails.
18199 */ 18200 opt_mp = allocb(sizeof (struct stroptions), BPRI_HI); 18201 if (opt_mp == NULL) { 18202 mp = mi_tpi_err_ack_alloc(mp, TPROTO, 0); 18203 if (mp != NULL) 18204 putnext(rq, mp); 18205 return; 18206 } 18207 18208 bcopy(mp->b_rptr + conn_res->OPT_offset, 18209 &eager, conn_res->OPT_length); 18210 PRIM_type = conn_res->PRIM_type; 18211 mp->b_datap->db_type = M_PCPROTO; 18212 mp->b_wptr = mp->b_rptr + sizeof (struct T_ok_ack); 18213 ok = (struct T_ok_ack *)mp->b_rptr; 18214 ok->PRIM_type = T_OK_ACK; 18215 ok->CORRECT_prim = PRIM_type; 18216 econnp = eager->tcp_connp; 18217 econnp->conn_dev = (dev_t)q->q_ptr; 18218 eager->tcp_rq = rq; 18219 eager->tcp_wq = q; 18220 rq->q_ptr = econnp; 18221 rq->q_qinfo = &tcp_rinit; 18222 q->q_ptr = econnp; 18223 q->q_qinfo = &tcp_winit; 18224 listener = eager->tcp_listener; 18225 eager->tcp_issocket = B_TRUE; 18226 18227 econnp->conn_zoneid = listener->tcp_connp->conn_zoneid; 18228 econnp->conn_allzones = listener->tcp_connp->conn_allzones; 18229 ASSERT(econnp->conn_netstack == 18230 listener->tcp_connp->conn_netstack); 18231 ASSERT(eager->tcp_tcps == listener->tcp_tcps); 18232 18233 /* Put the ref for IP */ 18234 CONN_INC_REF(econnp); 18235 18236 /* 18237 * We should have minimum of 3 references on the conn 18238 * at this point. One each for TCP and IP and one for 18239 * the T_conn_ind that was sent up when the 3-way handshake 18240 * completed. In the normal case we would also have another 18241 * reference (making a total of 4) for the conn being in the 18242 * classifier hash list. However the eager could have received 18243 * an RST subsequently and tcp_closei_local could have removed 18244 * the eager from the classifier hash list, hence we can't 18245 * assert that reference. 18246 */ 18247 ASSERT(econnp->conn_ref >= 3); 18248 18249 /* 18250 * Send the new local address also up to sockfs. There 18251 * should already be enough space in the mp that came 18252 * down from soaccept(). 18253 */ 18254 if (eager->tcp_family == AF_INET) { 18255 sin_t *sin; 18256 18257 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18258 (sizeof (struct T_ok_ack) + sizeof (sin_t))); 18259 sin = (sin_t *)mp->b_wptr; 18260 mp->b_wptr += sizeof (sin_t); 18261 sin->sin_family = AF_INET; 18262 sin->sin_port = eager->tcp_lport; 18263 sin->sin_addr.s_addr = eager->tcp_ipha->ipha_src; 18264 } else { 18265 sin6_t *sin6; 18266 18267 ASSERT((mp->b_datap->db_lim - mp->b_datap->db_base) >= 18268 sizeof (struct T_ok_ack) + sizeof (sin6_t)); 18269 sin6 = (sin6_t *)mp->b_wptr; 18270 mp->b_wptr += sizeof (sin6_t); 18271 sin6->sin6_family = AF_INET6; 18272 sin6->sin6_port = eager->tcp_lport; 18273 if (eager->tcp_ipversion == IPV4_VERSION) { 18274 sin6->sin6_flowinfo = 0; 18275 IN6_IPADDR_TO_V4MAPPED( 18276 eager->tcp_ipha->ipha_src, 18277 &sin6->sin6_addr); 18278 } else { 18279 ASSERT(eager->tcp_ip6h != NULL); 18280 sin6->sin6_flowinfo = 18281 eager->tcp_ip6h->ip6_vcf & 18282 ~IPV6_VERS_AND_FLOW_MASK; 18283 sin6->sin6_addr = eager->tcp_ip6h->ip6_src; 18284 } 18285 sin6->sin6_scope_id = 0; 18286 sin6->__sin6_src_id = 0; 18287 } 18288 18289 putnext(rq, mp); 18290 18291 opt_mp->b_datap->db_type = M_SETOPTS; 18292 opt_mp->b_wptr += sizeof (struct stroptions); 18293 18294 /* 18295 * Prepare for inheriting IPV6_BOUND_IF and IPV6_RECVPKTINFO 18296 * from listener to acceptor. The message is chained on the 18297 * bind_mp which tcp_rput_other will send down to IP. 
18298 */ 18299 if (listener->tcp_bound_if != 0) { 18300 /* allocate optmgmt req */ 18301 mp = tcp_setsockopt_mp(IPPROTO_IPV6, 18302 IPV6_BOUND_IF, (char *)&listener->tcp_bound_if, 18303 sizeof (int)); 18304 if (mp != NULL) 18305 linkb(opt_mp, mp); 18306 } 18307 if (listener->tcp_ipv6_recvancillary & TCP_IPV6_RECVPKTINFO) { 18308 uint_t on = 1; 18309 18310 /* allocate optmgmt req */ 18311 mp = tcp_setsockopt_mp(IPPROTO_IPV6, 18312 IPV6_RECVPKTINFO, (char *)&on, sizeof (on)); 18313 if (mp != NULL) 18314 linkb(opt_mp, mp); 18315 } 18316 18317 18318 mutex_enter(&listener->tcp_eager_lock); 18319 18320 if (listener->tcp_eager_prev_q0->tcp_conn_def_q0) { 18321 18322 tcp_t *tail; 18323 tcp_t *tcp; 18324 mblk_t *mp1; 18325 18326 tcp = listener->tcp_eager_prev_q0; 18327 /* 18328 * listener->tcp_eager_prev_q0 points to the TAIL of the 18329 * deferred T_conn_ind queue. We need to get to the head 18330 * of the queue in order to send up the T_conn_ind in the 18331 * same order in which the 3WHS completed. 18332 */ 18333 while (tcp != listener) { 18334 if (!tcp->tcp_eager_prev_q0->tcp_conn_def_q0 && 18335 !tcp->tcp_kssl_pending) 18336 break; 18337 else 18338 tcp = tcp->tcp_eager_prev_q0; 18339 } 18340 /* None of the pending eagers can be sent up now */ 18341 if (tcp == listener) 18342 goto no_more_eagers; 18343 18344 mp1 = tcp->tcp_conn.tcp_eager_conn_ind; 18345 tcp->tcp_conn.tcp_eager_conn_ind = NULL; 18346 /* Move from q0 to q */ 18347 ASSERT(listener->tcp_conn_req_cnt_q0 > 0); 18348 listener->tcp_conn_req_cnt_q0--; 18349 listener->tcp_conn_req_cnt_q++; 18350 tcp->tcp_eager_next_q0->tcp_eager_prev_q0 = 18351 tcp->tcp_eager_prev_q0; 18352 tcp->tcp_eager_prev_q0->tcp_eager_next_q0 = 18353 tcp->tcp_eager_next_q0; 18354 tcp->tcp_eager_prev_q0 = NULL; 18355 tcp->tcp_eager_next_q0 = NULL; 18356 tcp->tcp_conn_def_q0 = B_FALSE; 18357 18358 /* Make sure the tcp isn't in the list of droppables */ 18359 ASSERT(tcp->tcp_eager_next_drop_q0 == NULL && 18360 tcp->tcp_eager_prev_drop_q0 == NULL); 18361 18362 /* 18363 * Insert at the end of the queue because sockfs sends 18364 * down T_CONN_RES in chronological order. Leaving 18365 * the older conn indications at the front of the queue 18366 * helps reduce search time. 18367 */ 18368 tail = listener->tcp_eager_last_q; 18369 if (tail != NULL) { 18370 tail->tcp_eager_next_q = tcp; 18371 } else { 18372 listener->tcp_eager_next_q = tcp; 18373 } 18374 listener->tcp_eager_last_q = tcp; 18375 tcp->tcp_eager_next_q = NULL; 18376 18377 /* Need to get inside the listener perimeter */ 18378 CONN_INC_REF(listener->tcp_connp); 18379 squeue_fill(listener->tcp_connp->conn_sqp, mp1, 18380 tcp_send_pending, listener->tcp_connp, 18381 SQTAG_TCP_SEND_PENDING); 18382 } 18383 no_more_eagers: 18384 tcp_eager_unlink(eager); 18385 mutex_exit(&listener->tcp_eager_lock); 18386 18387 /* 18388 * At this point, the eager is detached from the listener, 18389 * but we still have an extra ref on the eager (apart from the 18390 * usual tcp references). The ref was placed in tcp_rput_data 18391 * before sending the conn_ind in tcp_send_conn_ind. 18392 * The ref will be dropped in tcp_accept_finish().
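*
* (tcp_accept_finish() re-checks this accounting once it runs on the
* eager's squeue: it asserts conn_ref >= 4 while the conn is still in
* the classifier fanout, or >= 3 otherwise.)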
18393 */ 18394 squeue_enter_nodrain(econnp->conn_sqp, opt_mp, 18395 tcp_accept_finish, econnp, SQTAG_TCP_ACCEPT_FINISH_Q0); 18396 return; 18397 default: 18398 mp = mi_tpi_err_ack_alloc(mp, TNOTSUPPORT, 0); 18399 if (mp != NULL) 18400 putnext(rq, mp); 18401 return; 18402 } 18403 } 18404 18405 void 18406 tcp_wput(queue_t *q, mblk_t *mp) 18407 { 18408 conn_t *connp = Q_TO_CONN(q); 18409 tcp_t *tcp; 18410 void (*output_proc)(); 18411 t_scalar_t type; 18412 uchar_t *rptr; 18413 struct iocblk *iocp; 18414 uint32_t msize; 18415 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 18416 18417 ASSERT(connp->conn_ref >= 2); 18418 18419 switch (DB_TYPE(mp)) { 18420 case M_DATA: 18421 tcp = connp->conn_tcp; 18422 ASSERT(tcp != NULL); 18423 18424 msize = msgdsize(mp); 18425 18426 mutex_enter(&tcp->tcp_non_sq_lock); 18427 tcp->tcp_squeue_bytes += msize; 18428 if (TCP_UNSENT_BYTES(tcp) > tcp->tcp_xmit_hiwater) { 18429 tcp_setqfull(tcp); 18430 } 18431 mutex_exit(&tcp->tcp_non_sq_lock); 18432 18433 CONN_INC_REF(connp); 18434 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18435 tcp_output, connp, SQTAG_TCP_OUTPUT); 18436 return; 18437 case M_PROTO: 18438 case M_PCPROTO: 18439 /* 18440 * if it is a snmp message, don't get behind the squeue 18441 */ 18442 tcp = connp->conn_tcp; 18443 rptr = mp->b_rptr; 18444 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 18445 type = ((union T_primitives *)rptr)->type; 18446 } else { 18447 if (tcp->tcp_debug) { 18448 (void) strlog(TCP_MOD_ID, 0, 1, 18449 SL_ERROR|SL_TRACE, 18450 "tcp_wput_proto, dropping one..."); 18451 } 18452 freemsg(mp); 18453 return; 18454 } 18455 if (type == T_SVR4_OPTMGMT_REQ) { 18456 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 18457 if (snmpcom_req(q, mp, tcp_snmp_set, tcp_snmp_get, 18458 cr)) { 18459 /* 18460 * This was a SNMP request 18461 */ 18462 return; 18463 } else { 18464 output_proc = tcp_wput_proto; 18465 } 18466 } else { 18467 output_proc = tcp_wput_proto; 18468 } 18469 break; 18470 case M_IOCTL: 18471 /* 18472 * Most ioctls can be processed right away without going via 18473 * squeues - process them right here. Those that do require 18474 * squeue (currently TCP_IOC_DEFAULT_Q and _SIOCSOCKFALLBACK) 18475 * are processed by tcp_wput_ioctl(). 18476 */ 18477 iocp = (struct iocblk *)mp->b_rptr; 18478 tcp = connp->conn_tcp; 18479 18480 switch (iocp->ioc_cmd) { 18481 case TCP_IOC_ABORT_CONN: 18482 tcp_ioctl_abort_conn(q, mp); 18483 return; 18484 case TI_GETPEERNAME: 18485 if (tcp->tcp_state < TCPS_SYN_RCVD) { 18486 iocp->ioc_error = ENOTCONN; 18487 iocp->ioc_count = 0; 18488 mp->b_datap->db_type = M_IOCACK; 18489 qreply(q, mp); 18490 return; 18491 } 18492 /* FALLTHRU */ 18493 case TI_GETMYNAME: 18494 mi_copyin(q, mp, NULL, 18495 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 18496 return; 18497 case ND_SET: 18498 /* nd_getset does the necessary checks */ 18499 case ND_GET: 18500 if (!nd_getset(q, tcps->tcps_g_nd, mp)) { 18501 CALL_IP_WPUT(connp, q, mp); 18502 return; 18503 } 18504 qreply(q, mp); 18505 return; 18506 case TCP_IOC_DEFAULT_Q: 18507 /* 18508 * Wants to be the default wq. Check the credentials 18509 * first, the rest is executed via squeue. 
18510 */ 18511 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 18512 iocp->ioc_error = EPERM; 18513 iocp->ioc_count = 0; 18514 mp->b_datap->db_type = M_IOCACK; 18515 qreply(q, mp); 18516 return; 18517 } 18518 output_proc = tcp_wput_ioctl; 18519 break; 18520 default: 18521 output_proc = tcp_wput_ioctl; 18522 break; 18523 } 18524 break; 18525 default: 18526 output_proc = tcp_wput_nondata; 18527 break; 18528 } 18529 18530 CONN_INC_REF(connp); 18531 (*tcp_squeue_wput_proc)(connp->conn_sqp, mp, 18532 output_proc, connp, SQTAG_TCP_WPUT_OTHER); 18533 } 18534 18535 /* 18536 * Initial STREAMS write side put() procedure for sockets. It tries to 18537 * handle the T_CAPABILITY_REQ which sockfs sends down while setting 18538 * up the socket without using the squeue. Non T_CAPABILITY_REQ messages 18539 * are handled by tcp_wput() as usual. 18540 * 18541 * All further messages will also be handled by tcp_wput() because we cannot 18542 * be sure that the above short cut is safe later. 18543 */ 18544 static void 18545 tcp_wput_sock(queue_t *wq, mblk_t *mp) 18546 { 18547 conn_t *connp = Q_TO_CONN(wq); 18548 tcp_t *tcp = connp->conn_tcp; 18549 struct T_capability_req *car = (struct T_capability_req *)mp->b_rptr; 18550 18551 ASSERT(wq->q_qinfo == &tcp_sock_winit); 18552 wq->q_qinfo = &tcp_winit; 18553 18554 ASSERT(IPCL_IS_TCP(connp)); 18555 ASSERT(TCP_IS_SOCKET(tcp)); 18556 18557 if (DB_TYPE(mp) == M_PCPROTO && 18558 MBLKL(mp) == sizeof (struct T_capability_req) && 18559 car->PRIM_type == T_CAPABILITY_REQ) { 18560 tcp_capability_req(tcp, mp); 18561 return; 18562 } 18563 18564 tcp_wput(wq, mp); 18565 } 18566 18567 static boolean_t 18568 tcp_zcopy_check(tcp_t *tcp) 18569 { 18570 conn_t *connp = tcp->tcp_connp; 18571 ire_t *ire; 18572 boolean_t zc_enabled = B_FALSE; 18573 tcp_stack_t *tcps = tcp->tcp_tcps; 18574 18575 if (do_tcpzcopy == 2) 18576 zc_enabled = B_TRUE; 18577 else if (tcp->tcp_ipversion == IPV4_VERSION && 18578 IPCL_IS_CONNECTED(connp) && 18579 (connp->conn_flags & IPCL_CHECK_POLICY) == 0 && 18580 connp->conn_dontroute == 0 && 18581 !connp->conn_nexthop_set && 18582 connp->conn_xmit_if_ill == NULL && 18583 connp->conn_nofailover_ill == NULL && 18584 do_tcpzcopy == 1) { 18585 /* 18586 * the checks above closely resemble the fast path checks 18587 * in tcp_send_data(). 
18588 */ 18589 mutex_enter(&connp->conn_lock); 18590 ire = connp->conn_ire_cache; 18591 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18592 if (ire != NULL && !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18593 IRE_REFHOLD(ire); 18594 if (ire->ire_stq != NULL) { 18595 ill_t *ill = (ill_t *)ire->ire_stq->q_ptr; 18596 18597 zc_enabled = ill && (ill->ill_capabilities & 18598 ILL_CAPAB_ZEROCOPY) && 18599 (ill->ill_zerocopy_capab-> 18600 ill_zerocopy_flags != 0); 18601 } 18602 IRE_REFRELE(ire); 18603 } 18604 mutex_exit(&connp->conn_lock); 18605 } 18606 tcp->tcp_snd_zcopy_on = zc_enabled; 18607 if (!TCP_IS_DETACHED(tcp)) { 18608 if (zc_enabled) { 18609 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMSAFE); 18610 TCP_STAT(tcps, tcp_zcopy_on); 18611 } else { 18612 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18613 TCP_STAT(tcps, tcp_zcopy_off); 18614 } 18615 } 18616 return (zc_enabled); 18617 } 18618 18619 static mblk_t * 18620 tcp_zcopy_disable(tcp_t *tcp, mblk_t *bp) 18621 { 18622 tcp_stack_t *tcps = tcp->tcp_tcps; 18623 18624 if (do_tcpzcopy == 2) 18625 return (bp); 18626 else if (tcp->tcp_snd_zcopy_on) { 18627 tcp->tcp_snd_zcopy_on = B_FALSE; 18628 if (!TCP_IS_DETACHED(tcp)) { 18629 (void) mi_set_sth_copyopt(tcp->tcp_rq, ZCVMUNSAFE); 18630 TCP_STAT(tcps, tcp_zcopy_disable); 18631 } 18632 } 18633 return (tcp_zcopy_backoff(tcp, bp, 0)); 18634 } 18635 18636 /* 18637 * Backoff from a zero-copy mblk by copying data to a new mblk and freeing 18638 * the original desballoca'ed segmapped mblk. 18639 */ 18640 static mblk_t * 18641 tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, int fix_xmitlist) 18642 { 18643 mblk_t *head, *tail, *nbp; 18644 tcp_stack_t *tcps = tcp->tcp_tcps; 18645 18646 if (IS_VMLOANED_MBLK(bp)) { 18647 TCP_STAT(tcps, tcp_zcopy_backoff); 18648 if ((head = copyb(bp)) == NULL) { 18649 /* fail to backoff; leave it for the next backoff */ 18650 tcp->tcp_xmit_zc_clean = B_FALSE; 18651 return (bp); 18652 } 18653 if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18654 if (fix_xmitlist) 18655 tcp_zcopy_notify(tcp); 18656 else 18657 head->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 18658 } 18659 nbp = bp->b_cont; 18660 if (fix_xmitlist) { 18661 head->b_prev = bp->b_prev; 18662 head->b_next = bp->b_next; 18663 if (tcp->tcp_xmit_tail == bp) 18664 tcp->tcp_xmit_tail = head; 18665 } 18666 bp->b_next = NULL; 18667 bp->b_prev = NULL; 18668 freeb(bp); 18669 } else { 18670 head = bp; 18671 nbp = bp->b_cont; 18672 } 18673 tail = head; 18674 while (nbp) { 18675 if (IS_VMLOANED_MBLK(nbp)) { 18676 TCP_STAT(tcps, tcp_zcopy_backoff); 18677 if ((tail->b_cont = copyb(nbp)) == NULL) { 18678 tcp->tcp_xmit_zc_clean = B_FALSE; 18679 tail->b_cont = nbp; 18680 return (head); 18681 } 18682 tail = tail->b_cont; 18683 if (nbp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) { 18684 if (fix_xmitlist) 18685 tcp_zcopy_notify(tcp); 18686 else 18687 tail->b_datap->db_struioflag |= 18688 STRUIO_ZCNOTIFY; 18689 } 18690 bp = nbp; 18691 nbp = nbp->b_cont; 18692 if (fix_xmitlist) { 18693 tail->b_prev = bp->b_prev; 18694 tail->b_next = bp->b_next; 18695 if (tcp->tcp_xmit_tail == bp) 18696 tcp->tcp_xmit_tail = tail; 18697 } 18698 bp->b_next = NULL; 18699 bp->b_prev = NULL; 18700 freeb(bp); 18701 } else { 18702 tail->b_cont = nbp; 18703 tail = nbp; 18704 nbp = nbp->b_cont; 18705 } 18706 } 18707 if (fix_xmitlist) { 18708 tcp->tcp_xmit_last = tail; 18709 tcp->tcp_xmit_zc_clean = B_TRUE; 18710 } 18711 return (head); 18712 } 18713 18714 static void 18715 tcp_zcopy_notify(tcp_t *tcp) 18716 { 18717 struct stdata *stp; 18718 18719 if 
(tcp->tcp_detached) 18720 return; 18721 stp = STREAM(tcp->tcp_rq); 18722 mutex_enter(&stp->sd_lock); 18723 stp->sd_flag |= STZCNOTIFY; 18724 cv_broadcast(&stp->sd_zcopy_wait); 18725 mutex_exit(&stp->sd_lock); 18726 } 18727 18728 static boolean_t 18729 tcp_send_find_ire(tcp_t *tcp, ipaddr_t *dst, ire_t **irep) 18730 { 18731 ire_t *ire; 18732 conn_t *connp = tcp->tcp_connp; 18733 tcp_stack_t *tcps = tcp->tcp_tcps; 18734 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 18735 18736 mutex_enter(&connp->conn_lock); 18737 ire = connp->conn_ire_cache; 18738 ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT)); 18739 18740 if ((ire != NULL) && 18741 (((dst != NULL) && (ire->ire_addr == *dst)) || ((dst == NULL) && 18742 IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, &tcp->tcp_ip6h->ip6_dst))) && 18743 !(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18744 IRE_REFHOLD(ire); 18745 mutex_exit(&connp->conn_lock); 18746 } else { 18747 boolean_t cached = B_FALSE; 18748 ts_label_t *tsl; 18749 18750 /* force a recheck later on */ 18751 tcp->tcp_ire_ill_check_done = B_FALSE; 18752 18753 TCP_DBGSTAT(tcps, tcp_ire_null1); 18754 connp->conn_ire_cache = NULL; 18755 mutex_exit(&connp->conn_lock); 18756 18757 if (ire != NULL) 18758 IRE_REFRELE_NOTR(ire); 18759 18760 tsl = crgetlabel(CONN_CRED(connp)); 18761 ire = (dst ? 18762 ire_cache_lookup(*dst, connp->conn_zoneid, tsl, ipst) : 18763 ire_cache_lookup_v6(&tcp->tcp_ip6h->ip6_dst, 18764 connp->conn_zoneid, tsl, ipst)); 18765 18766 if (ire == NULL) { 18767 TCP_STAT(tcps, tcp_ire_null); 18768 return (B_FALSE); 18769 } 18770 18771 IRE_REFHOLD_NOTR(ire); 18772 /* 18773 * Since we are inside the squeue, there cannot be another 18774 * thread in TCP trying to set the conn_ire_cache now. The 18775 * check for IRE_MARK_CONDEMNED ensures that an interface 18776 * unplumb thread has not yet started cleaning up the conns. 18777 * Hence we don't need to grab the conn lock. 18778 */ 18779 if (CONN_CACHE_IRE(connp)) { 18780 rw_enter(&ire->ire_bucket->irb_lock, RW_READER); 18781 if (!(ire->ire_marks & IRE_MARK_CONDEMNED)) { 18782 TCP_CHECK_IREINFO(tcp, ire); 18783 connp->conn_ire_cache = ire; 18784 cached = B_TRUE; 18785 } 18786 rw_exit(&ire->ire_bucket->irb_lock); 18787 } 18788 18789 /* 18790 * We can continue to use the ire but since it was 18791 * not cached, we should drop the extra reference. 18792 */ 18793 if (!cached) 18794 IRE_REFRELE_NOTR(ire); 18795 18796 /* 18797 * Rampart note: no need to select a new label here, since 18798 * labels are not allowed to change during the life of a TCP 18799 * connection. 18800 */ 18801 } 18802 18803 *irep = ire; 18804 18805 return (B_TRUE); 18806 } 18807 18808 /* 18809 * Called from tcp_send() or tcp_send_data() to find a workable IRE. 18810 * 18811 * B_TRUE = success; 18812 * B_FALSE = failed to find a usable ire and ill.
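*
* A typical call pattern, sketched after the way tcp_send_data()
* below uses this helper (illustration only, not extra functionality):
*
*	ire_t	*ire;
*	ill_t	*ill;
*
*	if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) {
*		CALL_IP_WPUT(connp, q, mp);	(fall back to slow path)
*		return;
*	}
*	... fast-path transmit using ire and ill ...
*	IRE_REFRELE(ire);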
18813 */ 18814 static boolean_t 18815 tcp_send_find_ire_ill(tcp_t *tcp, mblk_t *mp, ire_t **irep, ill_t **illp) 18816 { 18817 ipha_t *ipha; 18818 ipaddr_t dst; 18819 ire_t *ire; 18820 ill_t *ill; 18821 conn_t *connp = tcp->tcp_connp; 18822 mblk_t *ire_fp_mp; 18823 tcp_stack_t *tcps = tcp->tcp_tcps; 18824 18825 if (mp != NULL) 18826 ipha = (ipha_t *)mp->b_rptr; 18827 else 18828 ipha = tcp->tcp_ipha; 18829 dst = ipha->ipha_dst; 18830 18831 if (!tcp_send_find_ire(tcp, &dst, &ire)) 18832 return (B_FALSE); 18833 18834 if ((ire->ire_flags & RTF_MULTIRT) || 18835 (ire->ire_stq == NULL) || 18836 (ire->ire_nce == NULL) || 18837 ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL) || 18838 ((mp != NULL) && (ire->ire_max_frag < ntohs(ipha->ipha_length) || 18839 MBLKL(ire_fp_mp) > MBLKHEAD(mp)))) { 18840 TCP_STAT(tcps, tcp_ip_ire_send); 18841 IRE_REFRELE(ire); 18842 return (B_FALSE); 18843 } 18844 18845 ill = ire_to_ill(ire); 18846 if (connp->conn_outgoing_ill != NULL) { 18847 ill_t *conn_outgoing_ill = NULL; 18848 /* 18849 * Choose a good ill in the group to send the packets on. 18850 */ 18851 ire = conn_set_outgoing_ill(connp, ire, &conn_outgoing_ill); 18852 ill = ire_to_ill(ire); 18853 } 18854 ASSERT(ill != NULL); 18855 18856 if (!tcp->tcp_ire_ill_check_done) { 18857 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 18858 tcp->tcp_ire_ill_check_done = B_TRUE; 18859 } 18860 18861 *irep = ire; 18862 *illp = ill; 18863 18864 return (B_TRUE); 18865 } 18866 18867 static void 18868 tcp_send_data(tcp_t *tcp, queue_t *q, mblk_t *mp) 18869 { 18870 ipha_t *ipha; 18871 ipaddr_t src; 18872 ipaddr_t dst; 18873 uint32_t cksum; 18874 ire_t *ire; 18875 uint16_t *up; 18876 ill_t *ill; 18877 conn_t *connp = tcp->tcp_connp; 18878 uint32_t hcksum_txflags = 0; 18879 mblk_t *ire_fp_mp; 18880 uint_t ire_fp_mp_len; 18881 tcp_stack_t *tcps = tcp->tcp_tcps; 18882 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 18883 18884 ASSERT(DB_TYPE(mp) == M_DATA); 18885 18886 if (DB_CRED(mp) == NULL) 18887 mblk_setcred(mp, CONN_CRED(connp)); 18888 18889 ipha = (ipha_t *)mp->b_rptr; 18890 src = ipha->ipha_src; 18891 dst = ipha->ipha_dst; 18892 18893 /* 18894 * Drop off fast path for IPv6 and also if options are present or 18895 * we need to resolve a TS label. 
18896 */ 18897 if (tcp->tcp_ipversion != IPV4_VERSION || 18898 !IPCL_IS_CONNECTED(connp) || 18899 !CONN_IS_LSO_MD_FASTPATH(connp) || 18900 (connp->conn_flags & IPCL_CHECK_POLICY) != 0 || 18901 !connp->conn_ulp_labeled || 18902 ipha->ipha_ident == IP_HDR_INCLUDED || 18903 ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION || 18904 IPP_ENABLED(IPP_LOCAL_OUT, ipst)) { 18905 if (tcp->tcp_snd_zcopy_aware) 18906 mp = tcp_zcopy_disable(tcp, mp); 18907 TCP_STAT(tcps, tcp_ip_send); 18908 CALL_IP_WPUT(connp, q, mp); 18909 return; 18910 } 18911 18912 if (!tcp_send_find_ire_ill(tcp, mp, &ire, &ill)) { 18913 if (tcp->tcp_snd_zcopy_aware) 18914 mp = tcp_zcopy_backoff(tcp, mp, 0); 18915 CALL_IP_WPUT(connp, q, mp); 18916 return; 18917 } 18918 ire_fp_mp = ire->ire_nce->nce_fp_mp; 18919 ire_fp_mp_len = MBLKL(ire_fp_mp); 18920 18921 ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED); 18922 ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1); 18923 #ifndef _BIG_ENDIAN 18924 ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8); 18925 #endif 18926 18927 /* 18928 * Check to see if we need to re-enable LSO/MDT for this connection 18929 * because it was previously disabled due to changes in the ill; 18930 * note that by doing it here, this re-enabling only applies when 18931 * the packet is not dispatched through CALL_IP_WPUT(). 18932 * 18933 * That means for IPv4, it is worth re-enabling LSO/MDT for the fastpath 18934 * case, since that's how we ended up here. For IPv6, we do the 18935 * re-enabling work in ip_xmit_v6(), albeit indirectly via squeue. 18936 */ 18937 if (connp->conn_lso_ok && !tcp->tcp_lso && ILL_LSO_TCP_USABLE(ill)) { 18938 /* 18939 * Restore LSO for this connection, so that next time around 18940 * it is eligible to go through tcp_lsosend() path again. 18941 */ 18942 TCP_STAT(tcps, tcp_lso_enabled); 18943 tcp->tcp_lso = B_TRUE; 18944 ip1dbg(("tcp_send_data: reenabling LSO for connp %p on " 18945 "interface %s\n", (void *)connp, ill->ill_name)); 18946 } else if (connp->conn_mdt_ok && !tcp->tcp_mdt && ILL_MDT_USABLE(ill)) { 18947 /* 18948 * Restore MDT for this connection, so that next time around 18949 * it is eligible to go through tcp_multisend() path again. 18950 */ 18951 TCP_STAT(tcps, tcp_mdt_conn_resumed1); 18952 tcp->tcp_mdt = B_TRUE; 18953 ip1dbg(("tcp_send_data: reenabling MDT for connp %p on " 18954 "interface %s\n", (void *)connp, ill->ill_name)); 18955 } 18956 18957 if (tcp->tcp_snd_zcopy_aware) { 18958 if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 || 18959 (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0)) 18960 mp = tcp_zcopy_disable(tcp, mp); 18961 /* 18962 * we shouldn't need to reset ipha as the mp containing 18963 * ipha should never be a zero-copy mp. 18964 */ 18965 } 18966 18967 if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) { 18968 ASSERT(ill->ill_hcksum_capab != NULL); 18969 hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags; 18970 } 18971 18972 /* pseudo-header checksum (do it in parts for IP header checksum) */ 18973 cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF); 18974 18975 ASSERT(ipha->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION); 18976 up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH); 18977 18978 IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up, 18979 IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum); 18980 18981 /* Software checksum? 
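If no hardware-offload flag was set by IP_CKSUM_XMIT_FAST() above, the checksum was just computed in software; account for it in the stats below.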
*/ 18982 if (DB_CKSUMFLAGS(mp) == 0) { 18983 TCP_STAT(tcps, tcp_out_sw_cksum); 18984 TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes, 18985 ntohs(ipha->ipha_length) - IP_SIMPLE_HDR_LENGTH); 18986 } 18987 18988 ipha->ipha_fragment_offset_and_flags |= 18989 (uint32_t)htons(ire->ire_frag_flag); 18990 18991 /* Calculate the IP header checksum if hardware isn't capable */ 18992 if (!(DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM)) { 18993 IP_HDR_CKSUM(ipha, cksum, ((uint32_t *)ipha)[0], 18994 ((uint16_t *)ipha)[4]); 18995 } 18996 18997 ASSERT(DB_TYPE(ire_fp_mp) == M_DATA); 18998 mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len; 18999 bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len); 19000 19001 UPDATE_OB_PKT_COUNT(ire); 19002 ire->ire_last_used_time = lbolt; 19003 19004 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests); 19005 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits); 19006 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets, 19007 ntohs(ipha->ipha_length)); 19008 19009 if (ILL_DLS_CAPABLE(ill)) { 19010 /* 19011 * Send the packet directly to DLD, where it may be queued 19012 * depending on the availability of transmit resources at 19013 * the media layer. 19014 */ 19015 IP_DLS_ILL_TX(ill, ipha, mp, ipst); 19016 } else { 19017 ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr; 19018 DTRACE_PROBE4(ip4__physical__out__start, 19019 ill_t *, NULL, ill_t *, out_ill, 19020 ipha_t *, ipha, mblk_t *, mp); 19021 FW_HOOKS(ipst->ips_ip4_physical_out_event, 19022 ipst->ips_ipv4firewall_physical_out, 19023 NULL, out_ill, ipha, mp, mp, ipst); 19024 DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp); 19025 if (mp != NULL) 19026 putnext(ire->ire_stq, mp); 19027 } 19028 IRE_REFRELE(ire); 19029 } 19030 19031 /* 19032 * This handles the case when the receiver has shrunk its window. Per RFC 1122, 19033 * if the receiver shrinks the window, i.e. moves the right window edge to the 19034 * left, we should not send new data, but should retransmit normally the old 19035 * unacked data between suna and suna + swnd. We might have sent data that is 19036 * now outside the new window; pretend that we didn't send it. 19037 */ 19038 static void 19039 tcp_process_shrunk_swnd(tcp_t *tcp, uint32_t shrunk_count) 19040 { 19041 uint32_t snxt = tcp->tcp_snxt; 19042 mblk_t *xmit_tail; 19043 int32_t offset; 19044 19045 ASSERT(shrunk_count > 0); 19046 19047 /* Pretend we didn't send the data outside the window */ 19048 snxt -= shrunk_count; 19049 19050 /* Get the mblk and the offset in it per the shrunk window */ 19051 xmit_tail = tcp_get_seg_mp(tcp, snxt, &offset); 19052 19053 ASSERT(xmit_tail != NULL); 19054 19055 /* Reset all the values per the now shrunk window */ 19056 tcp->tcp_snxt = snxt; 19057 tcp->tcp_xmit_tail = xmit_tail; 19058 tcp->tcp_xmit_tail_unsent = xmit_tail->b_wptr - xmit_tail->b_rptr - 19059 offset; 19060 tcp->tcp_unsent += shrunk_count; 19061 19062 if (tcp->tcp_suna == tcp->tcp_snxt && tcp->tcp_swnd == 0) 19063 /* 19064 * Make sure the timer is running so that we will probe a zero 19065 * window. 19066 */ 19067 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19068 } 19069 19070 19071 /* 19072 * The TCP normal data output path. 19073 * NOTE: the logic of the fast path is duplicated from this function.
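*
* In outline, the code below: strips zero-length mblks, appends the
* new data to the tcp_xmit_head list (coalescing tiny writes into the
* tail mblk when there is room), computes the usable send window
* (applying Nagle, TCP_CORK and shrunk-window handling), and finally
* hands the segments to tcp_send() or, when MDT is eligible,
* tcp_multisend().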
19074 */ 19075 static void 19076 tcp_wput_data(tcp_t *tcp, mblk_t *mp, boolean_t urgent) 19077 { 19078 int len; 19079 mblk_t *local_time; 19080 mblk_t *mp1; 19081 uint32_t snxt; 19082 int tail_unsent; 19083 int tcpstate; 19084 int usable = 0; 19085 mblk_t *xmit_tail; 19086 queue_t *q = tcp->tcp_wq; 19087 int32_t mss; 19088 int32_t num_sack_blk = 0; 19089 int32_t tcp_hdr_len; 19090 int32_t tcp_tcp_hdr_len; 19091 int mdt_thres; 19092 int rc; 19093 tcp_stack_t *tcps = tcp->tcp_tcps; 19094 ip_stack_t *ipst; 19095 19096 tcpstate = tcp->tcp_state; 19097 if (mp == NULL) { 19098 /* 19099 * tcp_wput_data() with NULL mp should only be called when 19100 * there is unsent data. 19101 */ 19102 ASSERT(tcp->tcp_unsent > 0); 19103 /* Really tacky... but we need this for detached closes. */ 19104 len = tcp->tcp_unsent; 19105 goto data_null; 19106 } 19107 19108 #if CCS_STATS 19109 wrw_stats.tot.count++; 19110 wrw_stats.tot.bytes += msgdsize(mp); 19111 #endif 19112 ASSERT(mp->b_datap->db_type == M_DATA); 19113 /* 19114 * Don't allow data after T_ORDREL_REQ or T_DISCON_REQ, 19115 * or before a connection attempt has begun. 19116 */ 19117 if (tcpstate < TCPS_SYN_SENT || tcpstate > TCPS_CLOSE_WAIT || 19118 (tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19119 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) != 0) { 19120 #ifdef DEBUG 19121 cmn_err(CE_WARN, 19122 "tcp_wput_data: data after ordrel, %s", 19123 tcp_display(tcp, NULL, 19124 DISP_ADDR_AND_PORT)); 19125 #else 19126 if (tcp->tcp_debug) { 19127 (void) strlog(TCP_MOD_ID, 0, 1, 19128 SL_TRACE|SL_ERROR, 19129 "tcp_wput_data: data after ordrel, %s\n", 19130 tcp_display(tcp, NULL, 19131 DISP_ADDR_AND_PORT)); 19132 } 19133 #endif /* DEBUG */ 19134 } 19135 if (tcp->tcp_snd_zcopy_aware && 19136 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) != 0) 19137 tcp_zcopy_notify(tcp); 19138 freemsg(mp); 19139 mutex_enter(&tcp->tcp_non_sq_lock); 19140 if (tcp->tcp_flow_stopped && 19141 TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19142 tcp_clrqfull(tcp); 19143 } 19144 mutex_exit(&tcp->tcp_non_sq_lock); 19145 return; 19146 } 19147 19148 /* Strip empties */ 19149 for (;;) { 19150 ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= 19151 (uintptr_t)INT_MAX); 19152 len = (int)(mp->b_wptr - mp->b_rptr); 19153 if (len > 0) 19154 break; 19155 mp1 = mp; 19156 mp = mp->b_cont; 19157 freeb(mp1); 19158 if (!mp) { 19159 return; 19160 } 19161 } 19162 19163 /* If we are the first on the list ... */ 19164 if (tcp->tcp_xmit_head == NULL) { 19165 tcp->tcp_xmit_head = mp; 19166 tcp->tcp_xmit_tail = mp; 19167 tcp->tcp_xmit_tail_unsent = len; 19168 } else { 19169 /* If tiny tx and room in txq tail, pullup to save mblks. 
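For example, a 1-byte write is copied into the existing tail mblk, provided its dblk is unshared (db_ref == 1) and there is room before db_lim, rather than chaining on a new mblk.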
*/ 19170 struct datab *dp; 19171 19172 mp1 = tcp->tcp_xmit_last; 19173 if (len < tcp_tx_pull_len && 19174 (dp = mp1->b_datap)->db_ref == 1 && 19175 dp->db_lim - mp1->b_wptr >= len) { 19176 ASSERT(len > 0); 19177 ASSERT(!mp1->b_cont); 19178 if (len == 1) { 19179 *mp1->b_wptr++ = *mp->b_rptr; 19180 } else { 19181 bcopy(mp->b_rptr, mp1->b_wptr, len); 19182 mp1->b_wptr += len; 19183 } 19184 if (mp1 == tcp->tcp_xmit_tail) 19185 tcp->tcp_xmit_tail_unsent += len; 19186 mp1->b_cont = mp->b_cont; 19187 if (tcp->tcp_snd_zcopy_aware && 19188 (mp->b_datap->db_struioflag & STRUIO_ZCNOTIFY)) 19189 mp1->b_datap->db_struioflag |= STRUIO_ZCNOTIFY; 19190 freeb(mp); 19191 mp = mp1; 19192 } else { 19193 tcp->tcp_xmit_last->b_cont = mp; 19194 } 19195 len += tcp->tcp_unsent; 19196 } 19197 19198 /* Tack on however many more positive length mblks we have */ 19199 if ((mp1 = mp->b_cont) != NULL) { 19200 do { 19201 int tlen; 19202 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 19203 (uintptr_t)INT_MAX); 19204 tlen = (int)(mp1->b_wptr - mp1->b_rptr); 19205 if (tlen <= 0) { 19206 mp->b_cont = mp1->b_cont; 19207 freeb(mp1); 19208 } else { 19209 len += tlen; 19210 mp = mp1; 19211 } 19212 } while ((mp1 = mp->b_cont) != NULL); 19213 } 19214 tcp->tcp_xmit_last = mp; 19215 tcp->tcp_unsent = len; 19216 19217 if (urgent) 19218 usable = 1; 19219 19220 data_null: 19221 snxt = tcp->tcp_snxt; 19222 xmit_tail = tcp->tcp_xmit_tail; 19223 tail_unsent = tcp->tcp_xmit_tail_unsent; 19224 19225 /* 19226 * Note that tcp_mss has been adjusted to take into account the 19227 * timestamp option if applicable. Because SACK options do not 19228 * appear in every TCP segment and they are of variable length, 19229 * they cannot be included in tcp_mss. Thus we need to calculate 19230 * the actual segment length when we need to send a segment which 19231 * includes SACK options. 19232 */ 19233 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 19234 int32_t opt_len; 19235 19236 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 19237 tcp->tcp_num_sack_blk); 19238 opt_len = num_sack_blk * sizeof (sack_blk_t) + TCPOPT_NOP_LEN * 19239 2 + TCPOPT_HEADER_LEN; 19240 mss = tcp->tcp_mss - opt_len; 19241 tcp_hdr_len = tcp->tcp_hdr_len + opt_len; 19242 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + opt_len; 19243 } else { 19244 mss = tcp->tcp_mss; 19245 tcp_hdr_len = tcp->tcp_hdr_len; 19246 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 19247 } 19248 19249 if ((tcp->tcp_suna == snxt) && !tcp->tcp_localnet && 19250 (TICK_TO_MSEC(lbolt - tcp->tcp_last_recv_time) >= tcp->tcp_rto)) { 19251 SET_TCP_INIT_CWND(tcp, mss, tcps->tcps_slow_start_after_idle); 19252 } 19253 if (tcpstate == TCPS_SYN_RCVD) { 19254 /* 19255 * The three-way connection establishment handshake is not 19256 * complete yet. We want to queue the data for transmission 19257 * after entering ESTABLISHED state (RFC793). A jump to the 19258 * "done" label effectively leaves data on the queue. 19259 */ 19260 goto done; 19261 } else { 19262 int usable_r; 19263 19264 /* 19265 * In the special case when cwnd is zero, which can only 19266 * happen if the connection is ECN capable, return now. 19267 * New segments are sent using tcp_timer(). The timer 19268 * is set in tcp_rput_data(). 19269 */ 19270 if (tcp->tcp_cwnd == 0) { 19271 /* 19272 * Note that tcp_cwnd is 0 before the 3-way handshake 19273 * is finished. 19274 */ 19275 ASSERT(tcp->tcp_ecn_ok || 19276 tcp->tcp_state < TCPS_ESTABLISHED); 19277 return; 19278 } 19279 19280 /* NOTE: trouble if xmitting while SYN not acked?
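The computation that follows is usable = MIN(swnd, cwnd) - (snxt - suna), clamped by the amount of unsent data; e.g., with hypothetical numbers, swnd = 8760 and 2920 bytes in flight leave usable_r = 5840 before the cwnd and unsent-data clamps.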
*/ 19281 usable_r = snxt - tcp->tcp_suna; 19282 usable_r = tcp->tcp_swnd - usable_r; 19283 19284 /* 19285 * Check if the receiver has shrunk the window. If 19286 * tcp_wput_data() with NULL mp is called, tcp_fin_sent 19287 * cannot be set as there is unsent data, so FIN cannot 19288 * be sent out. Otherwise, we need to take into account 19289 * the FIN as it consumes an "invisible" sequence number. 19290 */ 19291 ASSERT(tcp->tcp_fin_sent == 0); 19292 if (usable_r < 0) { 19293 /* 19294 * The receiver has shrunk the window and we have sent 19295 * -usable_r bytes of data beyond the window; re-adjust. 19296 * 19297 * If TCP window scaling is enabled, there can be 19298 * round-down error as the advertised receive window 19299 * is actually right shifted n bits. This means that 19300 * the lower n bits of info are wiped out. It will look 19301 * like the window has shrunk. Do a check here to 19302 * see if the shrunk amount is actually within the 19303 * error in the window calculation. If it is, just 19304 * return. Note that this check is inside the 19305 * shrunk window check. This makes sure that even 19306 * though tcp_process_shrunk_swnd() is not called, 19307 * we will stop further processing. 19308 */ 19309 if ((-usable_r >> tcp->tcp_snd_ws) > 0) { 19310 tcp_process_shrunk_swnd(tcp, -usable_r); 19311 } 19312 return; 19313 } 19314 19315 /* usable = MIN(swnd, cwnd) - unacked_bytes */ 19316 if (tcp->tcp_swnd > tcp->tcp_cwnd) 19317 usable_r -= tcp->tcp_swnd - tcp->tcp_cwnd; 19318 19319 /* usable = MIN(usable, unsent) */ 19320 if (usable_r > len) 19321 usable_r = len; 19322 19323 /* usable = MAX(usable, {1 for urgent, 0 for data}) */ 19324 if (usable_r > 0) { 19325 usable = usable_r; 19326 } else { 19327 /* Bypass all other unnecessary processing. */ 19328 goto done; 19329 } 19330 } 19331 19332 local_time = (mblk_t *)lbolt; 19333 19334 /* 19335 * "Our" Nagle Algorithm. This is not the same as in the old 19336 * BSD. This is more in line with the true intent of Nagle. 19337 * 19338 * The conditions are: 19339 * 1. The amount of unsent data (or the amount of data which can be 19340 * sent, whichever is smaller) is less than the Nagle limit. 19341 * 2. The last sent size is also less than the Nagle limit. 19342 * 3. There is unack'ed data. 19343 * 4. The urgent pointer is not set. Urgent data is sent ignoring 19344 * the Nagle algorithm. This reduces the probability that urgent 19345 * bytes get "merged" together. 19346 * 5. The app has not closed the connection. This eliminates the 19347 * wait time of the receiving side waiting for the last piece of 19348 * (small) data. 19349 * 19350 * If all are satisfied, exit without sending anything. Note 19351 * that the Nagle limit can be smaller than 1 MSS. The Nagle limit 19352 * is the smaller of 1 MSS and the global tcp_naglim_def (which 19353 * defaults to 4095). 19354 */ 19355 if (usable < (int)tcp->tcp_naglim && 19356 tcp->tcp_naglim > tcp->tcp_last_sent_len && 19357 snxt != tcp->tcp_suna && 19358 !(tcp->tcp_valid_bits & TCP_URG_VALID) && 19359 !(tcp->tcp_valid_bits & TCP_FSS_VALID)) { 19360 goto done; 19361 } 19362 19363 if (tcp->tcp_cork) { 19364 /* 19365 * If the tcp->tcp_cork option is set, then we have to force 19366 * TCP not to send partial segments (smaller than MSS bytes). 19367 * We calculate the usable window now based on full mss 19368 * segments and will save the rest of the remaining data for 19369 * later. 19370 */ 19371 if (usable < mss) 19372 goto done; 19373 usable = (usable / mss) * mss; 19374 } 19375 19376 /* Update the latest receive window size in the TCP header.
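The advertised value is tcp_rwnd right-shifted by the receive window scale; e.g., hypothetically, rwnd = 1048576 with rcv_ws = 4 places 65536 in th_win.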
*/ 19376 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 19377 tcp->tcp_tcph->th_win); 19378 19379 /* 19380 * Determine if it's worthwhile to attempt LSO or MDT, based on: 19381 * 19382 * 1. Simple TCP/IP{v4,v6} (no options). 19383 * 2. IPSEC/IPQoS processing is not needed for the TCP connection. 19384 * 3. If the TCP connection is in ESTABLISHED state. 19385 * 4. The TCP is not detached. 19386 * 19387 * If any of the above conditions have changed during the 19388 * connection, stop using LSO/MDT and restore the stream head 19389 * parameters accordingly. 19390 */ 19391 ipst = tcps->tcps_netstack->netstack_ip; 19392 19393 if ((tcp->tcp_lso || tcp->tcp_mdt) && 19394 ((tcp->tcp_ipversion == IPV4_VERSION && 19395 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 19396 (tcp->tcp_ipversion == IPV6_VERSION && 19397 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN) || 19398 tcp->tcp_state != TCPS_ESTABLISHED || 19399 TCP_IS_DETACHED(tcp) || !CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp) || 19400 CONN_IPSEC_OUT_ENCAPSULATED(tcp->tcp_connp) || 19401 IPP_ENABLED(IPP_LOCAL_OUT, ipst))) { 19402 if (tcp->tcp_lso) { 19403 tcp->tcp_connp->conn_lso_ok = B_FALSE; 19404 tcp->tcp_lso = B_FALSE; 19405 } else { 19406 tcp->tcp_connp->conn_mdt_ok = B_FALSE; 19407 tcp->tcp_mdt = B_FALSE; 19408 } 19409 19410 /* Anything other than detached is considered pathological */ 19411 if (!TCP_IS_DETACHED(tcp)) { 19412 if (tcp->tcp_lso) 19413 TCP_STAT(tcps, tcp_lso_disabled); 19414 else 19415 TCP_STAT(tcps, tcp_mdt_conn_halted1); 19416 (void) tcp_maxpsz_set(tcp, B_TRUE); 19417 } 19418 } 19419 19420 /* Use MDT if sendable amount is greater than the threshold */ 19421 if (tcp->tcp_mdt && 19422 (mdt_thres = mss << tcp_mdt_smss_threshold, usable > mdt_thres) && 19423 (tail_unsent > mdt_thres || (xmit_tail->b_cont != NULL && 19424 MBLKL(xmit_tail->b_cont) > mdt_thres)) && 19425 (tcp->tcp_valid_bits == 0 || 19426 tcp->tcp_valid_bits == TCP_FSS_VALID)) { 19427 ASSERT(tcp->tcp_connp->conn_mdt_ok); 19428 rc = tcp_multisend(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 19429 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 19430 local_time, mdt_thres); 19431 } else { 19432 rc = tcp_send(q, tcp, mss, tcp_hdr_len, tcp_tcp_hdr_len, 19433 num_sack_blk, &usable, &snxt, &tail_unsent, &xmit_tail, 19434 local_time, INT_MAX); 19435 } 19436 19437 /* Pretend that all we were trying to send really got sent */ 19438 if (rc < 0 && tail_unsent < 0) { 19439 do { 19440 xmit_tail = xmit_tail->b_cont; 19441 xmit_tail->b_prev = local_time; 19442 ASSERT((uintptr_t)(xmit_tail->b_wptr - 19443 xmit_tail->b_rptr) <= (uintptr_t)INT_MAX); 19444 tail_unsent += (int)(xmit_tail->b_wptr - 19445 xmit_tail->b_rptr); 19446 } while (tail_unsent < 0); 19447 } 19448 done:; 19449 tcp->tcp_xmit_tail = xmit_tail; 19450 tcp->tcp_xmit_tail_unsent = tail_unsent; 19451 len = tcp->tcp_snxt - snxt; 19452 if (len) { 19453 /* 19454 * If new data was sent, need to update the notsack 19455 * list, which is, afterall, data blocks that have 19456 * not been sack'ed by the receiver. New data is 19457 * not sack'ed. 19458 */ 19459 if (tcp->tcp_snd_sack_ok && tcp->tcp_notsack_list != NULL) { 19460 /* len is a negative value. 
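Here len = (pre-send) tcp_snxt - (updated) snxt, i.e. minus the bytes just sent, so the subtraction below grows tcp_pipe by the amount sent.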
*/ 19461 tcp->tcp_pipe -= len; 19462 tcp_notsack_update(&(tcp->tcp_notsack_list), 19463 tcp->tcp_snxt, snxt, 19464 &(tcp->tcp_num_notsack_blk), 19465 &(tcp->tcp_cnt_notsack_list)); 19466 } 19467 tcp->tcp_snxt = snxt + tcp->tcp_fin_sent; 19468 tcp->tcp_rack = tcp->tcp_rnxt; 19469 tcp->tcp_rack_cnt = 0; 19470 if ((snxt + len) == tcp->tcp_suna) { 19471 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19472 } 19473 } else if (snxt == tcp->tcp_suna && tcp->tcp_swnd == 0) { 19474 /* 19475 * Didn't send anything. Make sure the timer is running 19476 * so that we will probe a zero window. 19477 */ 19478 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 19479 } 19480 /* Note that len is the amount we just sent but with a negative sign */ 19481 tcp->tcp_unsent += len; 19482 mutex_enter(&tcp->tcp_non_sq_lock); 19483 if (tcp->tcp_flow_stopped) { 19484 if (TCP_UNSENT_BYTES(tcp) <= tcp->tcp_xmit_lowater) { 19485 tcp_clrqfull(tcp); 19486 } 19487 } else if (TCP_UNSENT_BYTES(tcp) >= tcp->tcp_xmit_hiwater) { 19488 tcp_setqfull(tcp); 19489 } 19490 mutex_exit(&tcp->tcp_non_sq_lock); 19491 } 19492 19493 /* 19494 * tcp_fill_header is called by tcp_send() and tcp_multisend() to fill the 19495 * outgoing TCP header with the template header, as well as other 19496 * options such as time-stamp, ECN and/or SACK. 19497 */ 19498 static void 19499 tcp_fill_header(tcp_t *tcp, uchar_t *rptr, clock_t now, int num_sack_blk) 19500 { 19501 tcph_t *tcp_tmpl, *tcp_h; 19502 uint32_t *dst, *src; 19503 int hdrlen; 19504 19505 ASSERT(OK_32PTR(rptr)); 19506 19507 /* Template header */ 19508 tcp_tmpl = tcp->tcp_tcph; 19509 19510 /* Header of outgoing packet */ 19511 tcp_h = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 19512 19513 /* dst and src are opaque 32-bit fields, used for copying */ 19514 dst = (uint32_t *)rptr; 19515 src = (uint32_t *)tcp->tcp_iphc; 19516 hdrlen = tcp->tcp_hdr_len; 19517 19518 /* Fill time-stamp option if needed */ 19519 if (tcp->tcp_snd_ts_ok) { 19520 U32_TO_BE32((uint32_t)now, 19521 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 4); 19522 U32_TO_BE32(tcp->tcp_ts_recent, 19523 (char *)tcp_tmpl + TCP_MIN_HEADER_LENGTH + 8); 19524 } else { 19525 ASSERT(tcp->tcp_tcp_hdr_len == TCP_MIN_HEADER_LENGTH); 19526 } 19527 19528 /* 19529 * Copy the template header; is this really more efficient than 19530 * calling bcopy()? For simple IPv4/TCP, it may be the case, 19531 * but perhaps not for other scenarios. 19532 */ 19533 dst[0] = src[0]; 19534 dst[1] = src[1]; 19535 dst[2] = src[2]; 19536 dst[3] = src[3]; 19537 dst[4] = src[4]; 19538 dst[5] = src[5]; 19539 dst[6] = src[6]; 19540 dst[7] = src[7]; 19541 dst[8] = src[8]; 19542 dst[9] = src[9]; 19543 if (hdrlen -= 40) { 19544 hdrlen >>= 2; 19545 dst += 10; 19546 src += 10; 19547 do { 19548 *dst++ = *src++; 19549 } while (--hdrlen); 19550 } 19551 19552 /* 19553 * Set the ECN info in the TCP header if it is not a zero 19554 * window probe. Zero window probe is only sent in 19555 * tcp_wput_data() and tcp_timer(). 
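*
* (Per RFC 3168: ECE keeps echoing congestion back to the peer while
* it persists, while CWR is sent only once per congestion-window
* reduction, hence the tcp_ecn_cwr_sent latch below.)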
19556 */ 19557 if (tcp->tcp_ecn_ok && !tcp->tcp_zero_win_probe) { 19558 SET_ECT(tcp, rptr); 19559 19560 if (tcp->tcp_ecn_echo_on) 19561 tcp_h->th_flags[0] |= TH_ECE; 19562 if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) { 19563 tcp_h->th_flags[0] |= TH_CWR; 19564 tcp->tcp_ecn_cwr_sent = B_TRUE; 19565 } 19566 } 19567 19568 /* Fill in SACK options */ 19569 if (num_sack_blk > 0) { 19570 uchar_t *wptr = rptr + tcp->tcp_hdr_len; 19571 sack_blk_t *tmp; 19572 int32_t i; 19573 19574 wptr[0] = TCPOPT_NOP; 19575 wptr[1] = TCPOPT_NOP; 19576 wptr[2] = TCPOPT_SACK; 19577 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 19578 sizeof (sack_blk_t); 19579 wptr += TCPOPT_REAL_SACK_LEN; 19580 19581 tmp = tcp->tcp_sack_list; 19582 for (i = 0; i < num_sack_blk; i++) { 19583 U32_TO_BE32(tmp[i].begin, wptr); 19584 wptr += sizeof (tcp_seq); 19585 U32_TO_BE32(tmp[i].end, wptr); 19586 wptr += sizeof (tcp_seq); 19587 } 19588 tcp_h->th_offset_and_rsrvd[0] += 19589 ((num_sack_blk * 2 + 1) << 4); 19590 } 19591 } 19592 19593 /* 19594 * tcp_mdt_add_attrs() is called by tcp_multisend() in order to attach 19595 * the destination address and SAP attribute, and if necessary, the 19596 * hardware checksum offload attribute to a Multidata message. 19597 */ 19598 static int 19599 tcp_mdt_add_attrs(multidata_t *mmd, const mblk_t *dlmp, const boolean_t hwcksum, 19600 const uint32_t start, const uint32_t stuff, const uint32_t end, 19601 const uint32_t flags, tcp_stack_t *tcps) 19602 { 19603 /* Add global destination address & SAP attribute */ 19604 if (dlmp == NULL || !ip_md_addr_attr(mmd, NULL, dlmp)) { 19605 ip1dbg(("tcp_mdt_add_attrs: can't add global physical " 19606 "destination address+SAP\n")); 19607 19608 if (dlmp != NULL) 19609 TCP_STAT(tcps, tcp_mdt_allocfail); 19610 return (-1); 19611 } 19612 19613 /* Add global hwcksum attribute */ 19614 if (hwcksum && 19615 !ip_md_hcksum_attr(mmd, NULL, start, stuff, end, flags)) { 19616 ip1dbg(("tcp_mdt_add_attrs: can't add global hardware " 19617 "checksum attribute\n")); 19618 19619 TCP_STAT(tcps, tcp_mdt_allocfail); 19620 return (-1); 19621 } 19622 19623 return (0); 19624 } 19625 19626 /* 19627 * Smaller and private version of pdescinfo_t used specifically for TCP, 19628 * which allows for only two payload spans per packet. 19629 */ 19630 typedef struct tcp_pdescinfo_s PDESCINFO_STRUCT(2) tcp_pdescinfo_t; 19631 19632 /* 19633 * tcp_multisend() is called by tcp_wput_data() for the Multidata Transmit 19634 * scheme, and returns one of the following: 19635 * 19636 * -1 = failed allocation. 19637 * 0 = success; burst count reached, or usable send window is too small, 19638 * and we'd rather wait until later before sending again.
19639 */ 19640 static int 19641 tcp_multisend(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len, 19642 const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable, 19643 uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time, 19644 const int mdt_thres) 19645 { 19646 mblk_t *md_mp_head, *md_mp, *md_pbuf, *md_pbuf_nxt, *md_hbuf; 19647 multidata_t *mmd; 19648 uint_t obsegs, obbytes, hdr_frag_sz; 19649 uint_t cur_hdr_off, cur_pld_off, base_pld_off, first_snxt; 19650 int num_burst_seg, max_pld; 19651 pdesc_t *pkt; 19652 tcp_pdescinfo_t tcp_pkt_info; 19653 pdescinfo_t *pkt_info; 19654 int pbuf_idx, pbuf_idx_nxt; 19655 int seg_len, len, spill, af; 19656 boolean_t add_buffer, zcopy, clusterwide; 19657 boolean_t buf_trunked = B_FALSE; 19658 boolean_t rconfirm = B_FALSE; 19659 boolean_t done = B_FALSE; 19660 uint32_t cksum; 19661 uint32_t hwcksum_flags; 19662 ire_t *ire = NULL; 19663 ill_t *ill; 19664 ipha_t *ipha; 19665 ip6_t *ip6h; 19666 ipaddr_t src, dst; 19667 ill_zerocopy_capab_t *zc_cap = NULL; 19668 uint16_t *up; 19669 int err; 19670 conn_t *connp; 19671 mblk_t *mp, *mp1, *fw_mp_head = NULL; 19672 uchar_t *pld_start; 19673 tcp_stack_t *tcps = tcp->tcp_tcps; 19674 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 19675 19676 #ifdef _BIG_ENDIAN 19677 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 28) & 0x7) 19678 #else 19679 #define IPVER(ip6h) ((((uint32_t *)ip6h)[0] >> 4) & 0x7) 19680 #endif 19681 19682 #define PREP_NEW_MULTIDATA() { \ 19683 mmd = NULL; \ 19684 md_mp = md_hbuf = NULL; \ 19685 cur_hdr_off = 0; \ 19686 max_pld = tcp->tcp_mdt_max_pld; \ 19687 pbuf_idx = pbuf_idx_nxt = -1; \ 19688 add_buffer = B_TRUE; \ 19689 zcopy = B_FALSE; \ 19690 } 19691 19692 #define PREP_NEW_PBUF() { \ 19693 md_pbuf = md_pbuf_nxt = NULL; \ 19694 pbuf_idx = pbuf_idx_nxt = -1; \ 19695 cur_pld_off = 0; \ 19696 first_snxt = *snxt; \ 19697 ASSERT(*tail_unsent > 0); \ 19698 base_pld_off = MBLKL(*xmit_tail) - *tail_unsent; \ 19699 } 19700 19701 ASSERT(mdt_thres >= mss); 19702 ASSERT(*usable > 0 && *usable > mdt_thres); 19703 ASSERT(tcp->tcp_state == TCPS_ESTABLISHED); 19704 ASSERT(!TCP_IS_DETACHED(tcp)); 19705 ASSERT(tcp->tcp_valid_bits == 0 || 19706 tcp->tcp_valid_bits == TCP_FSS_VALID); 19707 ASSERT((tcp->tcp_ipversion == IPV4_VERSION && 19708 tcp->tcp_ip_hdr_len == IP_SIMPLE_HDR_LENGTH) || 19709 (tcp->tcp_ipversion == IPV6_VERSION && 19710 tcp->tcp_ip_hdr_len == IPV6_HDR_LEN)); 19711 19712 connp = tcp->tcp_connp; 19713 ASSERT(connp != NULL); 19714 ASSERT(CONN_IS_LSO_MD_FASTPATH(connp)); 19715 ASSERT(!CONN_IPSEC_OUT_ENCAPSULATED(connp)); 19716 19717 /* 19718 * Note that tcp will only declare at most 2 payload spans per 19719 * packet, which is much lower than the maximum allowable number 19720 * of packet spans per Multidata. For this reason, we use the 19721 * privately declared and smaller descriptor info structure, in 19722 * order to save some stack space. 19723 */ 19724 pkt_info = (pdescinfo_t *)&tcp_pkt_info; 19725 19726 af = (tcp->tcp_ipversion == IPV4_VERSION) ? AF_INET : AF_INET6; 19727 if (af == AF_INET) { 19728 dst = tcp->tcp_ipha->ipha_dst; 19729 src = tcp->tcp_ipha->ipha_src; 19730 ASSERT(!CLASSD(dst)); 19731 } 19732 ASSERT(af == AF_INET || 19733 !IN6_IS_ADDR_MULTICAST(&tcp->tcp_ip6h->ip6_dst)); 19734 19735 obsegs = obbytes = 0; 19736 num_burst_seg = tcp->tcp_snd_burst; 19737 md_mp_head = NULL; 19738 PREP_NEW_MULTIDATA(); 19739 19740 /* 19741 * Before we go on further, make sure there is an IRE that we can 19742 * use, and that the ILL supports MDT. 
Otherwise, there's no point 19743 * in proceeding any further, and we should just hand everything 19744 * off to the legacy path. 19745 */ 19746 if (!tcp_send_find_ire(tcp, (af == AF_INET) ? &dst : NULL, &ire)) 19747 goto legacy_send_no_md; 19748 19749 ASSERT(ire != NULL); 19750 ASSERT(af != AF_INET || ire->ire_ipversion == IPV4_VERSION); 19751 ASSERT(af == AF_INET || !IN6_IS_ADDR_V4MAPPED(&(ire->ire_addr_v6))); 19752 ASSERT(af == AF_INET || ire->ire_nce != NULL); 19753 ASSERT(!(ire->ire_type & IRE_BROADCAST)); 19754 /* 19755 * If we do support loopback for MDT (which requires modifications 19756 * to the receiving paths), the following assertions should go away, 19757 * and we would be sending the Multidata to loopback conn later on. 19758 */ 19759 ASSERT(!IRE_IS_LOCAL(ire)); 19760 ASSERT(ire->ire_stq != NULL); 19761 19762 ill = ire_to_ill(ire); 19763 ASSERT(ill != NULL); 19764 ASSERT(!ILL_MDT_CAPABLE(ill) || ill->ill_mdt_capab != NULL); 19765 19766 if (!tcp->tcp_ire_ill_check_done) { 19767 tcp_ire_ill_check(tcp, ire, ill, B_TRUE); 19768 tcp->tcp_ire_ill_check_done = B_TRUE; 19769 } 19770 19771 /* 19772 * If the underlying interface conditions have changed, or if the 19773 * new interface does not support MDT, go back to legacy path. 19774 */ 19775 if (!ILL_MDT_USABLE(ill) || (ire->ire_flags & RTF_MULTIRT) != 0) { 19776 /* don't go through this path anymore for this connection */ 19777 TCP_STAT(tcps, tcp_mdt_conn_halted2); 19778 tcp->tcp_mdt = B_FALSE; 19779 ip1dbg(("tcp_multisend: disabling MDT for connp %p on " 19780 "interface %s\n", (void *)connp, ill->ill_name)); 19781 /* IRE will be released prior to returning */ 19782 goto legacy_send_no_md; 19783 } 19784 19785 if (ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) 19786 zc_cap = ill->ill_zerocopy_capab; 19787 19788 /* 19789 * Check if we can take tcp fast-path. Note that "incomplete" 19790 * ire's (where the link-layer for next hop is not resolved 19791 * or where the fast-path header in nce_fp_mp is not available 19792 * yet) are sent down the legacy (slow) path. 19793 * NOTE: We should fix ip_xmit_v4 to handle M_MULTIDATA 19794 */ 19795 if (ire->ire_nce && ire->ire_nce->nce_state != ND_REACHABLE) { 19796 /* IRE will be released prior to returning */ 19797 goto legacy_send_no_md; 19798 } 19799 19800 /* go to legacy path if interface doesn't support zerocopy */ 19801 if (tcp->tcp_snd_zcopy_aware && do_tcpzcopy != 2 && 19802 (zc_cap == NULL || zc_cap->ill_zerocopy_flags == 0)) { 19803 /* IRE will be released prior to returning */ 19804 goto legacy_send_no_md; 19805 } 19806 19807 /* does the interface support hardware checksum offload? */ 19808 hwcksum_flags = 0; 19809 if (ILL_HCKSUM_CAPABLE(ill) && 19810 (ill->ill_hcksum_capab->ill_hcksum_txflags & 19811 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 | HCKSUM_INET_PARTIAL | 19812 HCKSUM_IPHDRCKSUM)) && dohwcksum) { 19813 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19814 HCKSUM_IPHDRCKSUM) 19815 hwcksum_flags = HCK_IPV4_HDRCKSUM; 19816 19817 if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19818 (HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6)) 19819 hwcksum_flags |= HCK_FULLCKSUM; 19820 else if (ill->ill_hcksum_capab->ill_hcksum_txflags & 19821 HCKSUM_INET_PARTIAL) 19822 hwcksum_flags |= HCK_PARTIALCKSUM; 19823 } 19824 19825 /* 19826 * Each header fragment consists of the leading extra space, 19827 * followed by the TCP/IP header, and the trailing extra space. 
19828 * We make sure that each header fragment begins on a 32-bit 19829 * aligned memory address (tcp_mdt_hdr_head is already 32-bit 19830 * aligned in tcp_mdt_update). 19831 */ 19832 hdr_frag_sz = roundup((tcp->tcp_mdt_hdr_head + tcp_hdr_len + 19833 tcp->tcp_mdt_hdr_tail), 4); 19834 19835 /* are we starting from the beginning of data block? */ 19836 if (*tail_unsent == 0) { 19837 *xmit_tail = (*xmit_tail)->b_cont; 19838 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= (uintptr_t)INT_MAX); 19839 *tail_unsent = (int)MBLKL(*xmit_tail); 19840 } 19841 19842 /* 19843 * Here we create one or more Multidata messages, each made up of 19844 * one header buffer and up to N payload buffers. This entire 19845 * operation is done within two loops: 19846 * 19847 * The outer loop mostly deals with creating the Multidata message, 19848 * as well as the header buffer that gets added to it. It also 19849 * links the Multidata messages together such that all of them can 19850 * be sent down to the lower layer in a single putnext call; this 19851 * linking behavior depends on the tcp_mdt_chain tunable. 19852 * 19853 * The inner loop takes an existing Multidata message, and adds 19854 * one or more (up to tcp_mdt_max_pld) payload buffers to it. It 19855 * packetizes those buffers by filling up the corresponding header 19856 * buffer fragments with the proper IP and TCP headers, and by 19857 * describing the layout of each packet in the packet descriptors 19858 * that get added to the Multidata. 19859 */ 19860 do { 19861 /* 19862 * If usable send window is too small, or data blocks in 19863 * transmit list are smaller than our threshold (i.e. app 19864 * performs large writes followed by small ones), we hand 19865 * off the control over to the legacy path. Note that we'll 19866 * get back the control once it encounters a large block. 19867 */ 19868 if (*usable < mss || (*tail_unsent <= mdt_thres && 19869 (*xmit_tail)->b_cont != NULL && 19870 MBLKL((*xmit_tail)->b_cont) <= mdt_thres)) { 19871 /* send down what we've got so far */ 19872 if (md_mp_head != NULL) { 19873 tcp_multisend_data(tcp, ire, ill, md_mp_head, 19874 obsegs, obbytes, &rconfirm); 19875 } 19876 /* 19877 * Pass control over to tcp_send(), but tell it to 19878 * return to us once a large-size transmission is 19879 * possible. 19880 */ 19881 TCP_STAT(tcps, tcp_mdt_legacy_small); 19882 if ((err = tcp_send(q, tcp, mss, tcp_hdr_len, 19883 tcp_tcp_hdr_len, num_sack_blk, usable, snxt, 19884 tail_unsent, xmit_tail, local_time, 19885 mdt_thres)) <= 0) { 19886 /* burst count reached, or alloc failed */ 19887 IRE_REFRELE(ire); 19888 return (err); 19889 } 19890 19891 /* tcp_send() may have sent everything, so check */ 19892 if (*usable <= 0) { 19893 IRE_REFRELE(ire); 19894 return (0); 19895 } 19896 19897 TCP_STAT(tcps, tcp_mdt_legacy_ret); 19898 /* 19899 * We may have delivered the Multidata, so make sure 19900 * to re-initialize before the next round. 19901 */ 19902 md_mp_head = NULL; 19903 obsegs = obbytes = 0; 19904 num_burst_seg = tcp->tcp_snd_burst; 19905 PREP_NEW_MULTIDATA(); 19906 19907 /* are we starting from the beginning of data block? */ 19908 if (*tail_unsent == 0) { 19909 *xmit_tail = (*xmit_tail)->b_cont; 19910 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 19911 (uintptr_t)INT_MAX); 19912 *tail_unsent = (int)MBLKL(*xmit_tail); 19913 } 19914 } 19915 19916 /* 19917 * max_pld limits the number of mblks in tcp's transmit 19918 * queue that can be added to a Multidata message. 
Once
 * this counter reaches zero, no more mblks
 * can be added to it.  What happens afterwards depends
 * on whether or not we are set to chain the Multidata
 * messages.  If we are to link them together, reset
 * max_pld to its original value (tcp_mdt_max_pld) and
 * prepare to create a new Multidata message which will
 * get linked to md_mp_head.  Else, leave it alone and
 * let the inner loop break on its own.
 */
if (tcp_mdt_chain && max_pld == 0)
    PREP_NEW_MULTIDATA();

/* adding a payload buffer; re-initialize values */
if (add_buffer)
    PREP_NEW_PBUF();

/*
 * If we don't have a Multidata, either because we just
 * (re)entered this outer loop, or after we branched off
 * to tcp_send above, set up the Multidata and header
 * buffer to be used.
 */
if (md_mp == NULL) {
    int md_hbuflen;
    uint32_t start, stuff;

    /*
     * Calculate Multidata header buffer size large enough
     * to hold all of the headers that can possibly be
     * sent at this moment.  We'd rather over-estimate
     * the size than run out of space; this is okay
     * since this buffer is small anyway.
     */
    md_hbuflen = (howmany(*usable, mss) + 1) * hdr_frag_sz;

    /*
     * Start and stuff offset for partial hardware
     * checksum offload; these are currently for IPv4.
     * For full checksum offload, they are set to zero.
     */
    if ((hwcksum_flags & HCK_PARTIALCKSUM)) {
        if (af == AF_INET) {
            start = IP_SIMPLE_HDR_LENGTH;
            stuff = IP_SIMPLE_HDR_LENGTH +
                TCP_CHECKSUM_OFFSET;
        } else {
            start = IPV6_HDR_LEN;
            stuff = IPV6_HDR_LEN +
                TCP_CHECKSUM_OFFSET;
        }
    } else {
        start = stuff = 0;
    }

    /*
     * Create the header buffer, Multidata, as well as
     * any necessary attributes (destination address,
     * SAP and hardware checksum offload) that should
     * be associated with the Multidata message.
     */
    ASSERT(cur_hdr_off == 0);
    if ((md_hbuf = allocb(md_hbuflen, BPRI_HI)) == NULL ||
        ((md_hbuf->b_wptr += md_hbuflen),
        (mmd = mmd_alloc(md_hbuf, &md_mp,
        KM_NOSLEEP)) == NULL) || (tcp_mdt_add_attrs(mmd,
        /* fastpath mblk */
        ire->ire_nce->nce_res_mp,
        /* hardware checksum enabled */
        (hwcksum_flags & (HCK_FULLCKSUM|HCK_PARTIALCKSUM)),
        /* hardware checksum offsets */
        start, stuff, 0,
        /* hardware checksum flag */
        hwcksum_flags, tcps) != 0)) {
legacy_send:
        if (md_mp != NULL) {
            /* Unlink message from the chain */
            if (md_mp_head != NULL) {
                err = (intptr_t)rmvb(md_mp_head,
                    md_mp);
                /*
                 * We can't assert that rmvb
                 * did not return -1, since we
                 * may get here before linkb
                 * happens.  We do, however,
                 * check if we just removed the
                 * only element in the list
                 * (rmvb() returns NULL once the
                 * removal empties the chain,
                 * hence the test for zero below).
                 */
                if (err == 0)
                    md_mp_head = NULL;
            }
            /* md_hbuf gets freed automatically */
            TCP_STAT(tcps, tcp_mdt_discarded);
            freeb(md_mp);
        } else {
            /* Either allocb or mmd_alloc failed */
            TCP_STAT(tcps, tcp_mdt_allocfail);
            if (md_hbuf != NULL)
                freeb(md_hbuf);
        }

        /* send down what we've got so far */
        if (md_mp_head != NULL) {
            tcp_multisend_data(tcp, ire, ill,
                md_mp_head, obsegs, obbytes,
                &rconfirm);
        }
legacy_send_no_md:
        if (ire != NULL)
            IRE_REFRELE(ire);
        /*
         * Too bad; let the legacy path handle this.
         * We specify INT_MAX for the threshold, since
         * we gave up on the Multidata processing
         * and let the old path have it all.
         */
        TCP_STAT(tcps, tcp_mdt_legacy_all);
        return (tcp_send(q, tcp, mss, tcp_hdr_len,
            tcp_tcp_hdr_len, num_sack_blk, usable,
            snxt, tail_unsent, xmit_tail, local_time,
            INT_MAX));
    }

    /* link to any existing ones, if applicable */
    TCP_STAT(tcps, tcp_mdt_allocd);
    if (md_mp_head == NULL) {
        md_mp_head = md_mp;
    } else if (tcp_mdt_chain) {
        TCP_STAT(tcps, tcp_mdt_linked);
        linkb(md_mp_head, md_mp);
    }
}

ASSERT(md_mp_head != NULL);
ASSERT(tcp_mdt_chain || md_mp_head->b_cont == NULL);
ASSERT(md_mp != NULL && mmd != NULL);
ASSERT(md_hbuf != NULL);

/*
 * Packetize the transmittable portion of the data block;
 * each data block is essentially added to the Multidata
 * as a payload buffer.  We also deal with adding more
 * than one payload buffer, which happens when the remaining
 * packetized portion of the current payload buffer is less
 * than MSS, while the next data block in transmit queue
 * has enough data to make up for one.  This "spillover"
 * case essentially creates a split-packet, where portions
 * of the packet's payload fragments may span across two
 * virtually discontiguous address blocks.
 */
seg_len = mss;
do {
    len = seg_len;

    ASSERT(len > 0);
    ASSERT(max_pld >= 0);
    ASSERT(!add_buffer || cur_pld_off == 0);

    /*
     * First time around for this payload buffer; note
     * in the case of a spillover, the following has
     * been done prior to adding the split-packet
     * descriptor to Multidata, and we don't want to
     * repeat the process.
     */
    if (add_buffer) {
        ASSERT(mmd != NULL);
        ASSERT(md_pbuf == NULL);
        ASSERT(md_pbuf_nxt == NULL);
        ASSERT(pbuf_idx == -1 && pbuf_idx_nxt == -1);

        /*
         * Have we reached the limit?  We'd get to
         * this case when we're not chaining the
         * Multidata messages together, and since
         * we're done, terminate this loop.  Anything
         * already linked onto md_mp_head is still
         * sent once the loops unwind.
         */
        if (max_pld == 0)
            break; /* done */

        if ((md_pbuf = dupb(*xmit_tail)) == NULL) {
            TCP_STAT(tcps, tcp_mdt_allocfail);
            goto legacy_send; /* out_of_mem */
        }

        if (IS_VMLOANED_MBLK(md_pbuf) && !zcopy &&
            zc_cap != NULL) {
            if (!ip_md_zcopy_attr(mmd, NULL,
                zc_cap->ill_zerocopy_flags)) {
                freeb(md_pbuf);
                TCP_STAT(tcps,
                    tcp_mdt_allocfail);
                /* out_of_mem */
                goto legacy_send;
            }
            zcopy = B_TRUE;
        }

        md_pbuf->b_rptr += base_pld_off;

        /*
         * Add a payload buffer to the Multidata; this
         * operation must not fail, otherwise our
         * logic in this routine is broken.  There
         * is no memory allocation done by the
         * routine, so any returned failure simply
         * tells us that we've done something wrong.
         *
         * A failure tells us that either we're adding
         * the same payload buffer more than once, or
         * we're trying to add more buffers than
         * allowed (max_pld calculation is wrong).
         * None of the above cases should happen, and
         * we panic because there's either horrible
         * heap corruption or a programming mistake.
         */
        pbuf_idx = mmd_addpldbuf(mmd, md_pbuf);
        if (pbuf_idx < 0) {
            cmn_err(CE_PANIC, "tcp_multisend: "
                "payload buffer logic error "
                "detected for tcp %p mmd %p "
                "pbuf %p (%d)\n",
                (void *)tcp, (void *)mmd,
                (void *)md_pbuf, pbuf_idx);
        }

        ASSERT(max_pld > 0);
        --max_pld;
        add_buffer = B_FALSE;
    }

    ASSERT(md_mp_head != NULL);
    ASSERT(md_pbuf != NULL);
    ASSERT(md_pbuf_nxt == NULL);
    ASSERT(pbuf_idx != -1);
    ASSERT(pbuf_idx_nxt == -1);
    ASSERT(*usable > 0);

    /*
     * We spillover to the next payload buffer only
     * if all of the following is true:
     *
     * 1. There is not enough data on the current
     *    payload buffer to make up `len',
     * 2. We are allowed to send `len',
     * 3. The next payload buffer length is large
     *    enough to accommodate `spill'.
     */
    if ((spill = len - *tail_unsent) > 0 &&
        *usable >= len &&
        MBLKL((*xmit_tail)->b_cont) >= spill &&
        max_pld > 0) {
        md_pbuf_nxt = dupb((*xmit_tail)->b_cont);
        if (md_pbuf_nxt == NULL) {
            TCP_STAT(tcps, tcp_mdt_allocfail);
            goto legacy_send; /* out_of_mem */
        }

        if (IS_VMLOANED_MBLK(md_pbuf_nxt) && !zcopy &&
            zc_cap != NULL) {
            if (!ip_md_zcopy_attr(mmd, NULL,
                zc_cap->ill_zerocopy_flags)) {
                freeb(md_pbuf_nxt);
                TCP_STAT(tcps,
                    tcp_mdt_allocfail);
                /* out_of_mem */
                goto legacy_send;
            }
            zcopy = B_TRUE;
        }

        /*
         * See comments above on the first call to
         * mmd_addpldbuf for explanation on the panic.
         */
        pbuf_idx_nxt = mmd_addpldbuf(mmd, md_pbuf_nxt);
        if (pbuf_idx_nxt < 0) {
            panic("tcp_multisend: "
                "next payload buffer logic error "
                "detected for tcp %p mmd %p "
                "pbuf %p (%d)\n",
                (void *)tcp, (void *)mmd,
                (void *)md_pbuf_nxt, pbuf_idx_nxt);
        }

        ASSERT(max_pld > 0);
        --max_pld;
    } else if (spill > 0) {
        /*
         * If there's a spillover, but the following
         * xmit_tail couldn't give us enough octets
         * to reach "len", then stop the current
         * Multidata creation and let the legacy
         * tcp_send() path take over.
We don't want 20212 * to send the tiny segment as part of this 20213 * Multidata for performance reasons; instead, 20214 * we let the legacy path deal with grouping 20215 * it with the subsequent small mblks. 20216 */ 20217 if (*usable >= len && 20218 MBLKL((*xmit_tail)->b_cont) < spill) { 20219 max_pld = 0; 20220 break; /* done */ 20221 } 20222 20223 /* 20224 * We can't spillover, and we are near 20225 * the end of the current payload buffer, 20226 * so send what's left. 20227 */ 20228 ASSERT(*tail_unsent > 0); 20229 len = *tail_unsent; 20230 } 20231 20232 /* tail_unsent is negated if there is a spillover */ 20233 *tail_unsent -= len; 20234 *usable -= len; 20235 ASSERT(*usable >= 0); 20236 20237 if (*usable < mss) 20238 seg_len = *usable; 20239 /* 20240 * Sender SWS avoidance; see comments in tcp_send(); 20241 * everything else is the same, except that we only 20242 * do this here if there is no more data to be sent 20243 * following the current xmit_tail. We don't check 20244 * for 1-byte urgent data because we shouldn't get 20245 * here if TCP_URG_VALID is set. 20246 */ 20247 if (*usable > 0 && *usable < mss && 20248 ((md_pbuf_nxt == NULL && 20249 (*xmit_tail)->b_cont == NULL) || 20250 (md_pbuf_nxt != NULL && 20251 (*xmit_tail)->b_cont->b_cont == NULL)) && 20252 seg_len < (tcp->tcp_max_swnd >> 1) && 20253 (tcp->tcp_unsent - 20254 ((*snxt + len) - tcp->tcp_snxt)) > seg_len && 20255 !tcp->tcp_zero_win_probe) { 20256 if ((*snxt + len) == tcp->tcp_snxt && 20257 (*snxt + len) == tcp->tcp_suna) { 20258 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 20259 } 20260 done = B_TRUE; 20261 } 20262 20263 /* 20264 * Prime pump for IP's checksumming on our behalf; 20265 * include the adjustment for a source route if any. 20266 * Do this only for software/partial hardware checksum 20267 * offload, as this field gets zeroed out later for 20268 * the full hardware checksum offload case. 20269 */ 20270 if (!(hwcksum_flags & HCK_FULLCKSUM)) { 20271 cksum = len + tcp_tcp_hdr_len + tcp->tcp_sum; 20272 cksum = (cksum >> 16) + (cksum & 0xFFFF); 20273 U16_TO_ABE16(cksum, tcp->tcp_tcph->th_sum); 20274 } 20275 20276 U32_TO_ABE32(*snxt, tcp->tcp_tcph->th_seq); 20277 *snxt += len; 20278 20279 tcp->tcp_tcph->th_flags[0] = TH_ACK; 20280 /* 20281 * We set the PUSH bit only if TCP has no more buffered 20282 * data to be transmitted (or if sender SWS avoidance 20283 * takes place), as opposed to setting it for every 20284 * last packet in the burst. 20285 */ 20286 if (done || 20287 (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) == 0) 20288 tcp->tcp_tcph->th_flags[0] |= TH_PUSH; 20289 20290 /* 20291 * Set FIN bit if this is our last segment; snxt 20292 * already includes its length, and it will not 20293 * be adjusted after this point. 20294 */ 20295 if (tcp->tcp_valid_bits == TCP_FSS_VALID && 20296 *snxt == tcp->tcp_fss) { 20297 if (!tcp->tcp_fin_acked) { 20298 tcp->tcp_tcph->th_flags[0] |= TH_FIN; 20299 BUMP_MIB(&tcps->tcps_mib, 20300 tcpOutControl); 20301 } 20302 if (!tcp->tcp_fin_sent) { 20303 tcp->tcp_fin_sent = B_TRUE; 20304 /* 20305 * tcp state must be ESTABLISHED 20306 * in order for us to get here in 20307 * the first place. 20308 */ 20309 tcp->tcp_state = TCPS_FIN_WAIT_1; 20310 20311 /* 20312 * Upon returning from this routine, 20313 * tcp_wput_data() will set tcp_snxt 20314 * to be equal to snxt + tcp_fin_sent. 20315 * This is essentially the same as 20316 * setting it to tcp_fss + 1. 
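 * As a small worked example with hypothetical numbers: if
 * tcp_fss = 1000, the FIN is sent once *snxt reaches 1000, and
 * tcp_wput_data() then sets tcp_snxt to 1001, covering the one
 * unit of sequence space consumed by the FIN.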
20317 */ 20318 } 20319 } 20320 20321 tcp->tcp_last_sent_len = (ushort_t)len; 20322 20323 len += tcp_hdr_len; 20324 if (tcp->tcp_ipversion == IPV4_VERSION) 20325 tcp->tcp_ipha->ipha_length = htons(len); 20326 else 20327 tcp->tcp_ip6h->ip6_plen = htons(len - 20328 ((char *)&tcp->tcp_ip6h[1] - 20329 tcp->tcp_iphc)); 20330 20331 pkt_info->flags = (PDESC_HBUF_REF | PDESC_PBUF_REF); 20332 20333 /* setup header fragment */ 20334 PDESC_HDR_ADD(pkt_info, 20335 md_hbuf->b_rptr + cur_hdr_off, /* base */ 20336 tcp->tcp_mdt_hdr_head, /* head room */ 20337 tcp_hdr_len, /* len */ 20338 tcp->tcp_mdt_hdr_tail); /* tail room */ 20339 20340 ASSERT(pkt_info->hdr_lim - pkt_info->hdr_base == 20341 hdr_frag_sz); 20342 ASSERT(MBLKIN(md_hbuf, 20343 (pkt_info->hdr_base - md_hbuf->b_rptr), 20344 PDESC_HDRSIZE(pkt_info))); 20345 20346 /* setup first payload fragment */ 20347 PDESC_PLD_INIT(pkt_info); 20348 PDESC_PLD_SPAN_ADD(pkt_info, 20349 pbuf_idx, /* index */ 20350 md_pbuf->b_rptr + cur_pld_off, /* start */ 20351 tcp->tcp_last_sent_len); /* len */ 20352 20353 /* create a split-packet in case of a spillover */ 20354 if (md_pbuf_nxt != NULL) { 20355 ASSERT(spill > 0); 20356 ASSERT(pbuf_idx_nxt > pbuf_idx); 20357 ASSERT(!add_buffer); 20358 20359 md_pbuf = md_pbuf_nxt; 20360 md_pbuf_nxt = NULL; 20361 pbuf_idx = pbuf_idx_nxt; 20362 pbuf_idx_nxt = -1; 20363 cur_pld_off = spill; 20364 20365 /* trim out first payload fragment */ 20366 PDESC_PLD_SPAN_TRIM(pkt_info, 0, spill); 20367 20368 /* setup second payload fragment */ 20369 PDESC_PLD_SPAN_ADD(pkt_info, 20370 pbuf_idx, /* index */ 20371 md_pbuf->b_rptr, /* start */ 20372 spill); /* len */ 20373 20374 if ((*xmit_tail)->b_next == NULL) { 20375 /* 20376 * Store the lbolt used for RTT 20377 * estimation. We can only record one 20378 * timestamp per mblk so we do it when 20379 * we reach the end of the payload 20380 * buffer. Also we only take a new 20381 * timestamp sample when the previous 20382 * timed data from the same mblk has 20383 * been ack'ed. 20384 */ 20385 (*xmit_tail)->b_prev = local_time; 20386 (*xmit_tail)->b_next = 20387 (mblk_t *)(uintptr_t)first_snxt; 20388 } 20389 20390 first_snxt = *snxt - spill; 20391 20392 /* 20393 * Advance xmit_tail; usable could be 0 by 20394 * the time we got here, but we made sure 20395 * above that we would only spillover to 20396 * the next data block if usable includes 20397 * the spilled-over amount prior to the 20398 * subtraction. Therefore, we are sure 20399 * that xmit_tail->b_cont can't be NULL. 20400 */ 20401 ASSERT((*xmit_tail)->b_cont != NULL); 20402 *xmit_tail = (*xmit_tail)->b_cont; 20403 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20404 (uintptr_t)INT_MAX); 20405 *tail_unsent = (int)MBLKL(*xmit_tail) - spill; 20406 } else { 20407 cur_pld_off += tcp->tcp_last_sent_len; 20408 } 20409 20410 /* 20411 * Fill in the header using the template header, and 20412 * add options such as time-stamp, ECN and/or SACK, 20413 * as needed. 20414 */ 20415 tcp_fill_header(tcp, pkt_info->hdr_rptr, 20416 (clock_t)local_time, num_sack_blk); 20417 20418 /* take care of some IP header businesses */ 20419 if (af == AF_INET) { 20420 ipha = (ipha_t *)pkt_info->hdr_rptr; 20421 20422 ASSERT(OK_32PTR((uchar_t *)ipha)); 20423 ASSERT(PDESC_HDRL(pkt_info) >= 20424 IP_SIMPLE_HDR_LENGTH); 20425 ASSERT(ipha->ipha_version_and_hdr_length == 20426 IP_SIMPLE_HDR_VERSION); 20427 20428 /* 20429 * Assign ident value for current packet; see 20430 * related comments in ip_wput_ire() about the 20431 * contract private interface with clustering 20432 * group. 
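 * In the non-clustered case below the ident is simply drawn from
 * ire_ident with an atomic increment; the #ifndef _BIG_ENDIAN swap
 * that follows byte-swaps it so that the on-the-wire layout is the
 * same regardless of host endianness.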
20433 */ 20434 clusterwide = B_FALSE; 20435 if (cl_inet_ipident != NULL) { 20436 ASSERT(cl_inet_isclusterwide != NULL); 20437 if ((*cl_inet_isclusterwide)(IPPROTO_IP, 20438 AF_INET, 20439 (uint8_t *)(uintptr_t)src)) { 20440 ipha->ipha_ident = 20441 (*cl_inet_ipident) 20442 (IPPROTO_IP, AF_INET, 20443 (uint8_t *)(uintptr_t)src, 20444 (uint8_t *)(uintptr_t)dst); 20445 clusterwide = B_TRUE; 20446 } 20447 } 20448 20449 if (!clusterwide) { 20450 ipha->ipha_ident = (uint16_t) 20451 atomic_add_32_nv( 20452 &ire->ire_ident, 1); 20453 } 20454 #ifndef _BIG_ENDIAN 20455 ipha->ipha_ident = (ipha->ipha_ident << 8) | 20456 (ipha->ipha_ident >> 8); 20457 #endif 20458 } else { 20459 ip6h = (ip6_t *)pkt_info->hdr_rptr; 20460 20461 ASSERT(OK_32PTR((uchar_t *)ip6h)); 20462 ASSERT(IPVER(ip6h) == IPV6_VERSION); 20463 ASSERT(ip6h->ip6_nxt == IPPROTO_TCP); 20464 ASSERT(PDESC_HDRL(pkt_info) >= 20465 (IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET + 20466 TCP_CHECKSUM_SIZE)); 20467 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 20468 20469 if (tcp->tcp_ip_forward_progress) { 20470 rconfirm = B_TRUE; 20471 tcp->tcp_ip_forward_progress = B_FALSE; 20472 } 20473 } 20474 20475 /* at least one payload span, and at most two */ 20476 ASSERT(pkt_info->pld_cnt > 0 && pkt_info->pld_cnt < 3); 20477 20478 /* add the packet descriptor to Multidata */ 20479 if ((pkt = mmd_addpdesc(mmd, pkt_info, &err, 20480 KM_NOSLEEP)) == NULL) { 20481 /* 20482 * Any failure other than ENOMEM indicates 20483 * that we have passed in invalid pkt_info 20484 * or parameters to mmd_addpdesc, which must 20485 * not happen. 20486 * 20487 * EINVAL is a result of failure on boundary 20488 * checks against the pkt_info contents. It 20489 * should not happen, and we panic because 20490 * either there's horrible heap corruption, 20491 * and/or programming mistake. 
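 * An ENOMEM return, by contrast, only means that the KM_NOSLEEP
 * allocation inside mmd_addpdesc() failed; that case is recoverable
 * and is handled below by falling back to the legacy send path.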
20492 */ 20493 if (err != ENOMEM) { 20494 cmn_err(CE_PANIC, "tcp_multisend: " 20495 "pdesc logic error detected for " 20496 "tcp %p mmd %p pinfo %p (%d)\n", 20497 (void *)tcp, (void *)mmd, 20498 (void *)pkt_info, err); 20499 } 20500 TCP_STAT(tcps, tcp_mdt_addpdescfail); 20501 goto legacy_send; /* out_of_mem */ 20502 } 20503 ASSERT(pkt != NULL); 20504 20505 /* calculate IP header and TCP checksums */ 20506 if (af == AF_INET) { 20507 /* calculate pseudo-header checksum */ 20508 cksum = (dst >> 16) + (dst & 0xFFFF) + 20509 (src >> 16) + (src & 0xFFFF); 20510 20511 /* offset for TCP header checksum */ 20512 up = IPH_TCPH_CHECKSUMP(ipha, 20513 IP_SIMPLE_HDR_LENGTH); 20514 } else { 20515 up = (uint16_t *)&ip6h->ip6_src; 20516 20517 /* calculate pseudo-header checksum */ 20518 cksum = up[0] + up[1] + up[2] + up[3] + 20519 up[4] + up[5] + up[6] + up[7] + 20520 up[8] + up[9] + up[10] + up[11] + 20521 up[12] + up[13] + up[14] + up[15]; 20522 20523 /* Fold the initial sum */ 20524 cksum = (cksum & 0xffff) + (cksum >> 16); 20525 20526 up = (uint16_t *)(((uchar_t *)ip6h) + 20527 IPV6_HDR_LEN + TCP_CHECKSUM_OFFSET); 20528 } 20529 20530 if (hwcksum_flags & HCK_FULLCKSUM) { 20531 /* clear checksum field for hardware */ 20532 *up = 0; 20533 } else if (hwcksum_flags & HCK_PARTIALCKSUM) { 20534 uint32_t sum; 20535 20536 /* pseudo-header checksumming */ 20537 sum = *up + cksum + IP_TCP_CSUM_COMP; 20538 sum = (sum & 0xFFFF) + (sum >> 16); 20539 *up = (sum & 0xFFFF) + (sum >> 16); 20540 } else { 20541 /* software checksumming */ 20542 TCP_STAT(tcps, tcp_out_sw_cksum); 20543 TCP_STAT_UPDATE(tcps, tcp_out_sw_cksum_bytes, 20544 tcp->tcp_hdr_len + tcp->tcp_last_sent_len); 20545 *up = IP_MD_CSUM(pkt, tcp->tcp_ip_hdr_len, 20546 cksum + IP_TCP_CSUM_COMP); 20547 if (*up == 0) 20548 *up = 0xFFFF; 20549 } 20550 20551 /* IPv4 header checksum */ 20552 if (af == AF_INET) { 20553 ipha->ipha_fragment_offset_and_flags |= 20554 (uint32_t)htons(ire->ire_frag_flag); 20555 20556 if (hwcksum_flags & HCK_IPV4_HDRCKSUM) { 20557 ipha->ipha_hdr_checksum = 0; 20558 } else { 20559 IP_HDR_CKSUM(ipha, cksum, 20560 ((uint32_t *)ipha)[0], 20561 ((uint16_t *)ipha)[4]); 20562 } 20563 } 20564 20565 if (af == AF_INET && 20566 HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) || 20567 af == AF_INET6 && 20568 HOOKS6_INTERESTED_PHYSICAL_OUT(ipst)) { 20569 /* build header(IP/TCP) mblk for this segment */ 20570 if ((mp = dupb(md_hbuf)) == NULL) 20571 goto legacy_send; 20572 20573 mp->b_rptr = pkt_info->hdr_rptr; 20574 mp->b_wptr = pkt_info->hdr_wptr; 20575 20576 /* build payload mblk for this segment */ 20577 if ((mp1 = dupb(*xmit_tail)) == NULL) { 20578 freemsg(mp); 20579 goto legacy_send; 20580 } 20581 mp1->b_wptr = md_pbuf->b_rptr + cur_pld_off; 20582 mp1->b_rptr = mp1->b_wptr - 20583 tcp->tcp_last_sent_len; 20584 linkb(mp, mp1); 20585 20586 pld_start = mp1->b_rptr; 20587 20588 if (af == AF_INET) { 20589 DTRACE_PROBE4( 20590 ip4__physical__out__start, 20591 ill_t *, NULL, 20592 ill_t *, ill, 20593 ipha_t *, ipha, 20594 mblk_t *, mp); 20595 FW_HOOKS( 20596 ipst->ips_ip4_physical_out_event, 20597 ipst->ips_ipv4firewall_physical_out, 20598 NULL, ill, ipha, mp, mp, ipst); 20599 DTRACE_PROBE1( 20600 ip4__physical__out__end, 20601 mblk_t *, mp); 20602 } else { 20603 DTRACE_PROBE4( 20604 ip6__physical__out_start, 20605 ill_t *, NULL, 20606 ill_t *, ill, 20607 ip6_t *, ip6h, 20608 mblk_t *, mp); 20609 FW_HOOKS6( 20610 ipst->ips_ip6_physical_out_event, 20611 ipst->ips_ipv6firewall_physical_out, 20612 NULL, ill, ip6h, mp, mp, ipst); 20613 DTRACE_PROBE1( 20614 
ip6__physical__out__end,
    mblk_t *, mp);
}

if (buf_trunked && mp != NULL) {
    /*
     * Need to pass it to normal path.
     */
    CALL_IP_WPUT(tcp->tcp_connp, q, mp);
} else if (mp == NULL ||
    mp->b_rptr != pkt_info->hdr_rptr ||
    mp->b_wptr != pkt_info->hdr_wptr ||
    (mp1 = mp->b_cont) == NULL ||
    mp1->b_rptr != pld_start ||
    mp1->b_wptr != pld_start +
    tcp->tcp_last_sent_len ||
    mp1->b_cont != NULL) {
    /*
     * Need to pass all packets of this
     * buffer to normal path, either when
     * a packet is blocked, or when the
     * boundary of the header buffer or
     * payload buffer has been changed by
     * FW_HOOKS[6].
     */
    buf_trunked = B_TRUE;
    if (md_mp_head != NULL) {
        err = (intptr_t)rmvb(md_mp_head,
            md_mp);
        if (err == 0)
            md_mp_head = NULL;
    }

    /* send down what we've got so far */
    if (md_mp_head != NULL) {
        tcp_multisend_data(tcp, ire,
            ill, md_mp_head, obsegs,
            obbytes, &rconfirm);
    }
    md_mp_head = NULL;

    if (mp != NULL)
        CALL_IP_WPUT(tcp->tcp_connp,
            q, mp);

    mp1 = fw_mp_head;
    do {
        mp = mp1;
        mp1 = mp1->b_next;
        mp->b_next = NULL;
        mp->b_prev = NULL;
        CALL_IP_WPUT(tcp->tcp_connp,
            q, mp);
    } while (mp1 != NULL);

    fw_mp_head = NULL;
} else {
    if (fw_mp_head == NULL)
        fw_mp_head = mp;
    else
        fw_mp_head->b_prev->b_next = mp;
    fw_mp_head->b_prev = mp;
}
}

/* advance header offset */
cur_hdr_off += hdr_frag_sz;

obbytes += tcp->tcp_last_sent_len;
++obsegs;
} while (!done && *usable > 0 && --num_burst_seg > 0 &&
    *tail_unsent > 0);

if ((*xmit_tail)->b_next == NULL) {
    /*
     * Store the lbolt used for RTT estimation.  We can only
     * record one timestamp per mblk so we do it when we
     * reach the end of the payload buffer.  Also we only
     * take a new timestamp sample when the previous timed
     * data from the same mblk has been ack'ed.
     */
    (*xmit_tail)->b_prev = local_time;
    (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)first_snxt;
}

ASSERT(*tail_unsent >= 0);
if (*tail_unsent > 0) {
    /*
     * We got here because we broke out of the above
     * loop due to one of the following cases:
     *
     * 1. len < adjusted MSS (i.e. small),
     * 2. Sender SWS avoidance,
     * 3. max_pld is zero.
     *
     * We are done for this Multidata, so trim our
     * last payload buffer (if any) accordingly.
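 * The trailing *tail_unsent bytes of md_pbuf were never described
 * by any packet descriptor, so pulling b_wptr back merely drops
 * them from this payload buffer; the data itself stays on the
 * transmit list for a later send.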
20710 */ 20711 if (md_pbuf != NULL) 20712 md_pbuf->b_wptr -= *tail_unsent; 20713 } else if (*usable > 0) { 20714 *xmit_tail = (*xmit_tail)->b_cont; 20715 ASSERT((uintptr_t)MBLKL(*xmit_tail) <= 20716 (uintptr_t)INT_MAX); 20717 *tail_unsent = (int)MBLKL(*xmit_tail); 20718 add_buffer = B_TRUE; 20719 } 20720 20721 while (fw_mp_head) { 20722 mp = fw_mp_head; 20723 fw_mp_head = fw_mp_head->b_next; 20724 mp->b_prev = mp->b_next = NULL; 20725 freemsg(mp); 20726 } 20727 if (buf_trunked) { 20728 TCP_STAT(tcps, tcp_mdt_discarded); 20729 freeb(md_mp); 20730 buf_trunked = B_FALSE; 20731 } 20732 } while (!done && *usable > 0 && num_burst_seg > 0 && 20733 (tcp_mdt_chain || max_pld > 0)); 20734 20735 if (md_mp_head != NULL) { 20736 /* send everything down */ 20737 tcp_multisend_data(tcp, ire, ill, md_mp_head, obsegs, obbytes, 20738 &rconfirm); 20739 } 20740 20741 #undef PREP_NEW_MULTIDATA 20742 #undef PREP_NEW_PBUF 20743 #undef IPVER 20744 20745 IRE_REFRELE(ire); 20746 return (0); 20747 } 20748 20749 /* 20750 * A wrapper function for sending one or more Multidata messages down to 20751 * the module below ip; this routine does not release the reference of the 20752 * IRE (caller does that). This routine is analogous to tcp_send_data(). 20753 */ 20754 static void 20755 tcp_multisend_data(tcp_t *tcp, ire_t *ire, const ill_t *ill, mblk_t *md_mp_head, 20756 const uint_t obsegs, const uint_t obbytes, boolean_t *rconfirm) 20757 { 20758 uint64_t delta; 20759 nce_t *nce; 20760 tcp_stack_t *tcps = tcp->tcp_tcps; 20761 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 20762 20763 ASSERT(ire != NULL && ill != NULL); 20764 ASSERT(ire->ire_stq != NULL); 20765 ASSERT(md_mp_head != NULL); 20766 ASSERT(rconfirm != NULL); 20767 20768 /* adjust MIBs and IRE timestamp */ 20769 TCP_RECORD_TRACE(tcp, md_mp_head, TCP_TRACE_SEND_PKT); 20770 tcp->tcp_obsegs += obsegs; 20771 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataSegs, obsegs); 20772 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, obbytes); 20773 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out, obsegs); 20774 20775 if (tcp->tcp_ipversion == IPV4_VERSION) { 20776 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v4, obsegs); 20777 } else { 20778 TCP_STAT_UPDATE(tcps, tcp_mdt_pkt_out_v6, obsegs); 20779 } 20780 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests, obsegs); 20781 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits, obsegs); 20782 UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets, obbytes); 20783 20784 ire->ire_ob_pkt_count += obsegs; 20785 if (ire->ire_ipif != NULL) 20786 atomic_add_32(&ire->ire_ipif->ipif_ob_pkt_count, obsegs); 20787 ire->ire_last_used_time = lbolt; 20788 20789 /* send it down */ 20790 putnext(ire->ire_stq, md_mp_head); 20791 20792 /* we're done for TCP/IPv4 */ 20793 if (tcp->tcp_ipversion == IPV4_VERSION) 20794 return; 20795 20796 nce = ire->ire_nce; 20797 20798 ASSERT(nce != NULL); 20799 ASSERT(!(nce->nce_flags & (NCE_F_NONUD|NCE_F_PERMANENT))); 20800 ASSERT(nce->nce_state != ND_INCOMPLETE); 20801 20802 /* reachability confirmation? 
*/
if (*rconfirm) {
    nce->nce_last = TICK_TO_MSEC(lbolt64);
    if (nce->nce_state != ND_REACHABLE) {
        mutex_enter(&nce->nce_lock);
        nce->nce_state = ND_REACHABLE;
        nce->nce_pcnt = ND_MAX_UNICAST_SOLICIT;
        mutex_exit(&nce->nce_lock);
        (void) untimeout(nce->nce_timeout_id);
        if (ip_debug > 2) {
            /* ip1dbg */
            pr_addr_dbg("tcp_multisend_data: state "
                "for %s changed to REACHABLE\n",
                AF_INET6, &ire->ire_addr_v6);
        }
    }
    /* reset transport reachability confirmation */
    *rconfirm = B_FALSE;
}

delta = TICK_TO_MSEC(lbolt64) - nce->nce_last;
ip1dbg(("tcp_multisend_data: delta = %" PRId64
    " ill_reachable_time = %d \n", delta, ill->ill_reachable_time));

if (delta > (uint64_t)ill->ill_reachable_time) {
    mutex_enter(&nce->nce_lock);
    switch (nce->nce_state) {
    case ND_REACHABLE:
    case ND_STALE:
        /*
         * ND_REACHABLE is identical to ND_STALE in this
         * specific case.  If reachable time has expired for
         * this neighbor (delta is greater than reachable
         * time), conceptually, the neighbor cache is no
         * longer in REACHABLE state, but already in STALE
         * state.  So the correct transition here is to
         * ND_DELAY.
         */
        nce->nce_state = ND_DELAY;
        mutex_exit(&nce->nce_lock);
        NDP_RESTART_TIMER(nce,
            ipst->ips_delay_first_probe_time);
        if (ip_debug > 3) {
            /* ip2dbg */
            pr_addr_dbg("tcp_multisend_data: state "
                "for %s changed to DELAY\n",
                AF_INET6, &ire->ire_addr_v6);
        }
        break;
    case ND_DELAY:
    case ND_PROBE:
        mutex_exit(&nce->nce_lock);
        /* Timers have already started */
        break;
    case ND_UNREACHABLE:
        /*
         * The ndp timer has detected that this nce is
         * unreachable and initiated deleting this nce
         * and all its associated IREs.  This is a race
         * where we found the ire before it was deleted
         * and have just sent out a packet using this
         * unreachable nce.
         */
        mutex_exit(&nce->nce_lock);
        break;
    default:
        ASSERT(0);
    }
}
}

/*
 * Derived from tcp_send_data().  Unlike that routine, this one hands
 * the driver a single large mblk tagged with HW_LSO and the MSS (via
 * DB_LSOFLAGS and DB_LSOMSS below), leaving it to the hardware to
 * carve the payload into MSS-sized segments.
 */
static void
tcp_lsosend_data(tcp_t *tcp, mblk_t *mp, ire_t *ire, ill_t *ill, const int mss,
    int num_lso_seg)
{
    ipha_t *ipha;
    mblk_t *ire_fp_mp;
    uint_t ire_fp_mp_len;
    uint32_t hcksum_txflags = 0;
    ipaddr_t src;
    ipaddr_t dst;
    uint32_t cksum;
    uint16_t *up;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

    ASSERT(DB_TYPE(mp) == M_DATA);
    ASSERT(tcp->tcp_state == TCPS_ESTABLISHED);
    ASSERT(tcp->tcp_ipversion == IPV4_VERSION);
    ASSERT(tcp->tcp_connp != NULL);
    ASSERT(CONN_IS_LSO_MD_FASTPATH(tcp->tcp_connp));

    ipha = (ipha_t *)mp->b_rptr;
    src = ipha->ipha_src;
    dst = ipha->ipha_dst;

    ASSERT(ipha->ipha_ident == 0 || ipha->ipha_ident == IP_HDR_INCLUDED);
    ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident,
        num_lso_seg);
#ifndef _BIG_ENDIAN
    ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8);
#endif
    if (tcp->tcp_snd_zcopy_aware) {
        if ((ill->ill_capabilities & ILL_CAPAB_ZEROCOPY) == 0 ||
            (ill->ill_zerocopy_capab->ill_zerocopy_flags == 0))
            mp = tcp_zcopy_disable(tcp, mp);
    }

    if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) {
        ASSERT(ill->ill_hcksum_capab != NULL);
        hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
    }

    /*
     * Since the TCP checksum should be recalculated by h/w, we can just
     * zero the checksum field for HCK_FULLCKSUM, or calculate partial
     * pseudo-header checksum for HCK_PARTIALCKSUM.
     * The partial pseudo-header excludes the TCP length, which was
     * calculated in tcp_send(), so we zero *up before further processing.
     */
    cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF);

    up = IPH_TCPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
    *up = 0;

    IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags, mp, ipha, up,
        IPPROTO_TCP, IP_SIMPLE_HDR_LENGTH, ntohs(ipha->ipha_length), cksum);

    /*
     * Append LSO flag to DB_LSOFLAGS(mp) and set the mss to DB_LSOMSS(mp).
     */
    DB_LSOFLAGS(mp) |= HW_LSO;
    DB_LSOMSS(mp) = mss;

    ipha->ipha_fragment_offset_and_flags |=
        (uint32_t)htons(ire->ire_frag_flag);

    ire_fp_mp = ire->ire_nce->nce_fp_mp;
    ire_fp_mp_len = MBLKL(ire_fp_mp);
    ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
    mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
    bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);

    UPDATE_OB_PKT_COUNT(ire);
    ire->ire_last_used_time = lbolt;
    BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
    BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
    UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
        ntohs(ipha->ipha_length));

    if (ILL_DLS_CAPABLE(ill)) {
        /*
         * Send the packet directly to DLD, where it may be queued
         * depending on the availability of transmit resources at
         * the media layer.
         */
        IP_DLS_ILL_TX(ill, ipha, mp, ipst);
    } else {
        ill_t *out_ill = (ill_t *)ire->ire_stq->q_ptr;
        DTRACE_PROBE4(ip4__physical__out__start,
            ill_t *, NULL, ill_t *, out_ill,
            ipha_t *, ipha, mblk_t *, mp);
        FW_HOOKS(ipst->ips_ip4_physical_out_event,
            ipst->ips_ipv4firewall_physical_out,
            NULL, out_ill, ipha, mp, mp, ipst);
        DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
        if (mp != NULL)
            putnext(ire->ire_stq, mp);
    }
}

/*
 * tcp_send() is called by tcp_wput_data() for the non-Multidata
 * transmission scheme, and returns one of the following:
 *
 * -1 = failed allocation.
 *  0 = success; burst count reached, or usable send window is too small,
 *      and we'd rather wait until later before sending again.
 *  1 = success; we are called from tcp_multisend(), and both usable send
 *      window and tail_unsent are greater than the MDT threshold, and thus
 *      Multidata Transmit should be used instead.
 */
static int
tcp_send(queue_t *q, tcp_t *tcp, const int mss, const int tcp_hdr_len,
    const int tcp_tcp_hdr_len, const int num_sack_blk, int *usable,
    uint_t *snxt, int *tail_unsent, mblk_t **xmit_tail, mblk_t *local_time,
    const int mdt_thres)
{
    int num_burst_seg = tcp->tcp_snd_burst;
    ire_t *ire = NULL;
    ill_t *ill = NULL;
    mblk_t *ire_fp_mp = NULL;
    uint_t ire_fp_mp_len = 0;
    int num_lso_seg = 1;
    uint_t lso_usable;
    boolean_t do_lso_send = B_FALSE;
    tcp_stack_t *tcps = tcp->tcp_tcps;

    /*
     * Check LSO capability before any further work.  A similar check
     * needs to be done in the for (;;) loop below.
     * LSO will be deployed when there is more than one MSS of available
     * data and a burst transmission is allowed.
     */
    if (tcp->tcp_lso &&
        (tcp->tcp_valid_bits == 0 ||
        tcp->tcp_valid_bits == TCP_FSS_VALID) &&
        num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
        /*
         * Try to find a usable IRE/ILL and do a basic check on
         * the ILL.
         */
        if (tcp_send_find_ire_ill(tcp, NULL, &ire, &ill)) {
            /*
             * Enable LSO with this transmission.
             * Since the IRE has been held in
             * tcp_send_find_ire_ill(), IRE_REFRELE(ire)
             * must be called before returning.
             */
            do_lso_send = B_TRUE;
            ire_fp_mp = ire->ire_nce->nce_fp_mp;
            ire_fp_mp_len = MBLKL(ire_fp_mp);
            /* Round up to multiple of 4 */
            ire_fp_mp_len = ((ire_fp_mp_len + 3) / 4) * 4;
        } else {
            do_lso_send = B_FALSE;
            ill = NULL;
        }
    }

    for (;;) {
        struct datab *db;
        tcph_t *tcph;
        uint32_t sum;
        mblk_t *mp, *mp1;
        uchar_t *rptr;
        int len;

        /*
         * If we're called by tcp_multisend(), and the amount of
         * sendable data as well as the size of current xmit_tail
         * is beyond the MDT threshold, return to the caller and
         * let the large data transmit be done using MDT.
         */
        if (*usable > 0 && *usable > mdt_thres &&
            (*tail_unsent > mdt_thres || (*tail_unsent == 0 &&
            MBLKL((*xmit_tail)->b_cont) > mdt_thres))) {
            ASSERT(tcp->tcp_mdt);
            return (1); /* success; do large send */
        }

        if (num_burst_seg == 0)
            break; /* success; burst count reached */

        /*
         * Calculate the maximum payload length we can send in *one*
         * time.  As an illustrative example: with mss = 1460,
         * *usable = 10000 and num_burst_seg = 10, the code below
         * computes lso_usable = MIN(tcp_lso_max, 10000, 14600),
         * i.e. 10000 assuming tcp_lso_max is at least that large,
         * which makes 6 full segments plus a 1240-byte tail, so
         * num_lso_seg becomes 7 and tcp_last_sent_len 1240.
         */
        if (do_lso_send) {
            /*
             * Check whether we still need to do LSO.
             */
            if (num_burst_seg >= 2 && (*usable - 1) / mss >= 1) {
                lso_usable = MIN(tcp->tcp_lso_max, *usable);
                lso_usable = MIN(lso_usable,
                    num_burst_seg * mss);

                num_lso_seg = lso_usable / mss;
                if (lso_usable % mss) {
                    num_lso_seg++;
                    tcp->tcp_last_sent_len = (ushort_t)
                        (lso_usable % mss);
                } else {
                    tcp->tcp_last_sent_len = (ushort_t)mss;
                }
            } else {
                do_lso_send = B_FALSE;
                num_lso_seg = 1;
                lso_usable = mss;
            }
        }

        ASSERT(num_lso_seg <= IP_MAXPACKET / mss + 1);

        /*
         * Adjust num_burst_seg here.
         */
        num_burst_seg -= num_lso_seg;

        len = mss;
        if (len > *usable) {
            ASSERT(do_lso_send == B_FALSE);

            len = *usable;
            if (len <= 0) {
                /* Terminate the loop */
                break; /* success; too small */
            }
            /*
             * Sender silly-window avoidance.
             * Ignore this if we are going to send a
             * zero window probe out.
             *
             * TODO: force data into microscopic window?
             *	==> (!pushed || (unsent > usable))
             */
            if (len < (tcp->tcp_max_swnd >> 1) &&
                (tcp->tcp_unsent - (*snxt - tcp->tcp_snxt)) > len &&
                !((tcp->tcp_valid_bits & TCP_URG_VALID) &&
                len == 1) && (! tcp->tcp_zero_win_probe)) {
                /*
                 * If the retransmit timer is not running
                 * we start it so that we will retransmit
                 * in the case when the receiver has
                 * decremented the window.
                 */
                if (*snxt == tcp->tcp_snxt &&
                    *snxt == tcp->tcp_suna) {
                    /*
                     * We are not supposed to send
                     * anything.  So let's wait a little
                     * bit longer before breaking SWS
                     * avoidance.
                     *
                     * What should the value be?
                     * Suggestion: MAX(init rexmit time,
                     * tcp->tcp_rto)
                     */
                    TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
                }
                break; /* success; too small */
            }
        }

        tcph = tcp->tcp_tcph;

        /*
         * The reason to adjust len here is that we need to set flags
         * and calculate the checksum.
         */
        if (do_lso_send)
            len = lso_usable;

        *usable -= len; /* Approximate - can be adjusted later */
        if (*usable > 0)
            tcph->th_flags[0] = TH_ACK;
        else
            tcph->th_flags[0] = (TH_ACK | TH_PUSH);

        /*
         * Prime pump for IP's checksumming on our behalf;
         * include the adjustment for a source route if any.
         */
        sum = len + tcp_tcp_hdr_len + tcp->tcp_sum;
        sum = (sum >> 16) + (sum & 0xFFFF);
        U16_TO_ABE16(sum, tcph->th_sum);

        U32_TO_ABE32(*snxt, tcph->th_seq);

        /*
         * Branch off to tcp_xmit_mp() if any of the VALID bits is
         * set.  For the case when TCP_FSS_VALID is the only valid
         * bit (normal active close), branch off only when we think
         * that the FIN flag needs to be set.  Note for this case
         * that (snxt + len) may not reflect the actual seg_len,
         * as len may be further reduced in tcp_xmit_mp().  If len
         * gets modified, we will end up here again.
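 * (The prev_snxt save/restore in the code below exists because
 * tcp_xmit_mp() can advance tcp_snxt as a side effect; restoring it
 * keeps the amount-sent accounting in this routine correct.)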
21171 */ 21172 if (tcp->tcp_valid_bits != 0 && 21173 (tcp->tcp_valid_bits != TCP_FSS_VALID || 21174 ((*snxt + len) == tcp->tcp_fss))) { 21175 uchar_t *prev_rptr; 21176 uint32_t prev_snxt = tcp->tcp_snxt; 21177 21178 if (*tail_unsent == 0) { 21179 ASSERT((*xmit_tail)->b_cont != NULL); 21180 *xmit_tail = (*xmit_tail)->b_cont; 21181 prev_rptr = (*xmit_tail)->b_rptr; 21182 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21183 (*xmit_tail)->b_rptr); 21184 } else { 21185 prev_rptr = (*xmit_tail)->b_rptr; 21186 (*xmit_tail)->b_rptr = (*xmit_tail)->b_wptr - 21187 *tail_unsent; 21188 } 21189 mp = tcp_xmit_mp(tcp, *xmit_tail, len, NULL, NULL, 21190 *snxt, B_FALSE, (uint32_t *)&len, B_FALSE); 21191 /* Restore tcp_snxt so we get amount sent right. */ 21192 tcp->tcp_snxt = prev_snxt; 21193 if (prev_rptr == (*xmit_tail)->b_rptr) { 21194 /* 21195 * If the previous timestamp is still in use, 21196 * don't stomp on it. 21197 */ 21198 if ((*xmit_tail)->b_next == NULL) { 21199 (*xmit_tail)->b_prev = local_time; 21200 (*xmit_tail)->b_next = 21201 (mblk_t *)(uintptr_t)(*snxt); 21202 } 21203 } else 21204 (*xmit_tail)->b_rptr = prev_rptr; 21205 21206 if (mp == NULL) { 21207 if (ire != NULL) 21208 IRE_REFRELE(ire); 21209 return (-1); 21210 } 21211 mp1 = mp->b_cont; 21212 21213 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21214 tcp->tcp_last_sent_len = (ushort_t)len; 21215 while (mp1->b_cont) { 21216 *xmit_tail = (*xmit_tail)->b_cont; 21217 (*xmit_tail)->b_prev = local_time; 21218 (*xmit_tail)->b_next = 21219 (mblk_t *)(uintptr_t)(*snxt); 21220 mp1 = mp1->b_cont; 21221 } 21222 *snxt += len; 21223 *tail_unsent = (*xmit_tail)->b_wptr - mp1->b_wptr; 21224 BUMP_LOCAL(tcp->tcp_obsegs); 21225 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21226 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21227 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21228 tcp_send_data(tcp, q, mp); 21229 continue; 21230 } 21231 21232 *snxt += len; /* Adjust later if we don't send all of len */ 21233 BUMP_MIB(&tcps->tcps_mib, tcpOutDataSegs); 21234 UPDATE_MIB(&tcps->tcps_mib, tcpOutDataBytes, len); 21235 21236 if (*tail_unsent) { 21237 /* Are the bytes above us in flight? */ 21238 rptr = (*xmit_tail)->b_wptr - *tail_unsent; 21239 if (rptr != (*xmit_tail)->b_rptr) { 21240 *tail_unsent -= len; 21241 if (len <= mss) /* LSO is unusable */ 21242 tcp->tcp_last_sent_len = (ushort_t)len; 21243 len += tcp_hdr_len; 21244 if (tcp->tcp_ipversion == IPV4_VERSION) 21245 tcp->tcp_ipha->ipha_length = htons(len); 21246 else 21247 tcp->tcp_ip6h->ip6_plen = 21248 htons(len - 21249 ((char *)&tcp->tcp_ip6h[1] - 21250 tcp->tcp_iphc)); 21251 mp = dupb(*xmit_tail); 21252 if (mp == NULL) { 21253 if (ire != NULL) 21254 IRE_REFRELE(ire); 21255 return (-1); /* out_of_mem */ 21256 } 21257 mp->b_rptr = rptr; 21258 /* 21259 * If the old timestamp is no longer in use, 21260 * sample a new timestamp now. 
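 * (Throughout this routine the transmit-list mblks double as RTT
 * bookkeeping: b_prev holds the lbolt at transmit time and b_next
 * encodes the starting sequence number, to be matched against
 * later ACKs for RTT sampling.)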
21261 */ 21262 if ((*xmit_tail)->b_next == NULL) { 21263 (*xmit_tail)->b_prev = local_time; 21264 (*xmit_tail)->b_next = 21265 (mblk_t *)(uintptr_t)(*snxt-len); 21266 } 21267 goto must_alloc; 21268 } 21269 } else { 21270 *xmit_tail = (*xmit_tail)->b_cont; 21271 ASSERT((uintptr_t)((*xmit_tail)->b_wptr - 21272 (*xmit_tail)->b_rptr) <= (uintptr_t)INT_MAX); 21273 *tail_unsent = (int)((*xmit_tail)->b_wptr - 21274 (*xmit_tail)->b_rptr); 21275 } 21276 21277 (*xmit_tail)->b_prev = local_time; 21278 (*xmit_tail)->b_next = (mblk_t *)(uintptr_t)(*snxt - len); 21279 21280 *tail_unsent -= len; 21281 if (len <= mss) /* LSO is unusable (!do_lso_send) */ 21282 tcp->tcp_last_sent_len = (ushort_t)len; 21283 21284 len += tcp_hdr_len; 21285 if (tcp->tcp_ipversion == IPV4_VERSION) 21286 tcp->tcp_ipha->ipha_length = htons(len); 21287 else 21288 tcp->tcp_ip6h->ip6_plen = htons(len - 21289 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 21290 21291 mp = dupb(*xmit_tail); 21292 if (mp == NULL) { 21293 if (ire != NULL) 21294 IRE_REFRELE(ire); 21295 return (-1); /* out_of_mem */ 21296 } 21297 21298 len = tcp_hdr_len; 21299 /* 21300 * There are four reasons to allocate a new hdr mblk: 21301 * 1) The bytes above us are in use by another packet 21302 * 2) We don't have good alignment 21303 * 3) The mblk is being shared 21304 * 4) We don't have enough room for a header 21305 */ 21306 rptr = mp->b_rptr - len; 21307 if (!OK_32PTR(rptr) || 21308 ((db = mp->b_datap), db->db_ref != 2) || 21309 rptr < db->db_base + ire_fp_mp_len) { 21310 /* NOTE: we assume allocb returns an OK_32PTR */ 21311 21312 must_alloc:; 21313 mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + 21314 tcps->tcps_wroff_xtra + ire_fp_mp_len, BPRI_MED); 21315 if (mp1 == NULL) { 21316 freemsg(mp); 21317 if (ire != NULL) 21318 IRE_REFRELE(ire); 21319 return (-1); /* out_of_mem */ 21320 } 21321 mp1->b_cont = mp; 21322 mp = mp1; 21323 /* Leave room for Link Level header */ 21324 len = tcp_hdr_len; 21325 rptr = 21326 &mp->b_rptr[tcps->tcps_wroff_xtra + ire_fp_mp_len]; 21327 mp->b_wptr = &rptr[len]; 21328 } 21329 21330 /* 21331 * Fill in the header using the template header, and add 21332 * options such as time-stamp, ECN and/or SACK, as needed. 21333 */ 21334 tcp_fill_header(tcp, rptr, (clock_t)local_time, num_sack_blk); 21335 21336 mp->b_rptr = rptr; 21337 21338 if (*tail_unsent) { 21339 int spill = *tail_unsent; 21340 21341 mp1 = mp->b_cont; 21342 if (mp1 == NULL) 21343 mp1 = mp; 21344 21345 /* 21346 * If we're a little short, tack on more mblks until 21347 * there is no more spillover. 21348 */ 21349 while (spill < 0) { 21350 mblk_t *nmp; 21351 int nmpsz; 21352 21353 nmp = (*xmit_tail)->b_cont; 21354 nmpsz = MBLKL(nmp); 21355 21356 /* 21357 * Excess data in mblk; can we split it? 21358 * If MDT is enabled for the connection, 21359 * keep on splitting as this is a transient 21360 * send path. 21361 */ 21362 if (!do_lso_send && !tcp->tcp_mdt && 21363 (spill + nmpsz > 0)) { 21364 /* 21365 * Don't split if stream head was 21366 * told to break up larger writes 21367 * into smaller ones. 21368 */ 21369 if (tcp->tcp_maxpsz > 0) 21370 break; 21371 21372 /* 21373 * Next mblk is less than SMSS/2 21374 * rounded up to nearest 64-byte; 21375 * let it get sent as part of the 21376 * next segment. 
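 * For instance, with a hypothetical mss of 1460 this threshold is
 * roundup(730, 64) = 768, so a trailing mblk smaller than 768
 * bytes is deferred rather than split here.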
21377 */ 21378 if (tcp->tcp_localnet && 21379 !tcp->tcp_cork && 21380 (nmpsz < roundup((mss >> 1), 64))) 21381 break; 21382 } 21383 21384 *xmit_tail = nmp; 21385 ASSERT((uintptr_t)nmpsz <= (uintptr_t)INT_MAX); 21386 /* Stash for rtt use later */ 21387 (*xmit_tail)->b_prev = local_time; 21388 (*xmit_tail)->b_next = 21389 (mblk_t *)(uintptr_t)(*snxt - len); 21390 mp1->b_cont = dupb(*xmit_tail); 21391 mp1 = mp1->b_cont; 21392 21393 spill += nmpsz; 21394 if (mp1 == NULL) { 21395 *tail_unsent = spill; 21396 freemsg(mp); 21397 if (ire != NULL) 21398 IRE_REFRELE(ire); 21399 return (-1); /* out_of_mem */ 21400 } 21401 } 21402 21403 /* Trim back any surplus on the last mblk */ 21404 if (spill >= 0) { 21405 mp1->b_wptr -= spill; 21406 *tail_unsent = spill; 21407 } else { 21408 /* 21409 * We did not send everything we could in 21410 * order to remain within the b_cont limit. 21411 */ 21412 *usable -= spill; 21413 *snxt += spill; 21414 tcp->tcp_last_sent_len += spill; 21415 UPDATE_MIB(&tcps->tcps_mib, 21416 tcpOutDataBytes, spill); 21417 /* 21418 * Adjust the checksum 21419 */ 21420 tcph = (tcph_t *)(rptr + tcp->tcp_ip_hdr_len); 21421 sum += spill; 21422 sum = (sum >> 16) + (sum & 0xFFFF); 21423 U16_TO_ABE16(sum, tcph->th_sum); 21424 if (tcp->tcp_ipversion == IPV4_VERSION) { 21425 sum = ntohs( 21426 ((ipha_t *)rptr)->ipha_length) + 21427 spill; 21428 ((ipha_t *)rptr)->ipha_length = 21429 htons(sum); 21430 } else { 21431 sum = ntohs( 21432 ((ip6_t *)rptr)->ip6_plen) + 21433 spill; 21434 ((ip6_t *)rptr)->ip6_plen = 21435 htons(sum); 21436 } 21437 *tail_unsent = 0; 21438 } 21439 } 21440 if (tcp->tcp_ip_forward_progress) { 21441 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 21442 *(uint32_t *)mp->b_rptr |= IP_FORWARD_PROG; 21443 tcp->tcp_ip_forward_progress = B_FALSE; 21444 } 21445 21446 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 21447 if (do_lso_send) { 21448 tcp_lsosend_data(tcp, mp, ire, ill, mss, 21449 num_lso_seg); 21450 tcp->tcp_obsegs += num_lso_seg; 21451 21452 TCP_STAT(tcps, tcp_lso_times); 21453 TCP_STAT_UPDATE(tcps, tcp_lso_pkt_out, num_lso_seg); 21454 } else { 21455 tcp_send_data(tcp, q, mp); 21456 BUMP_LOCAL(tcp->tcp_obsegs); 21457 } 21458 } 21459 21460 if (ire != NULL) 21461 IRE_REFRELE(ire); 21462 return (0); 21463 } 21464 21465 /* Unlink and return any mblk that looks like it contains a MDT info */ 21466 static mblk_t * 21467 tcp_mdt_info_mp(mblk_t *mp) 21468 { 21469 mblk_t *prev_mp; 21470 21471 for (;;) { 21472 prev_mp = mp; 21473 /* no more to process? */ 21474 if ((mp = mp->b_cont) == NULL) 21475 break; 21476 21477 switch (DB_TYPE(mp)) { 21478 case M_CTL: 21479 if (*(uint32_t *)mp->b_rptr != MDT_IOC_INFO_UPDATE) 21480 continue; 21481 ASSERT(prev_mp != NULL); 21482 prev_mp->b_cont = mp->b_cont; 21483 mp->b_cont = NULL; 21484 return (mp); 21485 default: 21486 break; 21487 } 21488 } 21489 return (mp); 21490 } 21491 21492 /* MDT info update routine, called when IP notifies us about MDT */ 21493 static void 21494 tcp_mdt_update(tcp_t *tcp, ill_mdt_capab_t *mdt_capab, boolean_t first) 21495 { 21496 boolean_t prev_state; 21497 tcp_stack_t *tcps = tcp->tcp_tcps; 21498 21499 /* 21500 * IP is telling us to abort MDT on this connection? We know 21501 * this because the capability is only turned off when IP 21502 * encounters some pathological cases, e.g. link-layer change 21503 * where the new driver doesn't support MDT, or in situation 21504 * where MDT usage on the link-layer has been switched off. 
21505 * IP would not have sent us the initial MDT_IOC_INFO_UPDATE 21506 * if the link-layer doesn't support MDT, and if it does, it 21507 * will indicate that the feature is to be turned on. 21508 */ 21509 prev_state = tcp->tcp_mdt; 21510 tcp->tcp_mdt = (mdt_capab->ill_mdt_on != 0); 21511 if (!tcp->tcp_mdt && !first) { 21512 TCP_STAT(tcps, tcp_mdt_conn_halted3); 21513 ip1dbg(("tcp_mdt_update: disabling MDT for connp %p\n", 21514 (void *)tcp->tcp_connp)); 21515 } 21516 21517 /* 21518 * We currently only support MDT on simple TCP/{IPv4,IPv6}, 21519 * so disable MDT otherwise. The checks are done here 21520 * and in tcp_wput_data(). 21521 */ 21522 if (tcp->tcp_mdt && 21523 (tcp->tcp_ipversion == IPV4_VERSION && 21524 tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) || 21525 (tcp->tcp_ipversion == IPV6_VERSION && 21526 tcp->tcp_ip_hdr_len != IPV6_HDR_LEN)) 21527 tcp->tcp_mdt = B_FALSE; 21528 21529 if (tcp->tcp_mdt) { 21530 if (mdt_capab->ill_mdt_version != MDT_VERSION_2) { 21531 cmn_err(CE_NOTE, "tcp_mdt_update: unknown MDT " 21532 "version (%d), expected version is %d", 21533 mdt_capab->ill_mdt_version, MDT_VERSION_2); 21534 tcp->tcp_mdt = B_FALSE; 21535 return; 21536 } 21537 21538 /* 21539 * We need the driver to be able to handle at least three 21540 * spans per packet in order for tcp MDT to be utilized. 21541 * The first is for the header portion, while the rest are 21542 * needed to handle a packet that straddles across two 21543 * virtually non-contiguous buffers; a typical tcp packet 21544 * therefore consists of only two spans. Note that we take 21545 * a zero as "don't care". 21546 */ 21547 if (mdt_capab->ill_mdt_span_limit > 0 && 21548 mdt_capab->ill_mdt_span_limit < 3) { 21549 tcp->tcp_mdt = B_FALSE; 21550 return; 21551 } 21552 21553 /* a zero means driver wants default value */ 21554 tcp->tcp_mdt_max_pld = MIN(mdt_capab->ill_mdt_max_pld, 21555 tcps->tcps_mdt_max_pbufs); 21556 if (tcp->tcp_mdt_max_pld == 0) 21557 tcp->tcp_mdt_max_pld = tcps->tcps_mdt_max_pbufs; 21558 21559 /* ensure 32-bit alignment */ 21560 tcp->tcp_mdt_hdr_head = roundup(MAX(tcps->tcps_mdt_hdr_head_min, 21561 mdt_capab->ill_mdt_hdr_head), 4); 21562 tcp->tcp_mdt_hdr_tail = roundup(MAX(tcps->tcps_mdt_hdr_tail_min, 21563 mdt_capab->ill_mdt_hdr_tail), 4); 21564 21565 if (!first && !prev_state) { 21566 TCP_STAT(tcps, tcp_mdt_conn_resumed2); 21567 ip1dbg(("tcp_mdt_update: reenabling MDT for connp %p\n", 21568 (void *)tcp->tcp_connp)); 21569 } 21570 } 21571 } 21572 21573 /* Unlink and return any mblk that looks like it contains a LSO info */ 21574 static mblk_t * 21575 tcp_lso_info_mp(mblk_t *mp) 21576 { 21577 mblk_t *prev_mp; 21578 21579 for (;;) { 21580 prev_mp = mp; 21581 /* no more to process? */ 21582 if ((mp = mp->b_cont) == NULL) 21583 break; 21584 21585 switch (DB_TYPE(mp)) { 21586 case M_CTL: 21587 if (*(uint32_t *)mp->b_rptr != LSO_IOC_INFO_UPDATE) 21588 continue; 21589 ASSERT(prev_mp != NULL); 21590 prev_mp->b_cont = mp->b_cont; 21591 mp->b_cont = NULL; 21592 return (mp); 21593 default: 21594 break; 21595 } 21596 } 21597 21598 return (mp); 21599 } 21600 21601 /* LSO info update routine, called when IP notifies us about LSO */ 21602 static void 21603 tcp_lso_update(tcp_t *tcp, ill_lso_capab_t *lso_capab) 21604 { 21605 tcp_stack_t *tcps = tcp->tcp_tcps; 21606 21607 /* 21608 * IP is telling us to abort LSO on this connection? We know 21609 * this because the capability is only turned off when IP 21610 * encounters some pathological cases, e.g. 
link-layer change
 * where the new NIC/driver doesn't support LSO, or in situations
 * where LSO usage on the link-layer has been switched off.
 * IP would not have sent us the initial LSO_IOC_INFO_UPDATE
 * if the link-layer doesn't support LSO, and if it does, it
 * will indicate that the feature is to be turned on.
 */
tcp->tcp_lso = (lso_capab->ill_lso_on != 0);
TCP_STAT(tcps, tcp_lso_enabled);

/*
 * We currently only support LSO on simple TCP/IPv4,
 * so disable LSO otherwise.  The checks are done here
 * and in tcp_wput_data().
 */
if (tcp->tcp_lso &&
    (tcp->tcp_ipversion == IPV4_VERSION &&
    tcp->tcp_ip_hdr_len != IP_SIMPLE_HDR_LENGTH) ||
    (tcp->tcp_ipversion == IPV6_VERSION)) {
    tcp->tcp_lso = B_FALSE;
    TCP_STAT(tcps, tcp_lso_disabled);
} else {
    tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH,
        lso_capab->ill_lso_max);
}
}

static void
tcp_ire_ill_check(tcp_t *tcp, ire_t *ire, ill_t *ill, boolean_t check_lso_mdt)
{
    conn_t *connp = tcp->tcp_connp;
    tcp_stack_t *tcps = tcp->tcp_tcps;
    ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

    ASSERT(ire != NULL);

    /*
     * We may be in the fastpath here, and although we essentially do
     * similar checks as in ip_bind_connected{_v6}/ip_xxinfo_return,
     * we try to keep things as brief as possible.  After all, these
     * are only best-effort checks, and we do more thorough ones prior
     * to calling tcp_send()/tcp_multisend().
     */
    if ((ipst->ips_ip_lso_outbound || ipst->ips_ip_multidata_outbound) &&
        check_lso_mdt && !(ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) &&
        ill != NULL && !CONN_IPSEC_OUT_ENCAPSULATED(connp) &&
        !(ire->ire_flags & RTF_MULTIRT) &&
        !IPP_ENABLED(IPP_LOCAL_OUT, ipst) &&
        CONN_IS_LSO_MD_FASTPATH(connp)) {
        if (ipst->ips_ip_lso_outbound && ILL_LSO_CAPABLE(ill)) {
            /* Cache the result */
            connp->conn_lso_ok = B_TRUE;

            ASSERT(ill->ill_lso_capab != NULL);
            if (!ill->ill_lso_capab->ill_lso_on) {
                ill->ill_lso_capab->ill_lso_on = 1;
                ip1dbg(("tcp_ire_ill_check: connp %p enables "
                    "LSO for interface %s\n", (void *)connp,
                    ill->ill_name));
            }
            tcp_lso_update(tcp, ill->ill_lso_capab);
        } else if (ipst->ips_ip_multidata_outbound &&
            ILL_MDT_CAPABLE(ill)) {
            /* Cache the result */
            connp->conn_mdt_ok = B_TRUE;

            ASSERT(ill->ill_mdt_capab != NULL);
            if (!ill->ill_mdt_capab->ill_mdt_on) {
                ill->ill_mdt_capab->ill_mdt_on = 1;
                ip1dbg(("tcp_ire_ill_check: connp %p enables "
                    "MDT for interface %s\n", (void *)connp,
                    ill->ill_name));
            }
            tcp_mdt_update(tcp, ill->ill_mdt_capab, B_TRUE);
        }
    }

    /*
     * The goal is to reduce the number of generated tcp segments by
     * setting the maxpsz multiplier to 0; this will have an effect on
     * tcp_maxpsz_set().  With this behavior, tcp will pack more data
     * into each packet, up to SMSS bytes.  Doing this reduces the number
     * of outbound segments and incoming ACKs, thus allowing for better
     * network and system performance.  In contrast, the legacy behavior
     * may result in sending less than SMSS size, because the last mblk
     * for some packets may have more data than needed to make up SMSS,
     * and the legacy code refused to "split" it.
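 * (Our reading of tcp_maxpsz_set(): with a multiplier of 0 the
 * stream head stops imposing a fixed packet size on writes, leaving
 * TCP itself to pack each outbound segment up to SMSS.)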
21697 * 21698 * We apply the new behavior on following situations: 21699 * 21700 * 1) Loopback connections, 21701 * 2) Connections in which the remote peer is not on local subnet, 21702 * 3) Local subnet connections over the bge interface (see below). 21703 * 21704 * Ideally, we would like this behavior to apply for interfaces other 21705 * than bge. However, doing so would negatively impact drivers which 21706 * perform dynamic mapping and unmapping of DMA resources, which are 21707 * increased by setting the maxpsz multiplier to 0 (more mblks per 21708 * packet will be generated by tcp). The bge driver does not suffer 21709 * from this, as it copies the mblks into pre-mapped buffers, and 21710 * therefore does not require more I/O resources than before. 21711 * 21712 * Otherwise, this behavior is present on all network interfaces when 21713 * the destination endpoint is non-local, since reducing the number 21714 * of packets in general is good for the network. 21715 * 21716 * TODO We need to remove this hard-coded conditional for bge once 21717 * a better "self-tuning" mechanism, or a way to comprehend 21718 * the driver transmit strategy is devised. Until the solution 21719 * is found and well understood, we live with this hack. 21720 */ 21721 if (!tcp_static_maxpsz && 21722 (tcp->tcp_loopback || !tcp->tcp_localnet || 21723 (ill->ill_name_length > 3 && bcmp(ill->ill_name, "bge", 3) == 0))) { 21724 /* override the default value */ 21725 tcp->tcp_maxpsz = 0; 21726 21727 ip3dbg(("tcp_ire_ill_check: connp %p tcp_maxpsz %d on " 21728 "interface %s\n", (void *)connp, tcp->tcp_maxpsz, 21729 ill != NULL ? ill->ill_name : ipif_loopback_name)); 21730 } 21731 21732 /* set the stream head parameters accordingly */ 21733 (void) tcp_maxpsz_set(tcp, B_TRUE); 21734 } 21735 21736 /* tcp_wput_flush is called by tcp_wput_nondata to handle M_FLUSH messages. */ 21737 static void 21738 tcp_wput_flush(tcp_t *tcp, mblk_t *mp) 21739 { 21740 uchar_t fval = *mp->b_rptr; 21741 mblk_t *tail; 21742 queue_t *q = tcp->tcp_wq; 21743 21744 /* TODO: How should flush interact with urgent data? */ 21745 if ((fval & FLUSHW) && tcp->tcp_xmit_head && 21746 !(tcp->tcp_valid_bits & TCP_URG_VALID)) { 21747 /* 21748 * Flush only data that has not yet been put on the wire. If 21749 * we flush data that we have already transmitted, life, as we 21750 * know it, may come to an end. 21751 */ 21752 tail = tcp->tcp_xmit_tail; 21753 tail->b_wptr -= tcp->tcp_xmit_tail_unsent; 21754 tcp->tcp_xmit_tail_unsent = 0; 21755 tcp->tcp_unsent = 0; 21756 if (tail->b_wptr != tail->b_rptr) 21757 tail = tail->b_cont; 21758 if (tail) { 21759 mblk_t **excess = &tcp->tcp_xmit_head; 21760 for (;;) { 21761 mblk_t *mp1 = *excess; 21762 if (mp1 == tail) 21763 break; 21764 tcp->tcp_xmit_tail = mp1; 21765 tcp->tcp_xmit_last = mp1; 21766 excess = &mp1->b_cont; 21767 } 21768 *excess = NULL; 21769 tcp_close_mpp(&tail); 21770 if (tcp->tcp_snd_zcopy_aware) 21771 tcp_zcopy_notify(tcp); 21772 } 21773 /* 21774 * We have no unsent data, so unsent must be less than 21775 * tcp_xmit_lowater, so re-enable flow. 21776 */ 21777 mutex_enter(&tcp->tcp_non_sq_lock); 21778 if (tcp->tcp_flow_stopped) { 21779 tcp_clrqfull(tcp); 21780 } 21781 mutex_exit(&tcp->tcp_non_sq_lock); 21782 } 21783 /* 21784 * TODO: you can't just flush these, you have to increase rwnd for one 21785 * thing. For another, how should urgent data interact? 
21786 */ 21787 if (fval & FLUSHR) { 21788 *mp->b_rptr = fval & ~FLUSHW; 21789 /* XXX */ 21790 qreply(q, mp); 21791 return; 21792 } 21793 freemsg(mp); 21794 } 21795 21796 /* 21797 * tcp_wput_iocdata is called by tcp_wput_nondata to handle all M_IOCDATA 21798 * messages. 21799 */ 21800 static void 21801 tcp_wput_iocdata(tcp_t *tcp, mblk_t *mp) 21802 { 21803 mblk_t *mp1; 21804 STRUCT_HANDLE(strbuf, sb); 21805 uint16_t port; 21806 queue_t *q = tcp->tcp_wq; 21807 in6_addr_t v6addr; 21808 ipaddr_t v4addr; 21809 uint32_t flowinfo = 0; 21810 int addrlen; 21811 21812 /* Make sure it is one of ours. */ 21813 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21814 case TI_GETMYNAME: 21815 case TI_GETPEERNAME: 21816 break; 21817 default: 21818 CALL_IP_WPUT(tcp->tcp_connp, q, mp); 21819 return; 21820 } 21821 switch (mi_copy_state(q, mp, &mp1)) { 21822 case -1: 21823 return; 21824 case MI_COPY_CASE(MI_COPY_IN, 1): 21825 break; 21826 case MI_COPY_CASE(MI_COPY_OUT, 1): 21827 /* Copy out the strbuf. */ 21828 mi_copyout(q, mp); 21829 return; 21830 case MI_COPY_CASE(MI_COPY_OUT, 2): 21831 /* All done. */ 21832 mi_copy_done(q, mp, 0); 21833 return; 21834 default: 21835 mi_copy_done(q, mp, EPROTO); 21836 return; 21837 } 21838 /* Check alignment of the strbuf */ 21839 if (!OK_32PTR(mp1->b_rptr)) { 21840 mi_copy_done(q, mp, EINVAL); 21841 return; 21842 } 21843 21844 STRUCT_SET_HANDLE(sb, ((struct iocblk *)mp->b_rptr)->ioc_flag, 21845 (void *)mp1->b_rptr); 21846 addrlen = tcp->tcp_family == AF_INET ? sizeof (sin_t) : sizeof (sin6_t); 21847 21848 if (STRUCT_FGET(sb, maxlen) < addrlen) { 21849 mi_copy_done(q, mp, EINVAL); 21850 return; 21851 } 21852 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 21853 case TI_GETMYNAME: 21854 if (tcp->tcp_family == AF_INET) { 21855 if (tcp->tcp_ipversion == IPV4_VERSION) { 21856 v4addr = tcp->tcp_ipha->ipha_src; 21857 } else { 21858 /* can't return an address in this case */ 21859 v4addr = 0; 21860 } 21861 } else { 21862 /* tcp->tcp_family == AF_INET6 */ 21863 if (tcp->tcp_ipversion == IPV4_VERSION) { 21864 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 21865 &v6addr); 21866 } else { 21867 v6addr = tcp->tcp_ip6h->ip6_src; 21868 } 21869 } 21870 port = tcp->tcp_lport; 21871 break; 21872 case TI_GETPEERNAME: 21873 if (tcp->tcp_family == AF_INET) { 21874 if (tcp->tcp_ipversion == IPV4_VERSION) { 21875 IN6_V4MAPPED_TO_IPADDR(&tcp->tcp_remote_v6, 21876 v4addr); 21877 } else { 21878 /* can't return an address in this case */ 21879 v4addr = 0; 21880 } 21881 } else { 21882 /* tcp->tcp_family == AF_INET6) */ 21883 v6addr = tcp->tcp_remote_v6; 21884 if (tcp->tcp_ipversion == IPV6_VERSION) { 21885 /* 21886 * No flowinfo if tcp->tcp_ipversion is v4. 21887 * 21888 * flowinfo was already initialized to zero 21889 * where it was declared above, so only 21890 * set it if ipversion is v6. 
21891 */ 21892 flowinfo = tcp->tcp_ip6h->ip6_vcf & 21893 ~IPV6_VERS_AND_FLOW_MASK; 21894 } 21895 } 21896 port = tcp->tcp_fport; 21897 break; 21898 default: 21899 mi_copy_done(q, mp, EPROTO); 21900 return; 21901 } 21902 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 21903 if (!mp1) 21904 return; 21905 21906 if (tcp->tcp_family == AF_INET) { 21907 sin_t *sin; 21908 21909 STRUCT_FSET(sb, len, (int)sizeof (sin_t)); 21910 sin = (sin_t *)mp1->b_rptr; 21911 mp1->b_wptr = (uchar_t *)&sin[1]; 21912 *sin = sin_null; 21913 sin->sin_family = AF_INET; 21914 sin->sin_addr.s_addr = v4addr; 21915 sin->sin_port = port; 21916 } else { 21917 /* tcp->tcp_family == AF_INET6 */ 21918 sin6_t *sin6; 21919 21920 STRUCT_FSET(sb, len, (int)sizeof (sin6_t)); 21921 sin6 = (sin6_t *)mp1->b_rptr; 21922 mp1->b_wptr = (uchar_t *)&sin6[1]; 21923 *sin6 = sin6_null; 21924 sin6->sin6_family = AF_INET6; 21925 sin6->sin6_flowinfo = flowinfo; 21926 sin6->sin6_addr = v6addr; 21927 sin6->sin6_port = port; 21928 } 21929 /* Copy out the address */ 21930 mi_copyout(q, mp); 21931 } 21932 21933 /* 21934 * tcp_wput_ioctl is called by tcp_wput_nondata() to handle all M_IOCTL 21935 * messages. 21936 */ 21937 /* ARGSUSED */ 21938 static void 21939 tcp_wput_ioctl(void *arg, mblk_t *mp, void *arg2) 21940 { 21941 conn_t *connp = (conn_t *)arg; 21942 tcp_t *tcp = connp->conn_tcp; 21943 queue_t *q = tcp->tcp_wq; 21944 struct iocblk *iocp; 21945 tcp_stack_t *tcps = tcp->tcp_tcps; 21946 21947 ASSERT(DB_TYPE(mp) == M_IOCTL); 21948 /* 21949 * Try and ASSERT the minimum possible references on the 21950 * conn early enough. Since we are executing on write side, 21951 * the connection is obviously not detached and that means 21952 * there is a ref each for TCP and IP. Since we are behind 21953 * the squeue, the minimum references needed are 3. If the 21954 * conn is in classifier hash list, there should be an 21955 * extra ref for that (we check both the possibilities). 21956 */ 21957 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 21958 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 21959 21960 iocp = (struct iocblk *)mp->b_rptr; 21961 switch (iocp->ioc_cmd) { 21962 case TCP_IOC_DEFAULT_Q: 21963 /* Wants to be the default wq. */ 21964 if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) { 21965 iocp->ioc_error = EPERM; 21966 iocp->ioc_count = 0; 21967 mp->b_datap->db_type = M_IOCACK; 21968 qreply(q, mp); 21969 return; 21970 } 21971 tcp_def_q_set(tcp, mp); 21972 return; 21973 case _SIOCSOCKFALLBACK: 21974 /* 21975 * Either sockmod is about to be popped and the socket 21976 * would now be treated as a plain stream, or a module 21977 * is about to be pushed so we could no longer use read- 21978 * side synchronous streams for fused loopback tcp. 21979 * Drain any queued data and disable direct sockfs 21980 * interface from now on. 21981 */ 21982 if (!tcp->tcp_issocket) { 21983 DB_TYPE(mp) = M_IOCNAK; 21984 iocp->ioc_error = EINVAL; 21985 } else { 21986 #ifdef _ILP32 21987 tcp->tcp_acceptor_id = (t_uscalar_t)RD(q); 21988 #else 21989 tcp->tcp_acceptor_id = tcp->tcp_connp->conn_dev; 21990 #endif 21991 /* 21992 * Insert this socket into the acceptor hash. 21993 * We might need it for T_CONN_RES message 21994 */ 21995 tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp); 21996 21997 if (tcp->tcp_fused) { 21998 /* 21999 * This is a fused loopback tcp; disable 22000 * read-side synchronous streams interface 22001 * and drain any queued data. 
It is okay 22002 * to do this for non-synchronous streams 22003 * fused tcp as well. 22004 */ 22005 tcp_fuse_disable_pair(tcp, B_FALSE); 22006 } 22007 tcp->tcp_issocket = B_FALSE; 22008 TCP_STAT(tcps, tcp_sock_fallback); 22009 22010 DB_TYPE(mp) = M_IOCACK; 22011 iocp->ioc_error = 0; 22012 } 22013 iocp->ioc_count = 0; 22014 iocp->ioc_rval = 0; 22015 qreply(q, mp); 22016 return; 22017 } 22018 CALL_IP_WPUT(connp, q, mp); 22019 } 22020 22021 /* 22022 * This routine is called by tcp_wput() to handle all TPI requests. 22023 */ 22024 /* ARGSUSED */ 22025 static void 22026 tcp_wput_proto(void *arg, mblk_t *mp, void *arg2) 22027 { 22028 conn_t *connp = (conn_t *)arg; 22029 tcp_t *tcp = connp->conn_tcp; 22030 union T_primitives *tprim = (union T_primitives *)mp->b_rptr; 22031 uchar_t *rptr; 22032 t_scalar_t type; 22033 int len; 22034 cred_t *cr = DB_CREDDEF(mp, tcp->tcp_cred); 22035 22036 /* 22037 * Try and ASSERT the minimum possible references on the 22038 * conn early enough. Since we are executing on write side, 22039 * the connection is obviously not detached and that means 22040 * there is a ref each for TCP and IP. Since we are behind 22041 * the squeue, the minimum references needed are 3. If the 22042 * conn is in classifier hash list, there should be an 22043 * extra ref for that (we check both the possibilities). 22044 */ 22045 ASSERT((connp->conn_fanout != NULL && connp->conn_ref >= 4) || 22046 (connp->conn_fanout == NULL && connp->conn_ref >= 3)); 22047 22048 rptr = mp->b_rptr; 22049 ASSERT((uintptr_t)(mp->b_wptr - rptr) <= (uintptr_t)INT_MAX); 22050 if ((mp->b_wptr - rptr) >= sizeof (t_scalar_t)) { 22051 type = ((union T_primitives *)rptr)->type; 22052 if (type == T_EXDATA_REQ) { 22053 uint32_t msize = msgdsize(mp->b_cont); 22054 22055 len = msize - 1; 22056 if (len < 0) { 22057 freemsg(mp); 22058 return; 22059 } 22060 /* 22061 * Try to force urgent data out on the wire. 22062 * Even if we have unsent data this will 22063 * at least send the urgent flag. 22064 * XXX does not handle more flag correctly. 22065 */ 22066 len += tcp->tcp_unsent; 22067 len += tcp->tcp_snxt; 22068 tcp->tcp_urg = len; 22069 tcp->tcp_valid_bits |= TCP_URG_VALID; 22070 22071 /* Bypass tcp protocol for fused tcp loopback */ 22072 if (tcp->tcp_fused && tcp_fuse_output(tcp, mp, msize)) 22073 return; 22074 } else if (type != T_DATA_REQ) { 22075 goto non_urgent_data; 22076 } 22077 /* TODO: options, flags, ... from user */ 22078 /* Set length to zero for reclamation below */ 22079 tcp_wput_data(tcp, mp->b_cont, B_TRUE); 22080 freeb(mp); 22081 return; 22082 } else { 22083 if (tcp->tcp_debug) { 22084 (void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 22085 "tcp_wput_proto, dropping one..."); 22086 } 22087 freemsg(mp); 22088 return; 22089 } 22090 22091 non_urgent_data: 22092 22093 switch ((int)tprim->type) { 22094 case T_SSL_PROXY_BIND_REQ: /* an SSL proxy endpoint bind request */ 22095 /* 22096 * save the kssl_ent_t from the next block, and convert this 22097 * back to a normal bind_req. 
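	 *
	 * The request is assumed to arrive as two mblks (a sketch of the
	 * layout, not a formal spec):
	 *
	 *	mp:		the T_SSL_PROXY_BIND_REQ primitive,
	 *			rewritten in place to T_BIND_REQ below
	 *	mp->b_cont:	carries a kssl_ent_t for the SSL proxy entry
	 *
	 * Any entry already held is released first; the new one is held
	 * for the life of the binding and the carrier mblk is freed.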
22098	 */
22099		if (mp->b_cont != NULL) {
22100			ASSERT(MBLKL(mp->b_cont) >= sizeof (kssl_ent_t));
22101
22102			if (tcp->tcp_kssl_ent != NULL) {
22103				kssl_release_ent(tcp->tcp_kssl_ent, NULL,
22104				    KSSL_NO_PROXY);
22105				tcp->tcp_kssl_ent = NULL;
22106			}
22107			bcopy(mp->b_cont->b_rptr, &tcp->tcp_kssl_ent,
22108			    sizeof (kssl_ent_t));
22109			kssl_hold_ent(tcp->tcp_kssl_ent);
22110			freemsg(mp->b_cont);
22111			mp->b_cont = NULL;
22112		}
22113		tprim->type = T_BIND_REQ;
22114
22115	/* FALLTHROUGH */
22116	case O_T_BIND_REQ:	/* bind request */
22117	case T_BIND_REQ:	/* new semantics bind request */
22118		tcp_bind(tcp, mp);
22119		break;
22120	case T_UNBIND_REQ:	/* unbind request */
22121		tcp_unbind(tcp, mp);
22122		break;
22123	case O_T_CONN_RES:	/* old connection response XXX */
22124	case T_CONN_RES:	/* connection response */
22125		tcp_accept(tcp, mp);
22126		break;
22127	case T_CONN_REQ:	/* connection request */
22128		tcp_connect(tcp, mp);
22129		break;
22130	case T_DISCON_REQ:	/* disconnect request */
22131		tcp_disconnect(tcp, mp);
22132		break;
22133	case T_CAPABILITY_REQ:
22134		tcp_capability_req(tcp, mp);	/* capability request */
22135		break;
22136	case T_INFO_REQ:	/* information request */
22137		tcp_info_req(tcp, mp);
22138		break;
22139	case T_SVR4_OPTMGMT_REQ:	/* manage options req */
22140		/* Only IP is allowed to return meaningful value */
22141		(void) svr4_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj);
22142		break;
22143	case T_OPTMGMT_REQ:
22144		/*
22145		 * Note: no support for snmpcom_req() through new
22146		 * T_OPTMGMT_REQ. See comments in ip.c
22147		 */
22148		/* Only IP is allowed to return meaningful value */
22149		(void) tpi_optcom_req(tcp->tcp_wq, mp, cr, &tcp_opt_obj);
22150		break;
22151
22152	case T_UNITDATA_REQ:	/* unitdata request */
22153		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
22154		break;
22155	case T_ORDREL_REQ:	/* orderly release req */
22156		freemsg(mp);
22157
22158		if (tcp->tcp_fused)
22159			tcp_unfuse(tcp);
22160
22161		if (tcp_xmit_end(tcp) != 0) {
22162			/*
22163			 * We were crossing FINs and got a reset from
22164			 * the other side. Just ignore it.
22165			 */
22166			if (tcp->tcp_debug) {
22167				(void) strlog(TCP_MOD_ID, 0, 1,
22168				    SL_ERROR|SL_TRACE,
22169				    "tcp_wput_proto, T_ORDREL_REQ out of "
22170				    "state %s",
22171				    tcp_display(tcp, NULL,
22172				    DISP_ADDR_AND_PORT));
22173			}
22174		}
22175		break;
22176	case T_ADDR_REQ:
22177		tcp_addr_req(tcp, mp);
22178		break;
22179	default:
22180		if (tcp->tcp_debug) {
22181			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
22182			    "tcp_wput_proto, bogus TPI msg, type %d",
22183			    tprim->type);
22184		}
22185		/*
22186		 * We used to M_ERROR.  Sending TNOTSUPPORT gives the user
22187		 * a chance to recover.
22188		 */
22189		tcp_err_ack(tcp, mp, TNOTSUPPORT, 0);
22190		break;
22191	}
22192 }
22193
22194 /*
22195  * The TCP write service routine should never be called...
22196  */
22197 /* ARGSUSED */
22198 static void
22199 tcp_wsrv(queue_t *q)
22200 {
22201	tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps;
22202
22203	TCP_STAT(tcps, tcp_wsrv_called);
22204 }
22205
22206 /* Non-overlapping byte exchanger */
22207 static void
22208 tcp_xchg(uchar_t *a, uchar_t *b, int len)
22209 {
22210	uchar_t	uch;
22211
22212	while (len-- > 0) {
22213		uch = a[len];
22214		a[len] = b[len];
22215		b[len] = uch;
22216	}
22217 }
22218
22219 /*
22220  * Send out a control packet on the tcp connection specified. This routine
22221  * is typically called where we need a simple ACK or RST generated.
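 *
 * For example, the push timer later in this file acknowledges received
 * data with
 *
 *	tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK);
 *
 * while a caller that needs a RST passes TH_RST and the offending
 * sequence numbers instead; str is used only for debug tracing.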
22222 */ 22223 static void 22224 tcp_xmit_ctl(char *str, tcp_t *tcp, uint32_t seq, uint32_t ack, int ctl) 22225 { 22226 uchar_t *rptr; 22227 tcph_t *tcph; 22228 ipha_t *ipha = NULL; 22229 ip6_t *ip6h = NULL; 22230 uint32_t sum; 22231 int tcp_hdr_len; 22232 int tcp_ip_hdr_len; 22233 mblk_t *mp; 22234 tcp_stack_t *tcps = tcp->tcp_tcps; 22235 22236 /* 22237 * Save sum for use in source route later. 22238 */ 22239 ASSERT(tcp != NULL); 22240 sum = tcp->tcp_tcp_hdr_len + tcp->tcp_sum; 22241 tcp_hdr_len = tcp->tcp_hdr_len; 22242 tcp_ip_hdr_len = tcp->tcp_ip_hdr_len; 22243 22244 /* If a text string is passed in with the request, pass it to strlog. */ 22245 if (str != NULL && tcp->tcp_debug) { 22246 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22247 "tcp_xmit_ctl: '%s', seq 0x%x, ack 0x%x, ctl 0x%x", 22248 str, seq, ack, ctl); 22249 } 22250 mp = allocb(tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH + tcps->tcps_wroff_xtra, 22251 BPRI_MED); 22252 if (mp == NULL) { 22253 return; 22254 } 22255 rptr = &mp->b_rptr[tcps->tcps_wroff_xtra]; 22256 mp->b_rptr = rptr; 22257 mp->b_wptr = &rptr[tcp_hdr_len]; 22258 bcopy(tcp->tcp_iphc, rptr, tcp_hdr_len); 22259 22260 if (tcp->tcp_ipversion == IPV4_VERSION) { 22261 ipha = (ipha_t *)rptr; 22262 ipha->ipha_length = htons(tcp_hdr_len); 22263 } else { 22264 ip6h = (ip6_t *)rptr; 22265 ASSERT(tcp != NULL); 22266 ip6h->ip6_plen = htons(tcp->tcp_hdr_len - 22267 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 22268 } 22269 tcph = (tcph_t *)&rptr[tcp_ip_hdr_len]; 22270 tcph->th_flags[0] = (uint8_t)ctl; 22271 if (ctl & TH_RST) { 22272 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22273 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22274 /* 22275 * Don't send TSopt w/ TH_RST packets per RFC 1323. 22276 */ 22277 if (tcp->tcp_snd_ts_ok && 22278 tcp->tcp_state > TCPS_SYN_SENT) { 22279 mp->b_wptr = &rptr[tcp_hdr_len - TCPOPT_REAL_TS_LEN]; 22280 *(mp->b_wptr) = TCPOPT_EOL; 22281 if (tcp->tcp_ipversion == IPV4_VERSION) { 22282 ipha->ipha_length = htons(tcp_hdr_len - 22283 TCPOPT_REAL_TS_LEN); 22284 } else { 22285 ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - 22286 TCPOPT_REAL_TS_LEN); 22287 } 22288 tcph->th_offset_and_rsrvd[0] -= (3 << 4); 22289 sum -= TCPOPT_REAL_TS_LEN; 22290 } 22291 } 22292 if (ctl & TH_ACK) { 22293 if (tcp->tcp_snd_ts_ok) { 22294 U32_TO_BE32(lbolt, 22295 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 22296 U32_TO_BE32(tcp->tcp_ts_recent, 22297 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 22298 } 22299 22300 /* Update the latest receive window size in TCP header. */ 22301 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 22302 tcph->th_win); 22303 tcp->tcp_rack = ack; 22304 tcp->tcp_rack_cnt = 0; 22305 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 22306 } 22307 BUMP_LOCAL(tcp->tcp_obsegs); 22308 U32_TO_BE32(seq, tcph->th_seq); 22309 U32_TO_BE32(ack, tcph->th_ack); 22310 /* 22311 * Include the adjustment for a source route if any. 22312 */ 22313 sum = (sum >> 16) + (sum & 0xFFFF); 22314 U16_TO_BE16(sum, tcph->th_sum); 22315 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 22316 tcp_send_data(tcp, tcp->tcp_wq, mp); 22317 } 22318 22319 /* 22320 * If this routine returns B_TRUE, TCP can generate a RST in response 22321 * to a segment. If it returns B_FALSE, TCP should not respond. 22322 */ 22323 static boolean_t 22324 tcp_send_rst_chk(tcp_stack_t *tcps) 22325 { 22326 clock_t now; 22327 22328 /* 22329 * TCP needs to protect itself from generating too many RSTs. 22330 * This can be a DoS attack by sending us random segments 22331 * soliciting RSTs. 
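	 *
	 * (Callers honor the B_FALSE return with a pattern like the one
	 * in tcp_xmit_early_reset() below:
	 *
	 *	if (!tcp_send_rst_chk(tcps)) {
	 *		tcps->tcps_rst_unsent++;
	 *		freemsg(mp);
	 *		return;
	 *	}
	 *
	 * so a solicited RST beyond the limit is counted and dropped.)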
22332	 *
22333	 * What we do here is to have a limit of tcp_rst_sent_rate RSTs
22334	 * in each 1 second interval.  In this way, TCP still generates
22335	 * RSTs in normal cases but when under attack, the impact is
22336	 * limited.
22337	 */
22338	if (tcps->tcps_rst_sent_rate_enabled != 0) {
22339		now = lbolt;
22340		/* lbolt can wrap around. */
22341		if ((tcps->tcps_last_rst_intrvl > now) ||
22342		    (TICK_TO_MSEC(now - tcps->tcps_last_rst_intrvl) >
22343		    1*SECONDS)) {
22344			tcps->tcps_last_rst_intrvl = now;
22345			tcps->tcps_rst_cnt = 1;
22346		} else if (++tcps->tcps_rst_cnt > tcps->tcps_rst_sent_rate) {
22347			return (B_FALSE);
22348		}
22349	}
22350	return (B_TRUE);
22351 }
22352
22353 /*
22354  * Send down the advice IP ioctl to tell IP to mark an IRE temporary.
22355  */
22356 static void
22357 tcp_ip_ire_mark_advice(tcp_t *tcp)
22358 {
22359	mblk_t *mp;
22360	ipic_t *ipic;
22361
22362	if (tcp->tcp_ipversion == IPV4_VERSION) {
22363		mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN,
22364		    &ipic);
22365	} else {
22366		mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN,
22367		    &ipic);
22368	}
22369	if (mp == NULL)
22370		return;
22371	ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY;
22372	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
22373 }
22374
22375 /*
22376  * Return an IP advice ioctl mblk and set ipic to be the pointer
22377  * to the advice structure.
22378  */
22379 static mblk_t *
22380 tcp_ip_advise_mblk(void *addr, int addr_len, ipic_t **ipic)
22381 {
22382	struct iocblk *ioc;
22383	mblk_t *mp, *mp1;
22384
22385	mp = allocb(sizeof (ipic_t) + addr_len, BPRI_HI);
22386	if (mp == NULL)
22387		return (NULL);
22388	bzero(mp->b_rptr, sizeof (ipic_t) + addr_len);
22389	*ipic = (ipic_t *)mp->b_rptr;
22390	(*ipic)->ipic_cmd = IP_IOC_IRE_ADVISE_NO_REPLY;
22391	(*ipic)->ipic_addr_offset = sizeof (ipic_t);
22392
22393	bcopy(addr, *ipic + 1, addr_len);
22394
22395	(*ipic)->ipic_addr_length = addr_len;
22396	mp->b_wptr = &mp->b_rptr[sizeof (ipic_t) + addr_len];
22397
22398	mp1 = mkiocb(IP_IOCTL);
22399	if (mp1 == NULL) {
22400		freemsg(mp);
22401		return (NULL);
22402	}
22403	mp1->b_cont = mp;
22404	ioc = (struct iocblk *)mp1->b_rptr;
22405	ioc->ioc_count = sizeof (ipic_t) + addr_len;
22406
22407	return (mp1);
22408 }
22409
22410 /*
22411  * Generate a reset based on an inbound packet for which there is no active
22412  * tcp state that we can find.
22413  *
22414  * IPSEC NOTE: Try to send the reply with the same protection as it came
22415  * in.  We still have the ipsec_mp that the packet was attached to. Thus
22416  * the packet will go out at the same level of protection as it came in by
22417  * converting the IPSEC_IN to IPSEC_OUT.
22418  */
22419 static void
22420 tcp_xmit_early_reset(char *str, mblk_t *mp, uint32_t seq,
22421     uint32_t ack, int ctl, uint_t ip_hdr_len, zoneid_t zoneid,
22422     tcp_stack_t *tcps)
22423 {
22424	ipha_t *ipha = NULL;
22425	ip6_t *ip6h = NULL;
22426	ushort_t len;
22427	tcph_t *tcph;
22428	int i;
22429	mblk_t *ipsec_mp;
22430	boolean_t mctl_present;
22431	ipic_t *ipic;
22432	ipaddr_t v4addr;
22433	in6_addr_t v6addr;
22434	int addr_len;
22435	void *addr;
22436	queue_t *q = tcps->tcps_g_q;
22437	tcp_t *tcp;
22438	cred_t *cr;
22439	mblk_t *nmp;
22440	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;
22441
22442	if (tcps->tcps_g_q == NULL) {
22443		/*
22444		 * For non-zero stackids the default queue isn't created
22445		 * until the first open, thus there can be a need to send
22446		 * a reset before then. But we can't do that, hence we just
22447		 * drop the packet.
Later during boot, when the default queue 22448 * has been setup, a retransmitted packet from the peer 22449 * will result in a reset. 22450 */ 22451 ASSERT(tcps->tcps_netstack->netstack_stackid != 22452 GLOBAL_NETSTACKID); 22453 freemsg(mp); 22454 return; 22455 } 22456 22457 tcp = Q_TO_TCP(q); 22458 22459 if (!tcp_send_rst_chk(tcps)) { 22460 tcps->tcps_rst_unsent++; 22461 freemsg(mp); 22462 return; 22463 } 22464 22465 if (mp->b_datap->db_type == M_CTL) { 22466 ipsec_mp = mp; 22467 mp = mp->b_cont; 22468 mctl_present = B_TRUE; 22469 } else { 22470 ipsec_mp = mp; 22471 mctl_present = B_FALSE; 22472 } 22473 22474 if (str && q && tcps->tcps_dbg) { 22475 (void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE, 22476 "tcp_xmit_early_reset: '%s', seq 0x%x, ack 0x%x, " 22477 "flags 0x%x", 22478 str, seq, ack, ctl); 22479 } 22480 if (mp->b_datap->db_ref != 1) { 22481 mblk_t *mp1 = copyb(mp); 22482 freemsg(mp); 22483 mp = mp1; 22484 if (!mp) { 22485 if (mctl_present) 22486 freeb(ipsec_mp); 22487 return; 22488 } else { 22489 if (mctl_present) { 22490 ipsec_mp->b_cont = mp; 22491 } else { 22492 ipsec_mp = mp; 22493 } 22494 } 22495 } else if (mp->b_cont) { 22496 freemsg(mp->b_cont); 22497 mp->b_cont = NULL; 22498 } 22499 /* 22500 * We skip reversing source route here. 22501 * (for now we replace all IP options with EOL) 22502 */ 22503 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22504 ipha = (ipha_t *)mp->b_rptr; 22505 for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++) 22506 mp->b_rptr[i] = IPOPT_EOL; 22507 /* 22508 * Make sure that src address isn't flagrantly invalid. 22509 * Not all broadcast address checking for the src address 22510 * is possible, since we don't know the netmask of the src 22511 * addr. No check for destination address is done, since 22512 * IP will not pass up a packet with a broadcast dest 22513 * address to TCP. Similar checks are done below for IPv6. 
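		 * For instance, an IPv4 segment whose claimed source is
		 * 0.0.0.0 (unspecified), 255.255.255.255 (INADDR_BROADCAST)
		 * or a class D multicast address (224.0.0.0/4, the CLASSD()
		 * test below) is dropped rather than answered; the IPv6
		 * branch likewise drops unspecified (::) and multicast
		 * (ff00::/8) sources.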
22514 */ 22515 if (ipha->ipha_src == 0 || ipha->ipha_src == INADDR_BROADCAST || 22516 CLASSD(ipha->ipha_src)) { 22517 freemsg(ipsec_mp); 22518 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInDiscards); 22519 return; 22520 } 22521 } else { 22522 ip6h = (ip6_t *)mp->b_rptr; 22523 22524 if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src) || 22525 IN6_IS_ADDR_MULTICAST(&ip6h->ip6_src)) { 22526 freemsg(ipsec_mp); 22527 BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsInDiscards); 22528 return; 22529 } 22530 22531 /* Remove any extension headers assuming partial overlay */ 22532 if (ip_hdr_len > IPV6_HDR_LEN) { 22533 uint8_t *to; 22534 22535 to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN; 22536 ovbcopy(ip6h, to, IPV6_HDR_LEN); 22537 mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN; 22538 ip_hdr_len = IPV6_HDR_LEN; 22539 ip6h = (ip6_t *)mp->b_rptr; 22540 ip6h->ip6_nxt = IPPROTO_TCP; 22541 } 22542 } 22543 tcph = (tcph_t *)&mp->b_rptr[ip_hdr_len]; 22544 if (tcph->th_flags[0] & TH_RST) { 22545 freemsg(ipsec_mp); 22546 return; 22547 } 22548 tcph->th_offset_and_rsrvd[0] = (5 << 4); 22549 len = ip_hdr_len + sizeof (tcph_t); 22550 mp->b_wptr = &mp->b_rptr[len]; 22551 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22552 ipha->ipha_length = htons(len); 22553 /* Swap addresses */ 22554 v4addr = ipha->ipha_src; 22555 ipha->ipha_src = ipha->ipha_dst; 22556 ipha->ipha_dst = v4addr; 22557 ipha->ipha_ident = 0; 22558 ipha->ipha_ttl = (uchar_t)tcps->tcps_ipv4_ttl; 22559 addr_len = IP_ADDR_LEN; 22560 addr = &v4addr; 22561 } else { 22562 /* No ip6i_t in this case */ 22563 ip6h->ip6_plen = htons(len - IPV6_HDR_LEN); 22564 /* Swap addresses */ 22565 v6addr = ip6h->ip6_src; 22566 ip6h->ip6_src = ip6h->ip6_dst; 22567 ip6h->ip6_dst = v6addr; 22568 ip6h->ip6_hops = (uchar_t)tcps->tcps_ipv6_hoplimit; 22569 addr_len = IPV6_ADDR_LEN; 22570 addr = &v6addr; 22571 } 22572 tcp_xchg(tcph->th_fport, tcph->th_lport, 2); 22573 U32_TO_BE32(ack, tcph->th_ack); 22574 U32_TO_BE32(seq, tcph->th_seq); 22575 U16_TO_BE16(0, tcph->th_win); 22576 U16_TO_BE16(sizeof (tcph_t), tcph->th_sum); 22577 tcph->th_flags[0] = (uint8_t)ctl; 22578 if (ctl & TH_RST) { 22579 BUMP_MIB(&tcps->tcps_mib, tcpOutRsts); 22580 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 22581 } 22582 22583 /* IP trusts us to set up labels when required. 
 */
22584	if (is_system_labeled() && (cr = DB_CRED(mp)) != NULL &&
22585	    crgetlabel(cr) != NULL) {
22586		int err, adjust;
22587
22588		if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION)
22589			err = tsol_check_label(cr, &mp, &adjust,
22590			    tcp->tcp_connp->conn_mac_exempt,
22591			    tcps->tcps_netstack->netstack_ip);
22592		else
22593			err = tsol_check_label_v6(cr, &mp, &adjust,
22594			    tcp->tcp_connp->conn_mac_exempt,
22595			    tcps->tcps_netstack->netstack_ip);
22596		if (mctl_present)
22597			ipsec_mp->b_cont = mp;
22598		else
22599			ipsec_mp = mp;
22600		if (err != 0) {
22601			freemsg(ipsec_mp);
22602			return;
22603		}
22604		if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) {
22605			ipha = (ipha_t *)mp->b_rptr;
22606			adjust += ntohs(ipha->ipha_length);
22607			ipha->ipha_length = htons(adjust);
22608		} else {
22609			ip6h = (ip6_t *)mp->b_rptr;
22610		}
22611	}
22612
22613	if (mctl_present) {
22614		ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr;
22615
22616		ASSERT(ii->ipsec_in_type == IPSEC_IN);
22617		if (!ipsec_in_to_out(ipsec_mp, ipha, ip6h)) {
22618			return;
22619		}
22620	}
22621	if (zoneid == ALL_ZONES)
22622		zoneid = GLOBAL_ZONEID;
22623
22624	/* Add the zoneid so ip_output routes it properly */
22625	if ((nmp = ip_prepend_zoneid(ipsec_mp, zoneid, ipst)) == NULL) {
22626		freemsg(ipsec_mp);
22627		return;
22628	}
22629	ipsec_mp = nmp;
22630
22631	/*
22632	 * NOTE: one might consider tracing a TCP packet here, but
22633	 * this function has no active TCP state and no tcp structure
22634	 * that has a trace buffer.  If we traced here, we would have
22635	 * to keep a local trace buffer in tcp_record_trace().
22636	 *
22637	 * TSol note: The mblk that contains the incoming packet was
22638	 * reused by tcp_xmit_listeners_reset, so it already contains
22639	 * the right credentials and we don't need to call mblk_setcred.
22640	 * Also the conn's cred is not right since it is associated
22641	 * with tcps_g_q.
22642	 */
22643	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, ipsec_mp);
22644
22645	/*
22646	 * Tell IP to mark the IRE used for this destination temporary.
22647	 * This way, we can limit our exposure to DoS attacks because IP
22648	 * creates an IRE for each destination.  If there are too many,
22649	 * the time to do any routing lookup will be extremely long.  And
22650	 * the lookup can be in interrupt context.
22651	 *
22652	 * Note that in normal circumstances, this marking should not
22653	 * affect anything.  It would be nice if only 1 message is
22654	 * needed to inform IP that the IRE created for this RST should
22655	 * not be added to the cache table.  But there is currently
22656	 * no such communication mechanism between TCP and IP.  So
22657	 * the best we can do now is to send the advice ioctl to IP
22658	 * to mark the IRE temporary.
22659	 */
22660	if ((mp = tcp_ip_advise_mblk(addr, addr_len, &ipic)) != NULL) {
22661		ipic->ipic_ire_marks |= IRE_MARK_TEMPORARY;
22662		CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
22663	}
22664 }
22665
22666 /*
22667  * Initiate closedown sequence on an active connection. (May be called as
22668  * writer.)  Return value zero for OK return, non-zero for error return.
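 *
 * For example, the T_ORDREL_REQ arm of tcp_wput_proto() above treats a
 * non-zero return as "we were crossing FINs", logs it when tcp_debug is
 * set, and otherwise ignores it: failure here only means the connection
 * is no longer in a state (TCPS_SYN_RCVD through TCPS_CLOSE_WAIT) where
 * sending a FIN makes sense.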
22669  */
22670 static int
22671 tcp_xmit_end(tcp_t *tcp)
22672 {
22673	ipic_t	*ipic;
22674	mblk_t	*mp;
22675	tcp_stack_t	*tcps = tcp->tcp_tcps;
22676
22677	if (tcp->tcp_state < TCPS_SYN_RCVD ||
22678	    tcp->tcp_state > TCPS_CLOSE_WAIT) {
22679		/*
22680		 * Invalid state, only states TCPS_SYN_RCVD,
22681		 * TCPS_ESTABLISHED and TCPS_CLOSE_WAIT are valid
22682		 */
22683		return (-1);
22684	}
22685
22686	tcp->tcp_fss = tcp->tcp_snxt + tcp->tcp_unsent;
22687	tcp->tcp_valid_bits |= TCP_FSS_VALID;
22688	/*
22689	 * If there is nothing more unsent, send the FIN now.
22690	 * Otherwise, it will go out with the last segment.
22691	 */
22692	if (tcp->tcp_unsent == 0) {
22693		mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
22694		    tcp->tcp_fss, B_FALSE, NULL, B_FALSE);
22695
22696		if (mp) {
22697			TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT);
22698			tcp_send_data(tcp, tcp->tcp_wq, mp);
22699		} else {
22700			/*
22701			 * Couldn't allocate msg.  Pretend we got it out.
22702			 * Wait for rexmit timeout.
22703			 */
22704			tcp->tcp_snxt = tcp->tcp_fss + 1;
22705			TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
22706		}
22707
22708		/*
22709		 * If needed, update tcp_rexmit_snxt as tcp_snxt is
22710		 * changed.
22711		 */
22712		if (tcp->tcp_rexmit && tcp->tcp_rexmit_nxt == tcp->tcp_fss) {
22713			tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
22714		}
22715	} else {
22716		/*
22717		 * If tcp->tcp_cork is set, then the data will not get sent,
22718		 * so we have to check that and unset it first.
22719		 */
22720		if (tcp->tcp_cork)
22721			tcp->tcp_cork = B_FALSE;
22722		tcp_wput_data(tcp, NULL, B_FALSE);
22723	}
22724
22725	/*
22726	 * If TCP does not get enough samples of RTT or tcp_rtt_updates
22727	 * is 0, don't update the cache.
22728	 */
22729	if (tcps->tcps_rtt_updates == 0 ||
22730	    tcp->tcp_rtt_update < tcps->tcps_rtt_updates)
22731		return (0);
22732
22733	/*
22734	 * NOTE: should not update if source routes, i.e. if tcp_remote is
22735	 * different from the destination.
22736	 */
22737	if (tcp->tcp_ipversion == IPV4_VERSION) {
22738		if (tcp->tcp_remote != tcp->tcp_ipha->ipha_dst) {
22739			return (0);
22740		}
22741		mp = tcp_ip_advise_mblk(&tcp->tcp_ipha->ipha_dst, IP_ADDR_LEN,
22742		    &ipic);
22743	} else {
22744		if (!(IN6_ARE_ADDR_EQUAL(&tcp->tcp_remote_v6,
22745		    &tcp->tcp_ip6h->ip6_dst))) {
22746			return (0);
22747		}
22748		mp = tcp_ip_advise_mblk(&tcp->tcp_ip6h->ip6_dst, IPV6_ADDR_LEN,
22749		    &ipic);
22750	}
22751
22752	/* Record route attributes in the IRE for use by future connections. */
22753	if (mp == NULL)
22754		return (0);
22755
22756	/*
22757	 * We do not have a good algorithm to update ssthresh at this time.
22758	 * So don't do any update.
22759	 */
22760	ipic->ipic_rtt = tcp->tcp_rtt_sa;
22761	ipic->ipic_rtt_sd = tcp->tcp_rtt_sd;
22762
22763	CALL_IP_WPUT(tcp->tcp_connp, tcp->tcp_wq, mp);
22764	return (0);
22765 }
22766
22767 /*
22768  * Generate a "no listener here" RST in response to an "unknown" segment.
22769  * Note that we are reusing the incoming mp to construct the outgoing
22770  * RST.
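 *
 * In sketch form, the reply follows the RFC 793 reset rules: if the
 * offending segment carried an ACK we send
 *
 *	<SEQ=SEG.ACK><CTL=RST>
 *
 * and otherwise
 *
 *	<SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
 *
 * where SEG.LEN counts one for a SYN; both cases are visible at the
 * bottom of this function.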
22771 */ 22772 void 22773 tcp_xmit_listeners_reset(mblk_t *mp, uint_t ip_hdr_len, zoneid_t zoneid, 22774 tcp_stack_t *tcps) 22775 { 22776 uchar_t *rptr; 22777 uint32_t seg_len; 22778 tcph_t *tcph; 22779 uint32_t seg_seq; 22780 uint32_t seg_ack; 22781 uint_t flags; 22782 mblk_t *ipsec_mp; 22783 ipha_t *ipha; 22784 ip6_t *ip6h; 22785 boolean_t mctl_present = B_FALSE; 22786 boolean_t check = B_TRUE; 22787 boolean_t policy_present; 22788 ipsec_stack_t *ipss = tcps->tcps_netstack->netstack_ipsec; 22789 22790 TCP_STAT(tcps, tcp_no_listener); 22791 22792 ipsec_mp = mp; 22793 22794 if (mp->b_datap->db_type == M_CTL) { 22795 ipsec_in_t *ii; 22796 22797 mctl_present = B_TRUE; 22798 mp = mp->b_cont; 22799 22800 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 22801 ASSERT(ii->ipsec_in_type == IPSEC_IN); 22802 if (ii->ipsec_in_dont_check) { 22803 check = B_FALSE; 22804 if (!ii->ipsec_in_secure) { 22805 freeb(ipsec_mp); 22806 mctl_present = B_FALSE; 22807 ipsec_mp = mp; 22808 } 22809 } 22810 } 22811 22812 if (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION) { 22813 policy_present = ipss->ipsec_inbound_v4_policy_present; 22814 ipha = (ipha_t *)mp->b_rptr; 22815 ip6h = NULL; 22816 } else { 22817 policy_present = ipss->ipsec_inbound_v6_policy_present; 22818 ipha = NULL; 22819 ip6h = (ip6_t *)mp->b_rptr; 22820 } 22821 22822 if (check && policy_present) { 22823 /* 22824 * The conn_t parameter is NULL because we already know 22825 * nobody's home. 22826 */ 22827 ipsec_mp = ipsec_check_global_policy( 22828 ipsec_mp, (conn_t *)NULL, ipha, ip6h, mctl_present, 22829 tcps->tcps_netstack); 22830 if (ipsec_mp == NULL) 22831 return; 22832 } 22833 if (is_system_labeled() && !tsol_can_reply_error(mp)) { 22834 DTRACE_PROBE2( 22835 tx__ip__log__error__nolistener__tcp, 22836 char *, "Could not reply with RST to mp(1)", 22837 mblk_t *, mp); 22838 ip2dbg(("tcp_xmit_listeners_reset: not permitted to reply\n")); 22839 freemsg(ipsec_mp); 22840 return; 22841 } 22842 22843 rptr = mp->b_rptr; 22844 22845 tcph = (tcph_t *)&rptr[ip_hdr_len]; 22846 seg_seq = BE32_TO_U32(tcph->th_seq); 22847 seg_ack = BE32_TO_U32(tcph->th_ack); 22848 flags = tcph->th_flags[0]; 22849 22850 seg_len = msgdsize(mp) - (TCP_HDR_LENGTH(tcph) + ip_hdr_len); 22851 if (flags & TH_RST) { 22852 freemsg(ipsec_mp); 22853 } else if (flags & TH_ACK) { 22854 tcp_xmit_early_reset("no tcp, reset", 22855 ipsec_mp, seg_ack, 0, TH_RST, ip_hdr_len, zoneid, tcps); 22856 } else { 22857 if (flags & TH_SYN) { 22858 seg_len++; 22859 } else { 22860 /* 22861 * Here we violate the RFC. Note that a normal 22862 * TCP will never send a segment without the ACK 22863 * flag, except for RST or SYN segment. This 22864 * segment is neither. Just drop it on the 22865 * floor. 22866 */ 22867 freemsg(ipsec_mp); 22868 tcps->tcps_rst_unsent++; 22869 return; 22870 } 22871 22872 tcp_xmit_early_reset("no tcp, reset/ack", 22873 ipsec_mp, 0, seg_seq + seg_len, 22874 TH_RST | TH_ACK, ip_hdr_len, zoneid, tcps); 22875 } 22876 } 22877 22878 /* 22879 * tcp_xmit_mp is called to return a pointer to an mblk chain complete with 22880 * ip and tcp header ready to pass down to IP. If the mp passed in is 22881 * non-NULL, then up to max_to_send bytes of data will be dup'ed off that 22882 * mblk. (If sendall is not set the dup'ing will stop at an mblk boundary 22883 * otherwise it will dup partial mblks.) 22884 * Otherwise, an appropriate ACK packet will be generated. This 22885 * routine is not usually called to send new data for the first time. 
It
22886  * is mostly called out of the timer for retransmits, and to generate ACKs.
22887  *
22888  * If offset is not NULL, the returned mblk chain's first mblk's b_rptr will
22889  * be adjusted by *offset.  And after dupb(), the offset and the ending mblk
22890  * of the original mblk chain will be returned in *offset and *end_mp.
22891  */
22892 mblk_t *
22893 tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send, int32_t *offset,
22894     mblk_t **end_mp, uint32_t seq, boolean_t sendall, uint32_t *seg_len,
22895     boolean_t rexmit)
22896 {
22897	int	data_length;
22898	int32_t	off = 0;
22899	uint_t	flags;
22900	mblk_t	*mp1;
22901	mblk_t	*mp2;
22902	uchar_t	*rptr;
22903	tcph_t	*tcph;
22904	int32_t	num_sack_blk = 0;
22905	int32_t	sack_opt_len = 0;
22906	tcp_stack_t	*tcps = tcp->tcp_tcps;
22907
22908	/* Allocate for our maximum TCP header + link-level */
22909	mp1 = allocb(tcp->tcp_ip_hdr_len + TCP_MAX_HDR_LENGTH +
22910	    tcps->tcps_wroff_xtra, BPRI_MED);
22911	if (!mp1)
22912		return (NULL);
22913	data_length = 0;
22914
22915	/*
22916	 * Note that tcp_mss has been adjusted to take into account the
22917	 * timestamp option if applicable.  Because SACK options do not
22918	 * appear in every TCP segment and they are of variable lengths,
22919	 * they cannot be included in tcp_mss.  Thus we need to calculate
22920	 * the actual segment length when we need to send a segment which
22921	 * includes SACK options.
22922	 */
22923	if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) {
22924		num_sack_blk = MIN(tcp->tcp_max_sack_blk,
22925		    tcp->tcp_num_sack_blk);
22926		sack_opt_len = num_sack_blk * sizeof (sack_blk_t) +
22927		    TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN;
22928		if (max_to_send + sack_opt_len > tcp->tcp_mss)
22929			max_to_send -= sack_opt_len;
22930	}
22931
22932	if (offset != NULL) {
22933		off = *offset;
22934		/* We use offset as an indicator that end_mp is not NULL. */
22935		*end_mp = NULL;
22936	}
22937	for (mp2 = mp1; mp && data_length != max_to_send; mp = mp->b_cont) {
22938		/* This could be faster with cooperation from downstream */
22939		if (mp2 != mp1 && !sendall &&
22940		    data_length + (int)(mp->b_wptr - mp->b_rptr) >
22941		    max_to_send)
22942			/*
22943			 * Don't send the next mblk since the whole mblk
22944			 * does not fit.
22945			 */
22946			break;
22947		mp2->b_cont = dupb(mp);
22948		mp2 = mp2->b_cont;
22949		if (!mp2) {
22950			freemsg(mp1);
22951			return (NULL);
22952		}
22953		mp2->b_rptr += off;
22954		ASSERT((uintptr_t)(mp2->b_wptr - mp2->b_rptr) <=
22955		    (uintptr_t)INT_MAX);
22956
22957		data_length += (int)(mp2->b_wptr - mp2->b_rptr);
22958		if (data_length > max_to_send) {
22959			mp2->b_wptr -= data_length - max_to_send;
22960			data_length = max_to_send;
22961			off = mp2->b_wptr - mp->b_rptr;
22962			break;
22963		} else {
22964			off = 0;
22965		}
22966	}
22967	if (offset != NULL) {
22968		*offset = off;
22969		*end_mp = mp;
22970	}
22971	if (seg_len != NULL) {
22972		*seg_len = data_length;
22973	}
22974
22975	/* Update the latest receive window size in TCP header. */
22976	U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws,
22977	    tcp->tcp_tcph->th_win);
22978
22979	rptr = mp1->b_rptr + tcps->tcps_wroff_xtra;
22980	mp1->b_rptr = rptr;
22981	mp1->b_wptr = rptr + tcp->tcp_hdr_len + sack_opt_len;
22982	bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len);
22983	tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len];
22984	U32_TO_ABE32(seq, tcph->th_seq);
22985
22986	/*
22987	 * Use tcp_unsent to determine if the PUSH bit should be used; this
22988	 * assumes that this function was called from tcp_wput_data.  Thus,
22989	 * when called to retransmit data, the setting of the PUSH bit may
22990	 * appear somewhat random in that it might get set when it should
22991	 * not.  This should not pose any performance issues.
22992	 */
22993	if (data_length != 0 && (tcp->tcp_unsent == 0 ||
22994	    tcp->tcp_unsent == data_length)) {
22995		flags = TH_ACK | TH_PUSH;
22996	} else {
22997		flags = TH_ACK;
22998	}
22999
23000	if (tcp->tcp_ecn_ok) {
23001		if (tcp->tcp_ecn_echo_on)
23002			flags |= TH_ECE;
23003
23004		/*
23005		 * Only set ECT bit and ECN_CWR if a segment contains new data.
23006		 * There is no TCP flow control for non-data segments, and
23007		 * only data segments are transmitted reliably.
23008		 */
23009		if (data_length > 0 && !rexmit) {
23010			SET_ECT(tcp, rptr);
23011			if (tcp->tcp_cwr && !tcp->tcp_ecn_cwr_sent) {
23012				flags |= TH_CWR;
23013				tcp->tcp_ecn_cwr_sent = B_TRUE;
23014			}
23015		}
23016	}
23017
23018	if (tcp->tcp_valid_bits) {
23019		uint32_t u1;
23020
23021		if ((tcp->tcp_valid_bits & TCP_ISS_VALID) &&
23022		    seq == tcp->tcp_iss) {
23023			uchar_t	*wptr;
23024
23025			/*
23026			 * If TCP_ISS_VALID and the seq number is tcp_iss,
23027			 * TCP can only be in SYN-SENT, SYN-RCVD or
23028			 * FIN-WAIT-1 state.  It can be FIN-WAIT-1 if
23029			 * our SYN is not ack'ed but the app closes this
23030			 * TCP connection.
23031			 */
23032			ASSERT(tcp->tcp_state == TCPS_SYN_SENT ||
23033			    tcp->tcp_state == TCPS_SYN_RCVD ||
23034			    tcp->tcp_state == TCPS_FIN_WAIT_1);
23035
23036			/*
23037			 * Tack on the MSS option.  It is always needed
23038			 * for both active and passive open.
23039			 *
23040			 * MSS option value should be interface MTU - MIN
23041			 * TCP/IP header according to RFC 793 as it means
23042			 * the maximum segment size TCP can receive.  But
23043			 * to get around some broken middle boxes/end hosts
23044			 * out there, we allow the option value to be the
23045			 * same as the MSS option size on the peer side.
23046			 * In this way, the other side will not send
23047			 * anything larger than they can receive.
23048			 *
23049			 * Note that for SYN_SENT state, the ndd param
23050			 * tcp_use_smss_as_mss_opt has no effect as we
23051			 * don't know the peer's MSS option value. So
23052			 * the only case we need to take care of is in
23053			 * SYN_RCVD state, which is done later.
23054			 */
23055			wptr = mp1->b_wptr;
23056			wptr[0] = TCPOPT_MAXSEG;
23057			wptr[1] = TCPOPT_MAXSEG_LEN;
23058			wptr += 2;
23059			u1 = tcp->tcp_if_mtu -
23060			    (tcp->tcp_ipversion == IPV4_VERSION ?
23061			    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN) -
23062			    TCP_MIN_HEADER_LENGTH;
23063			U16_TO_BE16(u1, wptr);
23064			mp1->b_wptr = wptr + 2;
23065			/* Update the offset to cover the additional word */
23066			tcph->th_offset_and_rsrvd[0] += (1 << 4);
23067
23068			/*
23069			 * Note that the following way of filling in
23070			 * TCP options is not optimal.  Some NOPs can
23071			 * be saved.  But there is no need at this time
23072			 * to optimize it.  When it is needed, we will
23073			 * do it.
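			 *
			 * (For illustration, a SYN sent from SYN_SENT with
			 * timestamps, window scale and SACK all enabled
			 * ends up carrying, in order:
			 *
			 *	MSS(4) | NOP,NOP,TS(10) | NOP,WS(3) |
			 *	NOP,NOP,SACK_PERMITTED(2)
			 *
			 * i.e. 4 + 12 + 4 + 4 = 24 option bytes and a
			 * 44-byte TCP header.)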
23074 */ 23075 switch (tcp->tcp_state) { 23076 case TCPS_SYN_SENT: 23077 flags = TH_SYN; 23078 23079 if (tcp->tcp_snd_ts_ok) { 23080 uint32_t llbolt = (uint32_t)lbolt; 23081 23082 wptr = mp1->b_wptr; 23083 wptr[0] = TCPOPT_NOP; 23084 wptr[1] = TCPOPT_NOP; 23085 wptr[2] = TCPOPT_TSTAMP; 23086 wptr[3] = TCPOPT_TSTAMP_LEN; 23087 wptr += 4; 23088 U32_TO_BE32(llbolt, wptr); 23089 wptr += 4; 23090 ASSERT(tcp->tcp_ts_recent == 0); 23091 U32_TO_BE32(0L, wptr); 23092 mp1->b_wptr += TCPOPT_REAL_TS_LEN; 23093 tcph->th_offset_and_rsrvd[0] += 23094 (3 << 4); 23095 } 23096 23097 /* 23098 * Set up all the bits to tell other side 23099 * we are ECN capable. 23100 */ 23101 if (tcp->tcp_ecn_ok) { 23102 flags |= (TH_ECE | TH_CWR); 23103 } 23104 break; 23105 case TCPS_SYN_RCVD: 23106 flags |= TH_SYN; 23107 23108 /* 23109 * Reset the MSS option value to be SMSS 23110 * We should probably add back the bytes 23111 * for timestamp option and IPsec. We 23112 * don't do that as this is a workaround 23113 * for broken middle boxes/end hosts, it 23114 * is better for us to be more cautious. 23115 * They may not take these things into 23116 * account in their SMSS calculation. Thus 23117 * the peer's calculated SMSS may be smaller 23118 * than what it can be. This should be OK. 23119 */ 23120 if (tcps->tcps_use_smss_as_mss_opt) { 23121 u1 = tcp->tcp_mss; 23122 U16_TO_BE16(u1, wptr); 23123 } 23124 23125 /* 23126 * If the other side is ECN capable, reply 23127 * that we are also ECN capable. 23128 */ 23129 if (tcp->tcp_ecn_ok) 23130 flags |= TH_ECE; 23131 break; 23132 default: 23133 /* 23134 * The above ASSERT() makes sure that this 23135 * must be FIN-WAIT-1 state. Our SYN has 23136 * not been ack'ed so retransmit it. 23137 */ 23138 flags |= TH_SYN; 23139 break; 23140 } 23141 23142 if (tcp->tcp_snd_ws_ok) { 23143 wptr = mp1->b_wptr; 23144 wptr[0] = TCPOPT_NOP; 23145 wptr[1] = TCPOPT_WSCALE; 23146 wptr[2] = TCPOPT_WS_LEN; 23147 wptr[3] = (uchar_t)tcp->tcp_rcv_ws; 23148 mp1->b_wptr += TCPOPT_REAL_WS_LEN; 23149 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23150 } 23151 23152 if (tcp->tcp_snd_sack_ok) { 23153 wptr = mp1->b_wptr; 23154 wptr[0] = TCPOPT_NOP; 23155 wptr[1] = TCPOPT_NOP; 23156 wptr[2] = TCPOPT_SACK_PERMITTED; 23157 wptr[3] = TCPOPT_SACK_OK_LEN; 23158 mp1->b_wptr += TCPOPT_REAL_SACK_OK_LEN; 23159 tcph->th_offset_and_rsrvd[0] += (1 << 4); 23160 } 23161 23162 /* allocb() of adequate mblk assures space */ 23163 ASSERT((uintptr_t)(mp1->b_wptr - mp1->b_rptr) <= 23164 (uintptr_t)INT_MAX); 23165 u1 = (int)(mp1->b_wptr - mp1->b_rptr); 23166 /* 23167 * Get IP set to checksum on our behalf 23168 * Include the adjustment for a source route if any. 23169 */ 23170 u1 += tcp->tcp_sum; 23171 u1 = (u1 >> 16) + (u1 & 0xFFFF); 23172 U16_TO_BE16(u1, tcph->th_sum); 23173 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23174 } 23175 if ((tcp->tcp_valid_bits & TCP_FSS_VALID) && 23176 (seq + data_length) == tcp->tcp_fss) { 23177 if (!tcp->tcp_fin_acked) { 23178 flags |= TH_FIN; 23179 BUMP_MIB(&tcps->tcps_mib, tcpOutControl); 23180 } 23181 if (!tcp->tcp_fin_sent) { 23182 tcp->tcp_fin_sent = B_TRUE; 23183 switch (tcp->tcp_state) { 23184 case TCPS_SYN_RCVD: 23185 case TCPS_ESTABLISHED: 23186 tcp->tcp_state = TCPS_FIN_WAIT_1; 23187 break; 23188 case TCPS_CLOSE_WAIT: 23189 tcp->tcp_state = TCPS_LAST_ACK; 23190 break; 23191 } 23192 if (tcp->tcp_suna == tcp->tcp_snxt) 23193 TCP_TIMER_RESTART(tcp, tcp->tcp_rto); 23194 tcp->tcp_snxt = tcp->tcp_fss + 1; 23195 } 23196 } 23197 /* 23198 * Note the trick here. u1 is unsigned. 
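		 * (A worked example, assuming TCP_OLD_URP_INTERPRETATION
		 * is 1: with seq 1000 and tcp_urg 1499, u1 is 500 and the
		 * urgent pointer goes out; with tcp_urg 900 the subtraction
		 * wraps to 0xffffff9d, the 64K test below fails, and no
		 * urgent pointer is sent.)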
When tcp_urg 23199 * is smaller than seq, u1 will become a very huge value. 23200 * So the comparison will fail. Also note that tcp_urp 23201 * should be positive, see RFC 793 page 17. 23202 */ 23203 u1 = tcp->tcp_urg - seq + TCP_OLD_URP_INTERPRETATION; 23204 if ((tcp->tcp_valid_bits & TCP_URG_VALID) && u1 != 0 && 23205 u1 < (uint32_t)(64 * 1024)) { 23206 flags |= TH_URG; 23207 BUMP_MIB(&tcps->tcps_mib, tcpOutUrg); 23208 U32_TO_ABE16(u1, tcph->th_urp); 23209 } 23210 } 23211 tcph->th_flags[0] = (uchar_t)flags; 23212 tcp->tcp_rack = tcp->tcp_rnxt; 23213 tcp->tcp_rack_cnt = 0; 23214 23215 if (tcp->tcp_snd_ts_ok) { 23216 if (tcp->tcp_state != TCPS_SYN_SENT) { 23217 uint32_t llbolt = (uint32_t)lbolt; 23218 23219 U32_TO_BE32(llbolt, 23220 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23221 U32_TO_BE32(tcp->tcp_ts_recent, 23222 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23223 } 23224 } 23225 23226 if (num_sack_blk > 0) { 23227 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23228 sack_blk_t *tmp; 23229 int32_t i; 23230 23231 wptr[0] = TCPOPT_NOP; 23232 wptr[1] = TCPOPT_NOP; 23233 wptr[2] = TCPOPT_SACK; 23234 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23235 sizeof (sack_blk_t); 23236 wptr += TCPOPT_REAL_SACK_LEN; 23237 23238 tmp = tcp->tcp_sack_list; 23239 for (i = 0; i < num_sack_blk; i++) { 23240 U32_TO_BE32(tmp[i].begin, wptr); 23241 wptr += sizeof (tcp_seq); 23242 U32_TO_BE32(tmp[i].end, wptr); 23243 wptr += sizeof (tcp_seq); 23244 } 23245 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) << 4); 23246 } 23247 ASSERT((uintptr_t)(mp1->b_wptr - rptr) <= (uintptr_t)INT_MAX); 23248 data_length += (int)(mp1->b_wptr - rptr); 23249 if (tcp->tcp_ipversion == IPV4_VERSION) { 23250 ((ipha_t *)rptr)->ipha_length = htons(data_length); 23251 } else { 23252 ip6_t *ip6 = (ip6_t *)(rptr + 23253 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23254 sizeof (ip6i_t) : 0)); 23255 23256 ip6->ip6_plen = htons(data_length - 23257 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23258 } 23259 23260 /* 23261 * Prime pump for IP 23262 * Include the adjustment for a source route if any. 23263 */ 23264 data_length -= tcp->tcp_ip_hdr_len; 23265 data_length += tcp->tcp_sum; 23266 data_length = (data_length >> 16) + (data_length & 0xFFFF); 23267 U16_TO_ABE16(data_length, tcph->th_sum); 23268 if (tcp->tcp_ip_forward_progress) { 23269 ASSERT(tcp->tcp_ipversion == IPV6_VERSION); 23270 *(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG; 23271 tcp->tcp_ip_forward_progress = B_FALSE; 23272 } 23273 return (mp1); 23274 } 23275 23276 /* This function handles the push timeout. */ 23277 void 23278 tcp_push_timer(void *arg) 23279 { 23280 conn_t *connp = (conn_t *)arg; 23281 tcp_t *tcp = connp->conn_tcp; 23282 tcp_stack_t *tcps = tcp->tcp_tcps; 23283 23284 TCP_DBGSTAT(tcps, tcp_push_timer_cnt); 23285 23286 ASSERT(tcp->tcp_listener == NULL); 23287 23288 /* 23289 * We need to plug synchronous streams during our drain to prevent 23290 * a race with tcp_fuse_rrw() or tcp_fusion_rinfop(). 23291 */ 23292 TCP_FUSE_SYNCSTR_PLUG_DRAIN(tcp); 23293 tcp->tcp_push_tid = 0; 23294 if ((tcp->tcp_rcv_list != NULL) && 23295 (tcp_rcv_drain(tcp->tcp_rq, tcp) == TH_ACK_NEEDED)) 23296 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, tcp->tcp_rnxt, TH_ACK); 23297 TCP_FUSE_SYNCSTR_UNPLUG_DRAIN(tcp); 23298 } 23299 23300 /* 23301 * This function handles delayed ACK timeout. 
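 * If more than one MSS of received data is still unacknowledged when the
 * timer fires, the body below also decays tcp_rack_abs_max and clamps
 * tcp_rack_cur_max to 2, falling back toward the RFC 1122
 * ACK-every-other-segment behavior.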
23302 */ 23303 static void 23304 tcp_ack_timer(void *arg) 23305 { 23306 conn_t *connp = (conn_t *)arg; 23307 tcp_t *tcp = connp->conn_tcp; 23308 mblk_t *mp; 23309 tcp_stack_t *tcps = tcp->tcp_tcps; 23310 23311 TCP_DBGSTAT(tcps, tcp_ack_timer_cnt); 23312 23313 tcp->tcp_ack_tid = 0; 23314 23315 if (tcp->tcp_fused) 23316 return; 23317 23318 /* 23319 * Do not send ACK if there is no outstanding unack'ed data. 23320 */ 23321 if (tcp->tcp_rnxt == tcp->tcp_rack) { 23322 return; 23323 } 23324 23325 if ((tcp->tcp_rnxt - tcp->tcp_rack) > tcp->tcp_mss) { 23326 /* 23327 * Make sure we don't allow deferred ACKs to result in 23328 * timer-based ACKing. If we have held off an ACK 23329 * when there was more than an mss here, and the timer 23330 * goes off, we have to worry about the possibility 23331 * that the sender isn't doing slow-start, or is out 23332 * of step with us for some other reason. We fall 23333 * permanently back in the direction of 23334 * ACK-every-other-packet as suggested in RFC 1122. 23335 */ 23336 if (tcp->tcp_rack_abs_max > 2) 23337 tcp->tcp_rack_abs_max--; 23338 tcp->tcp_rack_cur_max = 2; 23339 } 23340 mp = tcp_ack_mp(tcp); 23341 23342 if (mp != NULL) { 23343 TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_SEND_PKT); 23344 BUMP_LOCAL(tcp->tcp_obsegs); 23345 BUMP_MIB(&tcps->tcps_mib, tcpOutAck); 23346 BUMP_MIB(&tcps->tcps_mib, tcpOutAckDelayed); 23347 tcp_send_data(tcp, tcp->tcp_wq, mp); 23348 } 23349 } 23350 23351 23352 /* Generate an ACK-only (no data) segment for a TCP endpoint */ 23353 static mblk_t * 23354 tcp_ack_mp(tcp_t *tcp) 23355 { 23356 uint32_t seq_no; 23357 tcp_stack_t *tcps = tcp->tcp_tcps; 23358 23359 /* 23360 * There are a few cases to be considered while setting the sequence no. 23361 * Essentially, we can come here while processing an unacceptable pkt 23362 * in the TCPS_SYN_RCVD state, in which case we set the sequence number 23363 * to snxt (per RFC 793), note the swnd wouldn't have been set yet. 23364 * If we are here for a zero window probe, stick with suna. In all 23365 * other cases, we check if suna + swnd encompasses snxt and set 23366 * the sequence number to snxt, if so. If snxt falls outside the 23367 * window (the receiver probably shrunk its window), we will go with 23368 * suna + swnd, otherwise the sequence no will be unacceptable to the 23369 * receiver. 23370 */ 23371 if (tcp->tcp_zero_win_probe) { 23372 seq_no = tcp->tcp_suna; 23373 } else if (tcp->tcp_state == TCPS_SYN_RCVD) { 23374 ASSERT(tcp->tcp_swnd == 0); 23375 seq_no = tcp->tcp_snxt; 23376 } else { 23377 seq_no = SEQ_GT(tcp->tcp_snxt, 23378 (tcp->tcp_suna + tcp->tcp_swnd)) ? 23379 (tcp->tcp_suna + tcp->tcp_swnd) : tcp->tcp_snxt; 23380 } 23381 23382 if (tcp->tcp_valid_bits) { 23383 /* 23384 * For the complex case where we have to send some 23385 * controls (FIN or SYN), let tcp_xmit_mp do it. 
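		 * (tcp_valid_bits is nonzero when a SYN (TCP_ISS_VALID),
		 * FIN (TCP_FSS_VALID) or urgent pointer (TCP_URG_VALID)
		 * must still be reflected in outgoing segments;
		 * tcp_xmit_mp() above handles all three.)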
23386 */ 23387 return (tcp_xmit_mp(tcp, NULL, 0, NULL, NULL, seq_no, B_FALSE, 23388 NULL, B_FALSE)); 23389 } else { 23390 /* Generate a simple ACK */ 23391 int data_length; 23392 uchar_t *rptr; 23393 tcph_t *tcph; 23394 mblk_t *mp1; 23395 int32_t tcp_hdr_len; 23396 int32_t tcp_tcp_hdr_len; 23397 int32_t num_sack_blk = 0; 23398 int32_t sack_opt_len; 23399 23400 /* 23401 * Allocate space for TCP + IP headers 23402 * and link-level header 23403 */ 23404 if (tcp->tcp_snd_sack_ok && tcp->tcp_num_sack_blk > 0) { 23405 num_sack_blk = MIN(tcp->tcp_max_sack_blk, 23406 tcp->tcp_num_sack_blk); 23407 sack_opt_len = num_sack_blk * sizeof (sack_blk_t) + 23408 TCPOPT_NOP_LEN * 2 + TCPOPT_HEADER_LEN; 23409 tcp_hdr_len = tcp->tcp_hdr_len + sack_opt_len; 23410 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len + sack_opt_len; 23411 } else { 23412 tcp_hdr_len = tcp->tcp_hdr_len; 23413 tcp_tcp_hdr_len = tcp->tcp_tcp_hdr_len; 23414 } 23415 mp1 = allocb(tcp_hdr_len + tcps->tcps_wroff_xtra, BPRI_MED); 23416 if (!mp1) 23417 return (NULL); 23418 23419 /* Update the latest receive window size in TCP header. */ 23420 U32_TO_ABE16(tcp->tcp_rwnd >> tcp->tcp_rcv_ws, 23421 tcp->tcp_tcph->th_win); 23422 /* copy in prototype TCP + IP header */ 23423 rptr = mp1->b_rptr + tcps->tcps_wroff_xtra; 23424 mp1->b_rptr = rptr; 23425 mp1->b_wptr = rptr + tcp_hdr_len; 23426 bcopy(tcp->tcp_iphc, rptr, tcp->tcp_hdr_len); 23427 23428 tcph = (tcph_t *)&rptr[tcp->tcp_ip_hdr_len]; 23429 23430 /* Set the TCP sequence number. */ 23431 U32_TO_ABE32(seq_no, tcph->th_seq); 23432 23433 /* Set up the TCP flag field. */ 23434 tcph->th_flags[0] = (uchar_t)TH_ACK; 23435 if (tcp->tcp_ecn_echo_on) 23436 tcph->th_flags[0] |= TH_ECE; 23437 23438 tcp->tcp_rack = tcp->tcp_rnxt; 23439 tcp->tcp_rack_cnt = 0; 23440 23441 /* fill in timestamp option if in use */ 23442 if (tcp->tcp_snd_ts_ok) { 23443 uint32_t llbolt = (uint32_t)lbolt; 23444 23445 U32_TO_BE32(llbolt, 23446 (char *)tcph+TCP_MIN_HEADER_LENGTH+4); 23447 U32_TO_BE32(tcp->tcp_ts_recent, 23448 (char *)tcph+TCP_MIN_HEADER_LENGTH+8); 23449 } 23450 23451 /* Fill in SACK options */ 23452 if (num_sack_blk > 0) { 23453 uchar_t *wptr = (uchar_t *)tcph + tcp->tcp_tcp_hdr_len; 23454 sack_blk_t *tmp; 23455 int32_t i; 23456 23457 wptr[0] = TCPOPT_NOP; 23458 wptr[1] = TCPOPT_NOP; 23459 wptr[2] = TCPOPT_SACK; 23460 wptr[3] = TCPOPT_HEADER_LEN + num_sack_blk * 23461 sizeof (sack_blk_t); 23462 wptr += TCPOPT_REAL_SACK_LEN; 23463 23464 tmp = tcp->tcp_sack_list; 23465 for (i = 0; i < num_sack_blk; i++) { 23466 U32_TO_BE32(tmp[i].begin, wptr); 23467 wptr += sizeof (tcp_seq); 23468 U32_TO_BE32(tmp[i].end, wptr); 23469 wptr += sizeof (tcp_seq); 23470 } 23471 tcph->th_offset_and_rsrvd[0] += ((num_sack_blk * 2 + 1) 23472 << 4); 23473 } 23474 23475 if (tcp->tcp_ipversion == IPV4_VERSION) { 23476 ((ipha_t *)rptr)->ipha_length = htons(tcp_hdr_len); 23477 } else { 23478 /* Check for ip6i_t header in sticky hdrs */ 23479 ip6_t *ip6 = (ip6_t *)(rptr + 23480 (((ip6_t *)rptr)->ip6_nxt == IPPROTO_RAW ? 23481 sizeof (ip6i_t) : 0)); 23482 23483 ip6->ip6_plen = htons(tcp_hdr_len - 23484 ((char *)&tcp->tcp_ip6h[1] - tcp->tcp_iphc)); 23485 } 23486 23487 /* 23488 * Prime pump for checksum calculation in IP. Include the 23489 * adjustment for a source route if any. 
23490	 */
23491	data_length = tcp_tcp_hdr_len + tcp->tcp_sum;
23492	data_length = (data_length >> 16) + (data_length & 0xFFFF);
23493	U16_TO_ABE16(data_length, tcph->th_sum);
23494
23495	if (tcp->tcp_ip_forward_progress) {
23496		ASSERT(tcp->tcp_ipversion == IPV6_VERSION);
23497		*(uint32_t *)mp1->b_rptr |= IP_FORWARD_PROG;
23498		tcp->tcp_ip_forward_progress = B_FALSE;
23499	}
23500	return (mp1);
23501	}
23502 }
23503
23504 /*
23505  * To create a temporary tcp structure for inserting into bind hash list.
23506  * The parameter is assumed to be in network byte order, ready for use.
23507  */
23508 /* ARGSUSED */
23509 static tcp_t *
23510 tcp_alloc_temp_tcp(in_port_t port, tcp_stack_t *tcps)
23511 {
23512	conn_t	*connp;
23513	tcp_t	*tcp;
23514
23515	connp = ipcl_conn_create(IPCL_TCPCONN, KM_SLEEP, tcps->tcps_netstack);
23516	if (connp == NULL)
23517		return (NULL);
23518
23519	tcp = connp->conn_tcp;
23520	tcp->tcp_tcps = tcps;
23521	TCPS_REFHOLD(tcps);
23522
23523	/*
23524	 * Only initialize the necessary info in those structures.  Note
23525	 * that since INADDR_ANY is all 0, we do not need to set
23526	 * tcp_bound_source to INADDR_ANY here.
23527	 */
23528	tcp->tcp_state = TCPS_BOUND;
23529	tcp->tcp_lport = port;
23530	tcp->tcp_exclbind = 1;
23531	tcp->tcp_reserved_port = 1;
23532
23533	/* Just for place holding... */
23534	tcp->tcp_ipversion = IPV4_VERSION;
23535
23536	return (tcp);
23537 }
23538
23539 /*
23540  * To remove a port range specified by lo_port and hi_port from the
23541  * reserved port ranges.  This is one of the three public functions of
23542  * the reserved port interface.  Note that a port range has to be removed
23543  * as a whole.  Ports in a range cannot be removed individually.
23544  *
23545  * Params:
23546  *	in_port_t lo_port: the beginning port of the reserved port range to
23547  *		be deleted.
23548  *	in_port_t hi_port: the ending port of the reserved port range to
23549  *		be deleted.
23550  *
23551  * Return:
23552  *	B_TRUE if the deletion is successful, B_FALSE otherwise.
23553  *
23554  * Assumes that nca is only for zoneid=0
23555  */
23556 boolean_t
23557 tcp_reserved_port_del(in_port_t lo_port, in_port_t hi_port)
23558 {
23559	int	i, j;
23560	int	size;
23561	tcp_t	**temp_tcp_array;
23562	tcp_t	*tcp;
23563	tcp_stack_t	*tcps;
23564
23565	tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp;
23566	ASSERT(tcps != NULL);
23567
23568	rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER);
23569
23570	/* First make sure that the port range is indeed reserved. */
23571	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
23572		if (tcps->tcps_reserved_port[i].lo_port == lo_port) {
23573			hi_port = tcps->tcps_reserved_port[i].hi_port;
23574			temp_tcp_array =
23575			    tcps->tcps_reserved_port[i].temp_tcp_array;
23576			break;
23577		}
23578	}
23579	if (i == tcps->tcps_reserved_port_array_size) {
23580		rw_exit(&tcps->tcps_reserved_port_lock);
23581		netstack_rele(tcps->tcps_netstack);
23582		return (B_FALSE);
23583	}
23584
23585	/*
23586	 * Remove the range from the array.  This simple loop is possible
23587	 * because port ranges are inserted in ascending order.
23588	 */
23589	for (j = i; j < tcps->tcps_reserved_port_array_size - 1; j++) {
23590		tcps->tcps_reserved_port[j].lo_port =
23591		    tcps->tcps_reserved_port[j+1].lo_port;
23592		tcps->tcps_reserved_port[j].hi_port =
23593		    tcps->tcps_reserved_port[j+1].hi_port;
23594		tcps->tcps_reserved_port[j].temp_tcp_array =
23595		    tcps->tcps_reserved_port[j+1].temp_tcp_array;
23596	}
23597
23598	/* Remove all the temporary tcp structures.
*/ 23599 size = hi_port - lo_port + 1; 23600 while (size > 0) { 23601 tcp = temp_tcp_array[size - 1]; 23602 ASSERT(tcp != NULL); 23603 tcp_bind_hash_remove(tcp); 23604 CONN_DEC_REF(tcp->tcp_connp); 23605 size--; 23606 } 23607 kmem_free(temp_tcp_array, (hi_port - lo_port + 1) * sizeof (tcp_t *)); 23608 tcps->tcps_reserved_port_array_size--; 23609 rw_exit(&tcps->tcps_reserved_port_lock); 23610 netstack_rele(tcps->tcps_netstack); 23611 return (B_TRUE); 23612 } 23613 23614 /* 23615 * Macro to remove temporary tcp structure from the bind hash list. The 23616 * first parameter is the list of tcp to be removed. The second parameter 23617 * is the number of tcps in the array. 23618 */ 23619 #define TCP_TMP_TCP_REMOVE(tcp_array, num, tcps) \ 23620 { \ 23621 while ((num) > 0) { \ 23622 tcp_t *tcp = (tcp_array)[(num) - 1]; \ 23623 tf_t *tbf; \ 23624 tcp_t *tcpnext; \ 23625 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)]; \ 23626 mutex_enter(&tbf->tf_lock); \ 23627 tcpnext = tcp->tcp_bind_hash; \ 23628 if (tcpnext) { \ 23629 tcpnext->tcp_ptpbhn = \ 23630 tcp->tcp_ptpbhn; \ 23631 } \ 23632 *tcp->tcp_ptpbhn = tcpnext; \ 23633 mutex_exit(&tbf->tf_lock); \ 23634 kmem_free(tcp, sizeof (tcp_t)); \ 23635 (tcp_array)[(num) - 1] = NULL; \ 23636 (num)--; \ 23637 } \ 23638 } 23639 23640 /* 23641 * The public interface for other modules to call to reserve a port range 23642 * in TCP. The caller passes in how large a port range it wants. TCP 23643 * will try to find a range and return it via lo_port and hi_port. This is 23644 * used by NCA's nca_conn_init. 23645 * NCA can only be used in the global zone so this only affects the global 23646 * zone's ports. 23647 * 23648 * Params: 23649 * int size: the size of the port range to be reserved. 23650 * in_port_t *lo_port (referenced): returns the beginning port of the 23651 * reserved port range added. 23652 * in_port_t *hi_port (referenced): returns the ending port of the 23653 * reserved port range added. 23654 * 23655 * Return: 23656 * B_TRUE if the port reservation is successful, B_FALSE otherwise. 23657 * 23658 * Assumes that nca is only for zoneid=0 23659 */ 23660 boolean_t 23661 tcp_reserved_port_add(int size, in_port_t *lo_port, in_port_t *hi_port) 23662 { 23663 tcp_t *tcp; 23664 tcp_t *tmp_tcp; 23665 tcp_t **temp_tcp_array; 23666 tf_t *tbf; 23667 in_port_t net_port; 23668 in_port_t port; 23669 int32_t cur_size; 23670 int i, j; 23671 boolean_t used; 23672 tcp_rport_t tmp_ports[TCP_RESERVED_PORTS_ARRAY_MAX_SIZE]; 23673 zoneid_t zoneid = GLOBAL_ZONEID; 23674 tcp_stack_t *tcps; 23675 23676 /* Sanity check. */ 23677 if (size <= 0 || size > TCP_RESERVED_PORTS_RANGE_MAX) { 23678 return (B_FALSE); 23679 } 23680 23681 tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->netstack_tcp; 23682 ASSERT(tcps != NULL); 23683 23684 rw_enter(&tcps->tcps_reserved_port_lock, RW_WRITER); 23685 if (tcps->tcps_reserved_port_array_size == 23686 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE) { 23687 rw_exit(&tcps->tcps_reserved_port_lock); 23688 netstack_rele(tcps->tcps_netstack); 23689 return (B_FALSE); 23690 } 23691 23692 /* 23693 * Find the starting port to try. Since the port ranges are ordered 23694 * in the reserved port array, we can do a simple search here. 
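	 *
	 * For illustration (hypothetical numbers): if
	 * TCP_SMALLEST_RESERVED_PORT were 10000 and the only existing
	 * range were [10000, 10099], a request for 50 ports would bump
	 * *lo_port to 10100; the probe loop further below then verifies
	 * that ports 10100-10149 are unbound and returns with *hi_port
	 * set to 10149.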
23695 */ 23696 *lo_port = TCP_SMALLEST_RESERVED_PORT; 23697 *hi_port = TCP_LARGEST_RESERVED_PORT; 23698 for (i = 0; i < tcps->tcps_reserved_port_array_size; 23699 *lo_port = tcps->tcps_reserved_port[i].hi_port + 1, i++) { 23700 if (tcps->tcps_reserved_port[i].lo_port - *lo_port >= size) { 23701 *hi_port = tcps->tcps_reserved_port[i].lo_port - 1; 23702 break; 23703 } 23704 } 23705 /* No available port range. */ 23706 if (i == tcps->tcps_reserved_port_array_size && 23707 *hi_port - *lo_port < size) { 23708 rw_exit(&tcps->tcps_reserved_port_lock); 23709 netstack_rele(tcps->tcps_netstack); 23710 return (B_FALSE); 23711 } 23712 23713 temp_tcp_array = kmem_zalloc(size * sizeof (tcp_t *), KM_NOSLEEP); 23714 if (temp_tcp_array == NULL) { 23715 rw_exit(&tcps->tcps_reserved_port_lock); 23716 netstack_rele(tcps->tcps_netstack); 23717 return (B_FALSE); 23718 } 23719 23720 /* Go thru the port range to see if some ports are already bound. */ 23721 for (port = *lo_port, cur_size = 0; 23722 cur_size < size && port <= *hi_port; 23723 cur_size++, port++) { 23724 used = B_FALSE; 23725 net_port = htons(port); 23726 tbf = &tcps->tcps_bind_fanout[TCP_BIND_HASH(net_port)]; 23727 mutex_enter(&tbf->tf_lock); 23728 for (tcp = tbf->tf_tcp; tcp != NULL; 23729 tcp = tcp->tcp_bind_hash) { 23730 if (IPCL_ZONE_MATCH(tcp->tcp_connp, zoneid) && 23731 net_port == tcp->tcp_lport) { 23732 /* 23733 * A port is already bound. Search again 23734 * starting from port + 1. Release all 23735 * temporary tcps. 23736 */ 23737 mutex_exit(&tbf->tf_lock); 23738 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23739 tcps); 23740 *lo_port = port + 1; 23741 cur_size = -1; 23742 used = B_TRUE; 23743 break; 23744 } 23745 } 23746 if (!used) { 23747 if ((tmp_tcp = tcp_alloc_temp_tcp(net_port, tcps)) == 23748 NULL) { 23749 /* 23750 * Allocation failure. Just fail the request. 23751 * Need to remove all those temporary tcp 23752 * structures. 23753 */ 23754 mutex_exit(&tbf->tf_lock); 23755 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, 23756 tcps); 23757 rw_exit(&tcps->tcps_reserved_port_lock); 23758 kmem_free(temp_tcp_array, 23759 (hi_port - lo_port + 1) * 23760 sizeof (tcp_t *)); 23761 netstack_rele(tcps->tcps_netstack); 23762 return (B_FALSE); 23763 } 23764 temp_tcp_array[cur_size] = tmp_tcp; 23765 tcp_bind_hash_insert(tbf, tmp_tcp, B_TRUE); 23766 mutex_exit(&tbf->tf_lock); 23767 } 23768 } 23769 23770 /* 23771 * The current range is not large enough. We can actually do another 23772 * search if this search is done between 2 reserved port ranges. But 23773 * for first release, we just stop here and return saying that no port 23774 * range is available. 23775 */ 23776 if (cur_size < size) { 23777 TCP_TMP_TCP_REMOVE(temp_tcp_array, cur_size, tcps); 23778 rw_exit(&tcps->tcps_reserved_port_lock); 23779 kmem_free(temp_tcp_array, size * sizeof (tcp_t *)); 23780 netstack_rele(tcps->tcps_netstack); 23781 return (B_FALSE); 23782 } 23783 *hi_port = port - 1; 23784 23785 /* 23786 * Insert range into array in ascending order. Since this function 23787 * must not be called often, we choose to use the simplest method. 23788 * The above array should not consume excessive stack space as 23789 * the size must be very small. If in future releases, we find 23790 * that we should provide more reserved port ranges, this function 23791 * has to be modified to be more efficient. 
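	 *
	 * For instance (hypothetical ranges), inserting [10020, 10029] into
	 * an array holding [10000, 10009] and [10040, 10049] copies the
	 * entries into tmp_ports as [10000, 10009], [10020, 10029],
	 * [10040, 10049] and then copies tmp_ports back, preserving the
	 * ascending order that tcp_reserved_port_del() relies on.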
	 */
	if (tcps->tcps_reserved_port_array_size == 0) {
		tcps->tcps_reserved_port[0].lo_port = *lo_port;
		tcps->tcps_reserved_port[0].hi_port = *hi_port;
		tcps->tcps_reserved_port[0].temp_tcp_array = temp_tcp_array;
	} else {
		for (i = 0, j = 0; i < tcps->tcps_reserved_port_array_size;
		    i++, j++) {
			if (*lo_port < tcps->tcps_reserved_port[i].lo_port &&
			    i == j) {
				tmp_ports[j].lo_port = *lo_port;
				tmp_ports[j].hi_port = *hi_port;
				tmp_ports[j].temp_tcp_array = temp_tcp_array;
				j++;
			}
			tmp_ports[j].lo_port =
			    tcps->tcps_reserved_port[i].lo_port;
			tmp_ports[j].hi_port =
			    tcps->tcps_reserved_port[i].hi_port;
			tmp_ports[j].temp_tcp_array =
			    tcps->tcps_reserved_port[i].temp_tcp_array;
		}
		if (j == i) {
			tmp_ports[j].lo_port = *lo_port;
			tmp_ports[j].hi_port = *hi_port;
			tmp_ports[j].temp_tcp_array = temp_tcp_array;
		}
		bcopy(tmp_ports, tcps->tcps_reserved_port, sizeof (tmp_ports));
	}
	tcps->tcps_reserved_port_array_size++;
	rw_exit(&tcps->tcps_reserved_port_lock);
	netstack_rele(tcps->tcps_netstack);
	return (B_TRUE);
}

/*
 * Check to see if a port is in any reserved port range.
 *
 * Params:
 *	in_port_t port: the port to be verified.
 *
 * Return:
 *	B_TRUE if the port is inside a reserved port range, B_FALSE otherwise.
 */
boolean_t
tcp_reserved_port_check(in_port_t port, tcp_stack_t *tcps)
{
	int	i;

	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
		if (port >= tcps->tcps_reserved_port[i].lo_port &&
		    port <= tcps->tcps_reserved_port[i].hi_port) {
			rw_exit(&tcps->tcps_reserved_port_lock);
			return (B_TRUE);
		}
	}
	rw_exit(&tcps->tcps_reserved_port_lock);
	return (B_FALSE);
}

/*
 * To list all reserved port ranges.  This is the function to handle
 * ndd tcp_reserved_port_list.
 */
/* ARGSUSED */
static int
tcp_reserved_port_list(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
{
	int i;
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	rw_enter(&tcps->tcps_reserved_port_lock, RW_READER);
	if (tcps->tcps_reserved_port_array_size > 0)
		(void) mi_mpprintf(mp, "The following ports are reserved:");
	else
		(void) mi_mpprintf(mp, "No port is reserved.");
	for (i = 0; i < tcps->tcps_reserved_port_array_size; i++) {
		(void) mi_mpprintf(mp, "%d-%d",
		    tcps->tcps_reserved_port[i].lo_port,
		    tcps->tcps_reserved_port[i].hi_port);
	}
	rw_exit(&tcps->tcps_reserved_port_lock);
	return (0);
}

/*
 * Hash list insertion routine for tcp_t structures.
 * Inserts entries with the ones bound to a specific IP address first
 * followed by those bound to INADDR_ANY.
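 *
 * For example (hypothetical bindings, all on the same port): after
 * inserting A bound to 10.0.0.1, then B bound to INADDR_ANY, then C
 * bound to 10.0.0.2, the chain reads C -> A -> B.  New specifically
 * bound entries go to the head of the chain, while a new INADDR_ANY
 * entry is linked in after the last specifically bound one.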
23882 */ 23883 static void 23884 tcp_bind_hash_insert(tf_t *tbf, tcp_t *tcp, int caller_holds_lock) 23885 { 23886 tcp_t **tcpp; 23887 tcp_t *tcpnext; 23888 23889 if (tcp->tcp_ptpbhn != NULL) { 23890 ASSERT(!caller_holds_lock); 23891 tcp_bind_hash_remove(tcp); 23892 } 23893 tcpp = &tbf->tf_tcp; 23894 if (!caller_holds_lock) { 23895 mutex_enter(&tbf->tf_lock); 23896 } else { 23897 ASSERT(MUTEX_HELD(&tbf->tf_lock)); 23898 } 23899 tcpnext = tcpp[0]; 23900 if (tcpnext) { 23901 /* 23902 * If the new tcp bound to the INADDR_ANY address 23903 * and the first one in the list is not bound to 23904 * INADDR_ANY we skip all entries until we find the 23905 * first one bound to INADDR_ANY. 23906 * This makes sure that applications binding to a 23907 * specific address get preference over those binding to 23908 * INADDR_ANY. 23909 */ 23910 if (V6_OR_V4_INADDR_ANY(tcp->tcp_bound_source_v6) && 23911 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) { 23912 while ((tcpnext = tcpp[0]) != NULL && 23913 !V6_OR_V4_INADDR_ANY(tcpnext->tcp_bound_source_v6)) 23914 tcpp = &(tcpnext->tcp_bind_hash); 23915 if (tcpnext) 23916 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 23917 } else 23918 tcpnext->tcp_ptpbhn = &tcp->tcp_bind_hash; 23919 } 23920 tcp->tcp_bind_hash = tcpnext; 23921 tcp->tcp_ptpbhn = tcpp; 23922 tcpp[0] = tcp; 23923 if (!caller_holds_lock) 23924 mutex_exit(&tbf->tf_lock); 23925 } 23926 23927 /* 23928 * Hash list removal routine for tcp_t structures. 23929 */ 23930 static void 23931 tcp_bind_hash_remove(tcp_t *tcp) 23932 { 23933 tcp_t *tcpnext; 23934 kmutex_t *lockp; 23935 tcp_stack_t *tcps = tcp->tcp_tcps; 23936 23937 if (tcp->tcp_ptpbhn == NULL) 23938 return; 23939 23940 /* 23941 * Extract the lock pointer in case there are concurrent 23942 * hash_remove's for this instance. 23943 */ 23944 ASSERT(tcp->tcp_lport != 0); 23945 lockp = &tcps->tcps_bind_fanout[TCP_BIND_HASH(tcp->tcp_lport)].tf_lock; 23946 23947 ASSERT(lockp != NULL); 23948 mutex_enter(lockp); 23949 if (tcp->tcp_ptpbhn) { 23950 tcpnext = tcp->tcp_bind_hash; 23951 if (tcpnext) { 23952 tcpnext->tcp_ptpbhn = tcp->tcp_ptpbhn; 23953 tcp->tcp_bind_hash = NULL; 23954 } 23955 *tcp->tcp_ptpbhn = tcpnext; 23956 tcp->tcp_ptpbhn = NULL; 23957 } 23958 mutex_exit(lockp); 23959 } 23960 23961 23962 /* 23963 * Hash list lookup routine for tcp_t structures. 23964 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF. 23965 */ 23966 static tcp_t * 23967 tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps) 23968 { 23969 tf_t *tf; 23970 tcp_t *tcp; 23971 23972 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 23973 mutex_enter(&tf->tf_lock); 23974 for (tcp = tf->tf_tcp; tcp != NULL; 23975 tcp = tcp->tcp_acceptor_hash) { 23976 if (tcp->tcp_acceptor_id == id) { 23977 CONN_INC_REF(tcp->tcp_connp); 23978 mutex_exit(&tf->tf_lock); 23979 return (tcp); 23980 } 23981 } 23982 mutex_exit(&tf->tf_lock); 23983 return (NULL); 23984 } 23985 23986 23987 /* 23988 * Hash list insertion routine for tcp_t structures. 
23989 */ 23990 void 23991 tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp) 23992 { 23993 tf_t *tf; 23994 tcp_t **tcpp; 23995 tcp_t *tcpnext; 23996 tcp_stack_t *tcps = tcp->tcp_tcps; 23997 23998 tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)]; 23999 24000 if (tcp->tcp_ptpahn != NULL) 24001 tcp_acceptor_hash_remove(tcp); 24002 tcpp = &tf->tf_tcp; 24003 mutex_enter(&tf->tf_lock); 24004 tcpnext = tcpp[0]; 24005 if (tcpnext) 24006 tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash; 24007 tcp->tcp_acceptor_hash = tcpnext; 24008 tcp->tcp_ptpahn = tcpp; 24009 tcpp[0] = tcp; 24010 tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */ 24011 mutex_exit(&tf->tf_lock); 24012 } 24013 24014 /* 24015 * Hash list removal routine for tcp_t structures. 24016 */ 24017 static void 24018 tcp_acceptor_hash_remove(tcp_t *tcp) 24019 { 24020 tcp_t *tcpnext; 24021 kmutex_t *lockp; 24022 24023 /* 24024 * Extract the lock pointer in case there are concurrent 24025 * hash_remove's for this instance. 24026 */ 24027 lockp = tcp->tcp_acceptor_lockp; 24028 24029 if (tcp->tcp_ptpahn == NULL) 24030 return; 24031 24032 ASSERT(lockp != NULL); 24033 mutex_enter(lockp); 24034 if (tcp->tcp_ptpahn) { 24035 tcpnext = tcp->tcp_acceptor_hash; 24036 if (tcpnext) { 24037 tcpnext->tcp_ptpahn = tcp->tcp_ptpahn; 24038 tcp->tcp_acceptor_hash = NULL; 24039 } 24040 *tcp->tcp_ptpahn = tcpnext; 24041 tcp->tcp_ptpahn = NULL; 24042 } 24043 mutex_exit(lockp); 24044 tcp->tcp_acceptor_lockp = NULL; 24045 } 24046 24047 /* ARGSUSED */ 24048 static int 24049 tcp_host_param_setvalue(queue_t *q, mblk_t *mp, char *value, caddr_t cp, int af) 24050 { 24051 int error = 0; 24052 int retval; 24053 char *end; 24054 tcp_hsp_t *hsp; 24055 tcp_hsp_t *hspprev; 24056 ipaddr_t addr = 0; /* Address we're looking for */ 24057 in6_addr_t v6addr; /* Address we're looking for */ 24058 uint32_t hash; /* Hash of that address */ 24059 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24060 24061 /* 24062 * If the following variables are still zero after parsing the input 24063 * string, the user didn't specify them and we don't change them in 24064 * the HSP. 
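	 *
	 * A sketch of the expected input format (assuming the usual ndd
	 * variable name for this handler, tcp_host_param):
	 *
	 *	ndd -set /dev/tcp tcp_host_param \
	 *	    '192.168.1.5 mask 255.255.255.0 sendspace 65536 timestamp 1'
	 *
	 * i.e. an address followed by optional keyword/value pairs, which
	 * is exactly what the parsing loop below consumes.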
	 */

	ipaddr_t mask = 0;		/* Subnet mask */
	in6_addr_t v6mask;
	long sendspace = 0;		/* Send buffer size */
	long recvspace = 0;		/* Receive buffer size */
	long timestamp = 0;	/* Originate TCP TSTAMP option, 1 = yes */
	boolean_t delete = B_FALSE;	/* User asked to delete this HSP */

	rw_enter(&tcps->tcps_hsp_lock, RW_WRITER);

	/* Parse and validate address */
	if (af == AF_INET) {
		retval = inet_pton(af, value, &addr);
		if (retval == 1)
			IN6_IPADDR_TO_V4MAPPED(addr, &v6addr);
	} else if (af == AF_INET6) {
		retval = inet_pton(af, value, &v6addr);
	} else {
		error = EINVAL;
		goto done;
	}
	if (retval == 0) {
		error = EINVAL;
		goto done;
	}

	while ((*value) && *value != ' ')
		value++;

	/* Parse individual keywords, set variables if found */
	while (*value) {
		/* Skip leading blanks */

		while (*value == ' ' || *value == '\t')
			value++;

		/* If at end of string, we're done */

		if (!*value)
			break;

		/* We have a word, figure out what it is */

		if (strncmp("mask", value, 4) == 0) {
			value += 4;
			while (*value == ' ' || *value == '\t')
				value++;
			/* Parse subnet mask */
			if (af == AF_INET) {
				retval = inet_pton(af, value, &mask);
				if (retval == 1) {
					V4MASK_TO_V6(mask, v6mask);
				}
			} else if (af == AF_INET6) {
				retval = inet_pton(af, value, &v6mask);
			}
			if (retval != 1) {
				error = EINVAL;
				goto done;
			}
			while ((*value) && *value != ' ')
				value++;
		} else if (strncmp("sendspace", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &sendspace) != 0 ||
			    sendspace < TCP_XMIT_HIWATER ||
			    sendspace >= (1L<<30)) {
				error = EINVAL;
				goto done;
			}
			value = end;
		} else if (strncmp("recvspace", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &recvspace) != 0 ||
			    recvspace < TCP_RECV_HIWATER ||
			    recvspace >= (1L<<30)) {
				error = EINVAL;
				goto done;
			}
			value = end;
		} else if (strncmp("timestamp", value, 9) == 0) {
			value += 9;

			if (ddi_strtol(value, &end, 0, &timestamp) != 0 ||
			    timestamp < 0 || timestamp > 1) {
				error = EINVAL;
				goto done;
			}

			/*
			 * We increment timestamp so we know it's been set;
			 * this is undone when we put it in the HSP
			 */
			timestamp++;
			value = end;
		} else if (strncmp("delete", value, 6) == 0) {
			value += 6;
			delete = B_TRUE;
		} else {
			error = EINVAL;
			goto done;
		}
	}

	/* Hash address for lookup */

	hash = TCP_HSP_HASH(addr);

	if (delete) {
		/*
		 * Note that deletes don't return an error if the thing
		 * we're trying to delete isn't there.
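		 * With the same ndd interface sketched above, a removal
		 * would look like (illustrative):
		 *
		 *	ndd -set /dev/tcp tcp_host_param '192.168.1.5 delete'
		 *
		 * and a miss is silently ignored rather than reported.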
24180 */ 24181 if (tcps->tcps_hsp_hash == NULL) 24182 goto done; 24183 hsp = tcps->tcps_hsp_hash[hash]; 24184 24185 if (hsp) { 24186 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24187 &v6addr)) { 24188 tcps->tcps_hsp_hash[hash] = hsp->tcp_hsp_next; 24189 mi_free((char *)hsp); 24190 } else { 24191 hspprev = hsp; 24192 while ((hsp = hsp->tcp_hsp_next) != NULL) { 24193 if (IN6_ARE_ADDR_EQUAL( 24194 &hsp->tcp_hsp_addr_v6, &v6addr)) { 24195 hspprev->tcp_hsp_next = 24196 hsp->tcp_hsp_next; 24197 mi_free((char *)hsp); 24198 break; 24199 } 24200 hspprev = hsp; 24201 } 24202 } 24203 } 24204 } else { 24205 /* 24206 * We're adding/modifying an HSP. If we haven't already done 24207 * so, allocate the hash table. 24208 */ 24209 24210 if (!tcps->tcps_hsp_hash) { 24211 tcps->tcps_hsp_hash = (tcp_hsp_t **) 24212 mi_zalloc(sizeof (tcp_hsp_t *) * TCP_HSP_HASH_SIZE); 24213 if (!tcps->tcps_hsp_hash) { 24214 error = EINVAL; 24215 goto done; 24216 } 24217 } 24218 24219 /* Get head of hash chain */ 24220 24221 hsp = tcps->tcps_hsp_hash[hash]; 24222 24223 /* Try to find pre-existing hsp on hash chain */ 24224 /* Doesn't handle CIDR prefixes. */ 24225 while (hsp) { 24226 if (IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, &v6addr)) 24227 break; 24228 hsp = hsp->tcp_hsp_next; 24229 } 24230 24231 /* 24232 * If we didn't, create one with default values and put it 24233 * at head of hash chain 24234 */ 24235 24236 if (!hsp) { 24237 hsp = (tcp_hsp_t *)mi_zalloc(sizeof (tcp_hsp_t)); 24238 if (!hsp) { 24239 error = EINVAL; 24240 goto done; 24241 } 24242 hsp->tcp_hsp_next = tcps->tcps_hsp_hash[hash]; 24243 tcps->tcps_hsp_hash[hash] = hsp; 24244 } 24245 24246 /* Set values that the user asked us to change */ 24247 24248 hsp->tcp_hsp_addr_v6 = v6addr; 24249 if (IN6_IS_ADDR_V4MAPPED(&v6addr)) 24250 hsp->tcp_hsp_vers = IPV4_VERSION; 24251 else 24252 hsp->tcp_hsp_vers = IPV6_VERSION; 24253 hsp->tcp_hsp_subnet_v6 = v6mask; 24254 if (sendspace > 0) 24255 hsp->tcp_hsp_sendspace = sendspace; 24256 if (recvspace > 0) 24257 hsp->tcp_hsp_recvspace = recvspace; 24258 if (timestamp > 0) 24259 hsp->tcp_hsp_tstamp = timestamp - 1; 24260 } 24261 24262 done: 24263 rw_exit(&tcps->tcps_hsp_lock); 24264 return (error); 24265 } 24266 24267 /* Set callback routine passed to nd_load by tcp_param_register. */ 24268 /* ARGSUSED */ 24269 static int 24270 tcp_host_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 24271 { 24272 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET)); 24273 } 24274 /* ARGSUSED */ 24275 static int 24276 tcp_host_param_set_ipv6(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 24277 cred_t *cr) 24278 { 24279 return (tcp_host_param_setvalue(q, mp, value, cp, AF_INET6)); 24280 } 24281 24282 /* TCP host parameters report triggered via the Named Dispatch mechanism. 
*/ 24283 /* ARGSUSED */ 24284 static int 24285 tcp_host_param_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 24286 { 24287 tcp_hsp_t *hsp; 24288 int i; 24289 char addrbuf[INET6_ADDRSTRLEN], subnetbuf[INET6_ADDRSTRLEN]; 24290 tcp_stack_t *tcps = Q_TO_TCP(q)->tcp_tcps; 24291 24292 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24293 (void) mi_mpprintf(mp, 24294 "Hash HSP " MI_COL_HDRPAD_STR 24295 "Address Subnet Mask Send Receive TStamp"); 24296 if (tcps->tcps_hsp_hash) { 24297 for (i = 0; i < TCP_HSP_HASH_SIZE; i++) { 24298 hsp = tcps->tcps_hsp_hash[i]; 24299 while (hsp) { 24300 if (hsp->tcp_hsp_vers == IPV4_VERSION) { 24301 (void) inet_ntop(AF_INET, 24302 &hsp->tcp_hsp_addr, 24303 addrbuf, sizeof (addrbuf)); 24304 (void) inet_ntop(AF_INET, 24305 &hsp->tcp_hsp_subnet, 24306 subnetbuf, sizeof (subnetbuf)); 24307 } else { 24308 (void) inet_ntop(AF_INET6, 24309 &hsp->tcp_hsp_addr_v6, 24310 addrbuf, sizeof (addrbuf)); 24311 (void) inet_ntop(AF_INET6, 24312 &hsp->tcp_hsp_subnet_v6, 24313 subnetbuf, sizeof (subnetbuf)); 24314 } 24315 (void) mi_mpprintf(mp, 24316 " %03d " MI_COL_PTRFMT_STR 24317 "%s %s %010d %010d %d", 24318 i, 24319 (void *)hsp, 24320 addrbuf, 24321 subnetbuf, 24322 hsp->tcp_hsp_sendspace, 24323 hsp->tcp_hsp_recvspace, 24324 hsp->tcp_hsp_tstamp); 24325 24326 hsp = hsp->tcp_hsp_next; 24327 } 24328 } 24329 } 24330 rw_exit(&tcps->tcps_hsp_lock); 24331 return (0); 24332 } 24333 24334 24335 /* Data for fast netmask macro used by tcp_hsp_lookup */ 24336 24337 static ipaddr_t netmasks[] = { 24338 IN_CLASSA_NET, IN_CLASSA_NET, IN_CLASSB_NET, 24339 IN_CLASSC_NET | IN_CLASSD_NET /* Class C,D,E */ 24340 }; 24341 24342 #define netmask(addr) (netmasks[(ipaddr_t)(addr) >> 30]) 24343 24344 /* 24345 * XXX This routine should go away and instead we should use the metrics 24346 * associated with the routes to determine the default sndspace and rcvspace. 24347 */ 24348 static tcp_hsp_t * 24349 tcp_hsp_lookup(ipaddr_t addr, tcp_stack_t *tcps) 24350 { 24351 tcp_hsp_t *hsp = NULL; 24352 24353 /* Quick check without acquiring the lock. */ 24354 if (tcps->tcps_hsp_hash == NULL) 24355 return (NULL); 24356 24357 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24358 24359 /* This routine finds the best-matching HSP for address addr. */ 24360 24361 if (tcps->tcps_hsp_hash) { 24362 int i; 24363 ipaddr_t srchaddr; 24364 tcp_hsp_t *hsp_net; 24365 24366 /* We do three passes: host, network, and subnet. */ 24367 24368 srchaddr = addr; 24369 24370 for (i = 1; i <= 3; i++) { 24371 /* Look for exact match on srchaddr */ 24372 24373 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH(srchaddr)]; 24374 while (hsp) { 24375 if (hsp->tcp_hsp_vers == IPV4_VERSION && 24376 hsp->tcp_hsp_addr == srchaddr) 24377 break; 24378 hsp = hsp->tcp_hsp_next; 24379 } 24380 ASSERT(hsp == NULL || 24381 hsp->tcp_hsp_vers == IPV4_VERSION); 24382 24383 /* 24384 * If this is the first pass: 24385 * If we found a match, great, return it. 24386 * If not, search for the network on the second pass. 24387 */ 24388 24389 if (i == 1) 24390 if (hsp) 24391 break; 24392 else 24393 { 24394 srchaddr = addr & netmask(addr); 24395 continue; 24396 } 24397 24398 /* 24399 * If this is the second pass: 24400 * If we found a match, but there's a subnet mask, 24401 * save the match but try again using the subnet 24402 * mask on the third pass. 24403 * Otherwise, return whatever we found. 
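			 *
			 * As a concrete (hypothetical) example: looking up
			 * 10.1.2.3 first tries the host address 10.1.2.3
			 * itself; failing that, the second pass tries the
			 * classful network 10.0.0.0; and if that HSP carries
			 * subnet mask 255.255.255.0, the third pass retries
			 * with 10.1.2.0 before falling back to the saved
			 * network entry.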
24404 */ 24405 24406 if (i == 2) { 24407 if (hsp && hsp->tcp_hsp_subnet) { 24408 hsp_net = hsp; 24409 srchaddr = addr & hsp->tcp_hsp_subnet; 24410 continue; 24411 } else { 24412 break; 24413 } 24414 } 24415 24416 /* 24417 * This must be the third pass. If we didn't find 24418 * anything, return the saved network HSP instead. 24419 */ 24420 24421 if (!hsp) 24422 hsp = hsp_net; 24423 } 24424 } 24425 24426 rw_exit(&tcps->tcps_hsp_lock); 24427 return (hsp); 24428 } 24429 24430 /* 24431 * XXX Equally broken as the IPv4 routine. Doesn't handle longest 24432 * match lookup. 24433 */ 24434 static tcp_hsp_t * 24435 tcp_hsp_lookup_ipv6(in6_addr_t *v6addr, tcp_stack_t *tcps) 24436 { 24437 tcp_hsp_t *hsp = NULL; 24438 24439 /* Quick check without acquiring the lock. */ 24440 if (tcps->tcps_hsp_hash == NULL) 24441 return (NULL); 24442 24443 rw_enter(&tcps->tcps_hsp_lock, RW_READER); 24444 24445 /* This routine finds the best-matching HSP for address addr. */ 24446 24447 if (tcps->tcps_hsp_hash) { 24448 int i; 24449 in6_addr_t v6srchaddr; 24450 tcp_hsp_t *hsp_net; 24451 24452 /* We do three passes: host, network, and subnet. */ 24453 24454 v6srchaddr = *v6addr; 24455 24456 for (i = 1; i <= 3; i++) { 24457 /* Look for exact match on srchaddr */ 24458 24459 hsp = tcps->tcps_hsp_hash[TCP_HSP_HASH( 24460 V4_PART_OF_V6(v6srchaddr))]; 24461 while (hsp) { 24462 if (hsp->tcp_hsp_vers == IPV6_VERSION && 24463 IN6_ARE_ADDR_EQUAL(&hsp->tcp_hsp_addr_v6, 24464 &v6srchaddr)) 24465 break; 24466 hsp = hsp->tcp_hsp_next; 24467 } 24468 24469 /* 24470 * If this is the first pass: 24471 * If we found a match, great, return it. 24472 * If not, search for the network on the second pass. 24473 */ 24474 24475 if (i == 1) 24476 if (hsp) 24477 break; 24478 else { 24479 /* Assume a 64 bit mask */ 24480 v6srchaddr.s6_addr32[0] = 24481 v6addr->s6_addr32[0]; 24482 v6srchaddr.s6_addr32[1] = 24483 v6addr->s6_addr32[1]; 24484 v6srchaddr.s6_addr32[2] = 0; 24485 v6srchaddr.s6_addr32[3] = 0; 24486 continue; 24487 } 24488 24489 /* 24490 * If this is the second pass: 24491 * If we found a match, but there's a subnet mask, 24492 * save the match but try again using the subnet 24493 * mask on the third pass. 24494 * Otherwise, return whatever we found. 24495 */ 24496 24497 if (i == 2) { 24498 ASSERT(hsp == NULL || 24499 hsp->tcp_hsp_vers == IPV6_VERSION); 24500 if (hsp && 24501 !IN6_IS_ADDR_UNSPECIFIED( 24502 &hsp->tcp_hsp_subnet_v6)) { 24503 hsp_net = hsp; 24504 V6_MASK_COPY(*v6addr, 24505 hsp->tcp_hsp_subnet_v6, v6srchaddr); 24506 continue; 24507 } else { 24508 break; 24509 } 24510 } 24511 24512 /* 24513 * This must be the third pass. If we didn't find 24514 * anything, return the saved network HSP instead. 24515 */ 24516 24517 if (!hsp) 24518 hsp = hsp_net; 24519 } 24520 } 24521 24522 rw_exit(&tcps->tcps_hsp_lock); 24523 return (hsp); 24524 } 24525 24526 /* 24527 * Type three generator adapted from the random() function in 4.4 BSD: 24528 */ 24529 24530 /* 24531 * Copyright (c) 1983, 1993 24532 * The Regents of the University of California. All rights reserved. 24533 * 24534 * Redistribution and use in source and binary forms, with or without 24535 * modification, are permitted provided that the following conditions 24536 * are met: 24537 * 1. Redistributions of source code must retain the above copyright 24538 * notice, this list of conditions and the following disclaimer. 24539 * 2. 
Redistributions in binary form must reproduce the above copyright 24540 * notice, this list of conditions and the following disclaimer in the 24541 * documentation and/or other materials provided with the distribution. 24542 * 3. All advertising materials mentioning features or use of this software 24543 * must display the following acknowledgement: 24544 * This product includes software developed by the University of 24545 * California, Berkeley and its contributors. 24546 * 4. Neither the name of the University nor the names of its contributors 24547 * may be used to endorse or promote products derived from this software 24548 * without specific prior written permission. 24549 * 24550 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24551 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24552 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24553 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24554 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24555 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24556 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24557 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24558 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24559 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24560 * SUCH DAMAGE. 24561 */ 24562 24563 /* Type 3 -- x**31 + x**3 + 1 */ 24564 #define DEG_3 31 24565 #define SEP_3 3 24566 24567 24568 /* Protected by tcp_random_lock */ 24569 static int tcp_randtbl[DEG_3 + 1]; 24570 24571 static int *tcp_random_fptr = &tcp_randtbl[SEP_3 + 1]; 24572 static int *tcp_random_rptr = &tcp_randtbl[1]; 24573 24574 static int *tcp_random_state = &tcp_randtbl[1]; 24575 static int *tcp_random_end_ptr = &tcp_randtbl[DEG_3 + 1]; 24576 24577 kmutex_t tcp_random_lock; 24578 24579 void 24580 tcp_random_init(void) 24581 { 24582 int i; 24583 hrtime_t hrt; 24584 time_t wallclock; 24585 uint64_t result; 24586 24587 /* 24588 * Use high-res timer and current time for seed. Gethrtime() returns 24589 * a longlong, which may contain resolution down to nanoseconds. 24590 * The current time will either be a 32-bit or a 64-bit quantity. 24591 * XOR the two together in a 64-bit result variable. 24592 * Convert the result to a 32-bit value by multiplying the high-order 24593 * 32-bits by the low-order 32-bits. 24594 */ 24595 24596 hrt = gethrtime(); 24597 (void) drv_getparm(TIME, &wallclock); 24598 result = (uint64_t)wallclock ^ (uint64_t)hrt; 24599 mutex_enter(&tcp_random_lock); 24600 tcp_random_state[0] = ((result >> 32) & 0xffffffff) * 24601 (result & 0xffffffff); 24602 24603 for (i = 1; i < DEG_3; i++) 24604 tcp_random_state[i] = 1103515245 * tcp_random_state[i - 1] 24605 + 12345; 24606 tcp_random_fptr = &tcp_random_state[SEP_3]; 24607 tcp_random_rptr = &tcp_random_state[0]; 24608 mutex_exit(&tcp_random_lock); 24609 for (i = 0; i < 10 * DEG_3; i++) 24610 (void) tcp_random(); 24611 } 24612 24613 /* 24614 * tcp_random: Return a random number in the range [1 - (128K + 1)]. 24615 * This range is selected to be approximately centered on TCP_ISS / 2, 24616 * and easy to compute. We get this value by generating a 32-bit random 24617 * number, selecting out the high-order 17 bits, and then adding one so 24618 * that we never return zero. 
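 * (Seventeen bits give values 0 through 131071, so after the increment
 * the result lies in [1, 131072], i.e. 1 through 128K inclusive.)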
24619 */ 24620 int 24621 tcp_random(void) 24622 { 24623 int i; 24624 24625 mutex_enter(&tcp_random_lock); 24626 *tcp_random_fptr += *tcp_random_rptr; 24627 24628 /* 24629 * The high-order bits are more random than the low-order bits, 24630 * so we select out the high-order 17 bits and add one so that 24631 * we never return zero. 24632 */ 24633 i = ((*tcp_random_fptr >> 15) & 0x1ffff) + 1; 24634 if (++tcp_random_fptr >= tcp_random_end_ptr) { 24635 tcp_random_fptr = tcp_random_state; 24636 ++tcp_random_rptr; 24637 } else if (++tcp_random_rptr >= tcp_random_end_ptr) 24638 tcp_random_rptr = tcp_random_state; 24639 24640 mutex_exit(&tcp_random_lock); 24641 return (i); 24642 } 24643 24644 /* 24645 * XXX This will go away when TPI is extended to send 24646 * info reqs to sockfs/timod ..... 24647 * Given a queue, set the max packet size for the write 24648 * side of the queue below stream head. This value is 24649 * cached on the stream head. 24650 * Returns 1 on success, 0 otherwise. 24651 */ 24652 static int 24653 setmaxps(queue_t *q, int maxpsz) 24654 { 24655 struct stdata *stp; 24656 queue_t *wq; 24657 stp = STREAM(q); 24658 24659 /* 24660 * At this point change of a queue parameter is not allowed 24661 * when a multiplexor is sitting on top. 24662 */ 24663 if (stp->sd_flag & STPLEX) 24664 return (0); 24665 24666 claimstr(stp->sd_wrq); 24667 wq = stp->sd_wrq->q_next; 24668 ASSERT(wq != NULL); 24669 (void) strqset(wq, QMAXPSZ, 0, maxpsz); 24670 releasestr(stp->sd_wrq); 24671 return (1); 24672 } 24673 24674 static int 24675 tcp_conprim_opt_process(tcp_t *tcp, mblk_t *mp, int *do_disconnectp, 24676 int *t_errorp, int *sys_errorp) 24677 { 24678 int error; 24679 int is_absreq_failure; 24680 t_scalar_t *opt_lenp; 24681 t_scalar_t opt_offset; 24682 int prim_type; 24683 struct T_conn_req *tcreqp; 24684 struct T_conn_res *tcresp; 24685 cred_t *cr; 24686 24687 cr = DB_CREDDEF(mp, tcp->tcp_cred); 24688 24689 prim_type = ((union T_primitives *)mp->b_rptr)->type; 24690 ASSERT(prim_type == T_CONN_REQ || prim_type == O_T_CONN_RES || 24691 prim_type == T_CONN_RES); 24692 24693 switch (prim_type) { 24694 case T_CONN_REQ: 24695 tcreqp = (struct T_conn_req *)mp->b_rptr; 24696 opt_offset = tcreqp->OPT_offset; 24697 opt_lenp = (t_scalar_t *)&tcreqp->OPT_length; 24698 break; 24699 case O_T_CONN_RES: 24700 case T_CONN_RES: 24701 tcresp = (struct T_conn_res *)mp->b_rptr; 24702 opt_offset = tcresp->OPT_offset; 24703 opt_lenp = (t_scalar_t *)&tcresp->OPT_length; 24704 break; 24705 } 24706 24707 *t_errorp = 0; 24708 *sys_errorp = 0; 24709 *do_disconnectp = 0; 24710 24711 error = tpi_optcom_buf(tcp->tcp_wq, mp, opt_lenp, 24712 opt_offset, cr, &tcp_opt_obj, 24713 NULL, &is_absreq_failure); 24714 24715 switch (error) { 24716 case 0: /* no error */ 24717 ASSERT(is_absreq_failure == 0); 24718 return (0); 24719 case ENOPROTOOPT: 24720 *t_errorp = TBADOPT; 24721 break; 24722 case EACCES: 24723 *t_errorp = TACCES; 24724 break; 24725 default: 24726 *t_errorp = TSYSERR; *sys_errorp = error; 24727 break; 24728 } 24729 if (is_absreq_failure != 0) { 24730 /* 24731 * The connection request should get the local ack 24732 * T_OK_ACK and then a T_DISCON_IND. 24733 */ 24734 *do_disconnectp = 1; 24735 } 24736 return (-1); 24737 } 24738 24739 /* 24740 * Split this function out so that if the secret changes, I'm okay. 24741 * 24742 * Initialize the tcp_iss_cookie and tcp_iss_key. 
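 * The cookie below folds together the current time, a random number, the
 * CPU type info, the local ethernet address and the pass phrase; the MD5
 * state seeded from it is then cloned per connection (the MD5Final happens
 * in tcp_iss_init()), in the spirit of RFC 1948.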
 */

#define	PASSWD_SIZE 16  /* MUST be multiple of 4 */

static void
tcp_iss_key_init(uint8_t *phrase, int len, tcp_stack_t *tcps)
{
	struct {
		int32_t current_time;
		uint32_t randnum;
		uint16_t pad;
		uint8_t ether[6];
		uint8_t passwd[PASSWD_SIZE];
	} tcp_iss_cookie;
	time_t t;

	/*
	 * Start with the current absolute time.
	 */
	(void) drv_getparm(TIME, &t);
	tcp_iss_cookie.current_time = t;

	/*
	 * XXX - Need a more random number per RFC 1750, not this crap.
	 * OTOH, if what follows is pretty random, then I'm in better shape.
	 */
	tcp_iss_cookie.randnum = (uint32_t)(gethrtime() + tcp_random());
	tcp_iss_cookie.pad = 0x365c;	/* Picked from HMAC pad values. */

	/*
	 * The cpu_type_info is pretty non-random.  Ugggh.  It does serve
	 * as a good template.
	 */
	bcopy(&cpu_list->cpu_type_info, &tcp_iss_cookie.passwd,
	    min(PASSWD_SIZE, sizeof (cpu_list->cpu_type_info)));

	/*
	 * The pass-phrase.  Normally this is supplied by user-called NDD.
	 */
	bcopy(phrase, &tcp_iss_cookie.passwd, min(PASSWD_SIZE, len));

	/*
	 * See 4010593 if this section becomes a problem again,
	 * but the local ethernet address is useful here.
	 */
	(void) localetheraddr(NULL,
	    (struct ether_addr *)&tcp_iss_cookie.ether);

	/*
	 * Hash 'em all together.  The MD5Final is called per-connection.
	 */
	mutex_enter(&tcps->tcps_iss_key_lock);
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, (uchar_t *)&tcp_iss_cookie,
	    sizeof (tcp_iss_cookie));
	mutex_exit(&tcps->tcps_iss_key_lock);
}

/*
 * Set the RFC 1948 pass phrase
 */
/* ARGSUSED */
static int
tcp_1948_phrase_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *cr)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	/*
	 * Basically, value contains a new pass phrase.  Pass it along!
	 */
	tcp_iss_key_init((uint8_t *)value, strlen(value), tcps);
	return (0);
}

/* ARGSUSED */
static int
tcp_sack_info_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, sizeof (tcp_sack_info_t));
	return (0);
}

/* ARGSUSED */
static int
tcp_iphc_constructor(void *buf, void *cdrarg, int kmflags)
{
	bzero(buf, TCP_MAX_COMBINED_HEADER_LENGTH);
	return (0);
}

/*
 * Make sure we wait until the default queue is set up, yet allow
 * tcp_g_q_create() to open a TCP stream.
 * We need to allow tcp_g_q_create() to do an open
 * of tcp, hence we compare curthread.
 * All others have to wait until the tcps_g_q has been
 * setup.
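 * The handshake is a one-shot init under tcps_g_q_lock: the first thread
 * in records itself as tcps_g_q_creator, drops the lock while it builds
 * the queue, then signals tcps_g_q_cv; every other thread cv_waits until
 * tcps_g_q is non-NULL.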
 */
void
tcp_g_q_setup(tcp_stack_t *tcps)
{
	mutex_enter(&tcps->tcps_g_q_lock);
	if (tcps->tcps_g_q != NULL) {
		mutex_exit(&tcps->tcps_g_q_lock);
		return;
	}
	if (tcps->tcps_g_q_creator == NULL) {
		/* This thread will set it up */
		tcps->tcps_g_q_creator = curthread;
		mutex_exit(&tcps->tcps_g_q_lock);
		tcp_g_q_create(tcps);
		mutex_enter(&tcps->tcps_g_q_lock);
		ASSERT(tcps->tcps_g_q_creator == curthread);
		tcps->tcps_g_q_creator = NULL;
		cv_signal(&tcps->tcps_g_q_cv);
		ASSERT(tcps->tcps_g_q != NULL);
		mutex_exit(&tcps->tcps_g_q_lock);
		return;
	}
	/* Everybody but the creator has to wait */
	if (tcps->tcps_g_q_creator != curthread) {
		while (tcps->tcps_g_q == NULL)
			cv_wait(&tcps->tcps_g_q_cv, &tcps->tcps_g_q_lock);
	}
	mutex_exit(&tcps->tcps_g_q_lock);
}

major_t IP_MAJ;
#define	IP	"ip"

#define	TCP6DEV		"/devices/pseudo/tcp6@0:tcp6"

/*
 * Create a default tcp queue here instead of in strplumb
 */
void
tcp_g_q_create(tcp_stack_t *tcps)
{
	int error;
	ldi_handle_t	lh = NULL;
	ldi_ident_t	li = NULL;
	int		rval;
	cred_t		*cr;

#ifdef NS_DEBUG
	(void) printf("tcp_g_q_create()\n");
#endif

	ASSERT(tcps->tcps_g_q_creator == curthread);

	error = ldi_ident_from_major(IP_MAJ, &li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: lyr ident get failed error %d\n",
		    error);
#endif
		return;
	}

	cr = zone_get_kcred(netstackid_to_zoneid(
	    tcps->tcps_netstack->netstack_stackid));
	ASSERT(cr != NULL);
	/*
	 * We set the tcp default queue to IPv6 because IPv4 falls
	 * back to IPv6 when it can't find a client, but
	 * IPv6 does not fall back to IPv4.
	 */
	error = ldi_open_by_name(TCP6DEV, FREAD|FWRITE, cr, &lh, li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: open of TCP6DEV failed error %d\n",
		    error);
#endif
		goto out;
	}

	/*
	 * This ioctl causes the tcp framework to cache a pointer to
	 * this stream, so we don't want to close the stream after
	 * this operation.
	 * Use the kernel credentials that are for the zone we're in.
	 */
	error = ldi_ioctl(lh, TCP_IOC_DEFAULT_Q,
	    (intptr_t)0, FKIOCTL, cr, &rval);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_create: ioctl TCP_IOC_DEFAULT_Q failed "
		    "error %d\n", error);
#endif
		goto out;
	}
	tcps->tcps_g_q_lh = lh;	/* For tcp_g_q_close */
	lh = NULL;
out:
	/* Close layered handles */
	if (li)
		ldi_ident_release(li);
	/* Keep cred around until _inactive needs it */
	tcps->tcps_g_q_cr = cr;
}

/*
 * We keep tcp_g_q set until all other tcp_t's in the zone
 * have gone away, and then when tcp_g_q_inactive() is called
 * we clear it.
 */
void
tcp_g_q_destroy(tcp_stack_t *tcps)
{
#ifdef NS_DEBUG
	(void) printf("tcp_g_q_destroy() for stack %d\n",
	    tcps->tcps_netstack->netstack_stackid);
#endif

	if (tcps->tcps_g_q == NULL) {
		return;	/* Nothing to cleanup */
	}
	/*
	 * Drop reference corresponding to the default queue.
	 * This reference was added from tcp_open when the default queue
	 * was created, hence we compensate for this extra drop in
	 * tcp_g_q_close.  If the refcnt drops to zero here it means
	 * the default queue was the last one to be open, in which
	 * case tcp_g_q_inactive will be
	 * called as a result of the refrele.
	 */
	TCPS_REFRELE(tcps);
}

/*
 * Called when last tcp_t drops reference count using TCPS_REFRELE.
 * Run by tcp_g_q_inactive using a taskq.
 */
static void
tcp_g_q_close(void *arg)
{
	tcp_stack_t *tcps = arg;
	int error;
	ldi_handle_t	lh = NULL;
	ldi_ident_t	li = NULL;
	cred_t		*cr;

#ifdef NS_DEBUG
	(void) printf("tcp_g_q_inactive() for stack %d refcnt %d\n",
	    tcps->tcps_netstack->netstack_stackid,
	    tcps->tcps_netstack->netstack_refcnt);
#endif
	lh = tcps->tcps_g_q_lh;
	if (lh == NULL)
		return;	/* Nothing to cleanup */

	ASSERT(tcps->tcps_refcnt == 1);
	ASSERT(tcps->tcps_g_q != NULL);

	error = ldi_ident_from_major(IP_MAJ, &li);
	if (error) {
#ifdef DEBUG
		printf("tcp_g_q_inactive: lyr ident get failed error %d\n",
		    error);
#endif
		return;
	}

	cr = tcps->tcps_g_q_cr;
	tcps->tcps_g_q_cr = NULL;
	ASSERT(cr != NULL);

	/*
	 * Make sure we can break the recursion when tcp_close decrements
	 * the reference count causing g_q_inactive to be called again.
	 */
	tcps->tcps_g_q_lh = NULL;

	/* close the default queue */
	(void) ldi_close(lh, FREAD|FWRITE, cr);
	/*
	 * At this point in time tcps and the rest of netstack_t might
	 * have been deleted.
	 */
	tcps = NULL;

	/* Close layered handles */
	ldi_ident_release(li);
	crfree(cr);
}

/*
 * Called when last tcp_t drops reference count using TCPS_REFRELE.
 *
 * Have to ensure that the ldi routines are not used by an
 * interrupt thread by using a taskq.
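 * (ldi_close() can block, and blocking is not permitted from interrupt
 * context, so when servicing_interrupt() is true the close is handed off
 * to tcp_taskq below rather than run inline.)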
25035 */ 25036 void 25037 tcp_g_q_inactive(tcp_stack_t *tcps) 25038 { 25039 if (tcps->tcps_g_q_lh == NULL) 25040 return; /* Nothing to cleanup */ 25041 25042 ASSERT(tcps->tcps_refcnt == 0); 25043 TCPS_REFHOLD(tcps); /* Compensate for what g_q_destroy did */ 25044 25045 if (servicing_interrupt()) { 25046 (void) taskq_dispatch(tcp_taskq, tcp_g_q_close, 25047 (void *) tcps, TQ_SLEEP); 25048 } else { 25049 tcp_g_q_close(tcps); 25050 } 25051 } 25052 25053 /* 25054 * Called by IP when IP is loaded into the kernel 25055 */ 25056 void 25057 tcp_ddi_g_init(void) 25058 { 25059 IP_MAJ = ddi_name_to_major(IP); 25060 25061 tcp_timercache = kmem_cache_create("tcp_timercache", 25062 sizeof (tcp_timer_t) + sizeof (mblk_t), 0, 25063 NULL, NULL, NULL, NULL, NULL, 0); 25064 25065 tcp_sack_info_cache = kmem_cache_create("tcp_sack_info_cache", 25066 sizeof (tcp_sack_info_t), 0, 25067 tcp_sack_info_constructor, NULL, NULL, NULL, NULL, 0); 25068 25069 tcp_iphc_cache = kmem_cache_create("tcp_iphc_cache", 25070 TCP_MAX_COMBINED_HEADER_LENGTH, 0, 25071 tcp_iphc_constructor, NULL, NULL, NULL, NULL, 0); 25072 25073 mutex_init(&tcp_random_lock, NULL, MUTEX_DEFAULT, NULL); 25074 25075 /* Initialize the random number generator */ 25076 tcp_random_init(); 25077 25078 tcp_squeue_wput_proc = tcp_squeue_switch(tcp_squeue_wput); 25079 tcp_squeue_close_proc = tcp_squeue_switch(tcp_squeue_close); 25080 25081 /* A single callback independently of how many netstacks we have */ 25082 ip_squeue_init(tcp_squeue_add); 25083 25084 tcp_g_kstat = tcp_g_kstat_init(&tcp_g_statistics); 25085 25086 tcp_taskq = taskq_create("tcp_taskq", 1, minclsyspri, 1, 1, 25087 TASKQ_PREPOPULATE); 25088 25089 /* 25090 * We want to be informed each time a stack is created or 25091 * destroyed in the kernel, so we can maintain the 25092 * set of tcp_stack_t's. 25093 */ 25094 netstack_register(NS_TCP, tcp_stack_init, tcp_stack_shutdown, 25095 tcp_stack_fini); 25096 } 25097 25098 25099 /* 25100 * Initialize the TCP stack instance. 
25101 */ 25102 static void * 25103 tcp_stack_init(netstackid_t stackid, netstack_t *ns) 25104 { 25105 tcp_stack_t *tcps; 25106 tcpparam_t *pa; 25107 int i; 25108 25109 tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP); 25110 tcps->tcps_netstack = ns; 25111 25112 /* Initialize locks */ 25113 rw_init(&tcps->tcps_hsp_lock, NULL, RW_DEFAULT, NULL); 25114 mutex_init(&tcps->tcps_g_q_lock, NULL, MUTEX_DEFAULT, NULL); 25115 cv_init(&tcps->tcps_g_q_cv, NULL, CV_DEFAULT, NULL); 25116 mutex_init(&tcps->tcps_iss_key_lock, NULL, MUTEX_DEFAULT, NULL); 25117 mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 25118 rw_init(&tcps->tcps_reserved_port_lock, NULL, RW_DEFAULT, NULL); 25119 25120 tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS; 25121 tcps->tcps_g_epriv_ports[0] = 2049; 25122 tcps->tcps_g_epriv_ports[1] = 4045; 25123 tcps->tcps_min_anonpriv_port = 512; 25124 25125 tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) * 25126 TCP_BIND_FANOUT_SIZE, KM_SLEEP); 25127 tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) * 25128 TCP_FANOUT_SIZE, KM_SLEEP); 25129 tcps->tcps_reserved_port = kmem_zalloc(sizeof (tcp_rport_t) * 25130 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE, KM_SLEEP); 25131 25132 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 25133 mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL, 25134 MUTEX_DEFAULT, NULL); 25135 } 25136 25137 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 25138 mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL, 25139 MUTEX_DEFAULT, NULL); 25140 } 25141 25142 /* TCP's IPsec code calls the packet dropper. */ 25143 ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement"); 25144 25145 pa = (tcpparam_t *)kmem_alloc(sizeof (lcl_tcp_param_arr), KM_SLEEP); 25146 tcps->tcps_params = pa; 25147 bcopy(lcl_tcp_param_arr, tcps->tcps_params, sizeof (lcl_tcp_param_arr)); 25148 25149 (void) tcp_param_register(&tcps->tcps_g_nd, tcps->tcps_params, 25150 A_CNT(lcl_tcp_param_arr), tcps); 25151 25152 /* 25153 * Note: To really walk the device tree you need the devinfo 25154 * pointer to your device which is only available after probe/attach. 25155 * The following is safe only because it uses ddi_root_node() 25156 */ 25157 tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr, 25158 tcp_opt_obj.odb_opt_arr_cnt); 25159 25160 /* 25161 * Initialize RFC 1948 secret values. This will probably be reset once 25162 * by the boot scripts. 25163 * 25164 * Use NULL name, as the name is caught by the new lockstats. 25165 * 25166 * Initialize with some random, non-guessable string, like the global 25167 * T_INFO_ACK. 25168 */ 25169 25170 tcp_iss_key_init((uint8_t *)&tcp_g_t_info_ack, 25171 sizeof (tcp_g_t_info_ack), tcps); 25172 25173 tcps->tcps_kstat = tcp_kstat2_init(stackid, &tcps->tcps_statistics); 25174 tcps->tcps_mibkp = tcp_kstat_init(stackid, tcps); 25175 25176 return (tcps); 25177 } 25178 25179 /* 25180 * Called when the IP module is about to be unloaded. 25181 */ 25182 void 25183 tcp_ddi_g_destroy(void) 25184 { 25185 tcp_g_kstat_fini(tcp_g_kstat); 25186 tcp_g_kstat = NULL; 25187 bzero(&tcp_g_statistics, sizeof (tcp_g_statistics)); 25188 25189 mutex_destroy(&tcp_random_lock); 25190 25191 kmem_cache_destroy(tcp_timercache); 25192 kmem_cache_destroy(tcp_sack_info_cache); 25193 kmem_cache_destroy(tcp_iphc_cache); 25194 25195 netstack_unregister(NS_TCP); 25196 taskq_destroy(tcp_taskq); 25197 } 25198 25199 /* 25200 * Shut down the TCP stack instance. 
25201 */ 25202 /* ARGSUSED */ 25203 static void 25204 tcp_stack_shutdown(netstackid_t stackid, void *arg) 25205 { 25206 tcp_stack_t *tcps = (tcp_stack_t *)arg; 25207 25208 tcp_g_q_destroy(tcps); 25209 } 25210 25211 /* 25212 * Free the TCP stack instance. 25213 */ 25214 static void 25215 tcp_stack_fini(netstackid_t stackid, void *arg) 25216 { 25217 tcp_stack_t *tcps = (tcp_stack_t *)arg; 25218 int i; 25219 25220 nd_free(&tcps->tcps_g_nd); 25221 kmem_free(tcps->tcps_params, sizeof (lcl_tcp_param_arr)); 25222 tcps->tcps_params = NULL; 25223 kmem_free(tcps->tcps_wroff_xtra_param, sizeof (tcpparam_t)); 25224 tcps->tcps_wroff_xtra_param = NULL; 25225 kmem_free(tcps->tcps_mdt_head_param, sizeof (tcpparam_t)); 25226 tcps->tcps_mdt_head_param = NULL; 25227 kmem_free(tcps->tcps_mdt_tail_param, sizeof (tcpparam_t)); 25228 tcps->tcps_mdt_tail_param = NULL; 25229 kmem_free(tcps->tcps_mdt_max_pbufs_param, sizeof (tcpparam_t)); 25230 tcps->tcps_mdt_max_pbufs_param = NULL; 25231 25232 for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) { 25233 ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL); 25234 mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock); 25235 } 25236 25237 for (i = 0; i < TCP_FANOUT_SIZE; i++) { 25238 ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL); 25239 mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock); 25240 } 25241 25242 kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE); 25243 tcps->tcps_bind_fanout = NULL; 25244 25245 kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) * TCP_FANOUT_SIZE); 25246 tcps->tcps_acceptor_fanout = NULL; 25247 25248 kmem_free(tcps->tcps_reserved_port, sizeof (tcp_rport_t) * 25249 TCP_RESERVED_PORTS_ARRAY_MAX_SIZE); 25250 tcps->tcps_reserved_port = NULL; 25251 25252 mutex_destroy(&tcps->tcps_iss_key_lock); 25253 rw_destroy(&tcps->tcps_hsp_lock); 25254 mutex_destroy(&tcps->tcps_g_q_lock); 25255 cv_destroy(&tcps->tcps_g_q_cv); 25256 mutex_destroy(&tcps->tcps_epriv_port_lock); 25257 rw_destroy(&tcps->tcps_reserved_port_lock); 25258 25259 ip_drop_unregister(&tcps->tcps_dropper); 25260 25261 tcp_kstat2_fini(stackid, tcps->tcps_kstat); 25262 tcps->tcps_kstat = NULL; 25263 bzero(&tcps->tcps_statistics, sizeof (tcps->tcps_statistics)); 25264 25265 tcp_kstat_fini(stackid, tcps->tcps_mibkp); 25266 tcps->tcps_mibkp = NULL; 25267 25268 kmem_free(tcps, sizeof (*tcps)); 25269 } 25270 25271 /* 25272 * Generate ISS, taking into account NDD changes may happen halfway through. 25273 * (If the iss is not zero, set it.) 
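 *
 * Roughly: tcp_strong_iss == 2 derives the ISS RFC 1948 style, hashing
 * the connection 4-tuple together with the per-stack secret key and then
 * (falling through) adding the time-plus-random increment; == 1 uses just
 * gethrtime() plus a tcp_random() increment; anything else uses the
 * classic wall-clock-times-ISS_INCR scheme.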
25274 */ 25275 25276 static void 25277 tcp_iss_init(tcp_t *tcp) 25278 { 25279 MD5_CTX context; 25280 struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg; 25281 uint32_t answer[4]; 25282 tcp_stack_t *tcps = tcp->tcp_tcps; 25283 25284 tcps->tcps_iss_incr_extra += (ISS_INCR >> 1); 25285 tcp->tcp_iss = tcps->tcps_iss_incr_extra; 25286 switch (tcps->tcps_strong_iss) { 25287 case 2: 25288 mutex_enter(&tcps->tcps_iss_key_lock); 25289 context = tcps->tcps_iss_key; 25290 mutex_exit(&tcps->tcps_iss_key_lock); 25291 arg.ports = tcp->tcp_ports; 25292 if (tcp->tcp_ipversion == IPV4_VERSION) { 25293 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_src, 25294 &arg.src); 25295 IN6_IPADDR_TO_V4MAPPED(tcp->tcp_ipha->ipha_dst, 25296 &arg.dst); 25297 } else { 25298 arg.src = tcp->tcp_ip6h->ip6_src; 25299 arg.dst = tcp->tcp_ip6h->ip6_dst; 25300 } 25301 MD5Update(&context, (uchar_t *)&arg, sizeof (arg)); 25302 MD5Final((uchar_t *)answer, &context); 25303 tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3]; 25304 /* 25305 * Now that we've hashed into a unique per-connection sequence 25306 * space, add a random increment per strong_iss == 1. So I 25307 * guess we'll have to... 25308 */ 25309 /* FALLTHRU */ 25310 case 1: 25311 tcp->tcp_iss += (gethrtime() >> ISS_NSEC_SHT) + tcp_random(); 25312 break; 25313 default: 25314 tcp->tcp_iss += (uint32_t)gethrestime_sec() * ISS_INCR; 25315 break; 25316 } 25317 tcp->tcp_valid_bits = TCP_ISS_VALID; 25318 tcp->tcp_fss = tcp->tcp_iss - 1; 25319 tcp->tcp_suna = tcp->tcp_iss; 25320 tcp->tcp_snxt = tcp->tcp_iss + 1; 25321 tcp->tcp_rexmit_nxt = tcp->tcp_snxt; 25322 tcp->tcp_csuna = tcp->tcp_snxt; 25323 } 25324 25325 /* 25326 * Exported routine for extracting active tcp connection status. 25327 * 25328 * This is used by the Solaris Cluster Networking software to 25329 * gather a list of connections that need to be forwarded to 25330 * specific nodes in the cluster when configuration changes occur. 25331 * 25332 * The callback is invoked for each tcp_t structure. Returning 25333 * non-zero from the callback routine terminates the search. 25334 */ 25335 int 25336 cl_tcp_walk_list(int (*cl_callback)(cl_tcp_info_t *, void *), 25337 void *arg) 25338 { 25339 netstack_handle_t nh; 25340 netstack_t *ns; 25341 int ret = 0; 25342 25343 netstack_next_init(&nh); 25344 while ((ns = netstack_next(&nh)) != NULL) { 25345 ret = cl_tcp_walk_list_stack(cl_callback, arg, 25346 ns->netstack_tcp); 25347 netstack_rele(ns); 25348 } 25349 netstack_next_fini(&nh); 25350 return (ret); 25351 } 25352 25353 static int 25354 cl_tcp_walk_list_stack(int (*callback)(cl_tcp_info_t *, void *), void *arg, 25355 tcp_stack_t *tcps) 25356 { 25357 tcp_t *tcp; 25358 cl_tcp_info_t cl_tcpi; 25359 connf_t *connfp; 25360 conn_t *connp; 25361 int i; 25362 ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip; 25363 25364 ASSERT(callback != NULL); 25365 25366 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 25367 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 25368 connp = NULL; 25369 25370 while ((connp = 25371 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 25372 25373 tcp = connp->conn_tcp; 25374 cl_tcpi.cl_tcpi_version = CL_TCPI_V1; 25375 cl_tcpi.cl_tcpi_ipversion = tcp->tcp_ipversion; 25376 cl_tcpi.cl_tcpi_state = tcp->tcp_state; 25377 cl_tcpi.cl_tcpi_lport = tcp->tcp_lport; 25378 cl_tcpi.cl_tcpi_fport = tcp->tcp_fport; 25379 /* 25380 * The macros tcp_laddr and tcp_faddr give the IPv4 25381 * addresses. They are copied implicitly below as 25382 * mapped addresses. 
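			 *
			 * Note that ipcl_get_next_conn() returns each conn_t
			 * held; the loop normally lets the next call release
			 * it, so the early-return path when the callback
			 * terminates the walk must CONN_DEC_REF explicitly,
			 * as done below.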
25383 */ 25384 cl_tcpi.cl_tcpi_laddr_v6 = tcp->tcp_ip_src_v6; 25385 if (tcp->tcp_ipversion == IPV4_VERSION) { 25386 cl_tcpi.cl_tcpi_faddr = 25387 tcp->tcp_ipha->ipha_dst; 25388 } else { 25389 cl_tcpi.cl_tcpi_faddr_v6 = 25390 tcp->tcp_ip6h->ip6_dst; 25391 } 25392 25393 /* 25394 * If the callback returns non-zero 25395 * we terminate the traversal. 25396 */ 25397 if ((*callback)(&cl_tcpi, arg) != 0) { 25398 CONN_DEC_REF(tcp->tcp_connp); 25399 return (1); 25400 } 25401 } 25402 } 25403 25404 return (0); 25405 } 25406 25407 /* 25408 * Macros used for accessing the different types of sockaddr 25409 * structures inside a tcp_ioc_abort_conn_t. 25410 */ 25411 #define TCP_AC_V4LADDR(acp) ((sin_t *)&(acp)->ac_local) 25412 #define TCP_AC_V4RADDR(acp) ((sin_t *)&(acp)->ac_remote) 25413 #define TCP_AC_V4LOCAL(acp) (TCP_AC_V4LADDR(acp)->sin_addr.s_addr) 25414 #define TCP_AC_V4REMOTE(acp) (TCP_AC_V4RADDR(acp)->sin_addr.s_addr) 25415 #define TCP_AC_V4LPORT(acp) (TCP_AC_V4LADDR(acp)->sin_port) 25416 #define TCP_AC_V4RPORT(acp) (TCP_AC_V4RADDR(acp)->sin_port) 25417 #define TCP_AC_V6LADDR(acp) ((sin6_t *)&(acp)->ac_local) 25418 #define TCP_AC_V6RADDR(acp) ((sin6_t *)&(acp)->ac_remote) 25419 #define TCP_AC_V6LOCAL(acp) (TCP_AC_V6LADDR(acp)->sin6_addr) 25420 #define TCP_AC_V6REMOTE(acp) (TCP_AC_V6RADDR(acp)->sin6_addr) 25421 #define TCP_AC_V6LPORT(acp) (TCP_AC_V6LADDR(acp)->sin6_port) 25422 #define TCP_AC_V6RPORT(acp) (TCP_AC_V6RADDR(acp)->sin6_port) 25423 25424 /* 25425 * Return the correct error code to mimic the behavior 25426 * of a connection reset. 25427 */ 25428 #define TCP_AC_GET_ERRCODE(state, err) { \ 25429 switch ((state)) { \ 25430 case TCPS_SYN_SENT: \ 25431 case TCPS_SYN_RCVD: \ 25432 (err) = ECONNREFUSED; \ 25433 break; \ 25434 case TCPS_ESTABLISHED: \ 25435 case TCPS_FIN_WAIT_1: \ 25436 case TCPS_FIN_WAIT_2: \ 25437 case TCPS_CLOSE_WAIT: \ 25438 (err) = ECONNRESET; \ 25439 break; \ 25440 case TCPS_CLOSING: \ 25441 case TCPS_LAST_ACK: \ 25442 case TCPS_TIME_WAIT: \ 25443 (err) = 0; \ 25444 break; \ 25445 default: \ 25446 (err) = ENXIO; \ 25447 } \ 25448 } 25449 25450 /* 25451 * Check if a tcp structure matches the info in acp. 25452 */ 25453 #define TCP_AC_ADDR_MATCH(acp, tcp) \ 25454 (((acp)->ac_local.ss_family == AF_INET) ? \ 25455 ((TCP_AC_V4LOCAL((acp)) == INADDR_ANY || \ 25456 TCP_AC_V4LOCAL((acp)) == (tcp)->tcp_ip_src) && \ 25457 (TCP_AC_V4REMOTE((acp)) == INADDR_ANY || \ 25458 TCP_AC_V4REMOTE((acp)) == (tcp)->tcp_remote) && \ 25459 (TCP_AC_V4LPORT((acp)) == 0 || \ 25460 TCP_AC_V4LPORT((acp)) == (tcp)->tcp_lport) && \ 25461 (TCP_AC_V4RPORT((acp)) == 0 || \ 25462 TCP_AC_V4RPORT((acp)) == (tcp)->tcp_fport) && \ 25463 (acp)->ac_start <= (tcp)->tcp_state && \ 25464 (acp)->ac_end >= (tcp)->tcp_state) : \ 25465 ((IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL((acp))) || \ 25466 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6LOCAL((acp)), \ 25467 &(tcp)->tcp_ip_src_v6)) && \ 25468 (IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE((acp))) || \ 25469 IN6_ARE_ADDR_EQUAL(&TCP_AC_V6REMOTE((acp)), \ 25470 &(tcp)->tcp_remote_v6)) && \ 25471 (TCP_AC_V6LPORT((acp)) == 0 || \ 25472 TCP_AC_V6LPORT((acp)) == (tcp)->tcp_lport) && \ 25473 (TCP_AC_V6RPORT((acp)) == 0 || \ 25474 TCP_AC_V6RPORT((acp)) == (tcp)->tcp_fport) && \ 25475 (acp)->ac_start <= (tcp)->tcp_state && \ 25476 (acp)->ac_end >= (tcp)->tcp_state)) 25477 25478 #define TCP_AC_MATCH(acp, tcp) \ 25479 (((acp)->ac_zoneid == ALL_ZONES || \ 25480 (acp)->ac_zoneid == tcp->tcp_connp->conn_zoneid) ? 
\ 25481 TCP_AC_ADDR_MATCH(acp, tcp) : 0) 25482 25483 /* 25484 * Build a message containing a tcp_ioc_abort_conn_t structure 25485 * which is filled in with information from acp and tp. 25486 */ 25487 static mblk_t * 25488 tcp_ioctl_abort_build_msg(tcp_ioc_abort_conn_t *acp, tcp_t *tp) 25489 { 25490 mblk_t *mp; 25491 tcp_ioc_abort_conn_t *tacp; 25492 25493 mp = allocb(sizeof (uint32_t) + sizeof (*acp), BPRI_LO); 25494 if (mp == NULL) 25495 return (NULL); 25496 25497 mp->b_datap->db_type = M_CTL; 25498 25499 *((uint32_t *)mp->b_rptr) = TCP_IOC_ABORT_CONN; 25500 tacp = (tcp_ioc_abort_conn_t *)((uchar_t *)mp->b_rptr + 25501 sizeof (uint32_t)); 25502 25503 tacp->ac_start = acp->ac_start; 25504 tacp->ac_end = acp->ac_end; 25505 tacp->ac_zoneid = acp->ac_zoneid; 25506 25507 if (acp->ac_local.ss_family == AF_INET) { 25508 tacp->ac_local.ss_family = AF_INET; 25509 tacp->ac_remote.ss_family = AF_INET; 25510 TCP_AC_V4LOCAL(tacp) = tp->tcp_ip_src; 25511 TCP_AC_V4REMOTE(tacp) = tp->tcp_remote; 25512 TCP_AC_V4LPORT(tacp) = tp->tcp_lport; 25513 TCP_AC_V4RPORT(tacp) = tp->tcp_fport; 25514 } else { 25515 tacp->ac_local.ss_family = AF_INET6; 25516 tacp->ac_remote.ss_family = AF_INET6; 25517 TCP_AC_V6LOCAL(tacp) = tp->tcp_ip_src_v6; 25518 TCP_AC_V6REMOTE(tacp) = tp->tcp_remote_v6; 25519 TCP_AC_V6LPORT(tacp) = tp->tcp_lport; 25520 TCP_AC_V6RPORT(tacp) = tp->tcp_fport; 25521 } 25522 mp->b_wptr = (uchar_t *)mp->b_rptr + sizeof (uint32_t) + sizeof (*acp); 25523 return (mp); 25524 } 25525 25526 /* 25527 * Print a tcp_ioc_abort_conn_t structure. 25528 */ 25529 static void 25530 tcp_ioctl_abort_dump(tcp_ioc_abort_conn_t *acp) 25531 { 25532 char lbuf[128]; 25533 char rbuf[128]; 25534 sa_family_t af; 25535 in_port_t lport, rport; 25536 ushort_t logflags; 25537 25538 af = acp->ac_local.ss_family; 25539 25540 if (af == AF_INET) { 25541 (void) inet_ntop(af, (const void *)&TCP_AC_V4LOCAL(acp), 25542 lbuf, 128); 25543 (void) inet_ntop(af, (const void *)&TCP_AC_V4REMOTE(acp), 25544 rbuf, 128); 25545 lport = ntohs(TCP_AC_V4LPORT(acp)); 25546 rport = ntohs(TCP_AC_V4RPORT(acp)); 25547 } else { 25548 (void) inet_ntop(af, (const void *)&TCP_AC_V6LOCAL(acp), 25549 lbuf, 128); 25550 (void) inet_ntop(af, (const void *)&TCP_AC_V6REMOTE(acp), 25551 rbuf, 128); 25552 lport = ntohs(TCP_AC_V6LPORT(acp)); 25553 rport = ntohs(TCP_AC_V6RPORT(acp)); 25554 } 25555 25556 logflags = SL_TRACE | SL_NOTE; 25557 /* 25558 * Don't print this message to the console if the operation was done 25559 * to a non-global zone. 25560 */ 25561 if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES) 25562 logflags |= SL_CONSOLE; 25563 (void) strlog(TCP_MOD_ID, 0, 1, logflags, 25564 "TCP_IOC_ABORT_CONN: local = %s:%d, remote = %s:%d, " 25565 "start = %d, end = %d\n", lbuf, lport, rbuf, rport, 25566 acp->ac_start, acp->ac_end); 25567 } 25568 25569 /* 25570 * Called inside tcp_rput when a message built using 25571 * tcp_ioctl_abort_build_msg is put into a queue. 25572 * Note that when we get here there is no wildcard in acp any more. 25573 */ 25574 static void 25575 tcp_ioctl_abort_handler(tcp_t *tcp, mblk_t *mp) 25576 { 25577 tcp_ioc_abort_conn_t *acp; 25578 25579 acp = (tcp_ioc_abort_conn_t *)(mp->b_rptr + sizeof (uint32_t)); 25580 if (tcp->tcp_state <= acp->ac_end) { 25581 /* 25582 * If we get here, we are already on the correct 25583 * squeue. 
This ioctl follows the path
		 * tcp_wput -> tcp_wput_ioctl -> tcp_ioctl_abort_conn ->
		 * tcp_ioctl_abort -> squeue_fill (if on a different squeue).
		 */
		int errcode;

		TCP_AC_GET_ERRCODE(tcp->tcp_state, errcode);
		(void) tcp_clean_death(tcp, errcode, 26);
	}
	freemsg(mp);
}

/*
 * Abort all matching connections on a hash chain.
 */
static int
tcp_ioctl_abort_bucket(tcp_ioc_abort_conn_t *acp, int index, int *count,
    boolean_t exact, tcp_stack_t *tcps)
{
	int nmatch, err = 0;
	tcp_t *tcp;
	MBLKP mp, last, listhead = NULL;
	conn_t *tconnp;
	connf_t *connfp;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	connfp = &ipst->ips_ipcl_conn_fanout[index];

startover:
	nmatch = 0;

	mutex_enter(&connfp->connf_lock);
	for (tconnp = connfp->connf_head; tconnp != NULL;
	    tconnp = tconnp->conn_next) {
		tcp = tconnp->conn_tcp;
		if (TCP_AC_MATCH(acp, tcp)) {
			CONN_INC_REF(tcp->tcp_connp);
			mp = tcp_ioctl_abort_build_msg(acp, tcp);
			if (mp == NULL) {
				err = ENOMEM;
				CONN_DEC_REF(tcp->tcp_connp);
				break;
			}
			/* Remember the target tcp_t while mp is queued. */
			mp->b_prev = (mblk_t *)tcp;

			if (listhead == NULL) {
				listhead = mp;
				last = mp;
			} else {
				last->b_next = mp;
				last = mp;
			}
			nmatch++;
			if (exact)
				break;
		}

		/* Avoid holding the lock for too long. */
		if (nmatch >= 500)
			break;
	}
	mutex_exit(&connfp->connf_lock);

	/* Pass each mp into the correct tcp */
	while ((mp = listhead) != NULL) {
		listhead = listhead->b_next;
		tcp = (tcp_t *)mp->b_prev;
		mp->b_next = mp->b_prev = NULL;
		squeue_fill(tcp->tcp_connp->conn_sqp, mp,
		    tcp_input, tcp->tcp_connp, SQTAG_TCP_ABORT_BUCKET);
	}

	*count += nmatch;
	if (nmatch >= 500 && err == 0)
		goto startover;
	return (err);
}

/*
 * Abort all connections that match the attributes specified in acp.
 */
static int
tcp_ioctl_abort(tcp_ioc_abort_conn_t *acp, tcp_stack_t *tcps)
{
	sa_family_t af;
	uint32_t  ports;
	uint16_t *pports;
	int err = 0, count = 0;
	boolean_t exact = B_FALSE; /* set when there is no wildcard */
	int index = -1;
	ushort_t logflags;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	af = acp->ac_local.ss_family;

	if (af == AF_INET) {
		if (TCP_AC_V4REMOTE(acp) != INADDR_ANY &&
		    TCP_AC_V4LPORT(acp) != 0 && TCP_AC_V4RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V4LPORT(acp);
			pports[0] = TCP_AC_V4RPORT(acp);
			exact = (TCP_AC_V4LOCAL(acp) != INADDR_ANY);
			/*
			 * Remote addr and both ports are fully specified,
			 * so pick the one conn fanout bucket to scan
			 * (otherwise index stays -1 and every bucket is
			 * scanned below); uses the ipclassifier conn-hash
			 * macros.
			 */
			index = IPCL_CONN_HASH(TCP_AC_V4REMOTE(acp),
			    ports, ipst);
		}
	} else {
		if (!IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6REMOTE(acp)) &&
		    TCP_AC_V6LPORT(acp) != 0 && TCP_AC_V6RPORT(acp) != 0) {
			pports = (uint16_t *)&ports;
			pports[1] = TCP_AC_V6LPORT(acp);
			pports[0] = TCP_AC_V6RPORT(acp);
			exact = !IN6_IS_ADDR_UNSPECIFIED(&TCP_AC_V6LOCAL(acp));
			index = IPCL_CONN_HASH_V6(TCP_AC_V6REMOTE(acp),
			    ports, ipst);
		}
	}

	/*
	 * For cases where remote addr, local port, and remote port are non-
	 * wildcards, tcp_ioctl_abort_bucket will only be called once.
	 */
	if (index != -1) {
		err = tcp_ioctl_abort_bucket(acp, index,
		    &count, exact, tcps);
	} else {
		/*
		 * Loop through all entries for the wildcard case.
		 */
		for (index = 0;
		    index < ipst->ips_ipcl_conn_fanout_size;
		    index++) {
			err = tcp_ioctl_abort_bucket(acp, index,
			    &count, exact, tcps);
			if (err != 0)
				break;
		}
	}

	logflags = SL_TRACE | SL_NOTE;
	/*
	 * Don't print this message to the console if the operation was done
	 * to a non-global zone.
	 */
	if (acp->ac_zoneid == GLOBAL_ZONEID || acp->ac_zoneid == ALL_ZONES)
		logflags |= SL_CONSOLE;
	(void) strlog(TCP_MOD_ID, 0, 1, logflags, "TCP_IOC_ABORT_CONN: "
	    "aborted %d connection%c\n", count, ((count > 1) ? 's' : ' '));
	if (err == 0 && count == 0)
		err = ENOENT;
	return (err);
}

/*
 * Process the TCP_IOC_ABORT_CONN ioctl request.
 */
static void
tcp_ioctl_abort_conn(queue_t *q, mblk_t *mp)
{
	int	err;
	IOCP	iocp;
	MBLKP	mp1;
	sa_family_t laf, raf;
	tcp_ioc_abort_conn_t *acp;
	zone_t *zptr;
	conn_t *connp = Q_TO_CONN(q);
	tcp_t *tcp = connp->conn_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	iocp = (IOCP)mp->b_rptr;

	if ((mp1 = mp->b_cont) == NULL ||
	    iocp->ioc_count != sizeof (tcp_ioc_abort_conn_t)) {
		err = EINVAL;
		goto out;
	}

	/* check permissions */
	if (secpolicy_ip_config(iocp->ioc_cr, B_FALSE) != 0) {
		err = EPERM;
		goto out;
	}

	if (mp1->b_cont != NULL) {
		freemsg(mp1->b_cont);
		mp1->b_cont = NULL;
	}

	acp = (tcp_ioc_abort_conn_t *)mp1->b_rptr;
	laf = acp->ac_local.ss_family;
	raf = acp->ac_remote.ss_family;

	/* check that a zone with the supplied zoneid exists */
	if (acp->ac_zoneid != GLOBAL_ZONEID && acp->ac_zoneid != ALL_ZONES) {
		zptr = zone_find_by_id(acp->ac_zoneid);
		if (zptr != NULL) {
			zone_rele(zptr);
		} else {
			err = EINVAL;
			goto out;
		}
	}

	/*
	 * For exclusive stacks we set the zoneid to zero
	 * to make TCP operate as if in the global zone.
	 */
	if (tcps->tcps_netstack->netstack_stackid != GLOBAL_NETSTACKID)
		acp->ac_zoneid = GLOBAL_ZONEID;

	if (acp->ac_start < TCPS_SYN_SENT || acp->ac_end > TCPS_TIME_WAIT ||
	    acp->ac_start > acp->ac_end || laf != raf ||
	    (laf != AF_INET && laf != AF_INET6)) {
		err = EINVAL;
		goto out;
	}

	tcp_ioctl_abort_dump(acp);
	err = tcp_ioctl_abort(acp, tcps);

out:
	if (mp1 != NULL) {
		freemsg(mp1);
		mp->b_cont = NULL;
	}

	if (err != 0)
		miocnak(q, mp, 0, err);
	else
		miocack(q, mp, 0, 0);
}
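/*
 * Usage sketch (hypothetical, for illustration only; assumes the
 * tcp_ioc_abort_conn_t layout and TCPS_* values from <inet/tcp.h>):
 * a privileged process aborts connections by pushing the structure
 * down a TCP stream with the I_STR stream ioctl.  A zero local
 * address and zero ports act as wildcards, per TCP_AC_ADDR_MATCH
 * above.
 *
 *	struct strioctl ioc;
 *	tcp_ioc_abort_conn_t conn;
 *	struct sockaddr_in *sin;
 *	int fd = open("/dev/tcp", O_RDWR);
 *
 *	(void) memset(&conn, 0, sizeof (conn));
 *	sin = (struct sockaddr_in *)&conn.ac_local;
 *	sin->sin_family = AF_INET;		// local side all wildcards
 *	sin = (struct sockaddr_in *)&conn.ac_remote;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	sin->sin_port = htons(80);
 *	conn.ac_start = TCPS_SYN_SENT;		// whole abortable range
 *	conn.ac_end = TCPS_TIME_WAIT;
 *	conn.ac_zoneid = ALL_ZONES;
 *
 *	ioc.ic_cmd = TCP_IOC_ABORT_CONN;
 *	ioc.ic_timout = -1;
 *	ioc.ic_len = sizeof (conn);
 *	ioc.ic_dp = (char *)&conn;
 *	if (ioctl(fd, I_STR, &ioc) != 0)
 *		perror("TCP_IOC_ABORT_CONN");
 */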
/*
 * tcp_time_wait_processing() handles processing of incoming packets when
 * the tcp is in the TIME_WAIT state.
 * A TIME_WAIT tcp that has an associated open TCP stream is never put
 * on the time wait list.
 */
void
tcp_time_wait_processing(tcp_t *tcp, mblk_t *mp, uint32_t seg_seq,
    uint32_t seg_ack, int seg_len, tcph_t *tcph)
{
	int32_t		bytes_acked;
	int32_t		gap;
	int32_t		rgap;
	tcp_opt_t	tcpopt;
	uint_t		flags;
	uint32_t	new_swnd = 0;
	conn_t		*connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	BUMP_LOCAL(tcp->tcp_ibsegs);
	TCP_RECORD_TRACE(tcp, mp, TCP_TRACE_RECV_PKT);

	flags = (unsigned int)tcph->th_flags[0] & 0xFF;
	new_swnd = BE16_TO_U16(tcph->th_win) <<
	    ((tcph->th_flags[0] & TH_SYN) ? 0 : tcp->tcp_snd_ws);
	if (tcp->tcp_snd_ts_ok) {
		if (!tcp_paws_check(tcp, tcph, &tcpopt)) {
			tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
			    tcp->tcp_rnxt, TH_ACK);
			goto done;
		}
	}
	gap = seg_seq - tcp->tcp_rnxt;
	rgap = tcp->tcp_rwnd - (gap + seg_len);
	if (gap < 0) {
		BUMP_MIB(&tcps->tcps_mib, tcpInDataDupSegs);
		UPDATE_MIB(&tcps->tcps_mib, tcpInDataDupBytes,
		    (seg_len > -gap ? -gap : seg_len));
		seg_len += gap;
		if (seg_len < 0 || (seg_len == 0 && !(flags & TH_FIN))) {
			if (flags & TH_RST) {
				goto done;
			}
			if ((flags & TH_FIN) && seg_len == -1) {
				/*
				 * When TCP receives a duplicate FIN in
				 * TIME_WAIT state, restart the 2 MSL timer.
				 * See page 73 in RFC 793. Make sure this TCP
				 * is already on the TIME_WAIT list. If not,
				 * just restart the timer.
				 */
				if (TCP_IS_DETACHED(tcp)) {
					if (tcp_time_wait_remove(tcp, NULL) ==
					    B_TRUE) {
						tcp_time_wait_append(tcp);
						TCP_DBGSTAT(tcps,
						    tcp_rput_time_wait);
					}
				} else {
					ASSERT(tcp != NULL);
					TCP_TIMER_RESTART(tcp,
					    tcps->tcps_time_wait_interval);
				}
				tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt,
				    tcp->tcp_rnxt, TH_ACK);
				goto done;
			}
			flags |= TH_ACK_NEEDED;
			seg_len = 0;
			goto process_ack;
		}

		/* Fix seg_seq, and chew the gap off the front. */
		seg_seq = tcp->tcp_rnxt;
	}

	if ((flags & TH_SYN) && gap > 0 && rgap < 0) {
		/*
		 * Make sure that when we accept the connection, we pick
		 * an ISS greater than (tcp_snxt + ISS_INCR/2) for the
		 * old connection.
		 *
		 * The next ISS generated is equal to tcp_iss_incr_extra
		 * + ISS_INCR/2 + other components depending on the
		 * value of tcp_strong_iss. We pre-calculate the new
		 * ISS here and compare with tcp_snxt to determine
		 * whether we need to adjust tcp_iss_incr_extra.
		 *
		 * The above calculation is ugly and a waste of CPU
		 * cycles...
		 */
		uint32_t new_iss = tcps->tcps_iss_incr_extra;
		int32_t adj;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		switch (tcps->tcps_strong_iss) {
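		/*
		 * Note (inferred from the cases below): the three
		 * tcp_strong_iss settings mirror the ISS generation in
		 * tcp_iss_init().  2 adds an RFC 1948-style MD5 hash over
		 * the connection 4-tuple plus a high-resolution time
		 * component, 1 adds the time component and a minimal (1)
		 * random increment, and the default advances the ISS by
		 * ISS_INCR per second of wall clock only.
		 */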
		case 2: {
			/* Add time and MD5 components. */
			uint32_t answer[4];
			struct {
				uint32_t ports;
				in6_addr_t src;
				in6_addr_t dst;
			} arg;
			MD5_CTX context;

			mutex_enter(&tcps->tcps_iss_key_lock);
			context = tcps->tcps_iss_key;
			mutex_exit(&tcps->tcps_iss_key_lock);
			arg.ports = tcp->tcp_ports;
			/* We use MAPPED addresses in tcp_iss_init */
			arg.src = tcp->tcp_ip_src_v6;
			if (tcp->tcp_ipversion == IPV4_VERSION) {
				IN6_IPADDR_TO_V4MAPPED(
				    tcp->tcp_ipha->ipha_dst,
				    &arg.dst);
			} else {
				arg.dst =
				    tcp->tcp_ip6h->ip6_dst;
			}
			MD5Update(&context, (uchar_t *)&arg,
			    sizeof (arg));
			MD5Final((uchar_t *)answer, &context);
			answer[0] ^= answer[1] ^ answer[2] ^ answer[3];
			new_iss += (gethrtime() >> ISS_NSEC_SHT) + answer[0];
			break;
		}
		case 1:
			/* Add time component and min random (i.e. 1). */
			new_iss += (gethrtime() >> ISS_NSEC_SHT) + 1;
			break;
		default:
			/* Add only time component. */
			new_iss += (uint32_t)gethrestime_sec() * ISS_INCR;
			break;
		}
		if ((adj = (int32_t)(tcp->tcp_snxt - new_iss)) > 0) {
			/*
			 * New ISS not guaranteed to be ISS_INCR/2
			 * ahead of the current tcp_snxt, so add the
			 * difference to tcp_iss_incr_extra.
			 */
			tcps->tcps_iss_incr_extra += adj;
		}
		/*
		 * If tcp_clean_death() cannot perform the task now,
		 * drop the SYN packet and let the other side re-xmit.
		 * Otherwise pass the SYN packet back in, since the
		 * old tcp state has been cleaned up or freed.
		 */
		if (tcp_clean_death(tcp, 0, 27) == -1)
			goto done;
		/*
		 * We will come back to tcp_rput_data
		 * on the global queue. Packets destined
		 * for the global queue will be checked
		 * with global policy. But the policy for
		 * this packet has already been checked as
		 * this was destined for the detached
		 * connection. We need to bypass policy
		 * check this time by attaching a dummy
		 * ipsec_in with ipsec_in_dont_check set.
		 */
		connp = ipcl_classify(mp, tcp->tcp_connp->conn_zoneid, ipst);
		if (connp != NULL) {
			TCP_STAT(tcps, tcp_time_wait_syn_success);
			tcp_reinput(connp, mp, tcp->tcp_connp->conn_sqp);
			return;
		}
		goto done;
	}

	/*
	 * rgap is the amount of receive window remaining after accounting
	 * for this segment; a negative value means -rgap bytes of the
	 * segment lie beyond the right edge of the window.
	 */
	if (rgap < 0) {
		BUMP_MIB(&tcps->tcps_mib, tcpInDataPastWinSegs);
		UPDATE_MIB(&tcps->tcps_mib, tcpInDataPastWinBytes, -rgap);
		/* Fix seg_len and make sure there is something left. */
		seg_len += rgap;
		if (seg_len <= 0) {
			if (flags & TH_RST) {
				goto done;
			}
			flags |= TH_ACK_NEEDED;
			seg_len = 0;
			goto process_ack;
		}
	}
	/*
	 * Check whether we can update tcp_ts_recent. This test is
	 * NOT the one in RFC 1323 3.4. It is from Braden, 1993, "TCP
	 * Extensions for High Performance: An Update", Internet Draft.
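	 * Concretely: accept the segment's timestamp only when it is no
	 * older than the current tcp_ts_recent and the segment starts at
	 * or before our last ACK'd sequence (SEQ_LEQ(seg_seq, tcp_rack)),
	 * so the saved value always belongs to data we have acknowledged.
	 * (Editorial gloss; the authoritative statement is the test below.)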
26006 */ 26007 if (tcp->tcp_snd_ts_ok && 26008 TSTMP_GEQ(tcpopt.tcp_opt_ts_val, tcp->tcp_ts_recent) && 26009 SEQ_LEQ(seg_seq, tcp->tcp_rack)) { 26010 tcp->tcp_ts_recent = tcpopt.tcp_opt_ts_val; 26011 tcp->tcp_last_rcv_lbolt = lbolt64; 26012 } 26013 26014 if (seg_seq != tcp->tcp_rnxt && seg_len > 0) { 26015 /* Always ack out of order packets */ 26016 flags |= TH_ACK_NEEDED; 26017 seg_len = 0; 26018 } else if (seg_len > 0) { 26019 BUMP_MIB(&tcps->tcps_mib, tcpInClosed); 26020 BUMP_MIB(&tcps->tcps_mib, tcpInDataInorderSegs); 26021 UPDATE_MIB(&tcps->tcps_mib, tcpInDataInorderBytes, seg_len); 26022 } 26023 if (flags & TH_RST) { 26024 (void) tcp_clean_death(tcp, 0, 28); 26025 goto done; 26026 } 26027 if (flags & TH_SYN) { 26028 tcp_xmit_ctl("TH_SYN", tcp, seg_ack, seg_seq + 1, 26029 TH_RST|TH_ACK); 26030 /* 26031 * Do not delete the TCP structure if it is in 26032 * TIME_WAIT state. Refer to RFC 1122, 4.2.2.13. 26033 */ 26034 goto done; 26035 } 26036 process_ack: 26037 if (flags & TH_ACK) { 26038 bytes_acked = (int)(seg_ack - tcp->tcp_suna); 26039 if (bytes_acked <= 0) { 26040 if (bytes_acked == 0 && seg_len == 0 && 26041 new_swnd == tcp->tcp_swnd) 26042 BUMP_MIB(&tcps->tcps_mib, tcpInDupAck); 26043 } else { 26044 /* Acks something not sent */ 26045 flags |= TH_ACK_NEEDED; 26046 } 26047 } 26048 if (flags & TH_ACK_NEEDED) { 26049 /* 26050 * Time to send an ack for some reason. 26051 */ 26052 tcp_xmit_ctl(NULL, tcp, tcp->tcp_snxt, 26053 tcp->tcp_rnxt, TH_ACK); 26054 } 26055 done: 26056 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 26057 DB_CKSUMSTART(mp) = 0; 26058 mp->b_datap->db_struioflag &= ~STRUIO_EAGER; 26059 TCP_STAT(tcps, tcp_time_wait_syn_fail); 26060 } 26061 freemsg(mp); 26062 } 26063 26064 /* 26065 * Allocate a T_SVR4_OPTMGMT_REQ. 26066 * The caller needs to increment tcp_drop_opt_ack_cnt when sending these so 26067 * that tcp_rput_other can drop the acks. 26068 */ 26069 static mblk_t * 26070 tcp_setsockopt_mp(int level, int cmd, char *opt, int optlen) 26071 { 26072 mblk_t *mp; 26073 struct T_optmgmt_req *tor; 26074 struct opthdr *oh; 26075 uint_t size; 26076 char *optptr; 26077 26078 size = sizeof (*tor) + sizeof (*oh) + optlen; 26079 mp = allocb(size, BPRI_MED); 26080 if (mp == NULL) 26081 return (NULL); 26082 26083 mp->b_wptr += size; 26084 mp->b_datap->db_type = M_PROTO; 26085 tor = (struct T_optmgmt_req *)mp->b_rptr; 26086 tor->PRIM_type = T_SVR4_OPTMGMT_REQ; 26087 tor->MGMT_flags = T_NEGOTIATE; 26088 tor->OPT_length = sizeof (*oh) + optlen; 26089 tor->OPT_offset = (t_scalar_t)sizeof (*tor); 26090 26091 oh = (struct opthdr *)&tor[1]; 26092 oh->level = level; 26093 oh->name = cmd; 26094 oh->len = optlen; 26095 if (optlen != 0) { 26096 optptr = (char *)&oh[1]; 26097 bcopy(opt, optptr, optlen); 26098 } 26099 return (mp); 26100 } 26101 26102 /* 26103 * TCP Timers Implementation. 
26104 */ 26105 timeout_id_t 26106 tcp_timeout(conn_t *connp, void (*f)(void *), clock_t tim) 26107 { 26108 mblk_t *mp; 26109 tcp_timer_t *tcpt; 26110 tcp_t *tcp = connp->conn_tcp; 26111 tcp_stack_t *tcps = tcp->tcp_tcps; 26112 26113 ASSERT(connp->conn_sqp != NULL); 26114 26115 TCP_DBGSTAT(tcps, tcp_timeout_calls); 26116 26117 if (tcp->tcp_timercache == NULL) { 26118 mp = tcp_timermp_alloc(KM_NOSLEEP | KM_PANIC); 26119 } else { 26120 TCP_DBGSTAT(tcps, tcp_timeout_cached_alloc); 26121 mp = tcp->tcp_timercache; 26122 tcp->tcp_timercache = mp->b_next; 26123 mp->b_next = NULL; 26124 ASSERT(mp->b_wptr == NULL); 26125 } 26126 26127 CONN_INC_REF(connp); 26128 tcpt = (tcp_timer_t *)mp->b_rptr; 26129 tcpt->connp = connp; 26130 tcpt->tcpt_proc = f; 26131 tcpt->tcpt_tid = timeout(tcp_timer_callback, mp, tim); 26132 return ((timeout_id_t)mp); 26133 } 26134 26135 static void 26136 tcp_timer_callback(void *arg) 26137 { 26138 mblk_t *mp = (mblk_t *)arg; 26139 tcp_timer_t *tcpt; 26140 conn_t *connp; 26141 26142 tcpt = (tcp_timer_t *)mp->b_rptr; 26143 connp = tcpt->connp; 26144 squeue_fill(connp->conn_sqp, mp, 26145 tcp_timer_handler, connp, SQTAG_TCP_TIMER); 26146 } 26147 26148 static void 26149 tcp_timer_handler(void *arg, mblk_t *mp, void *arg2) 26150 { 26151 tcp_timer_t *tcpt; 26152 conn_t *connp = (conn_t *)arg; 26153 tcp_t *tcp = connp->conn_tcp; 26154 26155 tcpt = (tcp_timer_t *)mp->b_rptr; 26156 ASSERT(connp == tcpt->connp); 26157 ASSERT((squeue_t *)arg2 == connp->conn_sqp); 26158 26159 /* 26160 * If the TCP has reached the closed state, don't proceed any 26161 * further. This TCP logically does not exist on the system. 26162 * tcpt_proc could for example access queues, that have already 26163 * been qprocoff'ed off. Also see comments at the start of tcp_input 26164 */ 26165 if (tcp->tcp_state != TCPS_CLOSED) { 26166 (*tcpt->tcpt_proc)(connp); 26167 } else { 26168 tcp->tcp_timer_tid = 0; 26169 } 26170 tcp_timer_free(connp->conn_tcp, mp); 26171 } 26172 26173 /* 26174 * There is potential race with untimeout and the handler firing at the same 26175 * time. The mblock may be freed by the handler while we are trying to use 26176 * it. But since both should execute on the same squeue, this race should not 26177 * occur. 26178 */ 26179 clock_t 26180 tcp_timeout_cancel(conn_t *connp, timeout_id_t id) 26181 { 26182 mblk_t *mp = (mblk_t *)id; 26183 tcp_timer_t *tcpt; 26184 clock_t delta; 26185 tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps; 26186 26187 TCP_DBGSTAT(tcps, tcp_timeout_cancel_reqs); 26188 26189 if (mp == NULL) 26190 return (-1); 26191 26192 tcpt = (tcp_timer_t *)mp->b_rptr; 26193 ASSERT(tcpt->connp == connp); 26194 26195 delta = untimeout(tcpt->tcpt_tid); 26196 26197 if (delta >= 0) { 26198 TCP_DBGSTAT(tcps, tcp_timeout_canceled); 26199 tcp_timer_free(connp->conn_tcp, mp); 26200 CONN_DEC_REF(connp); 26201 } 26202 26203 return (delta); 26204 } 26205 26206 /* 26207 * Allocate space for the timer event. The allocation looks like mblk, but it is 26208 * not a proper mblk. To avoid confusion we set b_wptr to NULL. 26209 * 26210 * Dealing with failures: If we can't allocate from the timer cache we try 26211 * allocating from dblock caches using allocb_tryhard(). In this case b_wptr 26212 * points to b_rptr. 26213 * If we can't allocate anything using allocb_tryhard(), we perform a last 26214 * attempt and use kmem_alloc_tryhard(). In this case we set b_wptr to -1 and 26215 * save the actual allocation size in b_datap. 
26216 */ 26217 mblk_t * 26218 tcp_timermp_alloc(int kmflags) 26219 { 26220 mblk_t *mp = (mblk_t *)kmem_cache_alloc(tcp_timercache, 26221 kmflags & ~KM_PANIC); 26222 26223 if (mp != NULL) { 26224 mp->b_next = mp->b_prev = NULL; 26225 mp->b_rptr = (uchar_t *)(&mp[1]); 26226 mp->b_wptr = NULL; 26227 mp->b_datap = NULL; 26228 mp->b_queue = NULL; 26229 mp->b_cont = NULL; 26230 } else if (kmflags & KM_PANIC) { 26231 /* 26232 * Failed to allocate memory for the timer. Try allocating from 26233 * dblock caches. 26234 */ 26235 /* ipclassifier calls this from a constructor - hence no tcps */ 26236 TCP_G_STAT(tcp_timermp_allocfail); 26237 mp = allocb_tryhard(sizeof (tcp_timer_t)); 26238 if (mp == NULL) { 26239 size_t size = 0; 26240 /* 26241 * Memory is really low. Try tryhard allocation. 26242 * 26243 * ipclassifier calls this from a constructor - 26244 * hence no tcps 26245 */ 26246 TCP_G_STAT(tcp_timermp_allocdblfail); 26247 mp = kmem_alloc_tryhard(sizeof (mblk_t) + 26248 sizeof (tcp_timer_t), &size, kmflags); 26249 mp->b_rptr = (uchar_t *)(&mp[1]); 26250 mp->b_next = mp->b_prev = NULL; 26251 mp->b_wptr = (uchar_t *)-1; 26252 mp->b_datap = (dblk_t *)size; 26253 mp->b_queue = NULL; 26254 mp->b_cont = NULL; 26255 } 26256 ASSERT(mp->b_wptr != NULL); 26257 } 26258 /* ipclassifier calls this from a constructor - hence no tcps */ 26259 TCP_G_DBGSTAT(tcp_timermp_alloced); 26260 26261 return (mp); 26262 } 26263 26264 /* 26265 * Free per-tcp timer cache. 26266 * It can only contain entries from tcp_timercache. 26267 */ 26268 void 26269 tcp_timermp_free(tcp_t *tcp) 26270 { 26271 mblk_t *mp; 26272 26273 while ((mp = tcp->tcp_timercache) != NULL) { 26274 ASSERT(mp->b_wptr == NULL); 26275 tcp->tcp_timercache = tcp->tcp_timercache->b_next; 26276 kmem_cache_free(tcp_timercache, mp); 26277 } 26278 } 26279 26280 /* 26281 * Free timer event. Put it on the per-tcp timer cache if there is not too many 26282 * events there already (currently at most two events are cached). 26283 * If the event is not allocated from the timer cache, free it right away. 26284 */ 26285 static void 26286 tcp_timer_free(tcp_t *tcp, mblk_t *mp) 26287 { 26288 mblk_t *mp1 = tcp->tcp_timercache; 26289 tcp_stack_t *tcps = tcp->tcp_tcps; 26290 26291 if (mp->b_wptr != NULL) { 26292 /* 26293 * This allocation is not from a timer cache, free it right 26294 * away. 26295 */ 26296 if (mp->b_wptr != (uchar_t *)-1) 26297 freeb(mp); 26298 else 26299 kmem_free(mp, (size_t)mp->b_datap); 26300 } else if (mp1 == NULL || mp1->b_next == NULL) { 26301 /* Cache this timer block for future allocations */ 26302 mp->b_rptr = (uchar_t *)(&mp[1]); 26303 mp->b_next = mp1; 26304 tcp->tcp_timercache = mp; 26305 } else { 26306 kmem_cache_free(tcp_timercache, mp); 26307 TCP_DBGSTAT(tcps, tcp_timermp_freed); 26308 } 26309 } 26310 26311 /* 26312 * End of TCP Timers implementation. 26313 */ 26314 26315 /* 26316 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL 26317 * on the specified backing STREAMS q. Note, the caller may make the 26318 * decision to call based on the tcp_t.tcp_flow_stopped value which 26319 * when check outside the q's lock is only an advisory check ... 
26320 */ 26321 26322 void 26323 tcp_setqfull(tcp_t *tcp) 26324 { 26325 queue_t *q = tcp->tcp_wq; 26326 tcp_stack_t *tcps = tcp->tcp_tcps; 26327 26328 if (!(q->q_flag & QFULL)) { 26329 mutex_enter(QLOCK(q)); 26330 if (!(q->q_flag & QFULL)) { 26331 /* still need to set QFULL */ 26332 q->q_flag |= QFULL; 26333 tcp->tcp_flow_stopped = B_TRUE; 26334 mutex_exit(QLOCK(q)); 26335 TCP_STAT(tcps, tcp_flwctl_on); 26336 } else { 26337 mutex_exit(QLOCK(q)); 26338 } 26339 } 26340 } 26341 26342 void 26343 tcp_clrqfull(tcp_t *tcp) 26344 { 26345 queue_t *q = tcp->tcp_wq; 26346 26347 if (q->q_flag & QFULL) { 26348 mutex_enter(QLOCK(q)); 26349 if (q->q_flag & QFULL) { 26350 q->q_flag &= ~QFULL; 26351 tcp->tcp_flow_stopped = B_FALSE; 26352 mutex_exit(QLOCK(q)); 26353 if (q->q_flag & QWANTW) 26354 qbackenable(q, 0); 26355 } else { 26356 mutex_exit(QLOCK(q)); 26357 } 26358 } 26359 } 26360 26361 26362 /* 26363 * kstats related to squeues i.e. not per IP instance 26364 */ 26365 static void * 26366 tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp) 26367 { 26368 kstat_t *ksp; 26369 26370 tcp_g_stat_t template = { 26371 { "tcp_timermp_alloced", KSTAT_DATA_UINT64 }, 26372 { "tcp_timermp_allocfail", KSTAT_DATA_UINT64 }, 26373 { "tcp_timermp_allocdblfail", KSTAT_DATA_UINT64 }, 26374 { "tcp_freelist_cleanup", KSTAT_DATA_UINT64 }, 26375 }; 26376 26377 ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net", 26378 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 26379 KSTAT_FLAG_VIRTUAL); 26380 26381 if (ksp == NULL) 26382 return (NULL); 26383 26384 bcopy(&template, tcp_g_statp, sizeof (template)); 26385 ksp->ks_data = (void *)tcp_g_statp; 26386 26387 kstat_install(ksp); 26388 return (ksp); 26389 } 26390 26391 static void 26392 tcp_g_kstat_fini(kstat_t *ksp) 26393 { 26394 if (ksp != NULL) { 26395 kstat_delete(ksp); 26396 } 26397 } 26398 26399 26400 static void * 26401 tcp_kstat2_init(netstackid_t stackid, tcp_stat_t *tcps_statisticsp) 26402 { 26403 kstat_t *ksp; 26404 26405 tcp_stat_t template = { 26406 { "tcp_time_wait", KSTAT_DATA_UINT64 }, 26407 { "tcp_time_wait_syn", KSTAT_DATA_UINT64 }, 26408 { "tcp_time_wait_success", KSTAT_DATA_UINT64 }, 26409 { "tcp_time_wait_fail", KSTAT_DATA_UINT64 }, 26410 { "tcp_reinput_syn", KSTAT_DATA_UINT64 }, 26411 { "tcp_ip_output", KSTAT_DATA_UINT64 }, 26412 { "tcp_detach_non_time_wait", KSTAT_DATA_UINT64 }, 26413 { "tcp_detach_time_wait", KSTAT_DATA_UINT64 }, 26414 { "tcp_time_wait_reap", KSTAT_DATA_UINT64 }, 26415 { "tcp_clean_death_nondetached", KSTAT_DATA_UINT64 }, 26416 { "tcp_reinit_calls", KSTAT_DATA_UINT64 }, 26417 { "tcp_eager_err1", KSTAT_DATA_UINT64 }, 26418 { "tcp_eager_err2", KSTAT_DATA_UINT64 }, 26419 { "tcp_eager_blowoff_calls", KSTAT_DATA_UINT64 }, 26420 { "tcp_eager_blowoff_q", KSTAT_DATA_UINT64 }, 26421 { "tcp_eager_blowoff_q0", KSTAT_DATA_UINT64 }, 26422 { "tcp_not_hard_bound", KSTAT_DATA_UINT64 }, 26423 { "tcp_no_listener", KSTAT_DATA_UINT64 }, 26424 { "tcp_found_eager", KSTAT_DATA_UINT64 }, 26425 { "tcp_wrong_queue", KSTAT_DATA_UINT64 }, 26426 { "tcp_found_eager_binding1", KSTAT_DATA_UINT64 }, 26427 { "tcp_found_eager_bound1", KSTAT_DATA_UINT64 }, 26428 { "tcp_eager_has_listener1", KSTAT_DATA_UINT64 }, 26429 { "tcp_open_alloc", KSTAT_DATA_UINT64 }, 26430 { "tcp_open_detached_alloc", KSTAT_DATA_UINT64 }, 26431 { "tcp_rput_time_wait", KSTAT_DATA_UINT64 }, 26432 { "tcp_listendrop", KSTAT_DATA_UINT64 }, 26433 { "tcp_listendropq0", KSTAT_DATA_UINT64 }, 26434 { "tcp_wrong_rq", KSTAT_DATA_UINT64 }, 26435 { "tcp_rsrv_calls", KSTAT_DATA_UINT64 }, 26436 { 
"tcp_eagerfree2", KSTAT_DATA_UINT64 }, 26437 { "tcp_eagerfree3", KSTAT_DATA_UINT64 }, 26438 { "tcp_eagerfree4", KSTAT_DATA_UINT64 }, 26439 { "tcp_eagerfree5", KSTAT_DATA_UINT64 }, 26440 { "tcp_timewait_syn_fail", KSTAT_DATA_UINT64 }, 26441 { "tcp_listen_badflags", KSTAT_DATA_UINT64 }, 26442 { "tcp_timeout_calls", KSTAT_DATA_UINT64 }, 26443 { "tcp_timeout_cached_alloc", KSTAT_DATA_UINT64 }, 26444 { "tcp_timeout_cancel_reqs", KSTAT_DATA_UINT64 }, 26445 { "tcp_timeout_canceled", KSTAT_DATA_UINT64 }, 26446 { "tcp_timermp_freed", KSTAT_DATA_UINT64 }, 26447 { "tcp_push_timer_cnt", KSTAT_DATA_UINT64 }, 26448 { "tcp_ack_timer_cnt", KSTAT_DATA_UINT64 }, 26449 { "tcp_ire_null1", KSTAT_DATA_UINT64 }, 26450 { "tcp_ire_null", KSTAT_DATA_UINT64 }, 26451 { "tcp_ip_send", KSTAT_DATA_UINT64 }, 26452 { "tcp_ip_ire_send", KSTAT_DATA_UINT64 }, 26453 { "tcp_wsrv_called", KSTAT_DATA_UINT64 }, 26454 { "tcp_flwctl_on", KSTAT_DATA_UINT64 }, 26455 { "tcp_timer_fire_early", KSTAT_DATA_UINT64 }, 26456 { "tcp_timer_fire_miss", KSTAT_DATA_UINT64 }, 26457 { "tcp_rput_v6_error", KSTAT_DATA_UINT64 }, 26458 { "tcp_out_sw_cksum", KSTAT_DATA_UINT64 }, 26459 { "tcp_out_sw_cksum_bytes", KSTAT_DATA_UINT64 }, 26460 { "tcp_zcopy_on", KSTAT_DATA_UINT64 }, 26461 { "tcp_zcopy_off", KSTAT_DATA_UINT64 }, 26462 { "tcp_zcopy_backoff", KSTAT_DATA_UINT64 }, 26463 { "tcp_zcopy_disable", KSTAT_DATA_UINT64 }, 26464 { "tcp_mdt_pkt_out", KSTAT_DATA_UINT64 }, 26465 { "tcp_mdt_pkt_out_v4", KSTAT_DATA_UINT64 }, 26466 { "tcp_mdt_pkt_out_v6", KSTAT_DATA_UINT64 }, 26467 { "tcp_mdt_discarded", KSTAT_DATA_UINT64 }, 26468 { "tcp_mdt_conn_halted1", KSTAT_DATA_UINT64 }, 26469 { "tcp_mdt_conn_halted2", KSTAT_DATA_UINT64 }, 26470 { "tcp_mdt_conn_halted3", KSTAT_DATA_UINT64 }, 26471 { "tcp_mdt_conn_resumed1", KSTAT_DATA_UINT64 }, 26472 { "tcp_mdt_conn_resumed2", KSTAT_DATA_UINT64 }, 26473 { "tcp_mdt_legacy_small", KSTAT_DATA_UINT64 }, 26474 { "tcp_mdt_legacy_all", KSTAT_DATA_UINT64 }, 26475 { "tcp_mdt_legacy_ret", KSTAT_DATA_UINT64 }, 26476 { "tcp_mdt_allocfail", KSTAT_DATA_UINT64 }, 26477 { "tcp_mdt_addpdescfail", KSTAT_DATA_UINT64 }, 26478 { "tcp_mdt_allocd", KSTAT_DATA_UINT64 }, 26479 { "tcp_mdt_linked", KSTAT_DATA_UINT64 }, 26480 { "tcp_fusion_flowctl", KSTAT_DATA_UINT64 }, 26481 { "tcp_fusion_backenabled", KSTAT_DATA_UINT64 }, 26482 { "tcp_fusion_urg", KSTAT_DATA_UINT64 }, 26483 { "tcp_fusion_putnext", KSTAT_DATA_UINT64 }, 26484 { "tcp_fusion_unfusable", KSTAT_DATA_UINT64 }, 26485 { "tcp_fusion_aborted", KSTAT_DATA_UINT64 }, 26486 { "tcp_fusion_unqualified", KSTAT_DATA_UINT64 }, 26487 { "tcp_fusion_rrw_busy", KSTAT_DATA_UINT64 }, 26488 { "tcp_fusion_rrw_msgcnt", KSTAT_DATA_UINT64 }, 26489 { "tcp_fusion_rrw_plugged", KSTAT_DATA_UINT64 }, 26490 { "tcp_in_ack_unsent_drop", KSTAT_DATA_UINT64 }, 26491 { "tcp_sock_fallback", KSTAT_DATA_UINT64 }, 26492 { "tcp_lso_enabled", KSTAT_DATA_UINT64 }, 26493 { "tcp_lso_disabled", KSTAT_DATA_UINT64 }, 26494 { "tcp_lso_times", KSTAT_DATA_UINT64 }, 26495 { "tcp_lso_pkt_out", KSTAT_DATA_UINT64 }, 26496 }; 26497 26498 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net", 26499 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 26500 KSTAT_FLAG_VIRTUAL, stackid); 26501 26502 if (ksp == NULL) 26503 return (NULL); 26504 26505 bcopy(&template, tcps_statisticsp, sizeof (template)); 26506 ksp->ks_data = (void *)tcps_statisticsp; 26507 ksp->ks_private = (void *)(uintptr_t)stackid; 26508 26509 kstat_install(ksp); 26510 return (ksp); 26511 } 26512 26513 static void 26514 tcp_kstat2_fini(netstackid_t stackid, 
kstat_t *ksp) 26515 { 26516 if (ksp != NULL) { 26517 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 26518 kstat_delete_netstack(ksp, stackid); 26519 } 26520 } 26521 26522 /* 26523 * TCP Kstats implementation 26524 */ 26525 static void * 26526 tcp_kstat_init(netstackid_t stackid, tcp_stack_t *tcps) 26527 { 26528 kstat_t *ksp; 26529 26530 tcp_named_kstat_t template = { 26531 { "rtoAlgorithm", KSTAT_DATA_INT32, 0 }, 26532 { "rtoMin", KSTAT_DATA_INT32, 0 }, 26533 { "rtoMax", KSTAT_DATA_INT32, 0 }, 26534 { "maxConn", KSTAT_DATA_INT32, 0 }, 26535 { "activeOpens", KSTAT_DATA_UINT32, 0 }, 26536 { "passiveOpens", KSTAT_DATA_UINT32, 0 }, 26537 { "attemptFails", KSTAT_DATA_UINT32, 0 }, 26538 { "estabResets", KSTAT_DATA_UINT32, 0 }, 26539 { "currEstab", KSTAT_DATA_UINT32, 0 }, 26540 { "inSegs", KSTAT_DATA_UINT64, 0 }, 26541 { "outSegs", KSTAT_DATA_UINT64, 0 }, 26542 { "retransSegs", KSTAT_DATA_UINT32, 0 }, 26543 { "connTableSize", KSTAT_DATA_INT32, 0 }, 26544 { "outRsts", KSTAT_DATA_UINT32, 0 }, 26545 { "outDataSegs", KSTAT_DATA_UINT32, 0 }, 26546 { "outDataBytes", KSTAT_DATA_UINT32, 0 }, 26547 { "retransBytes", KSTAT_DATA_UINT32, 0 }, 26548 { "outAck", KSTAT_DATA_UINT32, 0 }, 26549 { "outAckDelayed", KSTAT_DATA_UINT32, 0 }, 26550 { "outUrg", KSTAT_DATA_UINT32, 0 }, 26551 { "outWinUpdate", KSTAT_DATA_UINT32, 0 }, 26552 { "outWinProbe", KSTAT_DATA_UINT32, 0 }, 26553 { "outControl", KSTAT_DATA_UINT32, 0 }, 26554 { "outFastRetrans", KSTAT_DATA_UINT32, 0 }, 26555 { "inAckSegs", KSTAT_DATA_UINT32, 0 }, 26556 { "inAckBytes", KSTAT_DATA_UINT32, 0 }, 26557 { "inDupAck", KSTAT_DATA_UINT32, 0 }, 26558 { "inAckUnsent", KSTAT_DATA_UINT32, 0 }, 26559 { "inDataInorderSegs", KSTAT_DATA_UINT32, 0 }, 26560 { "inDataInorderBytes", KSTAT_DATA_UINT32, 0 }, 26561 { "inDataUnorderSegs", KSTAT_DATA_UINT32, 0 }, 26562 { "inDataUnorderBytes", KSTAT_DATA_UINT32, 0 }, 26563 { "inDataDupSegs", KSTAT_DATA_UINT32, 0 }, 26564 { "inDataDupBytes", KSTAT_DATA_UINT32, 0 }, 26565 { "inDataPartDupSegs", KSTAT_DATA_UINT32, 0 }, 26566 { "inDataPartDupBytes", KSTAT_DATA_UINT32, 0 }, 26567 { "inDataPastWinSegs", KSTAT_DATA_UINT32, 0 }, 26568 { "inDataPastWinBytes", KSTAT_DATA_UINT32, 0 }, 26569 { "inWinProbe", KSTAT_DATA_UINT32, 0 }, 26570 { "inWinUpdate", KSTAT_DATA_UINT32, 0 }, 26571 { "inClosed", KSTAT_DATA_UINT32, 0 }, 26572 { "rttUpdate", KSTAT_DATA_UINT32, 0 }, 26573 { "rttNoUpdate", KSTAT_DATA_UINT32, 0 }, 26574 { "timRetrans", KSTAT_DATA_UINT32, 0 }, 26575 { "timRetransDrop", KSTAT_DATA_UINT32, 0 }, 26576 { "timKeepalive", KSTAT_DATA_UINT32, 0 }, 26577 { "timKeepaliveProbe", KSTAT_DATA_UINT32, 0 }, 26578 { "timKeepaliveDrop", KSTAT_DATA_UINT32, 0 }, 26579 { "listenDrop", KSTAT_DATA_UINT32, 0 }, 26580 { "listenDropQ0", KSTAT_DATA_UINT32, 0 }, 26581 { "halfOpenDrop", KSTAT_DATA_UINT32, 0 }, 26582 { "outSackRetransSegs", KSTAT_DATA_UINT32, 0 }, 26583 { "connTableSize6", KSTAT_DATA_INT32, 0 } 26584 }; 26585 26586 ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2", 26587 KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid); 26588 26589 if (ksp == NULL) 26590 return (NULL); 26591 26592 template.rtoAlgorithm.value.ui32 = 4; 26593 template.rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min; 26594 template.rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max; 26595 template.maxConn.value.i32 = -1; 26596 26597 bcopy(&template, ksp->ks_data, sizeof (template)); 26598 ksp->ks_update = tcp_kstat_update; 26599 ksp->ks_private = (void *)(uintptr_t)stackid; 26600 26601 kstat_install(ksp); 26602 return (ksp); 
26603 } 26604 26605 static void 26606 tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp) 26607 { 26608 if (ksp != NULL) { 26609 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 26610 kstat_delete_netstack(ksp, stackid); 26611 } 26612 } 26613 26614 static int 26615 tcp_kstat_update(kstat_t *kp, int rw) 26616 { 26617 tcp_named_kstat_t *tcpkp; 26618 tcp_t *tcp; 26619 connf_t *connfp; 26620 conn_t *connp; 26621 int i; 26622 netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private; 26623 netstack_t *ns; 26624 tcp_stack_t *tcps; 26625 ip_stack_t *ipst; 26626 26627 if ((kp == NULL) || (kp->ks_data == NULL)) 26628 return (EIO); 26629 26630 if (rw == KSTAT_WRITE) 26631 return (EACCES); 26632 26633 ns = netstack_find_by_stackid(stackid); 26634 if (ns == NULL) 26635 return (-1); 26636 tcps = ns->netstack_tcp; 26637 if (tcps == NULL) { 26638 netstack_rele(ns); 26639 return (-1); 26640 } 26641 tcpkp = (tcp_named_kstat_t *)kp->ks_data; 26642 26643 tcpkp->currEstab.value.ui32 = 0; 26644 26645 ipst = ns->netstack_ip; 26646 26647 for (i = 0; i < CONN_G_HASH_SIZE; i++) { 26648 connfp = &ipst->ips_ipcl_globalhash_fanout[i]; 26649 connp = NULL; 26650 while ((connp = 26651 ipcl_get_next_conn(connfp, connp, IPCL_TCP)) != NULL) { 26652 tcp = connp->conn_tcp; 26653 switch (tcp_snmp_state(tcp)) { 26654 case MIB2_TCP_established: 26655 case MIB2_TCP_closeWait: 26656 tcpkp->currEstab.value.ui32++; 26657 break; 26658 } 26659 } 26660 } 26661 26662 tcpkp->activeOpens.value.ui32 = tcps->tcps_mib.tcpActiveOpens; 26663 tcpkp->passiveOpens.value.ui32 = tcps->tcps_mib.tcpPassiveOpens; 26664 tcpkp->attemptFails.value.ui32 = tcps->tcps_mib.tcpAttemptFails; 26665 tcpkp->estabResets.value.ui32 = tcps->tcps_mib.tcpEstabResets; 26666 tcpkp->inSegs.value.ui64 = tcps->tcps_mib.tcpHCInSegs; 26667 tcpkp->outSegs.value.ui64 = tcps->tcps_mib.tcpHCOutSegs; 26668 tcpkp->retransSegs.value.ui32 = tcps->tcps_mib.tcpRetransSegs; 26669 tcpkp->connTableSize.value.i32 = tcps->tcps_mib.tcpConnTableSize; 26670 tcpkp->outRsts.value.ui32 = tcps->tcps_mib.tcpOutRsts; 26671 tcpkp->outDataSegs.value.ui32 = tcps->tcps_mib.tcpOutDataSegs; 26672 tcpkp->outDataBytes.value.ui32 = tcps->tcps_mib.tcpOutDataBytes; 26673 tcpkp->retransBytes.value.ui32 = tcps->tcps_mib.tcpRetransBytes; 26674 tcpkp->outAck.value.ui32 = tcps->tcps_mib.tcpOutAck; 26675 tcpkp->outAckDelayed.value.ui32 = tcps->tcps_mib.tcpOutAckDelayed; 26676 tcpkp->outUrg.value.ui32 = tcps->tcps_mib.tcpOutUrg; 26677 tcpkp->outWinUpdate.value.ui32 = tcps->tcps_mib.tcpOutWinUpdate; 26678 tcpkp->outWinProbe.value.ui32 = tcps->tcps_mib.tcpOutWinProbe; 26679 tcpkp->outControl.value.ui32 = tcps->tcps_mib.tcpOutControl; 26680 tcpkp->outFastRetrans.value.ui32 = tcps->tcps_mib.tcpOutFastRetrans; 26681 tcpkp->inAckSegs.value.ui32 = tcps->tcps_mib.tcpInAckSegs; 26682 tcpkp->inAckBytes.value.ui32 = tcps->tcps_mib.tcpInAckBytes; 26683 tcpkp->inDupAck.value.ui32 = tcps->tcps_mib.tcpInDupAck; 26684 tcpkp->inAckUnsent.value.ui32 = tcps->tcps_mib.tcpInAckUnsent; 26685 tcpkp->inDataInorderSegs.value.ui32 = 26686 tcps->tcps_mib.tcpInDataInorderSegs; 26687 tcpkp->inDataInorderBytes.value.ui32 = 26688 tcps->tcps_mib.tcpInDataInorderBytes; 26689 tcpkp->inDataUnorderSegs.value.ui32 = 26690 tcps->tcps_mib.tcpInDataUnorderSegs; 26691 tcpkp->inDataUnorderBytes.value.ui32 = 26692 tcps->tcps_mib.tcpInDataUnorderBytes; 26693 tcpkp->inDataDupSegs.value.ui32 = tcps->tcps_mib.tcpInDataDupSegs; 26694 tcpkp->inDataDupBytes.value.ui32 = tcps->tcps_mib.tcpInDataDupBytes; 26695 tcpkp->inDataPartDupSegs.value.ui32 = 
26696 tcps->tcps_mib.tcpInDataPartDupSegs; 26697 tcpkp->inDataPartDupBytes.value.ui32 = 26698 tcps->tcps_mib.tcpInDataPartDupBytes; 26699 tcpkp->inDataPastWinSegs.value.ui32 = 26700 tcps->tcps_mib.tcpInDataPastWinSegs; 26701 tcpkp->inDataPastWinBytes.value.ui32 = 26702 tcps->tcps_mib.tcpInDataPastWinBytes; 26703 tcpkp->inWinProbe.value.ui32 = tcps->tcps_mib.tcpInWinProbe; 26704 tcpkp->inWinUpdate.value.ui32 = tcps->tcps_mib.tcpInWinUpdate; 26705 tcpkp->inClosed.value.ui32 = tcps->tcps_mib.tcpInClosed; 26706 tcpkp->rttNoUpdate.value.ui32 = tcps->tcps_mib.tcpRttNoUpdate; 26707 tcpkp->rttUpdate.value.ui32 = tcps->tcps_mib.tcpRttUpdate; 26708 tcpkp->timRetrans.value.ui32 = tcps->tcps_mib.tcpTimRetrans; 26709 tcpkp->timRetransDrop.value.ui32 = tcps->tcps_mib.tcpTimRetransDrop; 26710 tcpkp->timKeepalive.value.ui32 = tcps->tcps_mib.tcpTimKeepalive; 26711 tcpkp->timKeepaliveProbe.value.ui32 = 26712 tcps->tcps_mib.tcpTimKeepaliveProbe; 26713 tcpkp->timKeepaliveDrop.value.ui32 = 26714 tcps->tcps_mib.tcpTimKeepaliveDrop; 26715 tcpkp->listenDrop.value.ui32 = tcps->tcps_mib.tcpListenDrop; 26716 tcpkp->listenDropQ0.value.ui32 = tcps->tcps_mib.tcpListenDropQ0; 26717 tcpkp->halfOpenDrop.value.ui32 = tcps->tcps_mib.tcpHalfOpenDrop; 26718 tcpkp->outSackRetransSegs.value.ui32 = 26719 tcps->tcps_mib.tcpOutSackRetransSegs; 26720 tcpkp->connTableSize6.value.i32 = tcps->tcps_mib.tcp6ConnTableSize; 26721 26722 netstack_rele(ns); 26723 return (0); 26724 } 26725 26726 void 26727 tcp_reinput(conn_t *connp, mblk_t *mp, squeue_t *sqp) 26728 { 26729 uint16_t hdr_len; 26730 ipha_t *ipha; 26731 uint8_t *nexthdrp; 26732 tcph_t *tcph; 26733 tcp_stack_t *tcps = connp->conn_tcp->tcp_tcps; 26734 26735 /* Already has an eager */ 26736 if ((mp->b_datap->db_struioflag & STRUIO_EAGER) != 0) { 26737 TCP_STAT(tcps, tcp_reinput_syn); 26738 squeue_enter(connp->conn_sqp, mp, connp->conn_recv, 26739 connp, SQTAG_TCP_REINPUT_EAGER); 26740 return; 26741 } 26742 26743 switch (IPH_HDR_VERSION(mp->b_rptr)) { 26744 case IPV4_VERSION: 26745 ipha = (ipha_t *)mp->b_rptr; 26746 hdr_len = IPH_HDR_LENGTH(ipha); 26747 break; 26748 case IPV6_VERSION: 26749 if (!ip_hdr_length_nexthdr_v6(mp, (ip6_t *)mp->b_rptr, 26750 &hdr_len, &nexthdrp)) { 26751 CONN_DEC_REF(connp); 26752 freemsg(mp); 26753 return; 26754 } 26755 break; 26756 } 26757 26758 tcph = (tcph_t *)&mp->b_rptr[hdr_len]; 26759 if ((tcph->th_flags[0] & (TH_SYN|TH_ACK|TH_RST|TH_URG)) == TH_SYN) { 26760 mp->b_datap->db_struioflag |= STRUIO_EAGER; 26761 DB_CKSUMSTART(mp) = (intptr_t)sqp; 26762 } 26763 26764 squeue_fill(connp->conn_sqp, mp, connp->conn_recv, connp, 26765 SQTAG_TCP_REINPUT); 26766 } 26767 26768 static squeue_func_t 26769 tcp_squeue_switch(int val) 26770 { 26771 squeue_func_t rval = squeue_fill; 26772 26773 switch (val) { 26774 case 1: 26775 rval = squeue_enter_nodrain; 26776 break; 26777 case 2: 26778 rval = squeue_enter; 26779 break; 26780 default: 26781 break; 26782 } 26783 return (rval); 26784 } 26785 26786 /* 26787 * This is called once for each squeue - globally for all stack 26788 * instances. 26789 */ 26790 static void 26791 tcp_squeue_add(squeue_t *sqp) 26792 { 26793 tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc( 26794 sizeof (tcp_squeue_priv_t), KM_SLEEP); 26795 26796 *squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait; 26797 tcp_time_wait->tcp_time_wait_tid = timeout(tcp_time_wait_collector, 26798 sqp, TCP_TIME_WAIT_DELAY); 26799 if (tcp_free_list_max_cnt == 0) { 26800 int tcp_ncpus = ((boot_max_ncpus == -1) ? 
		    max_ncpus : boot_max_ncpus);

		/*
		 * Limit number of entries to 1% of available memory /
		 * tcp_ncpus.
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
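/*
 * Worked example for the sizing above (hypothetical figures): with 8GB of
 * free memory (freemem * PAGESIZE), 8 CPUs and a tcp_t of roughly 2KB,
 *
 *	tcp_free_list_max_cnt = 8G / (8 * 2K * 100) ~= 5242
 *
 * so all per-squeue free lists together never pin more than about 1% of
 * free memory.
 */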